ominfra 0.0.0.dev125__py3-none-any.whl → 0.0.0.dev127__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ominfra/clouds/aws/auth.py +1 -1
- ominfra/deploy/_executor.py +1 -1
- ominfra/deploy/poly/_main.py +1 -1
- ominfra/pyremote/_runcommands.py +1 -1
- ominfra/scripts/journald2aws.py +2 -2
- ominfra/scripts/supervisor.py +1825 -1217
- ominfra/supervisor/collections.py +52 -0
- ominfra/supervisor/context.py +2 -336
- ominfra/supervisor/datatypes.py +1 -63
- ominfra/supervisor/dispatchers.py +22 -338
- ominfra/supervisor/dispatchersimpl.py +342 -0
- ominfra/supervisor/groups.py +33 -110
- ominfra/supervisor/groupsimpl.py +86 -0
- ominfra/supervisor/inject.py +45 -13
- ominfra/supervisor/main.py +1 -1
- ominfra/supervisor/pipes.py +83 -0
- ominfra/supervisor/poller.py +6 -3
- ominfra/supervisor/privileges.py +65 -0
- ominfra/supervisor/processes.py +18 -0
- ominfra/supervisor/{process.py → processesimpl.py} +99 -317
- ominfra/supervisor/setup.py +38 -0
- ominfra/supervisor/setupimpl.py +261 -0
- ominfra/supervisor/signals.py +24 -16
- ominfra/supervisor/spawning.py +31 -0
- ominfra/supervisor/spawningimpl.py +347 -0
- ominfra/supervisor/supervisor.py +54 -78
- ominfra/supervisor/types.py +122 -39
- ominfra/supervisor/users.py +64 -0
- {ominfra-0.0.0.dev125.dist-info → ominfra-0.0.0.dev127.dist-info}/METADATA +3 -3
- {ominfra-0.0.0.dev125.dist-info → ominfra-0.0.0.dev127.dist-info}/RECORD +34 -23
- {ominfra-0.0.0.dev125.dist-info → ominfra-0.0.0.dev127.dist-info}/LICENSE +0 -0
- {ominfra-0.0.0.dev125.dist-info → ominfra-0.0.0.dev127.dist-info}/WHEEL +0 -0
- {ominfra-0.0.0.dev125.dist-info → ominfra-0.0.0.dev127.dist-info}/entry_points.txt +0 -0
- {ominfra-0.0.0.dev125.dist-info → ominfra-0.0.0.dev127.dist-info}/top_level.txt +0 -0
ominfra/scripts/supervisor.py
CHANGED
@@ -95,6 +95,10 @@ TomlParseFloat = ta.Callable[[str], ta.Any]
 TomlKey = ta.Tuple[str, ...]
 TomlPos = int  # ta.TypeAlias

+# ../collections.py
+K = ta.TypeVar('K')
+V = ta.TypeVar('V')
+
 # ../../../omlish/lite/cached.py
 T = ta.TypeVar('T')

@@ -102,6 +106,11 @@ T = ta.TypeVar('T')
 SocketAddress = ta.Any
 SocketHandlerFactory = ta.Callable[[SocketAddress, ta.BinaryIO, ta.BinaryIO], 'SocketHandler']

+# ../../../omlish/lite/typing.py
+A0 = ta.TypeVar('A0')
+A1 = ta.TypeVar('A1')
+A2 = ta.TypeVar('A2')
+
 # ../events.py
 EventCallback = ta.Callable[['Event'], None]

@@ -109,6 +118,7 @@ EventCallback = ta.Callable[['Event'], None]
 HttpHeaders = http.client.HTTPMessage  # ta.TypeAlias

 # ../../../omlish/lite/inject.py
+U = ta.TypeVar('U')
 InjectorKeyCls = ta.Union[type, ta.NewType]
 InjectorProviderFn = ta.Callable[['Injector'], ta.Any]
 InjectorProviderFnMap = ta.Mapping['InjectorKey', 'InjectorProviderFn']
@@ -942,6 +952,55 @@ def toml_make_safe_parse_float(parse_float: TomlParseFloat) -> TomlParseFloat:
     return safe_parse_float


+########################################
+# ../collections.py
+
+
+class KeyedCollectionAccessors(abc.ABC, ta.Generic[K, V]):
+    @property
+    @abc.abstractmethod
+    def _by_key(self) -> ta.Mapping[K, V]:
+        raise NotImplementedError
+
+    def __iter__(self) -> ta.Iterator[V]:
+        return iter(self._by_key.values())
+
+    def __len__(self) -> int:
+        return len(self._by_key)
+
+    def __contains__(self, key: K) -> bool:
+        return key in self._by_key
+
+    def __getitem__(self, key: K) -> V:
+        return self._by_key[key]
+
+    def get(self, key: K, default: ta.Optional[V] = None) -> ta.Optional[V]:
+        return self._by_key.get(key, default)
+
+    def items(self) -> ta.Iterator[ta.Tuple[K, V]]:
+        return iter(self._by_key.items())
+
+
+class KeyedCollection(KeyedCollectionAccessors[K, V]):
+    def __init__(self, items: ta.Iterable[V]) -> None:
+        super().__init__()
+
+        by_key: ta.Dict[K, V] = {}
+        for v in items:
+            if (k := self._key(v)) in by_key:
+                raise KeyError(f'key {k} of {v} already registered by {by_key[k]}')
+            by_key[k] = v
+        self.__by_key = by_key
+
+    @property
+    def _by_key(self) -> ta.Mapping[K, V]:
+        return self.__by_key
+
+    @abc.abstractmethod
+    def _key(self, v: V) -> K:
+        raise NotImplementedError
+
+
 ########################################
 # ../datatypes.py

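A minimal usage sketch of the new `KeyedCollection` / `KeyedCollectionAccessors` helpers added in this hunk. It assumes the class definitions above are in scope; `Port` is an illustrative value type, not part of the package:

```python
import dataclasses as dc


@dc.dataclass(frozen=True)
class Port:  # hypothetical value type for illustration
    name: str
    number: int


class PortCollection(KeyedCollection[str, Port]):
    def _key(self, v: Port) -> str:
        return v.name


ports = PortCollection([Port('http', 80), Port('https', 443)])
assert ports['http'].number == 80            # __getitem__ goes through _by_key
assert 'https' in ports and len(ports) == 2  # __contains__ / __len__
assert ports.get('ssh') is None              # get() returns the default for misses
# Constructing with two items that share a key raises KeyError.
```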
@@ -975,43 +1034,7 @@ def logfile_name(val):
     return existing_dirpath(val)


-def name_to_uid(name: str) -> int:
-    try:
-        uid = int(name)
-    except ValueError:
-        try:
-            pwdrec = pwd.getpwnam(name)
-        except KeyError:
-            raise ValueError(f'Invalid user name {name}')  # noqa
-        uid = pwdrec[2]
-    else:
-        try:
-            pwd.getpwuid(uid)  # check if uid is valid
-        except KeyError:
-            raise ValueError(f'Invalid user id {name}')  # noqa
-    return uid
-
-
-def name_to_gid(name: str) -> int:
-    try:
-        gid = int(name)
-    except ValueError:
-        try:
-            grprec = grp.getgrnam(name)
-        except KeyError:
-            raise ValueError(f'Invalid group name {name}')  # noqa
-        gid = grprec[2]
-    else:
-        try:
-            grp.getgrgid(gid)  # check if gid is valid
-        except KeyError:
-            raise ValueError(f'Invalid group id {name}')  # noqa
-    return gid
-
-
-def gid_for_uid(uid: int) -> int:
-    pwrec = pwd.getpwuid(uid)
-    return pwrec[3]
+##


 def octal_type(arg: ta.Union[str, int]) -> int:
@@ -1083,29 +1106,6 @@ byte_size = SuffixMultiplier({
 })


-# all valid signal numbers
-SIGNUMS = [getattr(signal, k) for k in dir(signal) if k.startswith('SIG')]
-
-
-def signal_number(value: ta.Union[int, str]) -> int:
-    try:
-        num = int(value)
-
-    except (ValueError, TypeError):
-        name = value.strip().upper()  # type: ignore
-        if not name.startswith('SIG'):
-            name = f'SIG{name}'
-
-        num = getattr(signal, name, None)  # type: ignore
-        if num is None:
-            raise ValueError(f'value {value!r} is not a valid signal name')  # noqa
-
-    if num not in SIGNUMS:
-        raise ValueError(f'value {value!r} is not a valid signal number')
-
-    return num
-
-
 class RestartWhenExitUnexpected:
     pass

@@ -1144,6 +1144,70 @@ class NoPermissionError(ProcessError):
     """


+########################################
+# ../privileges.py
+
+
+def drop_privileges(user: ta.Union[int, str, None]) -> ta.Optional[str]:
+    """
+    Drop privileges to become the specified user, which may be a username or uid. Called for supervisord startup
+    and when spawning subprocesses. Returns None on success or a string error message if privileges could not be
+    dropped.
+    """
+
+    if user is None:
+        return 'No user specified to setuid to!'
+
+    # get uid for user, which can be a number or username
+    try:
+        uid = int(user)
+    except ValueError:
+        try:
+            pwrec = pwd.getpwnam(user)  # type: ignore
+        except KeyError:
+            return f"Can't find username {user!r}"
+        uid = pwrec[2]
+    else:
+        try:
+            pwrec = pwd.getpwuid(uid)
+        except KeyError:
+            return f"Can't find uid {uid!r}"
+
+    current_uid = os.getuid()
+
+    if current_uid == uid:
+        # do nothing and return successfully if the uid is already the current one. this allows a supervisord
+        # running as an unprivileged user "foo" to start a process where the config has "user=foo" (same user) in
+        # it.
+        return None
+
+    if current_uid != 0:
+        return "Can't drop privilege as nonroot user"
+
+    gid = pwrec[3]
+    if hasattr(os, 'setgroups'):
+        user = pwrec[0]
+        groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]]
+
+        # always put our primary gid first in this list, otherwise we can lose group info since sometimes the first
+        # group in the setgroups list gets overwritten on the subsequent setgid call (at least on freebsd 9 with
+        # python 2.7 - this will be safe though for all unix /python version combos)
+        groups.insert(0, gid)
+        try:
+            os.setgroups(groups)
+        except OSError:
+            return 'Could not set groups of effective user'
+
+    try:
+        os.setgid(gid)
+    except OSError:
+        return 'Could not set group id of effective user'
+
+    os.setuid(uid)
+
+    return None
+
+
 ########################################
 # ../signals.py

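Since `drop_privileges` reports failure by returning an error string rather than raising, callers are expected to branch on the return value, as the supervisor setup code elsewhere in this diff does. A hedged sketch of that calling pattern (the wrapper name `become_user` is illustrative):

```python
import logging

log = logging.getLogger(__name__)


def become_user(user) -> None:
    # drop_privileges returns None on success, or an error message string on failure.
    msg = drop_privileges(user)
    if msg is None:
        log.info('Set uid to user %s succeeded', user)
    else:
        raise RuntimeError(msg)
```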
@@ -1151,25 +1215,33 @@ class NoPermissionError(ProcessError):
 ##


-
+_SIGS_BY_NUM: ta.Mapping[int, signal.Signals] = {s.value: s for s in signal.Signals}
+_SIGS_BY_NAME: ta.Mapping[str, signal.Signals] = {s.name: s for s in signal.Signals}


-def
-
-
-    _SIG_NAMES = _init_sig_names()
-    return _SIG_NAMES.get(sig) or 'signal %d' % sig
+def sig_num(value: ta.Union[int, str]) -> int:
+    try:
+        num = int(value)

+    except (ValueError, TypeError):
+        name = value.strip().upper()  # type: ignore
+        if not name.startswith('SIG'):
+            name = f'SIG{name}'
+
+        if (sn := _SIGS_BY_NAME.get(name)) is None:
+            raise ValueError(f'value {value!r} is not a valid signal name')  # noqa
+        num = sn
+
+    if num not in _SIGS_BY_NUM:
+        raise ValueError(f'value {value!r} is not a valid signal number')
+
+    return num

-
-
-
-
-
-            continue
-        if k_startswith('SIG') and not k_startswith('SIG_'):
-            d[v] = k
-    return d
+
+def sig_name(num: int) -> str:
+    if (sig := _SIGS_BY_NUM.get(num)) is not None:
+        return sig.name
+    return f'signal {sig}'


 ##
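A quick sketch of the reworked signal helpers: `sig_num` now resolves names and numbers through `signal.Signals` (replacing the removed `SIGNUMS` list), and `sig_name` is the reverse lookup. Assuming the definitions in this hunk are in scope:

```python
import signal

# Bare names, 'SIG'-prefixed names, and raw numbers all resolve to the same signal.
assert sig_num('TERM') == sig_num('SIGTERM') == signal.SIGTERM.value
assert sig_num(int(signal.SIGINT)) == signal.SIGINT.value

# Reverse lookup by number.
assert sig_name(int(signal.SIGHUP)) == 'SIGHUP'

# Invalid inputs raise ValueError instead of passing through silently.
try:
    sig_num('NOT_A_SIGNAL')
except ValueError:
    pass
```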
@@ -1181,7 +1253,7 @@ class SignalReceiver:

         self._signals_recvd: ta.List[int] = []

-    def receive(self, sig: int, frame: ta.Any) -> None:
+    def receive(self, sig: int, frame: ta.Any = None) -> None:
         if sig not in self._signals_recvd:
             self._signals_recvd.append(sig)

@@ -1257,6 +1329,70 @@ class SupervisorState(enum.IntEnum):
     SHUTDOWN = -1


+########################################
+# ../users.py
+
+
+##
+
+
+def name_to_uid(name: str) -> int:
+    try:
+        uid = int(name)
+    except ValueError:
+        try:
+            pwdrec = pwd.getpwnam(name)
+        except KeyError:
+            raise ValueError(f'Invalid user name {name}')  # noqa
+        uid = pwdrec[2]
+    else:
+        try:
+            pwd.getpwuid(uid)  # check if uid is valid
+        except KeyError:
+            raise ValueError(f'Invalid user id {name}')  # noqa
+    return uid
+
+
+def name_to_gid(name: str) -> int:
+    try:
+        gid = int(name)
+    except ValueError:
+        try:
+            grprec = grp.getgrnam(name)
+        except KeyError:
+            raise ValueError(f'Invalid group name {name}')  # noqa
+        gid = grprec[2]
+    else:
+        try:
+            grp.getgrgid(gid)  # check if gid is valid
+        except KeyError:
+            raise ValueError(f'Invalid group id {name}')  # noqa
+    return gid
+
+
+def gid_for_uid(uid: int) -> int:
+    pwrec = pwd.getpwuid(uid)
+    return pwrec[3]
+
+
+##
+
+
+@dc.dataclass(frozen=True)
+class User:
+    name: str
+    uid: int
+    gid: int
+
+
+def get_user(name: str) -> User:
+    return User(
+        name=name,
+        uid=(uid := name_to_uid(name)),
+        gid=gid_for_uid(uid),
+    )
+
+
 ########################################
 # ../../../omlish/lite/cached.py

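The user-lookup helpers removed from the datatypes section earlier in this file reappear here as `../users.py`, together with a frozen `User` record. A small sketch of how they fit together, assuming a POSIX system where the queried account exists:

```python
import os
import pwd

# Look up the account running this process by name.
me = pwd.getpwuid(os.getuid()).pw_name
u = get_user(me)

assert u.name == me
assert u.uid == name_to_uid(me) == os.getuid()
assert u.gid == gid_for_uid(u.uid)

# Unknown user or group names raise ValueError.
try:
    name_to_gid('no-such-group-hopefully')  # hypothetical missing group
except ValueError:
    pass
```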
@@ -1566,14 +1702,50 @@ class SocketHandler(abc.ABC):
 # ../../../omlish/lite/typing.py


+##
+# A workaround for typing deficiencies (like `Argument 2 to NewType(...) must be subclassable`).
+
+
 @dc.dataclass(frozen=True)
-class
+class AnyFunc(ta.Generic[T]):
     fn: ta.Callable[..., T]

     def __call__(self, *args: ta.Any, **kwargs: ta.Any) -> T:
         return self.fn(*args, **kwargs)


+@dc.dataclass(frozen=True)
+class Func0(ta.Generic[T]):
+    fn: ta.Callable[[], T]
+
+    def __call__(self) -> T:
+        return self.fn()
+
+
+@dc.dataclass(frozen=True)
+class Func1(ta.Generic[A0, T]):
+    fn: ta.Callable[[A0], T]
+
+    def __call__(self, a0: A0) -> T:
+        return self.fn(a0)
+
+
+@dc.dataclass(frozen=True)
+class Func2(ta.Generic[A0, A1, T]):
+    fn: ta.Callable[[A0, A1], T]
+
+    def __call__(self, a0: A0, a1: A1) -> T:
+        return self.fn(a0, a1)
+
+
+@dc.dataclass(frozen=True)
+class Func3(ta.Generic[A0, A1, A2, T]):
+    fn: ta.Callable[[A0, A1, A2], T]
+
+    def __call__(self, a0: A0, a1: A1, a2: A2) -> T:
+        return self.fn(a0, a1, a2)
+
+
 ########################################
 # ../events.py

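Per the comment in this hunk, the `Func0`…`Func3` wrappers exist to work around typing deficiencies by giving a fixed-arity callable a real, subclassable class. A brief sketch of their call-through behavior, assuming the dataclasses above are in scope:

```python
# Each FuncN is a frozen dataclass holding a callable of arity N and forwarding __call__ to it.
greet = Func1(lambda name: f'hello, {name}')
assert greet('world') == 'hello, world'

add = Func2(lambda a, b: a + b)
assert add(1, 2) == 3

answer = Func0(lambda: 42)
assert answer() == 42
```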
@@ -1860,37 +2032,74 @@ def get_event_name_by_type(requested):


 ########################################
-# ../
+# ../setup.py


 ##


-def as_bytes(s: ta.Union[str, bytes], encoding: str = 'utf8') -> bytes:
-    if isinstance(s, bytes):
-        return s
-    else:
-        return s.encode(encoding)
+SupervisorUser = ta.NewType('SupervisorUser', User)


-def as_string(s: ta.Union[str, bytes], encoding: str = 'utf8') -> str:
-    if isinstance(s, str):
-        return s
-    else:
-        return s.decode(encoding)
+##


-def find_prefix_at_end(haystack: bytes, needle: bytes) -> int:
-    l = len(needle) - 1
-    while l and not haystack.endswith(needle[:l]):
-        l -= 1
-    return l
+class DaemonizeListener(abc.ABC):  # noqa
+    def before_daemonize(self) -> None:  # noqa
+        pass

+    def after_daemonize(self) -> None:  # noqa
+        pass

-##

+DaemonizeListeners = ta.NewType('DaemonizeListeners', ta.Sequence[DaemonizeListener])

-
+
+##
+
+
+class SupervisorSetup(abc.ABC):
+    @abc.abstractmethod
+    def setup(self) -> None:
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def cleanup(self) -> None:
+        raise NotImplementedError
+
+
+########################################
+# ../utils.py
+
+
+##
+
+
+def as_bytes(s: ta.Union[str, bytes], encoding: str = 'utf8') -> bytes:
+    if isinstance(s, bytes):
+        return s
+    else:
+        return s.encode(encoding)
+
+
+def as_string(s: ta.Union[str, bytes], encoding: str = 'utf8') -> str:
+    if isinstance(s, str):
+        return s
+    else:
+        return s.decode(encoding)
+
+
+def find_prefix_at_end(haystack: bytes, needle: bytes) -> int:
+    l = len(needle) - 1
+    while l and not haystack.endswith(needle[:l]):
+        l -= 1
+    return l
+
+
+##
+
+
+def compact_traceback() -> ta.Tuple[
     ta.Tuple[str, str, int],
     ta.Type[BaseException],
     BaseException,
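Of the utilities relocated into `../utils.py` here, `find_prefix_at_end` is the least obvious: it returns the length of the longest prefix of `needle` that `haystack` ends with, which lets a chunked reader notice a delimiter split across two reads. A sketch, assuming the functions above are in scope (the `<!--END-->` delimiter is illustrative):

```python
DELIM = as_bytes('<!--END-->')

chunk = as_bytes('some output <!--E')         # first read ends mid-delimiter
keep = find_prefix_at_end(chunk, DELIM)
assert keep == 5                              # b'<!--E' is a prefix of the delimiter

buf = chunk[-keep:] + as_bytes('ND--> tail')  # carry the partial match into the next read
assert DELIM in buf
assert as_string(buf).endswith(' tail')
```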
@@ -2059,6 +2268,41 @@ def timeslice(period: int, when: float) -> int:

 ########################################
 # ../../../omlish/lite/http/parsing.py
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization
+# ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated
+# documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive,
+# royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative
+# works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License
+# Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
+# 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights Reserved" are retained in Python
+# alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and
+# wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in
+# any such work a brief summary of the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES,
+# EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY
+# OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY
+# RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL
+# DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF
+# ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint
+# venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade
+# name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this
+# License Agreement.


 ##
@@ -2489,11 +2733,23 @@ class Injector(abc.ABC):
|
|
2489
2733
|
raise NotImplementedError
|
2490
2734
|
|
2491
2735
|
@abc.abstractmethod
|
2492
|
-
def provide_kwargs(
|
2736
|
+
def provide_kwargs(
|
2737
|
+
self,
|
2738
|
+
obj: ta.Any,
|
2739
|
+
*,
|
2740
|
+
skip_args: int = 0,
|
2741
|
+
skip_kwargs: ta.Optional[ta.Iterable[ta.Any]] = None,
|
2742
|
+
) -> ta.Mapping[str, ta.Any]:
|
2493
2743
|
raise NotImplementedError
|
2494
2744
|
|
2495
2745
|
@abc.abstractmethod
|
2496
|
-
def inject(
|
2746
|
+
def inject(
|
2747
|
+
self,
|
2748
|
+
obj: ta.Any,
|
2749
|
+
*,
|
2750
|
+
args: ta.Optional[ta.Sequence[ta.Any]] = None,
|
2751
|
+
kwargs: ta.Optional[ta.Mapping[str, ta.Any]] = None,
|
2752
|
+
) -> ta.Any:
|
2497
2753
|
raise NotImplementedError
|
2498
2754
|
|
2499
2755
|
def __getitem__(
|
@@ -2507,8 +2763,12 @@ class Injector(abc.ABC):
|
|
2507
2763
|
# exceptions
|
2508
2764
|
|
2509
2765
|
|
2766
|
+
class InjectorError(Exception):
|
2767
|
+
pass
|
2768
|
+
|
2769
|
+
|
2510
2770
|
@dc.dataclass(frozen=True)
|
2511
|
-
class InjectorKeyError(
|
2771
|
+
class InjectorKeyError(InjectorError):
|
2512
2772
|
key: InjectorKey
|
2513
2773
|
|
2514
2774
|
source: ta.Any = None
|
@@ -2715,29 +2975,49 @@ def build_injector_provider_map(bs: InjectorBindings) -> ta.Mapping[InjectorKey,
|
|
2715
2975
|
# inspection
|
2716
2976
|
|
2717
2977
|
|
2718
|
-
# inspect.signature(eval_str=True) was added in 3.10 and we have to support 3.8, so we have to get_type_hints to eval
|
2719
|
-
# str annotations *in addition to* getting the signature for parameter information.
|
2720
2978
|
class _InjectionInspection(ta.NamedTuple):
|
2721
2979
|
signature: inspect.Signature
|
2722
2980
|
type_hints: ta.Mapping[str, ta.Any]
|
2981
|
+
args_offset: int
|
2723
2982
|
|
2724
2983
|
|
2725
2984
|
_INJECTION_INSPECTION_CACHE: ta.MutableMapping[ta.Any, _InjectionInspection] = weakref.WeakKeyDictionary()
|
2726
2985
|
|
2727
2986
|
|
2728
2987
|
def _do_injection_inspect(obj: ta.Any) -> _InjectionInspection:
|
2729
|
-
|
2988
|
+
tgt = obj
|
2989
|
+
if isinstance(tgt, type) and tgt.__init__ is not object.__init__: # type: ignore[misc]
|
2990
|
+
# Python 3.8's inspect.signature can't handle subclasses overriding __new__, always generating *args/**kwargs.
|
2991
|
+
# - https://bugs.python.org/issue40897
|
2992
|
+
# - https://github.com/python/cpython/commit/df7c62980d15acd3125dfbd81546dad359f7add7
|
2993
|
+
tgt = tgt.__init__ # type: ignore[misc]
|
2994
|
+
has_generic_base = True
|
2995
|
+
else:
|
2996
|
+
has_generic_base = False
|
2997
|
+
|
2998
|
+
# inspect.signature(eval_str=True) was added in 3.10 and we have to support 3.8, so we have to get_type_hints to
|
2999
|
+
# eval str annotations *in addition to* getting the signature for parameter information.
|
3000
|
+
uw = tgt
|
3001
|
+
has_partial = False
|
2730
3002
|
while True:
|
2731
3003
|
if isinstance(uw, functools.partial):
|
3004
|
+
has_partial = True
|
2732
3005
|
uw = uw.func
|
2733
3006
|
else:
|
2734
3007
|
if (uw2 := inspect.unwrap(uw)) is uw:
|
2735
3008
|
break
|
2736
3009
|
uw = uw2
|
2737
3010
|
|
3011
|
+
if has_generic_base and has_partial:
|
3012
|
+
raise InjectorError(
|
3013
|
+
'Injector inspection does not currently support both a typing.Generic base and a functools.partial: '
|
3014
|
+
f'{obj}',
|
3015
|
+
)
|
3016
|
+
|
2738
3017
|
return _InjectionInspection(
|
2739
|
-
inspect.signature(
|
3018
|
+
inspect.signature(tgt),
|
2740
3019
|
ta.get_type_hints(uw),
|
3020
|
+
1 if has_generic_base else 0,
|
2741
3021
|
)
|
2742
3022
|
|
2743
3023
|
|
@@ -2768,14 +3048,23 @@ def build_injection_kwargs_target(
|
|
2768
3048
|
obj: ta.Any,
|
2769
3049
|
*,
|
2770
3050
|
skip_args: int = 0,
|
2771
|
-
skip_kwargs: ta.Optional[ta.Iterable[
|
3051
|
+
skip_kwargs: ta.Optional[ta.Iterable[str]] = None,
|
2772
3052
|
raw_optional: bool = False,
|
2773
3053
|
) -> InjectionKwargsTarget:
|
2774
3054
|
insp = _injection_inspect(obj)
|
2775
3055
|
|
2776
|
-
|
3056
|
+
params = list(insp.signature.parameters.values())
|
3057
|
+
|
3058
|
+
skip_names: ta.Set[str] = set()
|
3059
|
+
if skip_kwargs is not None:
|
3060
|
+
skip_names.update(check_not_isinstance(skip_kwargs, str))
|
3061
|
+
|
3062
|
+
seen: ta.Set[InjectorKey] = set()
|
2777
3063
|
kws: ta.List[InjectionKwarg] = []
|
2778
|
-
for p in
|
3064
|
+
for p in params[insp.args_offset + skip_args:]:
|
3065
|
+
if p.name in skip_names:
|
3066
|
+
continue
|
3067
|
+
|
2779
3068
|
if p.annotation is inspect.Signature.empty:
|
2780
3069
|
if p.default is not inspect.Parameter.empty:
|
2781
3070
|
raise KeyError(f'{obj}, {p.name}')
|
@@ -2784,6 +3073,7 @@ def build_injection_kwargs_target(
|
|
2784
3073
|
if p.kind not in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY):
|
2785
3074
|
raise TypeError(insp)
|
2786
3075
|
|
3076
|
+
# 3.8 inspect.signature doesn't eval_str but typing.get_type_hints does, so prefer that.
|
2787
3077
|
ann = insp.type_hints.get(p.name, p.annotation)
|
2788
3078
|
if (
|
2789
3079
|
not raw_optional and
|
@@ -2851,8 +3141,19 @@ class _Injector(Injector):
|
|
2851
3141
|
return v.must()
|
2852
3142
|
raise UnboundInjectorKeyError(key)
|
2853
3143
|
|
2854
|
-
def provide_kwargs(
|
2855
|
-
|
3144
|
+
def provide_kwargs(
|
3145
|
+
self,
|
3146
|
+
obj: ta.Any,
|
3147
|
+
*,
|
3148
|
+
skip_args: int = 0,
|
3149
|
+
skip_kwargs: ta.Optional[ta.Iterable[ta.Any]] = None,
|
3150
|
+
) -> ta.Mapping[str, ta.Any]:
|
3151
|
+
kt = build_injection_kwargs_target(
|
3152
|
+
obj,
|
3153
|
+
skip_args=skip_args,
|
3154
|
+
skip_kwargs=skip_kwargs,
|
3155
|
+
)
|
3156
|
+
|
2856
3157
|
ret: ta.Dict[str, ta.Any] = {}
|
2857
3158
|
for kw in kt.kwargs:
|
2858
3159
|
if kw.has_default:
|
@@ -2864,9 +3165,24 @@ class _Injector(Injector):
|
|
2864
3165
|
ret[kw.name] = v
|
2865
3166
|
return ret
|
2866
3167
|
|
2867
|
-
def inject(
|
2868
|
-
|
2869
|
-
|
3168
|
+
def inject(
|
3169
|
+
self,
|
3170
|
+
obj: ta.Any,
|
3171
|
+
*,
|
3172
|
+
args: ta.Optional[ta.Sequence[ta.Any]] = None,
|
3173
|
+
kwargs: ta.Optional[ta.Mapping[str, ta.Any]] = None,
|
3174
|
+
) -> ta.Any:
|
3175
|
+
provided = self.provide_kwargs(
|
3176
|
+
obj,
|
3177
|
+
skip_args=len(args) if args is not None else 0,
|
3178
|
+
skip_kwargs=kwargs if kwargs is not None else None,
|
3179
|
+
)
|
3180
|
+
|
3181
|
+
return obj(
|
3182
|
+
*(args if args is not None else ()),
|
3183
|
+
**(kwargs if kwargs is not None else {}),
|
3184
|
+
**provided,
|
3185
|
+
)
|
2870
3186
|
|
2871
3187
|
|
2872
3188
|
###
|
@@ -3005,16 +3321,42 @@ class InjectorBinder:
|
|
3005
3321
|
|
3006
3322
|
|
3007
3323
|
def make_injector_factory(
|
3008
|
-
|
3009
|
-
|
3010
|
-
|
3011
|
-
|
3324
|
+
fn: ta.Callable[..., T],
|
3325
|
+
cls: U,
|
3326
|
+
ann: ta.Any = None,
|
3327
|
+
) -> ta.Callable[..., U]:
|
3328
|
+
if ann is None:
|
3329
|
+
ann = cls
|
3330
|
+
|
3331
|
+
def outer(injector: Injector) -> ann:
|
3012
3332
|
def inner(*args, **kwargs):
|
3013
|
-
return injector.inject(
|
3014
|
-
return
|
3333
|
+
return injector.inject(fn, args=args, kwargs=kwargs)
|
3334
|
+
return cls(inner) # type: ignore
|
3335
|
+
|
3015
3336
|
return outer
|
3016
3337
|
|
3017
3338
|
|
3339
|
+
def make_injector_array_type(
|
3340
|
+
ele: ta.Union[InjectorKey, InjectorKeyCls],
|
3341
|
+
cls: U,
|
3342
|
+
ann: ta.Any = None,
|
3343
|
+
) -> ta.Callable[..., U]:
|
3344
|
+
if isinstance(ele, InjectorKey):
|
3345
|
+
if not ele.array:
|
3346
|
+
raise InjectorError('Provided key must be array', ele)
|
3347
|
+
key = ele
|
3348
|
+
else:
|
3349
|
+
key = dc.replace(as_injector_key(ele), array=True)
|
3350
|
+
|
3351
|
+
if ann is None:
|
3352
|
+
ann = cls
|
3353
|
+
|
3354
|
+
def inner(injector: Injector) -> ann:
|
3355
|
+
return cls(injector.provide(key)) # type: ignore[operator]
|
3356
|
+
|
3357
|
+
return inner
|
3358
|
+
|
3359
|
+
|
3018
3360
|
##
|
3019
3361
|
|
3020
3362
|
|
@@ -3049,8 +3391,8 @@ class Injection:
|
|
3049
3391
|
# injector
|
3050
3392
|
|
3051
3393
|
@classmethod
|
3052
|
-
def create_injector(cls, *args: InjectorBindingOrBindings,
|
3053
|
-
return _Injector(as_injector_bindings(*args),
|
3394
|
+
def create_injector(cls, *args: InjectorBindingOrBindings, parent: ta.Optional[Injector] = None) -> Injector:
|
3395
|
+
return _Injector(as_injector_bindings(*args), parent)
|
3054
3396
|
|
3055
3397
|
# binder
|
3056
3398
|
|
@@ -3090,10 +3432,20 @@ class Injection:
|
|
3090
3432
|
@classmethod
|
3091
3433
|
def bind_factory(
|
3092
3434
|
cls,
|
3093
|
-
|
3094
|
-
|
3435
|
+
fn: ta.Callable[..., T],
|
3436
|
+
cls_: U,
|
3437
|
+
ann: ta.Any = None,
|
3438
|
+
) -> InjectorBindingOrBindings:
|
3439
|
+
return cls.bind(make_injector_factory(fn, cls_, ann))
|
3440
|
+
|
3441
|
+
@classmethod
|
3442
|
+
def bind_array_type(
|
3443
|
+
cls,
|
3444
|
+
ele: ta.Union[InjectorKey, InjectorKeyCls],
|
3445
|
+
cls_: U,
|
3446
|
+
ann: ta.Any = None,
|
3095
3447
|
) -> InjectorBindingOrBindings:
|
3096
|
-
return cls.bind(
|
3448
|
+
return cls.bind(make_injector_array_type(ele, cls_, ann))
|
3097
3449
|
|
3098
3450
|
|
3099
3451
|
inj = Injection
|
@@ -3344,7 +3696,7 @@ class StandardLogFormatter(logging.Formatter):
|
|
3344
3696
|
return ct.strftime(datefmt) # noqa
|
3345
3697
|
else:
|
3346
3698
|
t = ct.strftime('%Y-%m-%d %H:%M:%S')
|
3347
|
-
return '%s.%03d' % (t, record.msecs)
|
3699
|
+
return '%s.%03d' % (t, record.msecs) # noqa
|
3348
3700
|
|
3349
3701
|
|
3350
3702
|
##
|
@@ -3930,11 +4282,91 @@ def build_config_named_children(
|
|
3930
4282
|
return lst
|
3931
4283
|
|
3932
4284
|
|
4285
|
+
########################################
|
4286
|
+
# ../pipes.py
|
4287
|
+
|
4288
|
+
|
4289
|
+
@dc.dataclass(frozen=True)
|
4290
|
+
class ProcessPipes:
|
4291
|
+
child_stdin: ta.Optional[int] = None
|
4292
|
+
stdin: ta.Optional[int] = None
|
4293
|
+
|
4294
|
+
stdout: ta.Optional[int] = None
|
4295
|
+
child_stdout: ta.Optional[int] = None
|
4296
|
+
|
4297
|
+
stderr: ta.Optional[int] = None
|
4298
|
+
child_stderr: ta.Optional[int] = None
|
4299
|
+
|
4300
|
+
def child_fds(self) -> ta.List[int]:
|
4301
|
+
return [fd for fd in [self.child_stdin, self.child_stdout, self.child_stderr] if fd is not None]
|
4302
|
+
|
4303
|
+
def parent_fds(self) -> ta.List[int]:
|
4304
|
+
return [fd for fd in [self.stdin, self.stdout, self.stderr] if fd is not None]
|
4305
|
+
|
4306
|
+
|
4307
|
+
def make_process_pipes(stderr=True) -> ProcessPipes:
|
4308
|
+
"""
|
4309
|
+
Create pipes for parent to child stdin/stdout/stderr communications. Open fd in non-blocking mode so we can
|
4310
|
+
read them in the mainloop without blocking. If stderr is False, don't create a pipe for stderr.
|
4311
|
+
"""
|
4312
|
+
|
4313
|
+
pipes: ta.Dict[str, ta.Optional[int]] = {
|
4314
|
+
'child_stdin': None,
|
4315
|
+
'stdin': None,
|
4316
|
+
|
4317
|
+
'stdout': None,
|
4318
|
+
'child_stdout': None,
|
4319
|
+
|
4320
|
+
'stderr': None,
|
4321
|
+
'child_stderr': None,
|
4322
|
+
}
|
4323
|
+
|
4324
|
+
try:
|
4325
|
+
pipes['child_stdin'], pipes['stdin'] = os.pipe()
|
4326
|
+
pipes['stdout'], pipes['child_stdout'] = os.pipe()
|
4327
|
+
|
4328
|
+
if stderr:
|
4329
|
+
pipes['stderr'], pipes['child_stderr'] = os.pipe()
|
4330
|
+
|
4331
|
+
for fd in (
|
4332
|
+
pipes['stdout'],
|
4333
|
+
pipes['stderr'],
|
4334
|
+
pipes['stdin'],
|
4335
|
+
):
|
4336
|
+
if fd is not None:
|
4337
|
+
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NDELAY
|
4338
|
+
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
|
4339
|
+
|
4340
|
+
return ProcessPipes(**pipes)
|
4341
|
+
|
4342
|
+
except OSError:
|
4343
|
+
for fd in pipes.values():
|
4344
|
+
if fd is not None:
|
4345
|
+
close_fd(fd)
|
4346
|
+
|
4347
|
+
raise
|
4348
|
+
|
4349
|
+
|
4350
|
+
def close_pipes(pipes: ProcessPipes) -> None:
|
4351
|
+
close_parent_pipes(pipes)
|
4352
|
+
close_child_pipes(pipes)
|
4353
|
+
|
4354
|
+
|
4355
|
+
def close_parent_pipes(pipes: ProcessPipes) -> None:
|
4356
|
+
for fd in pipes.parent_fds():
|
4357
|
+
close_fd(fd)
|
4358
|
+
|
4359
|
+
|
4360
|
+
def close_child_pipes(pipes: ProcessPipes) -> None:
|
4361
|
+
for fd in pipes.child_fds():
|
4362
|
+
close_fd(fd)
|
4363
|
+
|
4364
|
+
|
3933
4365
|
########################################
|
3934
4366
|
# ../poller.py
|
3935
4367
|
|
3936
4368
|
|
3937
|
-
class Poller(abc.ABC):
|
4369
|
+
class Poller(DaemonizeListener, abc.ABC):
|
3938
4370
|
def __init__(self) -> None:
|
3939
4371
|
super().__init__()
|
3940
4372
|
|
@@ -4152,8 +4584,9 @@ else:
|
|
4152
4584
|
|
4153
4585
|
def get_poller_impl() -> ta.Type[Poller]:
|
4154
4586
|
if (
|
4155
|
-
sys.platform == 'darwin' or sys.platform.startswith('freebsd') and
|
4156
|
-
hasattr(select, 'kqueue') and
|
4587
|
+
(sys.platform == 'darwin' or sys.platform.startswith('freebsd')) and
|
4588
|
+
hasattr(select, 'kqueue') and
|
4589
|
+
KqueuePoller is not None
|
4157
4590
|
):
|
4158
4591
|
return KqueuePoller
|
4159
4592
|
elif hasattr(select, 'poll'):
|
@@ -4877,6 +5310,32 @@ class CoroHttpServerSocketHandler(SocketHandler):
|
|
4877
5310
|
# ../types.py
|
4878
5311
|
|
4879
5312
|
|
5313
|
+
##
|
5314
|
+
|
5315
|
+
|
5316
|
+
ServerEpoch = ta.NewType('ServerEpoch', int)
|
5317
|
+
|
5318
|
+
|
5319
|
+
##
|
5320
|
+
|
5321
|
+
|
5322
|
+
@functools.total_ordering
|
5323
|
+
class ConfigPriorityOrdered(abc.ABC):
|
5324
|
+
@property
|
5325
|
+
@abc.abstractmethod
|
5326
|
+
def config(self) -> ta.Any:
|
5327
|
+
raise NotImplementedError
|
5328
|
+
|
5329
|
+
def __lt__(self, other):
|
5330
|
+
return self.config.priority < other.config.priority
|
5331
|
+
|
5332
|
+
def __eq__(self, other):
|
5333
|
+
return self.config.priority == other.config.priority
|
5334
|
+
|
5335
|
+
|
5336
|
+
##
|
5337
|
+
|
5338
|
+
|
4880
5339
|
class ServerContext(abc.ABC):
|
4881
5340
|
@property
|
4882
5341
|
@abc.abstractmethod
|
@@ -4898,114 +5357,166 @@ class ServerContext(abc.ABC):
|
|
4898
5357
|
raise NotImplementedError
|
4899
5358
|
|
4900
5359
|
|
4901
|
-
|
4902
|
-
# pass
|
4903
|
-
#
|
4904
|
-
#
|
4905
|
-
# class OutputDispatcher(Dispatcher, abc.ABC):
|
4906
|
-
# pass
|
4907
|
-
#
|
4908
|
-
#
|
4909
|
-
# class InputDispatcher(Dispatcher, abc.ABC):
|
4910
|
-
# pass
|
5360
|
+
##
|
4911
5361
|
|
4912
5362
|
|
4913
|
-
|
4914
|
-
class Process(abc.ABC):
|
5363
|
+
class Dispatcher(abc.ABC):
|
4915
5364
|
@property
|
4916
5365
|
@abc.abstractmethod
|
4917
|
-
def
|
5366
|
+
def process(self) -> 'Process':
|
4918
5367
|
raise NotImplementedError
|
4919
5368
|
|
4920
5369
|
@property
|
4921
5370
|
@abc.abstractmethod
|
4922
|
-
def
|
5371
|
+
def channel(self) -> str:
|
4923
5372
|
raise NotImplementedError
|
4924
5373
|
|
4925
|
-
def __lt__(self, other):
|
4926
|
-
return self.config.priority < other.config.priority
|
4927
|
-
|
4928
|
-
def __eq__(self, other):
|
4929
|
-
return self.config.priority == other.config.priority
|
4930
|
-
|
4931
5374
|
@property
|
4932
5375
|
@abc.abstractmethod
|
4933
|
-
def
|
5376
|
+
def fd(self) -> int:
|
4934
5377
|
raise NotImplementedError
|
4935
5378
|
|
5379
|
+
@property
|
4936
5380
|
@abc.abstractmethod
|
4937
|
-
def
|
5381
|
+
def closed(self) -> bool:
|
4938
5382
|
raise NotImplementedError
|
4939
5383
|
|
5384
|
+
#
|
5385
|
+
|
4940
5386
|
@abc.abstractmethod
|
4941
|
-
def
|
5387
|
+
def close(self) -> None:
|
4942
5388
|
raise NotImplementedError
|
4943
5389
|
|
4944
5390
|
@abc.abstractmethod
|
4945
|
-
def
|
5391
|
+
def handle_error(self) -> None:
|
4946
5392
|
raise NotImplementedError
|
4947
5393
|
|
5394
|
+
#
|
5395
|
+
|
4948
5396
|
@abc.abstractmethod
|
4949
|
-
def
|
5397
|
+
def readable(self) -> bool:
|
4950
5398
|
raise NotImplementedError
|
4951
5399
|
|
4952
5400
|
@abc.abstractmethod
|
4953
|
-
def
|
5401
|
+
def writable(self) -> bool:
|
4954
5402
|
raise NotImplementedError
|
4955
5403
|
|
5404
|
+
#
|
5405
|
+
|
5406
|
+
def handle_read_event(self) -> None:
|
5407
|
+
raise TypeError
|
5408
|
+
|
5409
|
+
def handle_write_event(self) -> None:
|
5410
|
+
raise TypeError
|
5411
|
+
|
5412
|
+
|
5413
|
+
class OutputDispatcher(Dispatcher, abc.ABC):
|
4956
5414
|
@abc.abstractmethod
|
4957
|
-
def
|
5415
|
+
def remove_logs(self) -> None:
|
4958
5416
|
raise NotImplementedError
|
4959
5417
|
|
4960
5418
|
@abc.abstractmethod
|
4961
|
-
def
|
5419
|
+
def reopen_logs(self) -> None:
|
4962
5420
|
raise NotImplementedError
|
4963
5421
|
|
5422
|
+
|
5423
|
+
class InputDispatcher(Dispatcher, abc.ABC):
|
4964
5424
|
@abc.abstractmethod
|
4965
|
-
def
|
5425
|
+
def write(self, chars: ta.Union[bytes, str]) -> None:
|
4966
5426
|
raise NotImplementedError
|
4967
5427
|
|
4968
5428
|
@abc.abstractmethod
|
4969
|
-
def
|
5429
|
+
def flush(self) -> None:
|
4970
5430
|
raise NotImplementedError
|
4971
5431
|
|
4972
5432
|
|
4973
|
-
|
4974
|
-
|
5433
|
+
##
|
5434
|
+
|
5435
|
+
|
5436
|
+
class Process(ConfigPriorityOrdered, abc.ABC):
|
4975
5437
|
@property
|
4976
5438
|
@abc.abstractmethod
|
4977
|
-
def
|
5439
|
+
def name(self) -> str:
|
4978
5440
|
raise NotImplementedError
|
4979
5441
|
|
4980
|
-
|
4981
|
-
|
5442
|
+
@property
|
5443
|
+
@abc.abstractmethod
|
5444
|
+
def config(self) -> ProcessConfig:
|
5445
|
+
raise NotImplementedError
|
4982
5446
|
|
4983
|
-
|
4984
|
-
|
5447
|
+
@property
|
5448
|
+
@abc.abstractmethod
|
5449
|
+
def group(self) -> 'ProcessGroup':
|
5450
|
+
raise NotImplementedError
|
5451
|
+
|
5452
|
+
@property
|
5453
|
+
@abc.abstractmethod
|
5454
|
+
def pid(self) -> int:
|
5455
|
+
raise NotImplementedError
|
5456
|
+
|
5457
|
+
#
|
5458
|
+
|
5459
|
+
@property
|
5460
|
+
@abc.abstractmethod
|
5461
|
+
def context(self) -> ServerContext:
|
5462
|
+
raise NotImplementedError
|
5463
|
+
|
5464
|
+
@abc.abstractmethod
|
5465
|
+
def finish(self, sts: int) -> None:
|
5466
|
+
raise NotImplementedError
|
5467
|
+
|
5468
|
+
@abc.abstractmethod
|
5469
|
+
def stop(self) -> ta.Optional[str]:
|
5470
|
+
raise NotImplementedError
|
5471
|
+
|
5472
|
+
@abc.abstractmethod
|
5473
|
+
def give_up(self) -> None:
|
5474
|
+
raise NotImplementedError
|
4985
5475
|
|
4986
5476
|
@abc.abstractmethod
|
4987
5477
|
def transition(self) -> None:
|
4988
5478
|
raise NotImplementedError
|
4989
5479
|
|
4990
5480
|
@abc.abstractmethod
|
4991
|
-
def
|
5481
|
+
def get_state(self) -> ProcessState:
|
5482
|
+
raise NotImplementedError
|
5483
|
+
|
5484
|
+
@abc.abstractmethod
|
5485
|
+
def after_setuid(self) -> None:
|
5486
|
+
raise NotImplementedError
|
5487
|
+
|
5488
|
+
@abc.abstractmethod
|
5489
|
+
def get_dispatchers(self) -> 'Dispatchers':
|
4992
5490
|
raise NotImplementedError
|
4993
5491
|
|
5492
|
+
|
5493
|
+
##
|
5494
|
+
|
5495
|
+
|
5496
|
+
class ProcessGroup(
|
5497
|
+
ConfigPriorityOrdered,
|
5498
|
+
KeyedCollectionAccessors[str, Process],
|
5499
|
+
abc.ABC,
|
5500
|
+
):
|
4994
5501
|
@property
|
4995
5502
|
@abc.abstractmethod
|
4996
5503
|
def name(self) -> str:
|
4997
5504
|
raise NotImplementedError
|
4998
5505
|
|
5506
|
+
@property
|
4999
5507
|
@abc.abstractmethod
|
5000
|
-
def
|
5508
|
+
def config(self) -> ProcessGroupConfig:
|
5001
5509
|
raise NotImplementedError
|
5002
5510
|
|
5511
|
+
@property
|
5003
5512
|
@abc.abstractmethod
|
5004
|
-
def
|
5513
|
+
def by_name(self) -> ta.Mapping[str, Process]:
|
5005
5514
|
raise NotImplementedError
|
5006
5515
|
|
5516
|
+
#
|
5517
|
+
|
5007
5518
|
@abc.abstractmethod
|
5008
|
-
def
|
5519
|
+
def stop_all(self) -> None:
|
5009
5520
|
raise NotImplementedError
|
5010
5521
|
|
5011
5522
|
@abc.abstractmethod
|
@@ -5013,7 +5524,7 @@ class ProcessGroup(abc.ABC):
|
|
5013
5524
|
raise NotImplementedError
|
5014
5525
|
|
5015
5526
|
@abc.abstractmethod
|
5016
|
-
def
|
5527
|
+
def before_remove(self) -> None:
|
5017
5528
|
raise NotImplementedError
|
5018
5529
|
|
5019
5530
|
|
@@ -5021,9 +5532,6 @@ class ProcessGroup(abc.ABC):
|
|
5021
5532
|
# ../context.py
|
5022
5533
|
|
5023
5534
|
|
5024
|
-
ServerEpoch = ta.NewType('ServerEpoch', int)
|
5025
|
-
|
5026
|
-
|
5027
5535
|
class ServerContextImpl(ServerContext):
|
5028
5536
|
def __init__(
|
5029
5537
|
self,
|
@@ -5041,16 +5549,6 @@ class ServerContextImpl(ServerContext):
|
|
5041
5549
|
self._pid_history: ta.Dict[int, Process] = {}
|
5042
5550
|
self._state: SupervisorState = SupervisorState.RUNNING
|
5043
5551
|
|
5044
|
-
if config.user is not None:
|
5045
|
-
uid = name_to_uid(config.user)
|
5046
|
-
self._uid: ta.Optional[int] = uid
|
5047
|
-
self._gid: ta.Optional[int] = gid_for_uid(uid)
|
5048
|
-
else:
|
5049
|
-
self._uid = None
|
5050
|
-
self._gid = None
|
5051
|
-
|
5052
|
-
self._unlink_pidfile = False
|
5053
|
-
|
5054
5552
|
@property
|
5055
5553
|
def config(self) -> ServerConfig:
|
5056
5554
|
return self._config
|
@@ -5074,15 +5572,7 @@ class ServerContextImpl(ServerContext):
|
|
5074
5572
|
def pid_history(self) -> ta.Dict[int, Process]:
|
5075
5573
|
return self._pid_history
|
5076
5574
|
|
5077
|
-
|
5078
|
-
def uid(self) -> ta.Optional[int]:
|
5079
|
-
return self._uid
|
5080
|
-
|
5081
|
-
@property
|
5082
|
-
def gid(self) -> ta.Optional[int]:
|
5083
|
-
return self._gid
|
5084
|
-
|
5085
|
-
##
|
5575
|
+
#
|
5086
5576
|
|
5087
5577
|
def waitpid(self) -> ta.Tuple[ta.Optional[int], ta.Optional[int]]:
|
5088
5578
|
# Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
|
@@ -5102,386 +5592,108 @@ class ServerContextImpl(ServerContext):
|
|
5102
5592
|
pid, sts = None, None
|
5103
5593
|
return pid, sts
|
5104
5594
|
|
5105
|
-
def
|
5106
|
-
|
5107
|
-
|
5108
|
-
|
5109
|
-
|
5595
|
+
def get_auto_child_log_name(self, name: str, identifier: str, channel: str) -> str:
|
5596
|
+
prefix = f'{name}-{channel}---{identifier}-'
|
5597
|
+
logfile = mktempfile(
|
5598
|
+
suffix='.log',
|
5599
|
+
prefix=prefix,
|
5600
|
+
dir=self.config.child_logdir,
|
5601
|
+
)
|
5602
|
+
return logfile
|
5110
5603
|
|
5111
|
-
if self.uid is None:
|
5112
|
-
if os.getuid() == 0:
|
5113
|
-
warnings.warn(
|
5114
|
-
'Supervisor is running as root. Privileges were not dropped because no user is specified in the '
|
5115
|
-
'config file. If you intend to run as root, you can set user=root in the config file to avoid '
|
5116
|
-
'this message.',
|
5117
|
-
)
|
5118
|
-
else:
|
5119
|
-
msg = drop_privileges(self.uid)
|
5120
|
-
if msg is None:
|
5121
|
-
log.info('Set uid to user %s succeeded', self.uid)
|
5122
|
-
else: # failed to drop privileges
|
5123
|
-
raise RuntimeError(msg)
|
5124
5604
|
|
5125
|
-
|
5126
|
-
|
5127
|
-
Set the rlimits of the supervisord process. Called during supervisord startup only. No return value. Exits
|
5128
|
-
the process via usage() if any rlimits could not be set.
|
5129
|
-
"""
|
5605
|
+
########################################
|
5606
|
+
# ../dispatchers.py
|
5130
5607
|
|
5131
|
-
limits = []
|
5132
5608
|
|
5133
|
-
|
5134
|
-
|
5135
|
-
|
5136
|
-
'The minimum number of file descriptors required to run this process is %(min_limit)s as per the '
|
5137
|
-
'"minfds" command-line argument or config file setting. The current environment will only allow '
|
5138
|
-
'you to open %(hard)s file descriptors. Either raise the number of usable file descriptors in '
|
5139
|
-
'your environment (see README.rst) or lower the minfds setting in the config file to allow the '
|
5140
|
-
'process to start.'
|
5141
|
-
),
|
5142
|
-
'min': self.config.minfds,
|
5143
|
-
'resource': resource.RLIMIT_NOFILE,
|
5144
|
-
'name': 'RLIMIT_NOFILE',
|
5145
|
-
})
|
5609
|
+
class Dispatchers(KeyedCollection[int, Dispatcher]):
|
5610
|
+
def _key(self, v: Dispatcher) -> int:
|
5611
|
+
return v.fd
|
5146
5612
|
|
5147
|
-
|
5148
|
-
limits.append({
|
5149
|
-
'msg': (
|
5150
|
-
'The minimum number of available processes required to run this program is %(min_limit)s as per '
|
5151
|
-
'the "minprocs" command-line argument or config file setting. The current environment will only '
|
5152
|
-
'allow you to open %(hard)s processes. Either raise the number of usable processes in your '
|
5153
|
-
'environment (see README.rst) or lower the minprocs setting in the config file to allow the '
|
5154
|
-
'program to start.'
|
5155
|
-
),
|
5156
|
-
'min': self.config.minprocs,
|
5157
|
-
'resource': resource.RLIMIT_NPROC,
|
5158
|
-
'name': 'RLIMIT_NPROC',
|
5159
|
-
})
|
5613
|
+
#
|
5160
5614
|
|
5161
|
-
|
5162
|
-
|
5163
|
-
|
5164
|
-
|
5165
|
-
|
5615
|
+
def drain(self) -> None:
|
5616
|
+
for d in self:
|
5617
|
+
# note that we *must* call readable() for every dispatcher, as it may have side effects for a given
|
5618
|
+
# dispatcher (eg. call handle_listener_state_change for event listener processes)
|
5619
|
+
if d.readable():
|
5620
|
+
d.handle_read_event()
|
5621
|
+
if d.writable():
|
5622
|
+
d.handle_write_event()
|
5166
5623
|
|
5167
|
-
|
5624
|
+
#
|
5168
5625
|
|
5169
|
-
|
5170
|
-
|
5171
|
-
|
5172
|
-
|
5173
|
-
# usage
|
5174
|
-
hard = min_limit # type: ignore
|
5626
|
+
def remove_logs(self) -> None:
|
5627
|
+
for d in self:
|
5628
|
+
if isinstance(d, OutputDispatcher):
|
5629
|
+
d.remove_logs()
|
5175
5630
|
|
5176
|
-
|
5177
|
-
|
5178
|
-
|
5179
|
-
|
5180
|
-
raise RuntimeError(msg % dict( # type: ignore # noqa
|
5181
|
-
min_limit=min_limit,
|
5182
|
-
res=res,
|
5183
|
-
name=name,
|
5184
|
-
soft=soft,
|
5185
|
-
hard=hard,
|
5186
|
-
))
|
5631
|
+
def reopen_logs(self) -> None:
|
5632
|
+
for d in self:
|
5633
|
+
if isinstance(d, OutputDispatcher):
|
5634
|
+
d.reopen_logs()
|
5187
5635
|
|
5188
|
-
def cleanup(self) -> None:
|
5189
|
-
if self._unlink_pidfile:
|
5190
|
-
try_unlink(self.config.pidfile)
|
5191
|
-
self._poller.close()
|
5192
5636
|
|
5193
|
-
|
5194
|
-
|
5195
|
-
start = 5
|
5196
|
-
os.closerange(start, self.config.minfds)
|
5637
|
+
########################################
|
5638
|
+
# ../dispatchersimpl.py
|
5197
5639
|
|
5198
|
-
def clear_auto_child_logdir(self) -> None:
|
5199
|
-
# must be called after realize()
|
5200
|
-
child_logdir = self.config.child_logdir
|
5201
|
-
fnre = re.compile(rf'.+?---{self.config.identifier}-\S+\.log\.?\d{{0,4}}')
|
5202
|
-
try:
|
5203
|
-
filenames = os.listdir(child_logdir)
|
5204
|
-
except OSError:
|
5205
|
-
log.warning('Could not clear child_log dir')
|
5206
|
-
return
|
5207
5640
|
|
5208
|
-
|
5209
|
-
|
5210
|
-
|
5211
|
-
|
5212
|
-
|
5213
|
-
|
5214
|
-
|
5641
|
+
class BaseDispatcherImpl(Dispatcher, abc.ABC):
|
5642
|
+
def __init__(
|
5643
|
+
self,
|
5644
|
+
process: Process,
|
5645
|
+
channel: str,
|
5646
|
+
fd: int,
|
5647
|
+
*,
|
5648
|
+
event_callbacks: EventCallbacks,
|
5649
|
+
) -> None:
|
5650
|
+
super().__init__()
|
5215
5651
|
|
5216
|
-
|
5217
|
-
self.
|
5218
|
-
self.
|
5219
|
-
self.
|
5652
|
+
self._process = process # process which "owns" this dispatcher
|
5653
|
+
self._channel = channel # 'stderr' or 'stdout'
|
5654
|
+
self._fd = fd
|
5655
|
+
self._event_callbacks = event_callbacks
|
5220
5656
|
|
5221
|
-
|
5222
|
-
# To daemonize, we need to become the leader of our own session (process) group. If we do not, signals sent to
|
5223
|
-
# our parent process will also be sent to us. This might be bad because signals such as SIGINT can be sent to
|
5224
|
-
# our parent process during normal (uninteresting) operations such as when we press Ctrl-C in the parent
|
5225
|
-
# terminal window to escape from a logtail command. To disassociate ourselves from our parent's session group we
|
5226
|
-
# use os.setsid. It means "set session id", which has the effect of disassociating a process from is current
|
5227
|
-
# session and process group and setting itself up as a new session leader.
|
5228
|
-
#
|
5229
|
-
# Unfortunately we cannot call setsid if we're already a session group leader, so we use "fork" to make a copy
|
5230
|
-
# of ourselves that is guaranteed to not be a session group leader.
|
5231
|
-
#
|
5232
|
-
# We also change directories, set stderr and stdout to null, and change our umask.
|
5233
|
-
#
|
5234
|
-
# This explanation was (gratefully) garnered from
|
5235
|
-
# http://www.cems.uwe.ac.uk/~irjohnso/coursenotes/lrc/system/daemons/d3.htm
|
5657
|
+
self._closed = False # True if close() has been called
|
5236
5658
|
|
5237
|
-
|
5238
|
-
if pid != 0:
|
5239
|
-
# Parent
|
5240
|
-
log.debug('supervisord forked; parent exiting')
|
5241
|
-
real_exit(0)
|
5659
|
+
#
|
5242
5660
|
|
5243
|
-
|
5244
|
-
|
5245
|
-
if self.config.directory:
|
5246
|
-
try:
|
5247
|
-
os.chdir(self.config.directory)
|
5248
|
-
except OSError as err:
|
5249
|
-
log.critical("can't chdir into %r: %s", self.config.directory, err)
|
5250
|
-
else:
|
5251
|
-
log.info('set current directory: %r', self.config.directory)
|
5661
|
+
def __repr__(self) -> str:
|
5662
|
+
return f'<{self.__class__.__name__} at {id(self)} for {self._process} ({self._channel})>'
|
5252
5663
|
|
5253
|
-
|
5254
|
-
os.dup2(1, os.open('/dev/null', os.O_WRONLY))
|
5255
|
-
os.dup2(2, os.open('/dev/null', os.O_WRONLY))
|
5664
|
+
#
|
5256
5665
|
|
5257
|
-
|
5666
|
+
@property
|
5667
|
+
def process(self) -> Process:
|
5668
|
+
return self._process
|
5258
5669
|
|
5259
|
-
|
5670
|
+
@property
|
5671
|
+
def channel(self) -> str:
|
5672
|
+
return self._channel
|
5260
5673
|
|
5261
|
-
|
5262
|
-
|
5263
|
-
|
5674
|
+
@property
|
5675
|
+
def fd(self) -> int:
|
5676
|
+
return self._fd
|
5264
5677
|
|
5265
|
-
|
5266
|
-
|
5267
|
-
|
5268
|
-
suffix='.log',
|
5269
|
-
prefix=prefix,
|
5270
|
-
dir=self.config.child_logdir,
|
5271
|
-
)
|
5272
|
-
return logfile
|
5678
|
+
@property
|
5679
|
+
def closed(self) -> bool:
|
5680
|
+
return self._closed
|
5273
5681
|
|
5274
|
-
|
5275
|
-
pid = os.getpid()
|
5276
|
-
try:
|
5277
|
-
with open(self.config.pidfile, 'w') as f:
|
5278
|
-
f.write(f'{pid}\n')
|
5279
|
-
except OSError:
|
5280
|
-
log.critical('could not write pidfile %s', self.config.pidfile)
|
5281
|
-
else:
|
5282
|
-
self._unlink_pidfile = True
|
5283
|
-
log.info('supervisord started with pid %s', pid)
|
5682
|
+
#
|
5284
5683
|
|
5684
|
+
def close(self) -> None:
|
5685
|
+
if not self._closed:
|
5686
|
+
log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
|
5687
|
+
self._closed = True
|
5285
5688
|
|
5286
|
-
def
|
5287
|
-
|
5288
|
-
Drop privileges to become the specified user, which may be a username or uid. Called for supervisord startup
|
5289
|
-
and when spawning subprocesses. Returns None on success or a string error message if privileges could not be
|
5290
|
-
dropped.
|
5291
|
-
"""
|
5292
|
-
|
5293
|
-
if user is None:
|
5294
|
-
return 'No user specified to setuid to!'
|
5295
|
-
|
5296
|
-
# get uid for user, which can be a number or username
|
5297
|
-
try:
|
5298
|
-
uid = int(user)
|
5299
|
-
except ValueError:
|
5300
|
-
try:
|
5301
|
-
pwrec = pwd.getpwnam(user) # type: ignore
|
5302
|
-
except KeyError:
|
5303
|
-
return f"Can't find username {user!r}"
|
5304
|
-
uid = pwrec[2]
|
5305
|
-
else:
|
5306
|
-
try:
|
5307
|
-
pwrec = pwd.getpwuid(uid)
|
5308
|
-
except KeyError:
|
5309
|
-
return f"Can't find uid {uid!r}"
|
5310
|
-
|
5311
|
-
current_uid = os.getuid()
|
5312
|
-
|
5313
|
-
if current_uid == uid:
|
5314
|
-
# do nothing and return successfully if the uid is already the current one. this allows a supervisord
|
5315
|
-
# running as an unprivileged user "foo" to start a process where the config has "user=foo" (same user) in
|
5316
|
-
# it.
|
5317
|
-
return None
|
5318
|
-
|
5319
|
-
if current_uid != 0:
|
5320
|
-
return "Can't drop privilege as nonroot user"
|
5321
|
-
|
5322
|
-
gid = pwrec[3]
|
5323
|
-
if hasattr(os, 'setgroups'):
|
5324
|
-
user = pwrec[0]
|
5325
|
-
groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]]
|
5326
|
-
|
5327
|
-
# always put our primary gid first in this list, otherwise we can lose group info since sometimes the first
|
5328
|
-
# group in the setgroups list gets overwritten on the subsequent setgid call (at least on freebsd 9 with
|
5329
|
-
# python 2.7 - this will be safe though for all unix /python version combos)
|
5330
|
-
groups.insert(0, gid)
|
5331
|
-
try:
|
5332
|
-
os.setgroups(groups)
|
5333
|
-
except OSError:
|
5334
|
-
return 'Could not set groups of effective user'
|
5335
|
-
|
5336
|
-
try:
|
5337
|
-
os.setgid(gid)
|
5338
|
-
except OSError:
|
5339
|
-
return 'Could not set group id of effective user'
|
5340
|
-
|
5341
|
-
os.setuid(uid)
|
5342
|
-
|
5343
|
-
return None
|
5344
|
-
|
5345
|
-
|
5346
|
-
def make_pipes(stderr=True) -> ta.Mapping[str, int]:
|
5347
|
-
"""
|
5348
|
-
Create pipes for parent to child stdin/stdout/stderr communications. Open fd in non-blocking mode so we can
|
5349
|
-
read them in the mainloop without blocking. If stderr is False, don't create a pipe for stderr.
|
5350
|
-
"""
|
5351
|
-
|
5352
|
-
pipes: ta.Dict[str, ta.Optional[int]] = {
|
5353
|
-
'child_stdin': None,
|
5354
|
-
'stdin': None,
|
5355
|
-
'stdout': None,
|
5356
|
-
'child_stdout': None,
|
5357
|
-
'stderr': None,
|
5358
|
-
'child_stderr': None,
|
5359
|
-
}
|
5360
|
-
|
5361
|
-
try:
|
5362
|
-
stdin, child_stdin = os.pipe()
|
5363
|
-
pipes['child_stdin'], pipes['stdin'] = stdin, child_stdin
|
5364
|
-
|
5365
|
-
stdout, child_stdout = os.pipe()
|
5366
|
-
pipes['stdout'], pipes['child_stdout'] = stdout, child_stdout
|
5367
|
-
|
5368
|
-
if stderr:
|
5369
|
-
stderr, child_stderr = os.pipe()
|
5370
|
-
pipes['stderr'], pipes['child_stderr'] = stderr, child_stderr
|
5371
|
-
|
5372
|
-
for fd in (pipes['stdout'], pipes['stderr'], pipes['stdin']):
|
5373
|
-
if fd is not None:
|
5374
|
-
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NDELAY
|
5375
|
-
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
|
5376
|
-
|
5377
|
-
return pipes # type: ignore
|
5378
|
-
|
5379
|
-
except OSError:
|
5380
|
-
for fd in pipes.values():
|
5381
|
-
if fd is not None:
|
5382
|
-
close_fd(fd)
|
5383
|
-
raise
|
5384
|
-
|
5385
|
-
|
5386
|
-
def close_parent_pipes(pipes: ta.Mapping[str, int]) -> None:
|
5387
|
-
for fdname in ('stdin', 'stdout', 'stderr'):
|
5388
|
-
fd = pipes.get(fdname)
|
5389
|
-
if fd is not None:
|
5390
|
-
close_fd(fd)
|
5391
|
-
|
5392
|
-
|
5393
|
-
def close_child_pipes(pipes: ta.Mapping[str, int]) -> None:
|
5394
|
-
for fdname in ('child_stdin', 'child_stdout', 'child_stderr'):
|
5395
|
-
fd = pipes.get(fdname)
|
5396
|
-
if fd is not None:
|
5397
|
-
close_fd(fd)
|
5398
|
-
|
5399
|
-
|
5400
|
-
def check_execv_args(filename, argv, st) -> None:
|
5401
|
-
if st is None:
|
5402
|
-
raise NotFoundError(f"can't find command {filename!r}")
|
5403
|
-
|
5404
|
-
elif stat.S_ISDIR(st[stat.ST_MODE]):
|
5405
|
-
raise NotExecutableError(f'command at {filename!r} is a directory')
|
5406
|
-
|
5407
|
-
elif not (stat.S_IMODE(st[stat.ST_MODE]) & 0o111):
|
5408
|
-
raise NotExecutableError(f'command at {filename!r} is not executable')
|
5409
|
-
|
5410
|
-
elif not os.access(filename, os.X_OK):
|
5411
|
-
raise NoPermissionError(f'no permission to run command {filename!r}')
|
5412
|
-
|
5413
|
-
|
5414
|
-
########################################
|
5415
|
-
# ../dispatchers.py
|
5416
|
-
|
5417
|
-
|
5418
|
-
class Dispatcher(abc.ABC):
|
5419
|
-
def __init__(
|
5420
|
-
self,
|
5421
|
-
process: Process,
|
5422
|
-
channel: str,
|
5423
|
-
fd: int,
|
5424
|
-
*,
|
5425
|
-
event_callbacks: EventCallbacks,
|
5426
|
-
) -> None:
|
5427
|
-
super().__init__()
|
5428
|
-
|
5429
|
-
self._process = process # process which "owns" this dispatcher
|
5430
|
-
self._channel = channel # 'stderr' or 'stdout'
|
5431
|
-
self._fd = fd
|
5432
|
-
self._event_callbacks = event_callbacks
|
5433
|
-
|
5434
|
-
self._closed = False # True if close() has been called
|
5435
|
-
|
5436
|
-
def __repr__(self) -> str:
|
5437
|
-
return f'<{self.__class__.__name__} at {id(self)} for {self._process} ({self._channel})>'
|
5438
|
-
|
5439
|
-
@property
|
5440
|
-
def process(self) -> Process:
|
5441
|
-
return self._process
|
5442
|
-
|
5443
|
-
@property
|
5444
|
-
def channel(self) -> str:
|
5445
|
-
return self._channel
|
5446
|
-
|
5447
|
-
@property
|
5448
|
-
def fd(self) -> int:
|
5449
|
-
return self._fd
|
5450
|
-
|
5451
|
-
@property
|
5452
|
-
def closed(self) -> bool:
|
5453
|
-
return self._closed
|
5454
|
-
|
5455
|
-
@abc.abstractmethod
|
5456
|
-
def readable(self) -> bool:
|
5457
|
-
raise NotImplementedError
|
5458
|
-
|
5459
|
-
@abc.abstractmethod
|
5460
|
-
def writable(self) -> bool:
|
5461
|
-
raise NotImplementedError
|
5462
|
-
|
5463
|
-
def handle_read_event(self) -> None:
|
5464
|
-
raise TypeError
|
5465
|
-
|
5466
|
-
def handle_write_event(self) -> None:
|
5467
|
-
raise TypeError
|
5468
|
-
|
5469
|
-
def handle_error(self) -> None:
|
5470
|
-
nil, t, v, tbinfo = compact_traceback()
|
5689
|
+
def handle_error(self) -> None:
|
5690
|
+
nil, t, v, tbinfo = compact_traceback()
|
5471
5691
|
|
5472
5692
|
log.critical('uncaptured python exception, closing channel %s (%s:%s %s)', repr(self), t, v, tbinfo)
|
5473
5693
|
self.close()
|
5474
5694
|
|
5475
|
-
def close(self) -> None:
|
5476
|
-
if not self._closed:
|
5477
|
-
log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
|
5478
|
-
self._closed = True
|
5479
5695
|
|
5480
|
-
|
5481
|
-
pass
|
5482
|
-
|
5483
|
-
|
5484
|
-
class OutputDispatcher(Dispatcher):
|
5696
|
+
class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
|
5485
5697
|
"""
|
5486
5698
|
Dispatcher for one channel (stdout or stderr) of one process. Serves several purposes:
|
5487
5699
|
|
@@ -5495,13 +5707,14 @@ class OutputDispatcher(Dispatcher):
|
|
5495
5707
|
process: Process,
|
5496
5708
|
event_type: ta.Type[ProcessCommunicationEvent],
|
5497
5709
|
fd: int,
|
5498
|
-
|
5710
|
+
*,
|
5711
|
+
event_callbacks: EventCallbacks,
|
5499
5712
|
) -> None:
|
5500
5713
|
super().__init__(
|
5501
5714
|
process,
|
5502
5715
|
event_type.channel,
|
5503
5716
|
fd,
|
5504
|
-
|
5717
|
+
event_callbacks=event_callbacks,
|
5505
5718
|
)
|
5506
5719
|
|
5507
5720
|
self._event_type = event_type
|
@@ -5698,19 +5911,20 @@ class OutputDispatcher(Dispatcher):
|
|
5698
5911
|
self.close()
|
5699
5912
|
|
5700
5913
|
|
5701
|
-
class InputDispatcher
|
5914
|
+
class InputDispatcherImpl(BaseDispatcherImpl, InputDispatcher):
|
5702
5915
|
def __init__(
|
5703
5916
|
self,
|
5704
5917
|
process: Process,
|
5705
5918
|
channel: str,
|
5706
5919
|
fd: int,
|
5707
|
-
|
5920
|
+
*,
|
5921
|
+
event_callbacks: EventCallbacks,
|
5708
5922
|
) -> None:
|
5709
5923
|
super().__init__(
|
5710
5924
|
process,
|
5711
5925
|
channel,
|
5712
5926
|
fd,
|
5713
|
-
|
5927
|
+
event_callbacks=event_callbacks,
|
5714
5928
|
)
|
5715
5929
|
|
5716
5930
|
self._input_buffer = b''
|
@@ -5747,58 +5961,133 @@ class InputDispatcher(Dispatcher):
|
|
5747
5961
|
# ../groups.py
|
5748
5962
|
|
5749
5963
|
|
5750
|
-
|
5964
|
+
class ProcessGroupManager(KeyedCollectionAccessors[str, ProcessGroup]):
|
5965
|
+
def __init__(
|
5966
|
+
self,
|
5967
|
+
*,
|
5968
|
+
event_callbacks: EventCallbacks,
|
5969
|
+
) -> None:
|
5970
|
+
super().__init__()
|
5971
|
+
|
5972
|
+
self._event_callbacks = event_callbacks
|
5973
|
+
|
5974
|
+
self._by_name: ta.Dict[str, ProcessGroup] = {}
|
5975
|
+
|
5976
|
+
@property
|
5977
|
+
def _by_key(self) -> ta.Mapping[str, ProcessGroup]:
|
5978
|
+
return self._by_name
|
5979
|
+
|
5980
|
+
#
|
5981
|
+
|
5982
|
+
def all_processes(self) -> ta.Iterator[Process]:
|
5983
|
+
for g in self:
|
5984
|
+
yield from g
|
5985
|
+
|
5986
|
+
#
|
5987
|
+
|
5988
|
+
def add(self, group: ProcessGroup) -> None:
|
5989
|
+
if (name := group.name) in self._by_name:
|
5990
|
+
raise KeyError(f'Process group already exists: {name}')
|
5991
|
+
|
5992
|
+
self._by_name[name] = group
|
5751
5993
|
|
5994
|
+
self._event_callbacks.notify(ProcessGroupAddedEvent(name))
|
5995
|
+
|
5996
|
+
def remove(self, name: str) -> None:
|
5997
|
+
group = self._by_name[name]
|
5998
|
+
|
5999
|
+
group.before_remove()
|
6000
|
+
|
6001
|
+
del self._by_name[name]
|
6002
|
+
|
6003
|
+
self._event_callbacks.notify(ProcessGroupRemovedEvent(name))
|
6004
|
+
|
6005
|
+
def clear(self) -> None:
|
6006
|
+
# FIXME: events?
|
6007
|
+
self._by_name.clear()
|
6008
|
+
|
6009
|
+
#
|
5752
6010
|
|
5753
|
-
|
6011
|
+
class Diff(ta.NamedTuple):
|
6012
|
+
added: ta.List[ProcessGroupConfig]
|
6013
|
+
changed: ta.List[ProcessGroupConfig]
|
6014
|
+
removed: ta.List[ProcessGroupConfig]
|
6015
|
+
|
6016
|
+
def diff(self, new: ta.Sequence[ProcessGroupConfig]) -> Diff:
|
6017
|
+
cur = [group.config for group in self]
|
6018
|
+
|
6019
|
+
cur_by_name = {cfg.name: cfg for cfg in cur}
|
6020
|
+
new_by_name = {cfg.name: cfg for cfg in new}
|
6021
|
+
|
6022
|
+
added = [cand for cand in new if cand.name not in cur_by_name]
|
6023
|
+
removed = [cand for cand in cur if cand.name not in new_by_name]
|
6024
|
+
changed = [cand for cand in new if cand != cur_by_name.get(cand.name, cand)]
|
6025
|
+
|
6026
|
+
return ProcessGroupManager.Diff(
|
6027
|
+
added,
|
6028
|
+
changed,
|
6029
|
+
removed,
|
6030
|
+
)
|
6031
|
+
|
6032
|
+
|
6033
|
+
########################################
|
6034
|
+
# ../groupsimpl.py
|
6035
|
+
|
6036
|
+
|
6037
|
+
class ProcessFactory(Func2[ProcessConfig, ProcessGroup, Process]):
|
6038
|
+
pass
|
5754
6039
|
|
5755
6040
|
|
5756
6041
|
class ProcessGroupImpl(ProcessGroup):
|
5757
6042
|
def __init__(
|
5758
6043
|
self,
|
5759
6044
|
config: ProcessGroupConfig,
|
5760
|
-
context: ServerContext,
|
5761
6045
|
*,
|
5762
6046
|
process_factory: ProcessFactory,
|
5763
6047
|
):
|
5764
6048
|
super().__init__()
|
5765
6049
|
|
5766
6050
|
self._config = config
|
5767
|
-
self._context = context
|
5768
6051
|
self._process_factory = process_factory
|
5769
6052
|
|
5770
|
-
|
6053
|
+
by_name: ta.Dict[str, Process] = {}
|
5771
6054
|
for pconfig in self._config.processes or []:
|
5772
|
-
|
5773
|
-
|
6055
|
+
p = check_isinstance(self._process_factory(pconfig, self), Process)
|
6056
|
+
if p.name in by_name:
|
6057
|
+
raise KeyError(f'name {p.name} of process {p} already registered by {by_name[p.name]}')
|
6058
|
+
by_name[pconfig.name] = p
|
6059
|
+
self._by_name = by_name
|
5774
6060
|
|
5775
6061
|
@property
|
5776
|
-
def
|
5777
|
-
return self.
|
6062
|
+
def _by_key(self) -> ta.Mapping[str, Process]:
|
6063
|
+
return self._by_name
|
6064
|
+
|
6065
|
+
#
|
6066
|
+
|
6067
|
+
def __repr__(self) -> str:
|
6068
|
+
return f'<{self.__class__.__name__} instance at {id(self)} named {self._config.name}>'
|
6069
|
+
|
6070
|
+
#
|
5778
6071
|
|
5779
6072
|
@property
|
5780
6073
|
def name(self) -> str:
|
5781
6074
|
return self._config.name
|
5782
6075
|
|
5783
6076
|
@property
|
5784
|
-
def
|
5785
|
-
return self.
|
6077
|
+
def config(self) -> ProcessGroupConfig:
|
6078
|
+
return self._config
|
5786
6079
|
|
5787
|
-
|
5788
|
-
|
5789
|
-
|
5790
|
-
return f'<{self.__class__.__name__} instance at {id(self)} named {name}>'
|
6080
|
+
@property
|
6081
|
+
def by_name(self) -> ta.Mapping[str, Process]:
|
6082
|
+
return self._by_name
|
5791
6083
|
|
5792
|
-
|
5793
|
-
for process in self._processes.values():
|
5794
|
-
process.remove_logs()
|
6084
|
+
#
|
5795
6085
|
|
5796
|
-
def
|
5797
|
-
for
|
5798
|
-
process.reopen_logs()
|
6086
|
+
def get_unstopped_processes(self) -> ta.List[Process]:
|
6087
|
+
return [x for x in self if not x.get_state().stopped]
|
5799
6088
|
|
5800
6089
|
def stop_all(self) -> None:
|
5801
|
-
processes = list(self.
|
6090
|
+
processes = list(self._by_name.values())
|
5802
6091
|
processes.sort()
|
5803
6092
|
processes.reverse() # stop in desc priority order
|
5804
6093
|
|
@@ -5816,84 +6105,642 @@ class ProcessGroupImpl(ProcessGroup):
|
|
5816
6105
|
# BACKOFF -> FATAL
|
5817
6106
|
proc.give_up()
|
5818
6107
|
|
5819
|
-
def get_unstopped_processes(self) -> ta.List[Process]:
|
5820
|
-
return [x for x in self._processes.values() if not x.get_state().stopped]
|
5821
|
-
|
5822
|
-
def get_dispatchers(self) -> ta.Dict[int, Dispatcher]:
|
5823
|
-
dispatchers: dict = {}
|
5824
|
-
for process in self._processes.values():
|
5825
|
-
dispatchers.update(process.get_dispatchers())
|
5826
|
-
return dispatchers
|
5827
|
-
|
5828
6108
|
def before_remove(self) -> None:
|
5829
6109
|
pass
|
5830
6110
|
|
5831
|
-
def transition(self) -> None:
|
5832
|
-
for proc in self._processes.values():
|
5833
|
-
proc.transition()
|
5834
6111
|
|
5835
|
-
|
5836
|
-
|
5837
|
-
proc.create_auto_child_logs()
|
6112
|
+
########################################
|
6113
|
+
# ../processes.py
|
5838
6114
|
|
5839
6115
|
|
5840
6116
|
##
|
5841
6117
|
|
5842
6118
|
|
5843
|
-
class
|
5844
|
-
|
5845
|
-
self,
|
5846
|
-
*,
|
5847
|
-
event_callbacks: EventCallbacks,
|
5848
|
-
) -> None:
|
5849
|
-
super().__init__()
|
6119
|
+
class ProcessStateError(RuntimeError):
|
6120
|
+
pass
|
5850
6121
|
|
6122
|
+
|
6123
|
+
##
|
6124
|
+
|
6125
|
+
|
6126
|
+
class PidHistory(ta.Dict[int, Process]):
|
6127
|
+
pass
|
6128
|
+
|
6129
|
+
|
6130
|
+
########################################
|
6131
|
+
# ../setupimpl.py
|
6132
|
+
|
6133
|
+
|
6134
|
+
##
|
6135
|
+
|
6136
|
+
|
6137
|
+
class SupervisorSetupImpl(SupervisorSetup):
|
6138
|
+
def __init__(
|
6139
|
+
self,
|
6140
|
+
*,
|
6141
|
+
config: ServerConfig,
|
6142
|
+
user: ta.Optional[SupervisorUser] = None,
|
6143
|
+
epoch: ServerEpoch = ServerEpoch(0),
|
6144
|
+
daemonize_listeners: DaemonizeListeners = DaemonizeListeners([]),
|
6145
|
+
) -> None:
|
6146
|
+
super().__init__()
|
6147
|
+
|
6148
|
+
self._config = config
|
6149
|
+
self._user = user
|
6150
|
+
self._epoch = epoch
|
6151
|
+
self._daemonize_listeners = daemonize_listeners
|
6152
|
+
|
6153
|
+
#
|
6154
|
+
|
6155
|
+
@property
|
6156
|
+
def first(self) -> bool:
|
6157
|
+
return not self._epoch
|
6158
|
+
|
6159
|
+
#
|
6160
|
+
|
6161
|
+
@cached_nullary
|
6162
|
+
def setup(self) -> None:
|
6163
|
+
if not self.first:
|
6164
|
+
# prevent crash on libdispatch-based systems, at least for the first request
|
6165
|
+
self._cleanup_fds()
|
6166
|
+
|
6167
|
+
self._set_uid_or_exit()
|
6168
|
+
|
6169
|
+
if self.first:
|
6170
|
+
self._set_rlimits_or_exit()
|
6171
|
+
|
6172
|
+
# this sets the options.logger object delay logger instantiation until after setuid
|
6173
|
+
if not self._config.nocleanup:
|
6174
|
+
# clean up old automatic logs
|
6175
|
+
self._clear_auto_child_logdir()
|
6176
|
+
|
6177
|
+
if not self._config.nodaemon and self.first:
|
6178
|
+
self._daemonize()
|
6179
|
+
|
6180
|
+
# writing pid file needs to come *after* daemonizing or pid will be wrong
|
6181
|
+
self._write_pidfile()
|
6182
|
+
|
6183
|
+
@cached_nullary
|
6184
|
+
def cleanup(self) -> None:
|
6185
|
+
self._cleanup_pidfile()
|
6186
|
+
|
6187
|
+
#
|
6188
|
+
|
6189
|
+
def _cleanup_fds(self) -> None:
|
6190
|
+
# try to close any leaked file descriptors (for reload)
|
6191
|
+
start = 5
|
6192
|
+
os.closerange(start, self._config.minfds)
|
6193
|
+
|
6194
|
+
#
|
6195
|
+
|
6196
|
+
def _set_uid_or_exit(self) -> None:
|
6197
|
+
"""
|
6198
|
+
Set the uid of the supervisord process. Called during supervisord startup only. No return value. Exits the
|
6199
|
+
process via usage() if privileges could not be dropped.
|
6200
|
+
"""
|
6201
|
+
|
6202
|
+
if self._user is None:
|
6203
|
+
if os.getuid() == 0:
|
6204
|
+
warnings.warn(
|
6205
|
+
'Supervisor is running as root. Privileges were not dropped because no user is specified in the '
|
6206
|
+
'config file. If you intend to run as root, you can set user=root in the config file to avoid '
|
6207
|
+
'this message.',
|
6208
|
+
)
|
6209
|
+
else:
|
6210
|
+
msg = drop_privileges(self._user.uid)
|
6211
|
+
if msg is None:
|
6212
|
+
log.info('Set uid to user %s succeeded', self._user.uid)
|
6213
|
+
else: # failed to drop privileges
|
6214
|
+
raise RuntimeError(msg)
|
6215
|
+
|
6216
|
+
#
|
6217
|
+
|
6218
|
+
def _set_rlimits_or_exit(self) -> None:
|
6219
|
+
"""
|
6220
|
+
Set the rlimits of the supervisord process. Called during supervisord startup only. No return value. Exits
|
6221
|
+
the process via usage() if any rlimits could not be set.
|
6222
|
+
"""
|
6223
|
+
|
6224
|
+
limits = []
|
6225
|
+
|
6226
|
+
if hasattr(resource, 'RLIMIT_NOFILE'):
|
6227
|
+
limits.append({
|
6228
|
+
'msg': (
|
6229
|
+
'The minimum number of file descriptors required to run this process is %(min_limit)s as per the '
|
6230
|
+
'"minfds" command-line argument or config file setting. The current environment will only allow '
|
6231
|
+
'you to open %(hard)s file descriptors. Either raise the number of usable file descriptors in '
|
6232
|
+
'your environment (see README.rst) or lower the minfds setting in the config file to allow the '
|
6233
|
+
'process to start.'
|
6234
|
+
),
|
6235
|
+
'min': self._config.minfds,
|
6236
|
+
'resource': resource.RLIMIT_NOFILE,
|
6237
|
+
'name': 'RLIMIT_NOFILE',
|
6238
|
+
})
|
6239
|
+
|
6240
|
+
if hasattr(resource, 'RLIMIT_NPROC'):
|
6241
|
+
limits.append({
|
6242
|
+
'msg': (
|
6243
|
+
'The minimum number of available processes required to run this program is %(min_limit)s as per '
|
6244
|
+
'the "minprocs" command-line argument or config file setting. The current environment will only '
|
6245
|
+
'allow you to open %(hard)s processes. Either raise the number of usable processes in your '
|
6246
|
+
'environment (see README.rst) or lower the minprocs setting in the config file to allow the '
|
6247
|
+
'program to start.'
|
6248
|
+
),
|
6249
|
+
'min': self._config.minprocs,
|
6250
|
+
'resource': resource.RLIMIT_NPROC,
|
6251
|
+
'name': 'RLIMIT_NPROC',
|
6252
|
+
})
|
6253
|
+
|
6254
|
+
for limit in limits:
|
6255
|
+
min_limit = limit['min']
|
6256
|
+
res = limit['resource']
|
6257
|
+
msg = limit['msg']
|
6258
|
+
name = limit['name']
|
6259
|
+
|
6260
|
+
soft, hard = resource.getrlimit(res) # type: ignore
|
6261
|
+
|
6262
|
+
# -1 means unlimited
|
6263
|
+
if soft < min_limit and soft != -1: # type: ignore
|
6264
|
+
if hard < min_limit and hard != -1: # type: ignore
|
6265
|
+
# setrlimit should increase the hard limit if we are root, if not then setrlimit raises and we print
|
6266
|
+
# usage
|
6267
|
+
hard = min_limit # type: ignore
|
6268
|
+
|
6269
|
+
try:
|
6270
|
+
resource.setrlimit(res, (min_limit, hard)) # type: ignore
|
6271
|
+
log.info('Increased %s limit to %s', name, min_limit)
|
6272
|
+
except (resource.error, ValueError):
|
6273
|
+
raise RuntimeError(msg % dict( # type: ignore # noqa
|
6274
|
+
min_limit=min_limit,
|
6275
|
+
res=res,
|
6276
|
+
name=name,
|
6277
|
+
soft=soft,
|
6278
|
+
hard=hard,
|
6279
|
+
))
|
6280
|
+
|
6281
|
+
#
|
6282
|
+
|
6283
|
+
_unlink_pidfile = False
|
6284
|
+
|
6285
|
+
def _write_pidfile(self) -> None:
|
6286
|
+
pid = os.getpid()
|
6287
|
+
try:
|
6288
|
+
with open(self._config.pidfile, 'w') as f:
|
6289
|
+
f.write(f'{pid}\n')
|
6290
|
+
except OSError:
|
6291
|
+
log.critical('could not write pidfile %s', self._config.pidfile)
|
6292
|
+
else:
|
6293
|
+
self._unlink_pidfile = True
|
6294
|
+
log.info('supervisord started with pid %s', pid)
|
6295
|
+
|
6296
|
+
def _cleanup_pidfile(self) -> None:
|
6297
|
+
if self._unlink_pidfile:
|
6298
|
+
try_unlink(self._config.pidfile)
|
6299
|
+
|
6300
|
+
#
|
6301
|
+
|
6302
|
+
def _clear_auto_child_logdir(self) -> None:
|
6303
|
+
# must be called after realize()
|
6304
|
+
child_logdir = self._config.child_logdir
|
6305
|
+
if child_logdir == '/dev/null':
|
6306
|
+
return
|
6307
|
+
|
6308
|
+
fnre = re.compile(rf'.+?---{self._config.identifier}-\S+\.log\.?\d{{0,4}}')
|
6309
|
+
try:
|
6310
|
+
filenames = os.listdir(child_logdir)
|
6311
|
+
except OSError:
|
6312
|
+
log.warning('Could not clear child_log dir')
|
6313
|
+
return
|
6314
|
+
|
6315
|
+
for filename in filenames:
|
6316
|
+
if fnre.match(filename):
|
6317
|
+
pathname = os.path.join(child_logdir, filename)
|
6318
|
+
try:
|
6319
|
+
os.remove(pathname)
|
6320
|
+
except OSError:
|
6321
|
+
log.warning('Failed to clean up %r', pathname)
|
6322
|
+
|
6323
|
+
#
|
6324
|
+
|
6325
|
+
def _daemonize(self) -> None:
|
6326
|
+
for dl in self._daemonize_listeners:
|
6327
|
+
dl.before_daemonize()
|
6328
|
+
|
6329
|
+
self._do_daemonize()
|
6330
|
+
|
6331
|
+
for dl in self._daemonize_listeners:
|
6332
|
+
dl.after_daemonize()
|
6333
|
+
|
6334
|
+
def _do_daemonize(self) -> None:
|
6335
|
+
# To daemonize, we need to become the leader of our own session (process) group. If we do not, signals sent to
|
6336
|
+
# our parent process will also be sent to us. This might be bad because signals such as SIGINT can be sent to
|
6337
|
+
# our parent process during normal (uninteresting) operations such as when we press Ctrl-C in the parent
|
6338
|
+
# terminal window to escape from a logtail command. To disassociate ourselves from our parent's session group we
|
6339
|
+
# use os.setsid. It means "set session id", which has the effect of disassociating a process from is current
|
6340
|
+
# session and process group and setting itself up as a new session leader.
|
6341
|
+
#
|
6342
|
+
# Unfortunately we cannot call setsid if we're already a session group leader, so we use "fork" to make a copy
|
6343
|
+
# of ourselves that is guaranteed to not be a session group leader.
|
6344
|
+
#
|
6345
|
+
# We also change directories, set stderr and stdout to null, and change our umask.
|
6346
|
+
#
|
6347
|
+
# This explanation was (gratefully) garnered from
|
6348
|
+
# http://www.cems.uwe.ac.uk/~irjohnso/coursenotes/lrc/system/daemons/d3.htm
|
6349
|
+
|
6350
|
+
pid = os.fork()
|
6351
|
+
if pid != 0:
|
6352
|
+
# Parent
|
6353
|
+
log.debug('supervisord forked; parent exiting')
|
6354
|
+
real_exit(0)
|
6355
|
+
|
6356
|
+
# Child
|
6357
|
+
log.info('daemonizing the supervisord process')
|
6358
|
+
if self._config.directory:
|
6359
|
+
try:
|
6360
|
+
os.chdir(self._config.directory)
|
6361
|
+
except OSError as err:
|
6362
|
+
log.critical("can't chdir into %r: %s", self._config.directory, err)
|
6363
|
+
else:
|
6364
|
+
log.info('set current directory: %r', self._config.directory)
|
6365
|
+
|
6366
|
+
os.dup2(0, os.open('/dev/null', os.O_RDONLY))
|
6367
|
+
os.dup2(1, os.open('/dev/null', os.O_WRONLY))
|
6368
|
+
os.dup2(2, os.open('/dev/null', os.O_WRONLY))
|
6369
|
+
|
6370
|
+
# XXX Stevens, in his Advanced Unix book, section 13.3 (page 417) recommends calling umask(0) and closing unused
|
6371
|
+
# file descriptors. In his Network Programming book, he additionally recommends ignoring SIGHUP and forking
|
6372
|
+
# again after the setsid() call, for obscure SVR4 reasons.
|
6373
|
+
os.setsid()
|
6374
|
+
os.umask(self._config.umask)
|
6375
|
+
|
6376
|
+
|
6377
|
+
########################################
|
6378
|
+
# ../spawning.py
|
6379
|
+
|
6380
|
+
|
6381
|
+
@dc.dataclass(frozen=True)
|
6382
|
+
class SpawnedProcess:
|
6383
|
+
pid: int
|
6384
|
+
pipes: ProcessPipes
|
6385
|
+
dispatchers: Dispatchers
|
6386
|
+
|
6387
|
+
|
6388
|
+
class ProcessSpawnError(RuntimeError):
|
6389
|
+
pass
|
6390
|
+
|
6391
|
+
|
6392
|
+
class ProcessSpawning:
|
6393
|
+
@property
|
6394
|
+
@abc.abstractmethod
|
6395
|
+
def process(self) -> Process:
|
6396
|
+
raise NotImplementedError
|
6397
|
+
|
6398
|
+
#
|
6399
|
+
|
6400
|
+
@abc.abstractmethod
|
6401
|
+
def spawn(self) -> SpawnedProcess: # Raises[ProcessSpawnError]
|
6402
|
+
raise NotImplementedError
|
6403
|
+
|
6404
|
+
|
6405
|
+
########################################
|
6406
|
+
# ../supervisor.py
|
6407
|
+
|
6408
|
+
|
6409
|
+
##
|
6410
|
+
|
6411
|
+
|
6412
|
+
class SignalHandler:
|
6413
|
+
def __init__(
|
6414
|
+
self,
|
6415
|
+
*,
|
6416
|
+
context: ServerContextImpl,
|
6417
|
+
signal_receiver: SignalReceiver,
|
6418
|
+
process_groups: ProcessGroupManager,
|
6419
|
+
) -> None:
|
6420
|
+
super().__init__()
|
6421
|
+
|
6422
|
+
self._context = context
|
6423
|
+
self._signal_receiver = signal_receiver
|
6424
|
+
self._process_groups = process_groups
|
6425
|
+
|
6426
|
+
def set_signals(self) -> None:
|
6427
|
+
self._signal_receiver.install(
|
6428
|
+
signal.SIGTERM,
|
6429
|
+
signal.SIGINT,
|
6430
|
+
signal.SIGQUIT,
|
6431
|
+
signal.SIGHUP,
|
6432
|
+
signal.SIGCHLD,
|
6433
|
+
signal.SIGUSR2,
|
6434
|
+
)
|
6435
|
+
|
6436
|
+
def handle_signals(self) -> None:
|
6437
|
+
sig = self._signal_receiver.get_signal()
|
6438
|
+
if not sig:
|
6439
|
+
return
|
6440
|
+
|
6441
|
+
if sig in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
|
6442
|
+
log.warning('received %s indicating exit request', sig_name(sig))
|
6443
|
+
self._context.set_state(SupervisorState.SHUTDOWN)
|
6444
|
+
|
6445
|
+
elif sig == signal.SIGHUP:
|
6446
|
+
if self._context.state == SupervisorState.SHUTDOWN:
|
6447
|
+
log.warning('ignored %s indicating restart request (shutdown in progress)', sig_name(sig)) # noqa
|
6448
|
+
else:
|
6449
|
+
log.warning('received %s indicating restart request', sig_name(sig)) # noqa
|
6450
|
+
self._context.set_state(SupervisorState.RESTARTING)
|
6451
|
+
|
6452
|
+
elif sig == signal.SIGCHLD:
|
6453
|
+
log.debug('received %s indicating a child quit', sig_name(sig))
|
6454
|
+
|
6455
|
+
elif sig == signal.SIGUSR2:
|
6456
|
+
log.info('received %s indicating log reopen request', sig_name(sig))
|
6457
|
+
|
6458
|
+
for p in self._process_groups.all_processes():
|
6459
|
+
for d in p.get_dispatchers():
|
6460
|
+
if isinstance(d, OutputDispatcher):
|
6461
|
+
d.reopen_logs()
|
6462
|
+
|
6463
|
+
else:
|
6464
|
+
log.debug('received %s indicating nothing', sig_name(sig))
|
6465
|
+
|
6466
|
+
|
6467
|
+
##
|
6468
|
+
|
6469
|
+
|
6470
|
+
class ProcessGroupFactory(Func1[ProcessGroupConfig, ProcessGroup]):
|
6471
|
+
pass
|
6472
|
+
|
6473
|
+
|
6474
|
+
class Supervisor:
|
6475
|
+
def __init__(
|
6476
|
+
self,
|
6477
|
+
*,
|
6478
|
+
context: ServerContextImpl,
|
6479
|
+
poller: Poller,
|
6480
|
+
process_groups: ProcessGroupManager,
|
6481
|
+
signal_handler: SignalHandler,
|
6482
|
+
event_callbacks: EventCallbacks,
|
6483
|
+
process_group_factory: ProcessGroupFactory,
|
6484
|
+
pid_history: PidHistory,
|
6485
|
+
setup: SupervisorSetup,
|
6486
|
+
) -> None:
|
6487
|
+
super().__init__()
|
6488
|
+
|
6489
|
+
self._context = context
|
6490
|
+
self._poller = poller
|
6491
|
+
self._process_groups = process_groups
|
6492
|
+
self._signal_handler = signal_handler
|
5851
6493
|
self._event_callbacks = event_callbacks
|
6494
|
+
self._process_group_factory = process_group_factory
|
6495
|
+
self._pid_history = pid_history
|
6496
|
+
self._setup = setup
|
6497
|
+
|
6498
|
+
self._ticks: ta.Dict[int, float] = {}
|
6499
|
+
self._stop_groups: ta.Optional[ta.List[ProcessGroup]] = None # list used for priority ordered shutdown
|
6500
|
+
self._stopping = False # set after we detect that we are handling a stop request
|
6501
|
+
self._last_shutdown_report = 0. # throttle for delayed process error reports at stop
|
6502
|
+
|
6503
|
+
#
|
6504
|
+
|
6505
|
+
@property
|
6506
|
+
def context(self) -> ServerContextImpl:
|
6507
|
+
return self._context
|
6508
|
+
|
6509
|
+
def get_state(self) -> SupervisorState:
|
6510
|
+
return self._context.state
|
6511
|
+
|
6512
|
+
#
|
6513
|
+
|
6514
|
+
def add_process_group(self, config: ProcessGroupConfig) -> bool:
|
6515
|
+
if self._process_groups.get(config.name) is not None:
|
6516
|
+
return False
|
6517
|
+
|
6518
|
+
group = check_isinstance(self._process_group_factory(config), ProcessGroup)
|
6519
|
+
for process in group:
|
6520
|
+
process.after_setuid()
|
6521
|
+
|
6522
|
+
self._process_groups.add(group)
|
5852
6523
|
|
5853
|
-
|
6524
|
+
return True
|
5854
6525
|
|
5855
|
-
def
|
5856
|
-
|
6526
|
+
def remove_process_group(self, name: str) -> bool:
|
6527
|
+
if self._process_groups[name].get_unstopped_processes():
|
6528
|
+
return False
|
5857
6529
|
|
5858
|
-
|
5859
|
-
return self._by_name[name]
|
6530
|
+
self._process_groups.remove(name)
|
5860
6531
|
|
5861
|
-
|
5862
|
-
return len(self._by_name)
|
6532
|
+
return True
|
5863
6533
|
|
5864
|
-
|
5865
|
-
return iter(self._by_name.values())
|
6534
|
+
#
|
5866
6535
|
|
5867
|
-
def
|
5868
|
-
|
6536
|
+
def shutdown_report(self) -> ta.List[Process]:
|
6537
|
+
unstopped: ta.List[Process] = []
|
5869
6538
|
|
5870
|
-
|
5871
|
-
|
5872
|
-
raise KeyError(f'Process group already exists: {name}')
|
6539
|
+
for group in self._process_groups:
|
6540
|
+
unstopped.extend(group.get_unstopped_processes())
|
5873
6541
|
|
5874
|
-
|
6542
|
+
if unstopped:
|
6543
|
+
# throttle 'waiting for x to die' reports
|
6544
|
+
now = time.time()
|
6545
|
+
if now > (self._last_shutdown_report + 3): # every 3 secs
|
6546
|
+
names = [as_string(p.config.name) for p in unstopped]
|
6547
|
+
namestr = ', '.join(names)
|
6548
|
+
log.info('waiting for %s to die', namestr)
|
6549
|
+
self._last_shutdown_report = now
|
6550
|
+
for proc in unstopped:
|
6551
|
+
log.debug('%s state: %s', proc.config.name, proc.get_state().name)
|
5875
6552
|
|
5876
|
-
|
6553
|
+
return unstopped
|
5877
6554
|
|
5878
|
-
|
5879
|
-
group = self._by_name[name]
|
6555
|
+
#
|
5880
6556
|
|
5881
|
-
|
6557
|
+
def main(self, **kwargs: ta.Any) -> None:
|
6558
|
+
self._setup.setup()
|
6559
|
+
try:
|
6560
|
+
self.run(**kwargs)
|
6561
|
+
finally:
|
6562
|
+
self._setup.cleanup()
|
5882
6563
|
|
5883
|
-
|
6564
|
+
def run(
|
6565
|
+
self,
|
6566
|
+
*,
|
6567
|
+
callback: ta.Optional[ta.Callable[['Supervisor'], bool]] = None,
|
6568
|
+
) -> None:
|
6569
|
+
self._process_groups.clear()
|
6570
|
+
self._stop_groups = None # clear
|
5884
6571
|
|
5885
|
-
self._event_callbacks.
|
6572
|
+
self._event_callbacks.clear()
|
5886
6573
|
|
5887
|
-
|
5888
|
-
|
5889
|
-
|
6574
|
+
try:
|
6575
|
+
for config in self._context.config.groups or []:
|
6576
|
+
self.add_process_group(config)
|
6577
|
+
|
6578
|
+
self._signal_handler.set_signals()
|
6579
|
+
|
6580
|
+
self._event_callbacks.notify(SupervisorRunningEvent())
|
6581
|
+
|
6582
|
+
while True:
|
6583
|
+
if callback is not None and not callback(self):
|
6584
|
+
break
|
6585
|
+
|
6586
|
+
self._run_once()
|
6587
|
+
|
6588
|
+
finally:
|
6589
|
+
self._poller.close()
|
6590
|
+
|
6591
|
+
#
|
6592
|
+
|
6593
|
+
def _run_once(self) -> None:
|
6594
|
+
self._poll()
|
6595
|
+
self._reap()
|
6596
|
+
self._signal_handler.handle_signals()
|
6597
|
+
self._tick()
|
6598
|
+
|
6599
|
+
if self._context.state < SupervisorState.RUNNING:
|
6600
|
+
self._ordered_stop_groups_phase_2()
|
6601
|
+
|
6602
|
+
def _ordered_stop_groups_phase_1(self) -> None:
|
6603
|
+
if self._stop_groups:
|
6604
|
+
# stop the last group (the one with the "highest" priority)
|
6605
|
+
self._stop_groups[-1].stop_all()
|
6606
|
+
|
6607
|
+
def _ordered_stop_groups_phase_2(self) -> None:
|
6608
|
+
# after phase 1 we've transitioned and reaped, let's see if we can remove the group we stopped from the
|
6609
|
+
# stop_groups queue.
|
6610
|
+
if self._stop_groups:
|
6611
|
+
# pop the last group (the one with the "highest" priority)
|
6612
|
+
group = self._stop_groups.pop()
|
6613
|
+
if group.get_unstopped_processes():
|
6614
|
+
# if any processes in the group aren't yet in a stopped state, we're not yet done shutting this group
|
6615
|
+
# down, so push it back on to the end of the stop group queue
|
6616
|
+
self._stop_groups.append(group)
|
6617
|
+
|
6618
|
+
def get_dispatchers(self) -> Dispatchers:
|
6619
|
+
return Dispatchers(
|
6620
|
+
d
|
6621
|
+
for p in self._process_groups.all_processes()
|
6622
|
+
for d in p.get_dispatchers()
|
6623
|
+
)
|
6624
|
+
|
6625
|
+
def _poll(self) -> None:
|
6626
|
+
dispatchers = self.get_dispatchers()
|
6627
|
+
|
6628
|
+
sorted_groups = list(self._process_groups)
|
6629
|
+
sorted_groups.sort()
|
6630
|
+
|
6631
|
+
if self._context.state < SupervisorState.RUNNING:
|
6632
|
+
if not self._stopping:
|
6633
|
+
# first time, set the stopping flag, do a notification and set stop_groups
|
6634
|
+
self._stopping = True
|
6635
|
+
self._stop_groups = sorted_groups[:]
|
6636
|
+
self._event_callbacks.notify(SupervisorStoppingEvent())
|
6637
|
+
|
6638
|
+
self._ordered_stop_groups_phase_1()
|
6639
|
+
|
6640
|
+
if not self.shutdown_report():
|
6641
|
+
# if there are no unstopped processes (we're done killing everything), it's OK to shutdown or reload
|
6642
|
+
raise ExitNow
|
6643
|
+
|
6644
|
+
for fd, dispatcher in dispatchers.items():
|
6645
|
+
if dispatcher.readable():
|
6646
|
+
self._poller.register_readable(fd)
|
6647
|
+
if dispatcher.writable():
|
6648
|
+
self._poller.register_writable(fd)
|
6649
|
+
|
6650
|
+
timeout = 1 # this cannot be fewer than the smallest TickEvent (5)
|
6651
|
+
r, w = self._poller.poll(timeout)
|
6652
|
+
|
6653
|
+
for fd in r:
|
6654
|
+
if fd in dispatchers:
|
6655
|
+
try:
|
6656
|
+
dispatcher = dispatchers[fd]
|
6657
|
+
log.debug('read event caused by %r', dispatcher)
|
6658
|
+
dispatcher.handle_read_event()
|
6659
|
+
if not dispatcher.readable():
|
6660
|
+
self._poller.unregister_readable(fd)
|
6661
|
+
except ExitNow:
|
6662
|
+
raise
|
6663
|
+
except Exception: # noqa
|
6664
|
+
dispatchers[fd].handle_error()
|
6665
|
+
else:
|
6666
|
+
# if the fd is not in combined map, we should unregister it. otherwise, it will be polled every
|
6667
|
+
# time, which may cause 100% cpu usage
|
6668
|
+
log.debug('unexpected read event from fd %r', fd)
|
6669
|
+
try:
|
6670
|
+
self._poller.unregister_readable(fd)
|
6671
|
+
except Exception: # noqa
|
6672
|
+
pass
|
6673
|
+
|
6674
|
+
for fd in w:
|
6675
|
+
if fd in dispatchers:
|
6676
|
+
try:
|
6677
|
+
dispatcher = dispatchers[fd]
|
6678
|
+
log.debug('write event caused by %r', dispatcher)
|
6679
|
+
dispatcher.handle_write_event()
|
6680
|
+
if not dispatcher.writable():
|
6681
|
+
self._poller.unregister_writable(fd)
|
6682
|
+
except ExitNow:
|
6683
|
+
raise
|
6684
|
+
except Exception: # noqa
|
6685
|
+
dispatchers[fd].handle_error()
|
6686
|
+
else:
|
6687
|
+
log.debug('unexpected write event from fd %r', fd)
|
6688
|
+
try:
|
6689
|
+
self._poller.unregister_writable(fd)
|
6690
|
+
except Exception: # noqa
|
6691
|
+
pass
|
6692
|
+
|
6693
|
+
for group in sorted_groups:
|
6694
|
+
for process in group:
|
6695
|
+
process.transition()
|
6696
|
+
|
6697
|
+
def _reap(self, *, once: bool = False, depth: int = 0) -> None:
|
6698
|
+
if depth >= 100:
|
6699
|
+
return
|
6700
|
+
|
6701
|
+
pid, sts = self._context.waitpid()
|
6702
|
+
if not pid:
|
6703
|
+
return
|
6704
|
+
|
6705
|
+
process = self._pid_history.get(pid, None)
|
6706
|
+
if process is None:
|
6707
|
+
_, msg = decode_wait_status(check_not_none(sts))
|
6708
|
+
log.info('reaped unknown pid %s (%s)', pid, msg)
|
6709
|
+
else:
|
6710
|
+
process.finish(check_not_none(sts))
|
6711
|
+
del self._pid_history[pid]
|
6712
|
+
|
6713
|
+
if not once:
|
6714
|
+
# keep reaping until no more kids to reap, but don't recurse infinitely
|
6715
|
+
self._reap(once=False, depth=depth + 1)
|
6716
|
+
|
6717
|
+
def _tick(self, now: ta.Optional[float] = None) -> None:
|
6718
|
+
"""Send one or more 'tick' events when the timeslice related to the period for the event type rolls over"""
|
6719
|
+
|
6720
|
+
if now is None:
|
6721
|
+
# now won't be None in unit tests
|
6722
|
+
now = time.time()
|
6723
|
+
|
6724
|
+
for event in TICK_EVENTS:
|
6725
|
+
period = event.period
|
6726
|
+
|
6727
|
+
last_tick = self._ticks.get(period)
|
6728
|
+
if last_tick is None:
|
6729
|
+
# we just started up
|
6730
|
+
last_tick = self._ticks[period] = timeslice(period, now)
|
6731
|
+
|
6732
|
+
this_tick = timeslice(period, now)
|
6733
|
+
if this_tick != last_tick:
|
6734
|
+
self._ticks[period] = this_tick
|
6735
|
+
self._event_callbacks.notify(event(this_tick, self))
|
5890
6736
|
|
5891
6737
|
|
5892
6738
|
########################################
|
5893
|
-
# ../
|
6739
|
+
# ../processesimpl.py
|
5894
6740
|
|
5895
6741
|
|
5896
|
-
|
6742
|
+
class ProcessSpawningFactory(Func1[Process, ProcessSpawning]):
|
6743
|
+
pass
|
5897
6744
|
|
5898
6745
|
|
5899
6746
|
##
|
@@ -5909,19 +6756,22 @@ class ProcessImpl(Process):
|
|
5909
6756
|
*,
|
5910
6757
|
context: ServerContext,
|
5911
6758
|
event_callbacks: EventCallbacks,
|
5912
|
-
|
5913
|
-
inherited_fds: ta.Optional[InheritedFds] = None,
|
6759
|
+
process_spawning_factory: ProcessSpawningFactory,
|
5914
6760
|
) -> None:
|
5915
6761
|
super().__init__()
|
5916
6762
|
|
5917
6763
|
self._config = config
|
5918
6764
|
self._group = group
|
6765
|
+
|
5919
6766
|
self._context = context
|
5920
6767
|
self._event_callbacks = event_callbacks
|
5921
|
-
self._inherited_fds = InheritedFds(frozenset(inherited_fds or []))
|
5922
6768
|
|
5923
|
-
self.
|
5924
|
-
|
6769
|
+
self._spawning = process_spawning_factory(self)
|
6770
|
+
|
6771
|
+
#
|
6772
|
+
|
6773
|
+
self._dispatchers = Dispatchers([])
|
6774
|
+
self._pipes = ProcessPipes()
|
5925
6775
|
|
5926
6776
|
self._state = ProcessState.STOPPED
|
5927
6777
|
self._pid = 0 # 0 when not running
|
@@ -5938,144 +6788,47 @@ class ProcessImpl(Process):
|
|
5938
6788
|
|
5939
6789
|
self._backoff = 0 # backoff counter (to startretries)
|
5940
6790
|
|
5941
|
-
self._exitstatus: ta.Optional[int] = None # status attached to dead process by finish()
|
5942
|
-
self._spawn_err: ta.Optional[str] = None # error message attached by spawn() if any
|
5943
|
-
|
5944
|
-
@property
|
5945
|
-
def pid(self) -> int:
|
5946
|
-
return self._pid
|
5947
|
-
|
5948
|
-
@property
|
5949
|
-
def group(self) -> ProcessGroup:
|
5950
|
-
return self._group
|
5951
|
-
|
5952
|
-
@property
|
5953
|
-
def config(self) -> ProcessConfig:
|
5954
|
-
return self._config
|
5955
|
-
|
5956
|
-
@property
|
5957
|
-
def context(self) -> ServerContext:
|
5958
|
-
return self._context
|
5959
|
-
|
5960
|
-
@property
|
5961
|
-
def state(self) -> ProcessState:
|
5962
|
-
return self._state
|
5963
|
-
|
5964
|
-
@property
|
5965
|
-
def backoff(self) -> int:
|
5966
|
-
return self._backoff
|
5967
|
-
|
5968
|
-
def get_dispatchers(self) -> ta.Mapping[int, Dispatcher]:
|
5969
|
-
return self._dispatchers
|
5970
|
-
|
5971
|
-
def remove_logs(self) -> None:
|
5972
|
-
for dispatcher in self._dispatchers.values():
|
5973
|
-
if hasattr(dispatcher, 'remove_logs'):
|
5974
|
-
dispatcher.remove_logs()
|
5975
|
-
|
5976
|
-
def reopen_logs(self) -> None:
|
5977
|
-
for dispatcher in self._dispatchers.values():
|
5978
|
-
if hasattr(dispatcher, 'reopen_logs'):
|
5979
|
-
dispatcher.reopen_logs()
|
5980
|
-
|
5981
|
-
def drain(self) -> None:
|
5982
|
-
for dispatcher in self._dispatchers.values():
|
5983
|
-
# note that we *must* call readable() for every dispatcher, as it may have side effects for a given
|
5984
|
-
# dispatcher (eg. call handle_listener_state_change for event listener processes)
|
5985
|
-
if dispatcher.readable():
|
5986
|
-
dispatcher.handle_read_event()
|
5987
|
-
if dispatcher.writable():
|
5988
|
-
dispatcher.handle_write_event()
|
5989
|
-
|
5990
|
-
def write(self, chars: ta.Union[bytes, str]) -> None:
|
5991
|
-
if not self.pid or self._killing:
|
5992
|
-
raise OSError(errno.EPIPE, 'Process already closed')
|
5993
|
-
|
5994
|
-
stdin_fd = self._pipes['stdin']
|
5995
|
-
if stdin_fd is None:
|
5996
|
-
raise OSError(errno.EPIPE, 'Process has no stdin channel')
|
5997
|
-
|
5998
|
-
dispatcher = check_isinstance(self._dispatchers[stdin_fd], InputDispatcher)
|
5999
|
-
if dispatcher.closed:
|
6000
|
-
raise OSError(errno.EPIPE, "Process' stdin channel is closed")
|
6001
|
-
|
6002
|
-
dispatcher.write(chars)
|
6003
|
-
dispatcher.flush() # this must raise EPIPE if the pipe is closed
|
6004
|
-
|
6005
|
-
def _get_execv_args(self) -> ta.Tuple[str, ta.Sequence[str]]:
|
6006
|
-
"""
|
6007
|
-
Internal: turn a program name into a file name, using $PATH, make sure it exists / is executable, raising a
|
6008
|
-
ProcessError if not
|
6009
|
-
"""
|
6010
|
-
|
6011
|
-
try:
|
6012
|
-
commandargs = shlex.split(self._config.command)
|
6013
|
-
except ValueError as e:
|
6014
|
-
raise BadCommandError(f"can't parse command {self._config.command!r}: {e}") # noqa
|
6791
|
+
self._exitstatus: ta.Optional[int] = None # status attached to dead process by finish()
|
6792
|
+
self._spawn_err: ta.Optional[str] = None # error message attached by spawn() if any
|
6015
6793
|
|
6016
|
-
|
6017
|
-
program = commandargs[0]
|
6018
|
-
else:
|
6019
|
-
raise BadCommandError('command is empty')
|
6794
|
+
#
|
6020
6795
|
|
6021
|
-
|
6022
|
-
|
6023
|
-
try:
|
6024
|
-
st = os.stat(filename)
|
6025
|
-
except OSError:
|
6026
|
-
st = None
|
6796
|
+
def __repr__(self) -> str:
|
6797
|
+
return f'<Subprocess at {id(self)} with name {self._config.name} in state {self.get_state().name}>'
|
6027
6798
|
|
6028
|
-
|
6029
|
-
path = get_path()
|
6030
|
-
found = None
|
6031
|
-
st = None
|
6032
|
-
for dir in path: # noqa
|
6033
|
-
found = os.path.join(dir, program)
|
6034
|
-
try:
|
6035
|
-
st = os.stat(found)
|
6036
|
-
except OSError:
|
6037
|
-
pass
|
6038
|
-
else:
|
6039
|
-
break
|
6040
|
-
if st is None:
|
6041
|
-
filename = program
|
6042
|
-
else:
|
6043
|
-
filename = found # type: ignore
|
6799
|
+
#
|
6044
6800
|
|
6045
|
-
|
6046
|
-
|
6047
|
-
|
6801
|
+
@property
|
6802
|
+
def name(self) -> str:
|
6803
|
+
return self._config.name
|
6804
|
+
|
6805
|
+
@property
|
6806
|
+
def config(self) -> ProcessConfig:
|
6807
|
+
return self._config
|
6048
6808
|
|
6049
|
-
|
6809
|
+
@property
|
6810
|
+
def group(self) -> ProcessGroup:
|
6811
|
+
return self._group
|
6050
6812
|
|
6051
|
-
|
6052
|
-
|
6053
|
-
|
6054
|
-
return False
|
6813
|
+
@property
|
6814
|
+
def pid(self) -> int:
|
6815
|
+
return self._pid
|
6055
6816
|
|
6056
|
-
|
6057
|
-
if new_state == ProcessState.BACKOFF:
|
6058
|
-
now = time.time()
|
6059
|
-
self._backoff += 1
|
6060
|
-
self._delay = now + self._backoff
|
6817
|
+
#
|
6061
6818
|
|
6062
|
-
|
6063
|
-
|
6064
|
-
|
6065
|
-
self._event_callbacks.notify(event)
|
6819
|
+
@property
|
6820
|
+
def context(self) -> ServerContext:
|
6821
|
+
return self._context
|
6066
6822
|
|
6067
|
-
|
6823
|
+
@property
|
6824
|
+
def state(self) -> ProcessState:
|
6825
|
+
return self._state
|
6068
6826
|
|
6069
|
-
|
6070
|
-
|
6071
|
-
|
6072
|
-
allowable_states = ' '.join(s.name for s in states)
|
6073
|
-
process_name = as_string(self._config.name)
|
6074
|
-
raise RuntimeError('Assertion failed for %s: %s not in %s' % (process_name, current_state, allowable_states)) # noqa
|
6827
|
+
@property
|
6828
|
+
def backoff(self) -> int:
|
6829
|
+
return self._backoff
|
6075
6830
|
|
6076
|
-
|
6077
|
-
self._spawn_err = msg
|
6078
|
-
log.info('_spawn_err: %s', msg)
|
6831
|
+
#
|
6079
6832
|
|
6080
6833
|
def spawn(self) -> ta.Optional[int]:
|
6081
6834
|
process_name = as_string(self._config.name)
|
@@ -6084,6 +6837,13 @@ class ProcessImpl(Process):
|
|
6084
6837
|
log.warning('process \'%s\' already running', process_name)
|
6085
6838
|
return None
|
6086
6839
|
|
6840
|
+
self.check_in_state(
|
6841
|
+
ProcessState.EXITED,
|
6842
|
+
ProcessState.FATAL,
|
6843
|
+
ProcessState.BACKOFF,
|
6844
|
+
ProcessState.STOPPED,
|
6845
|
+
)
|
6846
|
+
|
6087
6847
|
self._killing = False
|
6088
6848
|
self._spawn_err = None
|
6089
6849
|
self._exitstatus = None
|
@@ -6092,183 +6852,73 @@ class ProcessImpl(Process):
|
|
6092
6852
|
|
6093
6853
|
self._last_start = time.time()
|
6094
6854
|
|
6095
|
-
self._check_in_state(
|
6096
|
-
ProcessState.EXITED,
|
6097
|
-
ProcessState.FATAL,
|
6098
|
-
ProcessState.BACKOFF,
|
6099
|
-
ProcessState.STOPPED,
|
6100
|
-
)
|
6101
|
-
|
6102
6855
|
self.change_state(ProcessState.STARTING)
|
6103
6856
|
|
6104
6857
|
try:
|
6105
|
-
|
6106
|
-
except
|
6107
|
-
|
6108
|
-
self.
|
6109
|
-
self.
|
6110
|
-
return None
|
6111
|
-
|
6112
|
-
try:
|
6113
|
-
self._dispatchers, self._pipes = self._make_dispatchers() # type: ignore
|
6114
|
-
except OSError as why:
|
6115
|
-
code = why.args[0]
|
6116
|
-
if code == errno.EMFILE:
|
6117
|
-
# too many file descriptors open
|
6118
|
-
msg = f"too many open files to spawn '{process_name}'"
|
6119
|
-
else:
|
6120
|
-
msg = f"unknown error making dispatchers for '{process_name}': {errno.errorcode.get(code, code)}"
|
6121
|
-
self._record_spawn_err(msg)
|
6122
|
-
self._check_in_state(ProcessState.STARTING)
|
6123
|
-
self.change_state(ProcessState.BACKOFF)
|
6124
|
-
return None
|
6125
|
-
|
6126
|
-
try:
|
6127
|
-
pid = os.fork()
|
6128
|
-
except OSError as why:
|
6129
|
-
code = why.args[0]
|
6130
|
-
if code == errno.EAGAIN:
|
6131
|
-
# process table full
|
6132
|
-
msg = f'Too many processes in process table to spawn \'{process_name}\''
|
6133
|
-
else:
|
6134
|
-
msg = f'unknown error during fork for \'{process_name}\': {errno.errorcode.get(code, code)}'
|
6135
|
-
self._record_spawn_err(msg)
|
6136
|
-
self._check_in_state(ProcessState.STARTING)
|
6858
|
+
sp = self._spawning.spawn()
|
6859
|
+
except ProcessSpawnError as err:
|
6860
|
+
log.exception('Spawn error')
|
6861
|
+
self._spawn_err = err.args[0]
|
6862
|
+
self.check_in_state(ProcessState.STARTING)
|
6137
6863
|
self.change_state(ProcessState.BACKOFF)
|
6138
|
-
close_parent_pipes(self._pipes)
|
6139
|
-
close_child_pipes(self._pipes)
|
6140
|
-
return None
|
6141
|
-
|
6142
|
-
if pid != 0:
|
6143
|
-
return self._spawn_as_parent(pid)
|
6144
|
-
|
6145
|
-
else:
|
6146
|
-
self._spawn_as_child(filename, argv)
|
6147
6864
|
return None
|
6148
6865
|
|
6149
|
-
|
6150
|
-
use_stderr = not self._config.redirect_stderr
|
6151
|
-
|
6152
|
-
p = make_pipes(use_stderr)
|
6153
|
-
stdout_fd, stderr_fd, stdin_fd = p['stdout'], p['stderr'], p['stdin']
|
6866
|
+
log.info("Spawned: '%s' with pid %s", self.name, sp.pid)
|
6154
6867
|
|
6155
|
-
|
6868
|
+
self._pid = sp.pid
|
6869
|
+
self._pipes = sp.pipes
|
6870
|
+
self._dispatchers = sp.dispatchers
|
6156
6871
|
|
6157
|
-
|
6158
|
-
event_callbacks=self._event_callbacks,
|
6159
|
-
)
|
6872
|
+
self._delay = time.time() + self.config.startsecs
|
6160
6873
|
|
6161
|
-
|
6162
|
-
if stdout_fd is not None:
|
6163
|
-
etype = ProcessCommunicationStdoutEvent
|
6164
|
-
dispatchers[stdout_fd] = OutputDispatcher(
|
6165
|
-
self,
|
6166
|
-
etype,
|
6167
|
-
stdout_fd,
|
6168
|
-
**dispatcher_kw,
|
6169
|
-
)
|
6874
|
+
return sp.pid
|
6170
6875
|
|
6171
|
-
|
6172
|
-
|
6173
|
-
dispatchers[stderr_fd] = OutputDispatcher(
|
6174
|
-
self,
|
6175
|
-
etype,
|
6176
|
-
stderr_fd,
|
6177
|
-
**dispatcher_kw,
|
6178
|
-
)
|
6876
|
+
def get_dispatchers(self) -> Dispatchers:
|
6877
|
+
return self._dispatchers
|
6179
6878
|
|
6180
|
-
|
6181
|
-
|
6182
|
-
|
6183
|
-
'stdin',
|
6184
|
-
stdin_fd,
|
6185
|
-
**dispatcher_kw,
|
6186
|
-
)
|
6879
|
+
def write(self, chars: ta.Union[bytes, str]) -> None:
|
6880
|
+
if not self.pid or self._killing:
|
6881
|
+
raise OSError(errno.EPIPE, 'Process already closed')
|
6187
6882
|
|
6188
|
-
|
6883
|
+
stdin_fd = self._pipes.stdin
|
6884
|
+
if stdin_fd is None:
|
6885
|
+
raise OSError(errno.EPIPE, 'Process has no stdin channel')
|
6189
6886
|
|
6190
|
-
|
6191
|
-
|
6192
|
-
|
6193
|
-
close_child_pipes(self._pipes)
|
6194
|
-
log.info('spawned: \'%s\' with pid %s', as_string(self._config.name), pid)
|
6195
|
-
self._spawn_err = None
|
6196
|
-
self._delay = time.time() + self._config.startsecs
|
6197
|
-
self.context.pid_history[pid] = self
|
6198
|
-
return pid
|
6199
|
-
|
6200
|
-
def _prepare_child_fds(self) -> None:
|
6201
|
-
os.dup2(self._pipes['child_stdin'], 0)
|
6202
|
-
os.dup2(self._pipes['child_stdout'], 1)
|
6203
|
-
if self._config.redirect_stderr:
|
6204
|
-
os.dup2(self._pipes['child_stdout'], 2)
|
6205
|
-
else:
|
6206
|
-
os.dup2(self._pipes['child_stderr'], 2)
|
6887
|
+
dispatcher = check_isinstance(self._dispatchers[stdin_fd], InputDispatcher)
|
6888
|
+
if dispatcher.closed:
|
6889
|
+
raise OSError(errno.EPIPE, "Process' stdin channel is closed")
|
6207
6890
|
|
6208
|
-
|
6209
|
-
|
6210
|
-
continue
|
6211
|
-
close_fd(i)
|
6891
|
+
dispatcher.write(chars)
|
6892
|
+
dispatcher.flush() # this must raise EPIPE if the pipe is closed
|
6212
6893
|
|
6213
|
-
|
6214
|
-
try:
|
6215
|
-
# prevent child from receiving signals sent to the parent by calling os.setpgrp to create a new process
|
6216
|
-
# group for the child; this prevents, for instance, the case of child processes being sent a SIGINT when
|
6217
|
-
# running supervisor in foreground mode and Ctrl-C in the terminal window running supervisord is pressed.
|
6218
|
-
# Presumably it also prevents HUP, etc received by supervisord from being sent to children.
|
6219
|
-
os.setpgrp()
|
6894
|
+
#
|
6220
6895
|
|
6221
|
-
|
6222
|
-
|
6896
|
+
def change_state(self, new_state: ProcessState, expected: bool = True) -> bool:
|
6897
|
+
old_state = self._state
|
6898
|
+
if new_state is old_state:
|
6899
|
+
return False
|
6223
6900
|
|
6224
|
-
|
6225
|
-
|
6226
|
-
|
6227
|
-
|
6228
|
-
|
6229
|
-
os.write(2, as_bytes('supervisor: ' + msg))
|
6230
|
-
return # finally clause will exit the child process
|
6901
|
+
self._state = new_state
|
6902
|
+
if new_state == ProcessState.BACKOFF:
|
6903
|
+
now = time.time()
|
6904
|
+
self._backoff += 1
|
6905
|
+
self._delay = now + self._backoff
|
6231
6906
|
|
6232
|
-
|
6233
|
-
|
6234
|
-
|
6235
|
-
|
6236
|
-
if self._group:
|
6237
|
-
env['SUPERVISOR_GROUP_NAME'] = self._group.config.name
|
6238
|
-
if self._config.environment is not None:
|
6239
|
-
env.update(self._config.environment)
|
6240
|
-
|
6241
|
-
# change directory
|
6242
|
-
cwd = self._config.directory
|
6243
|
-
try:
|
6244
|
-
if cwd is not None:
|
6245
|
-
os.chdir(os.path.expanduser(cwd))
|
6246
|
-
except OSError as why:
|
6247
|
-
code = errno.errorcode.get(why.args[0], why.args[0])
|
6248
|
-
msg = f"couldn't chdir to {cwd}: {code}\n"
|
6249
|
-
os.write(2, as_bytes('supervisor: ' + msg))
|
6250
|
-
return # finally clause will exit the child process
|
6907
|
+
event_class = PROCESS_STATE_EVENT_MAP.get(new_state)
|
6908
|
+
if event_class is not None:
|
6909
|
+
event = event_class(self, old_state, expected)
|
6910
|
+
self._event_callbacks.notify(event)
|
6251
6911
|
|
6252
|
-
|
6253
|
-
try:
|
6254
|
-
if self._config.umask is not None:
|
6255
|
-
os.umask(self._config.umask)
|
6256
|
-
os.execve(filename, list(argv), env)
|
6257
|
-
except OSError as why:
|
6258
|
-
code = errno.errorcode.get(why.args[0], why.args[0])
|
6259
|
-
msg = f"couldn't exec {argv[0]}: {code}\n"
|
6260
|
-
os.write(2, as_bytes('supervisor: ' + msg))
|
6261
|
-
except Exception: # noqa
|
6262
|
-
(file, fun, line), t, v, tbinfo = compact_traceback()
|
6263
|
-
error = f'{t}, {v}: file: {file} line: {line}'
|
6264
|
-
msg = f"couldn't exec {filename}: {error}\n"
|
6265
|
-
os.write(2, as_bytes('supervisor: ' + msg))
|
6912
|
+
return True
|
6266
6913
|
|
6267
|
-
|
6914
|
+
def check_in_state(self, *states: ProcessState) -> None:
|
6915
|
+
if self._state not in states:
|
6916
|
+
raise ProcessStateError(
|
6917
|
+
f'Check failed for {self._config.name}: '
|
6918
|
+
f'{self._state.name} not in {" ".join(s.name for s in states)}',
|
6919
|
+
)
|
6268
6920
|
|
6269
|
-
|
6270
|
-
os.write(2, as_bytes('supervisor: child process was not spawned\n'))
|
6271
|
-
real_exit(127) # exit process with code for spawn failure
|
6921
|
+
#
|
6272
6922
|
|
6273
6923
|
def _check_and_adjust_for_system_clock_rollback(self, test_time):
|
6274
6924
|
"""
|
@@ -6314,7 +6964,7 @@ class ProcessImpl(Process):
|
|
6314
6964
|
self._delay = 0
|
6315
6965
|
self._backoff = 0
|
6316
6966
|
self._system_stop = True
|
6317
|
-
self.
|
6967
|
+
self.check_in_state(ProcessState.BACKOFF)
|
6318
6968
|
self.change_state(ProcessState.FATAL)
|
6319
6969
|
|
6320
6970
|
def kill(self, sig: int) -> ta.Optional[str]:
|
@@ -6358,7 +7008,7 @@ class ProcessImpl(Process):
|
|
6358
7008
|
self._killing = True
|
6359
7009
|
self._delay = now + self._config.stopwaitsecs
|
6360
7010
|
# we will already be in the STOPPING state if we're doing a SIGKILL as a result of overrunning stopwaitsecs
|
6361
|
-
self.
|
7011
|
+
self.check_in_state(ProcessState.RUNNING, ProcessState.STARTING, ProcessState.STOPPING)
|
6362
7012
|
self.change_state(ProcessState.STOPPING)
|
6363
7013
|
|
6364
7014
|
pid = self.pid
|
@@ -6403,7 +7053,7 @@ class ProcessImpl(Process):
|
|
6403
7053
|
|
6404
7054
|
log.debug('sending %s (pid %s) sig %s', process_name, self.pid, sig_name(sig))
|
6405
7055
|
|
6406
|
-
self.
|
7056
|
+
self.check_in_state(ProcessState.RUNNING, ProcessState.STARTING, ProcessState.STOPPING)
|
6407
7057
|
|
6408
7058
|
try:
|
6409
7059
|
try:
|
@@ -6432,7 +7082,7 @@ class ProcessImpl(Process):
|
|
6432
7082
|
def finish(self, sts: int) -> None:
|
6433
7083
|
"""The process was reaped and we need to report and manage its state."""
|
6434
7084
|
|
6435
|
-
self.drain()
|
7085
|
+
self._dispatchers.drain()
|
6436
7086
|
|
6437
7087
|
es, msg = decode_wait_status(sts)
|
6438
7088
|
|
@@ -6463,7 +7113,7 @@ class ProcessImpl(Process):
|
|
6463
7113
|
self._exitstatus = es
|
6464
7114
|
|
6465
7115
|
fmt, args = 'stopped: %s (%s)', (process_name, msg)
|
6466
|
-
self.
|
7116
|
+
self.check_in_state(ProcessState.STOPPING)
|
6467
7117
|
self.change_state(ProcessState.STOPPED)
|
6468
7118
|
if exit_expected:
|
6469
7119
|
log.info(fmt, *args)
|
@@ -6474,7 +7124,7 @@ class ProcessImpl(Process):
|
|
6474
7124
|
# the program did not stay up long enough to make it to RUNNING implies STARTING -> BACKOFF
|
6475
7125
|
self._exitstatus = None
|
6476
7126
|
self._spawn_err = 'Exited too quickly (process log may have details)'
|
6477
|
-
self.
|
7127
|
+
self.check_in_state(ProcessState.STARTING)
|
6478
7128
|
self.change_state(ProcessState.BACKOFF)
|
6479
7129
|
log.warning('exited: %s (%s)', process_name, msg + '; not expected')
|
6480
7130
|
|
@@ -6490,7 +7140,7 @@ class ProcessImpl(Process):
|
|
6490
7140
|
if self._state == ProcessState.STARTING:
|
6491
7141
|
self.change_state(ProcessState.RUNNING)
|
6492
7142
|
|
6493
|
-
self.
|
7143
|
+
self.check_in_state(ProcessState.RUNNING)
|
6494
7144
|
|
6495
7145
|
if exit_expected:
|
6496
7146
|
# expected exit code
|
@@ -6504,19 +7154,8 @@ class ProcessImpl(Process):
|
|
6504
7154
|
|
6505
7155
|
self._pid = 0
|
6506
7156
|
close_parent_pipes(self._pipes)
|
6507
|
-
self._pipes =
|
6508
|
-
self._dispatchers =
|
6509
|
-
|
6510
|
-
def set_uid(self) -> ta.Optional[str]:
|
6511
|
-
if self._config.uid is None:
|
6512
|
-
return None
|
6513
|
-
msg = drop_privileges(self._config.uid)
|
6514
|
-
return msg
|
6515
|
-
|
6516
|
-
def __repr__(self) -> str:
|
6517
|
-
# repr can't return anything other than a native string, but the name might be unicode - a problem on Python 2.
|
6518
|
-
name = self._config.name
|
6519
|
-
return f'<Subprocess at {id(self)} with name {name} in state {self.get_state().name}>'
|
7157
|
+
self._pipes = ProcessPipes()
|
7158
|
+
self._dispatchers = Dispatchers([])
|
6520
7159
|
|
6521
7160
|
def get_state(self) -> ProcessState:
|
6522
7161
|
return self._state
|
@@ -6558,7 +7197,7 @@ class ProcessImpl(Process):
|
|
6558
7197
|
# proc.config.startsecs,
|
6559
7198
|
self._delay = 0
|
6560
7199
|
self._backoff = 0
|
6561
|
-
self.
|
7200
|
+
self.check_in_state(ProcessState.STARTING)
|
6562
7201
|
self.change_state(ProcessState.RUNNING)
|
6563
7202
|
msg = ('entered RUNNING state, process has stayed up for > than %s seconds (startsecs)' % self._config.startsecs) # noqa
|
6564
7203
|
logger.info('success: %s %s', process_name, msg)
|
@@ -6578,7 +7217,7 @@ class ProcessImpl(Process):
|
|
6578
7217
|
log.warning('killing \'%s\' (%s) with SIGKILL', process_name, self.pid)
|
6579
7218
|
self.kill(signal.SIGKILL)
|
6580
7219
|
|
6581
|
-
def
|
7220
|
+
def after_setuid(self) -> None:
|
6582
7221
|
# temporary logfiles which are erased at start time
|
6583
7222
|
# get_autoname = self.context.get_auto_child_log_name # noqa
|
6584
7223
|
# sid = self.context.config.identifier # noqa
|
@@ -6591,372 +7230,319 @@ class ProcessImpl(Process):
|
|
6591
7230
|
|
6592
7231
|
|
6593
7232
|
########################################
|
6594
|
-
# ../
|
6595
|
-
|
6596
|
-
|
6597
|
-
##
|
6598
|
-
|
6599
|
-
|
6600
|
-
class SignalHandler:
|
6601
|
-
def __init__(
|
6602
|
-
self,
|
6603
|
-
*,
|
6604
|
-
context: ServerContextImpl,
|
6605
|
-
signal_receiver: SignalReceiver,
|
6606
|
-
process_groups: ProcessGroups,
|
6607
|
-
) -> None:
|
6608
|
-
super().__init__()
|
6609
|
-
|
6610
|
-
self._context = context
|
6611
|
-
self._signal_receiver = signal_receiver
|
6612
|
-
self._process_groups = process_groups
|
6613
|
-
|
6614
|
-
def set_signals(self) -> None:
|
6615
|
-
self._signal_receiver.install(
|
6616
|
-
signal.SIGTERM,
|
6617
|
-
signal.SIGINT,
|
6618
|
-
signal.SIGQUIT,
|
6619
|
-
signal.SIGHUP,
|
6620
|
-
signal.SIGCHLD,
|
6621
|
-
signal.SIGUSR2,
|
6622
|
-
)
|
6623
|
-
|
6624
|
-
def handle_signals(self) -> None:
|
6625
|
-
sig = self._signal_receiver.get_signal()
|
6626
|
-
if not sig:
|
6627
|
-
return
|
7233
|
+
# ../spawningimpl.py
|
6628
7234
|
|
6629
|
-
if sig in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
|
6630
|
-
log.warning('received %s indicating exit request', sig_name(sig))
|
6631
|
-
self._context.set_state(SupervisorState.SHUTDOWN)
|
6632
7235
|
|
6633
|
-
|
6634
|
-
|
6635
|
-
log.warning('ignored %s indicating restart request (shutdown in progress)', sig_name(sig)) # noqa
|
6636
|
-
else:
|
6637
|
-
log.warning('received %s indicating restart request', sig_name(sig)) # noqa
|
6638
|
-
self._context.set_state(SupervisorState.RESTARTING)
|
7236
|
+
class OutputDispatcherFactory(Func3[Process, ta.Type[ProcessCommunicationEvent], int, OutputDispatcher]):
|
7237
|
+
pass
|
6639
7238
|
|
6640
|
-
elif sig == signal.SIGCHLD:
|
6641
|
-
log.debug('received %s indicating a child quit', sig_name(sig))
|
6642
7239
|
|
6643
|
-
|
6644
|
-
|
7240
|
+
class InputDispatcherFactory(Func3[Process, str, int, InputDispatcher]):
|
7241
|
+
pass
|
6645
7242
|
|
6646
|
-
for group in self._process_groups:
|
6647
|
-
group.reopen_logs()
|
6648
7243
|
|
6649
|
-
|
6650
|
-
log.debug('received %s indicating nothing', sig_name(sig))
|
7244
|
+
InheritedFds = ta.NewType('InheritedFds', ta.FrozenSet[int])
|
6651
7245
|
|
6652
7246
|
|
6653
7247
|
##
|
6654
7248
|
|
6655
7249
|
|
-
-
-
-class Supervisor:
+class ProcessSpawningImpl(ProcessSpawning):
    def __init__(
            self,
+            process: Process,
            *,
-
-
-process_groups: ProcessGroups,
-signal_handler: SignalHandler,
-event_callbacks: EventCallbacks,
-process_group_factory: ProcessGroupFactory,
-) -> None:
-super().__init__()
-
-self._context = context
-self._poller = poller
-self._process_groups = process_groups
-self._signal_handler = signal_handler
-self._event_callbacks = event_callbacks
-self._process_group_factory = process_group_factory
-
-self._ticks: ta.Dict[int, float] = {}
-self._stop_groups: ta.Optional[ta.List[ProcessGroup]] = None  # list used for priority ordered shutdown
-self._stopping = False  # set after we detect that we are handling a stop request
-self._last_shutdown_report = 0.  # throttle for delayed process error reports at stop
-
-#
-
-@property
-def context(self) -> ServerContextImpl:
-return self._context
-
-def get_state(self) -> SupervisorState:
-return self._context.state
+            server_config: ServerConfig,
+            pid_history: PidHistory,

-
-
-class DiffToActive(ta.NamedTuple):
-added: ta.List[ProcessGroupConfig]
-changed: ta.List[ProcessGroupConfig]
-removed: ta.List[ProcessGroupConfig]
-
-def diff_to_active(self) -> DiffToActive:
-new = self._context.config.groups or []
-cur = [group.config for group in self._process_groups]
+            output_dispatcher_factory: OutputDispatcherFactory,
+            input_dispatcher_factory: InputDispatcherFactory,

-
-
+            inherited_fds: ta.Optional[InheritedFds] = None,
+    ) -> None:
+        super().__init__()

-
-removed = [cand for cand in cur if cand.name not in newdict]
+        self._process = process

-
+        self._server_config = server_config
+        self._pid_history = pid_history

-
+        self._output_dispatcher_factory = output_dispatcher_factory
+        self._input_dispatcher_factory = input_dispatcher_factory

-
-if self._process_groups.get(config.name) is not None:
-return False
+        self._inherited_fds = InheritedFds(frozenset(inherited_fds or []))

-
-group.after_setuid()
+    #

-
+    @property
+    def process(self) -> Process:
+        return self._process

-
+    @property
+    def config(self) -> ProcessConfig:
+        return self._process.config

-
-
-
+    @property
+    def group(self) -> ProcessGroup:
+        return self._process.group

-
+    #

-
+    def spawn(self) -> SpawnedProcess:  # Raises[ProcessSpawnError]
+        try:
+            exe, argv = self._get_execv_args()
+        except ProcessError as exc:
+            raise ProcessSpawnError(exc.args[0]) from exc

-
-
-
-
-
+        try:
+            pipes = make_process_pipes(not self.config.redirect_stderr)
+        except OSError as exc:
+            code = exc.args[0]
+            if code == errno.EMFILE:
+                # too many file descriptors open
+                msg = f"Too many open files to spawn '{self.process.name}'"
+            else:
+                msg = f"Unknown error making pipes for '{self.process.name}': {errno.errorcode.get(code, code)}"
+            raise ProcessSpawnError(msg) from exc

-
-
+        try:
+            dispatchers = self._make_dispatchers(pipes)
+        except Exception as exc:  # noqa
+            close_pipes(pipes)
+            raise ProcessSpawnError(f"Unknown error making dispatchers for '{self.process.name}': {exc}") from exc

-
-
+        try:
+            pid = os.fork()
+        except OSError as exc:
+            code = exc.args[0]
+            if code == errno.EAGAIN:
+                # process table full
+                msg = f"Too many processes in process table to spawn '{self.process.name}'"
+            else:
+                msg = f"Unknown error during fork for '{self.process.name}': {errno.errorcode.get(code, code)}"
+            err = ProcessSpawnError(msg)
+            close_pipes(pipes)
+            raise err from exc

-if
-
-
-
-
-
-
-
-for proc in unstopped:
-log.debug('%s state: %s', proc.config.name, proc.get_state().name)
+        if pid != 0:
+            sp = SpawnedProcess(
+                pid,
+                pipes,
+                dispatchers,
+            )
+            self._spawn_as_parent(sp)
+            return sp

-
+        else:
+            self._spawn_as_child(
+                exe,
+                argv,
+                pipes,
+            )
+            raise RuntimeError('Unreachable')  # noqa

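The `spawn()` method above follows the classic fork/exec split: the parent records the child and returns a handle, while the child must either exec successfully or exit without ever returning into supervisor code. A compressed, illustrative sketch of that shape (not the package's API):

```python
# Minimal sketch of the fork/exec split; names are illustrative only.
import os
import sys


def spawn(argv: list) -> int:
    pid = os.fork()
    if pid != 0:
        # Parent: just remember the child's pid and return to the event loop.
        return pid

    # Child: replace the process image; if exec fails, never fall back into
    # the parent's code -- report on fd 2 and exit with a distinctive code.
    try:
        os.execvp(argv[0], argv)
    finally:
        os.write(2, b'spawn: child process was not spawned\n')
        os._exit(127)


if __name__ == '__main__':
    child = spawn(['echo', 'hello'])
    os.waitpid(child, 0)
    print(f'reaped pid {child}', file=sys.stderr)
```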
-
+    def _get_execv_args(self) -> ta.Tuple[str, ta.Sequence[str]]:
+        """
+        Internal: turn a program name into a file name, using $PATH, make sure it exists / is executable, raising a
+        ProcessError if not
+        """

-
-
-
+        try:
+            args = shlex.split(self.config.command)
+        except ValueError as e:
+            raise BadCommandError(f"Can't parse command {self.config.command!r}: {e}")  # noqa

-
-
-
-
-self._context.cleanup_fds()
+        if args:
+            program = args[0]
+        else:
+            raise BadCommandError('Command is empty')

-
+        if '/' in program:
+            exe = program
+            try:
+                st = os.stat(exe)
+            except OSError:
+                st = None

-
-
+        else:
+            path = get_path()
+            found = None
+            st = None
+            for dir in path:  # noqa
+                found = os.path.join(dir, program)
+                try:
+                    st = os.stat(found)
+                except OSError:
+                    pass
+                else:
+                    break

-
-
-
-
+            if st is None:
+                exe = program
+            else:
+                exe = found  # type: ignore

-
-
-
-
-
-
-
+        # check_execv_args will raise a ProcessError if the execv args are bogus, we break it out into a separate
+        # options method call here only to service unit tests
+        check_execv_args(exe, args, st)
+
+        return exe, args
+
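`_get_execv_args()` above parses the configured command and resolves a bare program name against `$PATH`. A rough standalone sketch of that resolution step, assuming a plain `os.environ` lookup rather than the package's `get_path()`/`check_execv_args()` helpers:

```python
# Illustrative command resolution: split the command line, then search PATH
# for an executable file if the program name has no directory component.
import os
import shlex
import typing as ta


def resolve_command(command: str) -> ta.Tuple[str, ta.List[str]]:
    args = shlex.split(command)
    if not args:
        raise ValueError('Command is empty')

    program = args[0]
    if '/' in program:
        return program, args

    for d in os.environ.get('PATH', '').split(os.pathsep):
        candidate = os.path.join(d, program)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate, args

    raise FileNotFoundError(f"Can't find command {program!r}")


print(resolve_command('ls -la /tmp'))  # e.g. ('/bin/ls', ['ls', '-la', '/tmp'])
```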
+    def _make_dispatchers(self, pipes: ProcessPipes) -> Dispatchers:
+        dispatchers: ta.List[Dispatcher] = []
+
+        if pipes.stdout is not None:
+            dispatchers.append(check_isinstance(self._output_dispatcher_factory(
+                self.process,
+                ProcessCommunicationStdoutEvent,
+                pipes.stdout,
+            ), OutputDispatcher))
+
+        if pipes.stderr is not None:
+            dispatchers.append(check_isinstance(self._output_dispatcher_factory(
+                self.process,
+                ProcessCommunicationStderrEvent,
+                pipes.stderr,
+            ), OutputDispatcher))
+
+        if pipes.stdin is not None:
+            dispatchers.append(check_isinstance(self._input_dispatcher_factory(
+                self.process,
+                'stdin',
+                pipes.stdin,
+            ), InputDispatcher))

-
+        return Dispatchers(dispatchers)

-
-for config in self._context.config.groups or []:
-self.add_process_group(config)
+    #

-
+    def _spawn_as_parent(self, sp: SpawnedProcess) -> None:
+        close_child_pipes(sp.pipes)

-
-self._context.daemonize()
+        self._pid_history[sp.pid] = self.process

-
-self._context.write_pidfile()
+    #

-
+    def _spawn_as_child(
+            self,
+            exe: str,
+            argv: ta.Sequence[str],
+            pipes: ProcessPipes,
+    ) -> ta.NoReturn:
+        try:
+            # Prevent child from receiving signals sent to the parent by calling os.setpgrp to create a new process
+            # group for the child. This prevents, for instance, the case of child processes being sent a SIGINT when
+            # running supervisor in foreground mode and Ctrl-C in the terminal window running supervisord is pressed.
+            # Presumably it also prevents HUP, etc. received by supervisord from being sent to children.
+            os.setpgrp()

-
-if callback is not None and not callback(self):
-break
+            #

-
+            # After preparation sending to fd 2 will put this output in the stderr log.
+            self._prepare_child_fds(pipes)

-
-self._context.cleanup()
+            #

-
+            setuid_msg = self._set_uid()
+            if setuid_msg:
+                uid = self.config.uid
+                msg = f"Couldn't setuid to {uid}: {setuid_msg}\n"
+                os.write(2, as_bytes('supervisor: ' + msg))
+                raise RuntimeError(msg)

-
-self._poll()
-self._reap()
-self._signal_handler.handle_signals()
-self._tick()
+            #

-
-
+            env = os.environ.copy()
+            env['SUPERVISOR_ENABLED'] = '1'
+            env['SUPERVISOR_PROCESS_NAME'] = self.process.name
+            if self.group:
+                env['SUPERVISOR_GROUP_NAME'] = self.group.name
+            if self.config.environment is not None:
+                env.update(self.config.environment)

-
-if self._stop_groups:
-# stop the last group (the one with the "highest" priority)
-self._stop_groups[-1].stop_all()
+            #

-
-
-
-
-
-
-
-
-
-self._stop_groups.append(group)
+            cwd = self.config.directory
+            try:
+                if cwd is not None:
+                    os.chdir(os.path.expanduser(cwd))
+            except OSError as exc:
+                code = errno.errorcode.get(exc.args[0], exc.args[0])
+                msg = f"Couldn't chdir to {cwd}: {code}\n"
+                os.write(2, as_bytes('supervisor: ' + msg))
+                raise RuntimeError(msg) from exc

-
-combined_map = {}
-combined_map.update(self.get_process_map())
+            #

-
-
+            try:
+                if self.config.umask is not None:
+                    os.umask(self.config.umask)
+                os.execve(exe, list(argv), env)

-
-
-
-
-self._stop_groups = pgroups[:]
-self._event_callbacks.notify(SupervisorStoppingEvent())
+            except OSError as exc:
+                code = errno.errorcode.get(exc.args[0], exc.args[0])
+                msg = f"Couldn't exec {argv[0]}: {code}\n"
+                os.write(2, as_bytes('supervisor: ' + msg))

-
+            except Exception:  # noqa
+                (file, fun, line), t, v, tb = compact_traceback()
+                msg = f"Couldn't exec {exe}: {t}, {v}: file: {file} line: {line}\n"
+                os.write(2, as_bytes('supervisor: ' + msg))

-
-
-
+        finally:
+            os.write(2, as_bytes('supervisor: child process was not spawned\n'))
+            real_exit(127)  # exit process with code for spawn failure

-
-if dispatcher.readable():
-self._poller.register_readable(fd)
-if dispatcher.writable():
-self._poller.register_writable(fd)
+        raise RuntimeError('Unreachable')

-
-
+    def _prepare_child_fds(self, pipes: ProcessPipes) -> None:
+        os.dup2(check_not_none(pipes.child_stdin), 0)

-
-if fd in combined_map:
-try:
-dispatcher = combined_map[fd]
-log.debug('read event caused by %r', dispatcher)
-dispatcher.handle_read_event()
-if not dispatcher.readable():
-self._poller.unregister_readable(fd)
-except ExitNow:
-raise
-except Exception:  # noqa
-combined_map[fd].handle_error()
-else:
-# if the fd is not in combined_map, we should unregister it. otherwise, it will be polled every
-# time, which may cause 100% cpu usage
-log.debug('unexpected read event from fd %r', fd)
-try:
-self._poller.unregister_readable(fd)
-except Exception:  # noqa
-pass
+        os.dup2(check_not_none(pipes.child_stdout), 1)

-
-
-
-
-log.debug('write event caused by %r', dispatcher)
-dispatcher.handle_write_event()
-if not dispatcher.writable():
-self._poller.unregister_writable(fd)
-except ExitNow:
-raise
-except Exception:  # noqa
-combined_map[fd].handle_error()
-else:
-log.debug('unexpected write event from fd %r', fd)
-try:
-self._poller.unregister_writable(fd)
-except Exception:  # noqa
-pass
+        if self.config.redirect_stderr:
+            os.dup2(check_not_none(pipes.child_stdout), 2)
+        else:
+            os.dup2(check_not_none(pipes.child_stderr), 2)

-for
-
+        for i in range(3, self._server_config.minfds):
+            if i in self._inherited_fds:
+                continue
+            close_fd(i)

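`_prepare_child_fds()` above wires the pipe ends onto fds 0-2 in the forked child and then closes every other descriptor that was not explicitly inherited. An illustrative sketch of that preparation (the fd arguments and the 1024 ceiling are placeholders, not the package's configuration):

```python
# Sketch of child-side fd preparation: dup2 the pipe ends onto stdin/stdout/
# stderr, then close all remaining fds except the inherited ones. Intended to
# run inside a freshly forked child, never in the parent process.
import os
import typing as ta


def prepare_child_fds(
        child_stdin: int,
        child_stdout: int,
        child_stderr: ta.Optional[int],
        inherited_fds: ta.FrozenSet[int] = frozenset(),
        max_fd: int = 1024,
) -> None:
    os.dup2(child_stdin, 0)
    os.dup2(child_stdout, 1)
    # When stderr is redirected there is no separate stderr pipe: reuse stdout.
    os.dup2(child_stderr if child_stderr is not None else child_stdout, 2)

    for fd in range(3, max_fd):
        if fd in inherited_fds:
            continue
        try:
            os.close(fd)
        except OSError:
            pass  # fd was not open
```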
-def
-if
-return
+    def _set_uid(self) -> ta.Optional[str]:
+        if self.config.uid is None:
+            return None

-
-
-return
+        msg = drop_privileges(self.config.uid)
+        return msg

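`_set_uid()` above delegates to a `drop_privileges()` helper and returns an error message or `None`. A hedged sketch of what such a setuid step typically involves (this is not the package's implementation, only the standard POSIX pattern):

```python
# Illustrative privilege drop: resolve the target account, set supplementary
# groups and gid first, then setuid last, while the process is still root.
import grp
import os
import pwd
import typing as ta


def drop_privileges_sketch(user: ta.Union[int, str]) -> ta.Optional[str]:
    if os.geteuid() != 0:
        return "Can't drop privilege as nonroot user"

    try:
        pw = pwd.getpwuid(user) if isinstance(user, int) else pwd.getpwnam(user)
    except KeyError:
        return f"Can't find user {user!r}"

    groups = [g.gr_gid for g in grp.getgrall() if pw.pw_name in g.gr_mem]
    groups.insert(0, pw.pw_gid)

    os.setgroups(groups)
    os.setgid(pw.pw_gid)
    os.setuid(pw.pw_uid)
    return None
```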
-process = self._context.pid_history.get(pid, None)
-if process is None:
-_, msg = decode_wait_status(check_not_none(sts))
-log.info('reaped unknown pid %s (%s)', pid, msg)
-else:
-process.finish(check_not_none(sts))
-del self._context.pid_history[pid]

-
-# keep reaping until no more kids to reap, but don't recurse infinitely
-self._reap(once=False, depth=depth + 1)
+##

-def _tick(self, now: ta.Optional[float] = None) -> None:
-"""Send one or more 'tick' events when the timeslice related to the period for the event type rolls over"""

-
-
-
+def check_execv_args(
+        exe: str,
+        argv: ta.Sequence[str],
+        st: ta.Optional[os.stat_result],
+) -> None:
+    if st is None:
+        raise NotFoundError(f"Can't find command {exe!r}")

-
-
+    elif stat.S_ISDIR(st[stat.ST_MODE]):
+        raise NotExecutableError(f'Command at {exe!r} is a directory')

-
-
-# we just started up
-last_tick = self._ticks[period] = timeslice(period, now)
+    elif not (stat.S_IMODE(st[stat.ST_MODE]) & 0o111):
+        raise NotExecutableError(f'Command at {exe!r} is not executable')

-
-
-self._ticks[period] = this_tick
-self._event_callbacks.notify(event(this_tick, self))
+    elif not os.access(exe, os.X_OK):
+        raise NoPermissionError(f'No permission to run command {exe!r}')


########################################
# ../inject.py


-##
-
-
def bind_server(
        config: ServerConfig,
        *,
@@ -6966,7 +7552,12 @@ def bind_server(
    lst: ta.List[InjectorBindingOrBindings] = [
        inj.bind(config),

-inj.
+        inj.bind_array_type(DaemonizeListener, DaemonizeListeners),
+
+        inj.bind(SupervisorSetupImpl, singleton=True),
+        inj.bind(SupervisorSetup, to_key=SupervisorSetupImpl),
+
+        inj.bind(DaemonizeListener, array=True, to_key=Poller),

        inj.bind(ServerContextImpl, singleton=True),
        inj.bind(ServerContext, to_key=ServerContextImpl),
@@ -6976,11 +7567,18 @@ def bind_server(
        inj.bind(SignalReceiver, singleton=True),

        inj.bind(SignalHandler, singleton=True),
-inj.bind(
+        inj.bind(ProcessGroupManager, singleton=True),
        inj.bind(Supervisor, singleton=True),

-inj.
-
+        inj.bind(PidHistory()),
+
+        inj.bind_factory(ProcessGroupImpl, ProcessGroupFactory),
+        inj.bind_factory(ProcessImpl, ProcessFactory),
+
+        inj.bind_factory(ProcessSpawningImpl, ProcessSpawningFactory),
+
+        inj.bind_factory(OutputDispatcherImpl, OutputDispatcherFactory),
+        inj.bind_factory(InputDispatcherImpl, InputDispatcherFactory),
    ]

    #
@@ -6992,6 +7590,16 @@ def bind_server(

    #

+    if config.user is not None:
+        user = get_user(config.user)
+        lst.append(inj.bind(user, key=SupervisorUser))
+
+    #
+
+    lst.append(inj.bind(get_poller_impl(), key=Poller, singleton=True))
+
+    #
+
    return inj.as_bindings(*lst)


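The `bind_server()` wiring above routes interfaces such as `SupervisorSetup` to concrete implementations and registers factories for process groups, processes, spawning, and dispatchers. A toy sketch of that interface-to-implementation binding pattern follows; the container here is an assumption for illustration and is not the omlish injector API:

```python
# Toy dependency-injection container: bind a key to a provider callable, then
# resolve an interface key through to its implementation.
import typing as ta

T = ta.TypeVar('T')


class Container:
    def __init__(self) -> None:
        self._providers: ta.Dict[type, ta.Callable[[], ta.Any]] = {}

    def bind(self, key: ta.Type[T], provider: ta.Callable[[], T]) -> None:
        self._providers[key] = provider

    def provide(self, key: ta.Type[T]) -> T:
        return self._providers[key]()


class Setup:  # stand-in for an interface like SupervisorSetup
    def setup(self) -> None:
        raise NotImplementedError


class SetupImpl(Setup):  # stand-in for an implementation like SupervisorSetupImpl
    def setup(self) -> None:
        print('setting up')


c = Container()
c.bind(SetupImpl, SetupImpl)                  # concrete class bound to itself
c.bind(Setup, lambda: c.provide(SetupImpl))   # interface routed to the impl key
c.provide(Setup).setup()                      # prints "setting up"
```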