ominfra 0.0.0.dev127__py3-none-any.whl → 0.0.0.dev128__py3-none-any.whl

Files changed (36)
  1. ominfra/scripts/supervisor.py +723 -731
  2. ominfra/supervisor/configs.py +34 -11
  3. ominfra/supervisor/context.py +5 -9
  4. ominfra/supervisor/dispatchers.py +4 -3
  5. ominfra/supervisor/dispatchersimpl.py +10 -9
  6. ominfra/supervisor/groups.py +1 -1
  7. ominfra/supervisor/inject.py +5 -5
  8. ominfra/supervisor/main.py +2 -2
  9. ominfra/supervisor/pipes.py +15 -13
  10. ominfra/supervisor/poller.py +36 -35
  11. ominfra/supervisor/{processes.py → process.py} +2 -1
  12. ominfra/supervisor/{processesimpl.py → processimpl.py} +35 -40
  13. ominfra/supervisor/setup.py +1 -1
  14. ominfra/supervisor/setupimpl.py +4 -3
  15. ominfra/supervisor/spawning.py +2 -1
  16. ominfra/supervisor/spawningimpl.py +15 -12
  17. ominfra/supervisor/supervisor.py +16 -8
  18. ominfra/supervisor/types.py +7 -9
  19. ominfra/supervisor/utils/__init__.py +0 -0
  20. ominfra/supervisor/utils/diag.py +31 -0
  21. ominfra/supervisor/utils/fds.py +46 -0
  22. ominfra/supervisor/utils/fs.py +47 -0
  23. ominfra/supervisor/utils/os.py +45 -0
  24. ominfra/supervisor/utils/ostypes.py +9 -0
  25. ominfra/supervisor/utils/strings.py +105 -0
  26. ominfra/supervisor/{users.py → utils/users.py} +11 -8
  27. {ominfra-0.0.0.dev127.dist-info → ominfra-0.0.0.dev128.dist-info}/METADATA +3 -3
  28. {ominfra-0.0.0.dev127.dist-info → ominfra-0.0.0.dev128.dist-info}/RECORD +34 -29
  29. ominfra/supervisor/datatypes.py +0 -113
  30. ominfra/supervisor/utils.py +0 -206
  31. /ominfra/supervisor/{collections.py → utils/collections.py} +0 -0
  32. /ominfra/supervisor/{signals.py → utils/signals.py} +0 -0
  33. {ominfra-0.0.0.dev127.dist-info → ominfra-0.0.0.dev128.dist-info}/LICENSE +0 -0
  34. {ominfra-0.0.0.dev127.dist-info → ominfra-0.0.0.dev128.dist-info}/WHEEL +0 -0
  35. {ominfra-0.0.0.dev127.dist-info → ominfra-0.0.0.dev128.dist-info}/entry_points.txt +0 -0
  36. {ominfra-0.0.0.dev127.dist-info → ominfra-0.0.0.dev128.dist-info}/top_level.txt +0 -0
@@ -95,7 +95,7 @@ TomlParseFloat = ta.Callable[[str], ta.Any]
95
95
  TomlKey = ta.Tuple[str, ...]
96
96
  TomlPos = int # ta.TypeAlias
97
97
 
98
- # ../collections.py
98
+ # ../utils/collections.py
99
99
  K = ta.TypeVar('K')
100
100
  V = ta.TypeVar('V')
101
101
 
@@ -952,168 +952,6 @@ def toml_make_safe_parse_float(parse_float: TomlParseFloat) -> TomlParseFloat:
952
952
  return safe_parse_float
953
953
 
954
954
 
955
- ########################################
956
- # ../collections.py
957
-
958
-
959
- class KeyedCollectionAccessors(abc.ABC, ta.Generic[K, V]):
960
- @property
961
- @abc.abstractmethod
962
- def _by_key(self) -> ta.Mapping[K, V]:
963
- raise NotImplementedError
964
-
965
- def __iter__(self) -> ta.Iterator[V]:
966
- return iter(self._by_key.values())
967
-
968
- def __len__(self) -> int:
969
- return len(self._by_key)
970
-
971
- def __contains__(self, key: K) -> bool:
972
- return key in self._by_key
973
-
974
- def __getitem__(self, key: K) -> V:
975
- return self._by_key[key]
976
-
977
- def get(self, key: K, default: ta.Optional[V] = None) -> ta.Optional[V]:
978
- return self._by_key.get(key, default)
979
-
980
- def items(self) -> ta.Iterator[ta.Tuple[K, V]]:
981
- return iter(self._by_key.items())
982
-
983
-
984
- class KeyedCollection(KeyedCollectionAccessors[K, V]):
985
- def __init__(self, items: ta.Iterable[V]) -> None:
986
- super().__init__()
987
-
988
- by_key: ta.Dict[K, V] = {}
989
- for v in items:
990
- if (k := self._key(v)) in by_key:
991
- raise KeyError(f'key {k} of {v} already registered by {by_key[k]}')
992
- by_key[k] = v
993
- self.__by_key = by_key
994
-
995
- @property
996
- def _by_key(self) -> ta.Mapping[K, V]:
997
- return self.__by_key
998
-
999
- @abc.abstractmethod
1000
- def _key(self, v: V) -> K:
1001
- raise NotImplementedError
1002
-
1003
-
1004
- ########################################
1005
- # ../datatypes.py
1006
-
1007
-
1008
- class Automatic:
1009
- pass
1010
-
1011
-
1012
- class Syslog:
1013
- """TODO deprecated; remove this special 'syslog' filename in the future"""
1014
-
1015
-
1016
- LOGFILE_NONES = ('none', 'off', None)
1017
- LOGFILE_AUTOS = (Automatic, 'auto')
1018
- LOGFILE_SYSLOGS = (Syslog, 'syslog')
1019
-
1020
-
1021
- def logfile_name(val):
1022
- if hasattr(val, 'lower'):
1023
- coerced = val.lower()
1024
- else:
1025
- coerced = val
1026
-
1027
- if coerced in LOGFILE_NONES:
1028
- return None
1029
- elif coerced in LOGFILE_AUTOS:
1030
- return Automatic
1031
- elif coerced in LOGFILE_SYSLOGS:
1032
- return Syslog
1033
- else:
1034
- return existing_dirpath(val)
1035
-
1036
-
1037
- ##
1038
-
1039
-
1040
- def octal_type(arg: ta.Union[str, int]) -> int:
1041
- if isinstance(arg, int):
1042
- return arg
1043
- try:
1044
- return int(arg, 8)
1045
- except (TypeError, ValueError):
1046
- raise ValueError(f'{arg} can not be converted to an octal type') # noqa
1047
-
1048
-
1049
- def existing_directory(v: str) -> str:
1050
- nv = os.path.expanduser(v)
1051
- if os.path.isdir(nv):
1052
- return nv
1053
- raise ValueError(f'{v} is not an existing directory')
1054
-
1055
-
1056
- def existing_dirpath(v: str) -> str:
1057
- nv = os.path.expanduser(v)
1058
- dir = os.path.dirname(nv) # noqa
1059
- if not dir:
1060
- # relative pathname with no directory component
1061
- return nv
1062
- if os.path.isdir(dir):
1063
- return nv
1064
- raise ValueError(f'The directory named as part of the path {v} does not exist')
1065
-
1066
-
1067
- def logging_level(value: ta.Union[str, int]) -> int:
1068
- if isinstance(value, int):
1069
- return value
1070
- s = str(value).lower()
1071
- level = logging.getLevelNamesMapping().get(s.upper())
1072
- if level is None:
1073
- raise ValueError(f'bad logging level name {value!r}')
1074
- return level
1075
-
1076
-
1077
- class SuffixMultiplier:
1078
- # d is a dictionary of suffixes to integer multipliers. If no suffixes match, default is the multiplier. Matches
1079
- # are case insensitive. Return values are in the fundamental unit.
1080
- def __init__(self, d, default=1):
1081
- super().__init__()
1082
- self._d = d
1083
- self._default = default
1084
- # all keys must be the same size
1085
- self._keysz = None
1086
- for k in d:
1087
- if self._keysz is None:
1088
- self._keysz = len(k)
1089
- elif self._keysz != len(k): # type: ignore
1090
- raise ValueError(k)
1091
-
1092
- def __call__(self, v: ta.Union[str, int]) -> int:
1093
- if isinstance(v, int):
1094
- return v
1095
- v = v.lower()
1096
- for s, m in self._d.items():
1097
- if v[-self._keysz:] == s: # type: ignore
1098
- return int(v[:-self._keysz]) * m # type: ignore
1099
- return int(v) * self._default
1100
-
1101
-
1102
- byte_size = SuffixMultiplier({
1103
- 'kb': 1024,
1104
- 'mb': 1024 * 1024,
1105
- 'gb': 1024 * 1024 * 1024,
1106
- })
1107
-
1108
-
1109
- class RestartWhenExitUnexpected:
1110
- pass
1111
-
1112
-
1113
- class RestartUnconditionally:
1114
- pass
1115
-
1116
-
1117
955
  ########################################
1118
956
  # ../exceptions.py
1119
957
 
@@ -1209,7 +1047,206 @@ def drop_privileges(user: ta.Union[int, str, None]) -> ta.Optional[str]:
1209
1047
 
1210
1048
 
1211
1049
  ########################################
1212
- # ../signals.py
1050
+ # ../states.py
1051
+
1052
+
1053
+ ##
1054
+
1055
+
1056
+ class ProcessState(enum.IntEnum):
1057
+ STOPPED = 0
1058
+ STARTING = 10
1059
+ RUNNING = 20
1060
+ BACKOFF = 30
1061
+ STOPPING = 40
1062
+ EXITED = 100
1063
+ FATAL = 200
1064
+ UNKNOWN = 1000
1065
+
1066
+ @property
1067
+ def stopped(self) -> bool:
1068
+ return self in STOPPED_STATES
1069
+
1070
+ @property
1071
+ def running(self) -> bool:
1072
+ return self in RUNNING_STATES
1073
+
1074
+ @property
1075
+ def signalable(self) -> bool:
1076
+ return self in SIGNALABLE_STATES
1077
+
1078
+
1079
+ STOPPED_STATES = (
1080
+ ProcessState.STOPPED,
1081
+ ProcessState.EXITED,
1082
+ ProcessState.FATAL,
1083
+ ProcessState.UNKNOWN,
1084
+ )
1085
+
1086
+ RUNNING_STATES = (
1087
+ ProcessState.RUNNING,
1088
+ ProcessState.BACKOFF,
1089
+ ProcessState.STARTING,
1090
+ )
1091
+
1092
+ SIGNALABLE_STATES = (
1093
+ ProcessState.RUNNING,
1094
+ ProcessState.STARTING,
1095
+ ProcessState.STOPPING,
1096
+ )
1097
+
1098
+
1099
+ ##
1100
+
1101
+
1102
+ class SupervisorState(enum.IntEnum):
1103
+ FATAL = 2
1104
+ RUNNING = 1
1105
+ RESTARTING = 0
1106
+ SHUTDOWN = -1
1107
+
1108
+
1109
+ ########################################
1110
+ # ../utils/collections.py
1111
+
1112
+
1113
+ class KeyedCollectionAccessors(abc.ABC, ta.Generic[K, V]):
1114
+ @property
1115
+ @abc.abstractmethod
1116
+ def _by_key(self) -> ta.Mapping[K, V]:
1117
+ raise NotImplementedError
1118
+
1119
+ def __iter__(self) -> ta.Iterator[V]:
1120
+ return iter(self._by_key.values())
1121
+
1122
+ def __len__(self) -> int:
1123
+ return len(self._by_key)
1124
+
1125
+ def __contains__(self, key: K) -> bool:
1126
+ return key in self._by_key
1127
+
1128
+ def __getitem__(self, key: K) -> V:
1129
+ return self._by_key[key]
1130
+
1131
+ def get(self, key: K, default: ta.Optional[V] = None) -> ta.Optional[V]:
1132
+ return self._by_key.get(key, default)
1133
+
1134
+ def items(self) -> ta.Iterator[ta.Tuple[K, V]]:
1135
+ return iter(self._by_key.items())
1136
+
1137
+
1138
+ class KeyedCollection(KeyedCollectionAccessors[K, V]):
1139
+ def __init__(self, items: ta.Iterable[V]) -> None:
1140
+ super().__init__()
1141
+
1142
+ by_key: ta.Dict[K, V] = {}
1143
+ for v in items:
1144
+ if (k := self._key(v)) in by_key:
1145
+ raise KeyError(f'key {k} of {v} already registered by {by_key[k]}')
1146
+ by_key[k] = v
1147
+ self.__by_key = by_key
1148
+
1149
+ @property
1150
+ def _by_key(self) -> ta.Mapping[K, V]:
1151
+ return self.__by_key
1152
+
1153
+ @abc.abstractmethod
1154
+ def _key(self, v: V) -> K:
1155
+ raise NotImplementedError
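
The keyed-collection base above is meant to be subclassed with a `_key` implementation; a minimal usage sketch, assuming `KeyedCollection` is in scope and using a hypothetical `Widget` record:

    import dataclasses as dc

    @dc.dataclass(frozen=True)
    class Widget:
        name: str

    class WidgetCollection(KeyedCollection[str, Widget]):
        def _key(self, v: Widget) -> str:
            # keys must be unique; a duplicate raises KeyError in __init__
            return v.name

    ws = WidgetCollection([Widget('a'), Widget('b')])
    assert ws['a'].name == 'a' and len(ws) == 2 and 'b' in ws
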
1156
+
1157
+
1158
+ ########################################
1159
+ # ../utils/diag.py
1160
+
1161
+
1162
+ def compact_traceback() -> ta.Tuple[
1163
+ ta.Tuple[str, str, int],
1164
+ ta.Type[BaseException],
1165
+ BaseException,
1166
+ types.TracebackType,
1167
+ ]:
1168
+ t, v, tb = sys.exc_info()
1169
+ if not tb:
1170
+ raise RuntimeError('No traceback')
1171
+
1172
+ tbinfo = []
1173
+ while tb:
1174
+ tbinfo.append((
1175
+ tb.tb_frame.f_code.co_filename,
1176
+ tb.tb_frame.f_code.co_name,
1177
+ str(tb.tb_lineno),
1178
+ ))
1179
+ tb = tb.tb_next
1180
+
1181
+ # just to be safe
1182
+ del tb
1183
+
1184
+ file, function, line = tbinfo[-1]
1185
+ info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo]) # noqa
1186
+ return (file, function, line), t, v, info # type: ignore
1187
+
1188
+
1189
+ ########################################
1190
+ # ../utils/fs.py
1191
+
1192
+
1193
+ def try_unlink(path: str) -> bool:
1194
+ try:
1195
+ os.unlink(path)
1196
+ except OSError:
1197
+ return False
1198
+ return True
1199
+
1200
+
1201
+ def mktempfile(suffix: str, prefix: str, dir: str) -> str: # noqa
1202
+ fd, filename = tempfile.mkstemp(suffix, prefix, dir)
1203
+ os.close(fd)
1204
+ return filename
1205
+
1206
+
1207
+ def get_path() -> ta.Sequence[str]:
1208
+ """Return a list corresponding to $PATH, or a default."""
1209
+
1210
+ path = ['/bin', '/usr/bin', '/usr/local/bin']
1211
+ if 'PATH' in os.environ:
1212
+ p = os.environ['PATH']
1213
+ if p:
1214
+ path = p.split(os.pathsep)
1215
+ return path
1216
+
1217
+
1218
+ def check_existing_dir(v: str) -> str:
1219
+ nv = os.path.expanduser(v)
1220
+ if os.path.isdir(nv):
1221
+ return nv
1222
+ raise ValueError(f'{v} is not an existing directory')
1223
+
1224
+
1225
+ def check_path_with_existing_dir(v: str) -> str:
1226
+ nv = os.path.expanduser(v)
1227
+ dir = os.path.dirname(nv) # noqa
1228
+ if not dir:
1229
+ # relative pathname with no directory component
1230
+ return nv
1231
+ if os.path.isdir(dir):
1232
+ return nv
1233
+ raise ValueError(f'The directory named as part of the path {v} does not exist')
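
The path helpers above differ in what must already exist; a quick sketch (paths are illustrative):

    logfile = check_path_with_existing_dir('/tmp/supervisord.log')  # ok if /tmp exists; the file itself need not
    workdir = check_existing_dir('/tmp')                            # the directory itself must exist
    removed = try_unlink('/tmp/does-not-exist')                     # never raises; False when unlink fails
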
1234
+
1235
+
1236
+ ########################################
1237
+ # ../utils/ostypes.py
1238
+
1239
+
1240
+ Fd = ta.NewType('Fd', int)
1241
+ Pid = ta.NewType('Pid', int)
1242
+ Rc = ta.NewType('Rc', int)
1243
+
1244
+ Uid = ta.NewType('Uid', int)
1245
+ Gid = ta.NewType('Gid', int)
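
These `NewType` aliases are static-typing markers only; at runtime they are plain ints. A brief sketch of the intent, with a hypothetical function:

    import typing as ta

    Fd = ta.NewType('Fd', int)  # as defined above

    def close_quietly(fd: Fd) -> None:
        ...

    fd = Fd(3)          # explicit wrap; the runtime value is just the int 3
    close_quietly(fd)   # fine
    # close_quietly(3)  # a type checker flags this, though it still runs
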
1246
+
1247
+
1248
+ ########################################
1249
+ # ../utils/signals.py
1213
1250
 
1214
1251
 
1215
1252
  ##
@@ -1270,127 +1307,110 @@ class SignalReceiver:
1270
1307
 
1271
1308
 
1272
1309
  ########################################
1273
- # ../states.py
1310
+ # ../utils/strings.py
1274
1311
 
1275
1312
 
1276
1313
  ##
1277
1314
 
1278
1315
 
1279
- class ProcessState(enum.IntEnum):
1280
- STOPPED = 0
1281
- STARTING = 10
1282
- RUNNING = 20
1283
- BACKOFF = 30
1284
- STOPPING = 40
1285
- EXITED = 100
1286
- FATAL = 200
1287
- UNKNOWN = 1000
1288
-
1289
- @property
1290
- def stopped(self) -> bool:
1291
- return self in STOPPED_STATES
1292
-
1293
- @property
1294
- def running(self) -> bool:
1295
- return self in RUNNING_STATES
1296
-
1297
- @property
1298
- def signalable(self) -> bool:
1299
- return self in SIGNALABLE_STATES
1300
-
1301
-
1302
- STOPPED_STATES = (
1303
- ProcessState.STOPPED,
1304
- ProcessState.EXITED,
1305
- ProcessState.FATAL,
1306
- ProcessState.UNKNOWN,
1307
- )
1308
-
1309
- RUNNING_STATES = (
1310
- ProcessState.RUNNING,
1311
- ProcessState.BACKOFF,
1312
- ProcessState.STARTING,
1313
- )
1314
-
1315
- SIGNALABLE_STATES = (
1316
- ProcessState.RUNNING,
1317
- ProcessState.STARTING,
1318
- ProcessState.STOPPING,
1319
- )
1320
-
1321
-
1322
- ##
1316
+ def as_bytes(s: ta.Union[str, bytes], encoding: str = 'utf8') -> bytes:
1317
+ if isinstance(s, bytes):
1318
+ return s
1319
+ else:
1320
+ return s.encode(encoding)
1323
1321
 
1324
1322
 
1325
- class SupervisorState(enum.IntEnum):
1326
- FATAL = 2
1327
- RUNNING = 1
1328
- RESTARTING = 0
1329
- SHUTDOWN = -1
1323
+ @ta.overload
1324
+ def find_prefix_at_end(haystack: str, needle: str) -> int:
1325
+ ...
1330
1326
 
1331
1327
 
1332
- ########################################
1333
- # ../users.py
1328
+ @ta.overload
1329
+ def find_prefix_at_end(haystack: bytes, needle: bytes) -> int:
1330
+ ...
1334
1331
 
1335
1332
 
1336
- ##
1333
+ def find_prefix_at_end(haystack, needle):
1334
+ l = len(needle) - 1
1335
+ while l and not haystack.endswith(needle[:l]):
1336
+ l -= 1
1337
+ return l
1337
1338
 
1338
1339
 
1339
- def name_to_uid(name: str) -> int:
1340
- try:
1341
- uid = int(name)
1342
- except ValueError:
1343
- try:
1344
- pwdrec = pwd.getpwnam(name)
1345
- except KeyError:
1346
- raise ValueError(f'Invalid user name {name}') # noqa
1347
- uid = pwdrec[2]
1348
- else:
1349
- try:
1350
- pwd.getpwuid(uid) # check if uid is valid
1351
- except KeyError:
1352
- raise ValueError(f'Invalid user id {name}') # noqa
1353
- return uid
1340
+ ##
1354
1341
 
1355
1342
 
1356
- def name_to_gid(name: str) -> int:
1357
- try:
1358
- gid = int(name)
1359
- except ValueError:
1360
- try:
1361
- grprec = grp.getgrnam(name)
1362
- except KeyError:
1363
- raise ValueError(f'Invalid group name {name}') # noqa
1364
- gid = grprec[2]
1365
- else:
1366
- try:
1367
- grp.getgrgid(gid) # check if gid is valid
1368
- except KeyError:
1369
- raise ValueError(f'Invalid group id {name}') # noqa
1370
- return gid
1343
+ ANSI_ESCAPE_BEGIN = b'\x1b['
1344
+ ANSI_TERMINATORS = (b'H', b'f', b'A', b'B', b'C', b'D', b'R', b's', b'u', b'J', b'K', b'h', b'l', b'p', b'm')
1371
1345
 
1372
1346
 
1373
- def gid_for_uid(uid: int) -> int:
1374
- pwrec = pwd.getpwuid(uid)
1375
- return pwrec[3]
1347
+ def strip_escapes(s: bytes) -> bytes:
1348
+ """Remove all ANSI color escapes from the given string."""
1349
+
1350
+ result = b''
1351
+ show = 1
1352
+ i = 0
1353
+ l = len(s)
1354
+ while i < l:
1355
+ if show == 0 and s[i:i + 1] in ANSI_TERMINATORS:
1356
+ show = 1
1357
+ elif show:
1358
+ n = s.find(ANSI_ESCAPE_BEGIN, i)
1359
+ if n == -1:
1360
+ return result + s[i:]
1361
+ else:
1362
+ result = result + s[i:n]
1363
+ i = n
1364
+ show = 0
1365
+ i += 1
1366
+ return result
1376
1367
 
1377
1368
 
1378
1369
  ##
1379
1370
 
1380
1371
 
1381
- @dc.dataclass(frozen=True)
1382
- class User:
1383
- name: str
1384
- uid: int
1385
- gid: int
1372
+ class SuffixMultiplier:
1373
+ # d is a dictionary of suffixes to integer multipliers. If no suffixes match, default is the multiplier. Matches
1374
+ # are case insensitive. Return values are in the fundamental unit.
1375
+ def __init__(self, d, default=1):
1376
+ super().__init__()
1377
+ self._d = d
1378
+ self._default = default
1379
+ # all keys must be the same size
1380
+ self._keysz = None
1381
+ for k in d:
1382
+ if self._keysz is None:
1383
+ self._keysz = len(k)
1384
+ elif self._keysz != len(k): # type: ignore
1385
+ raise ValueError(k)
1386
1386
 
1387
+ def __call__(self, v: ta.Union[str, int]) -> int:
1388
+ if isinstance(v, int):
1389
+ return v
1390
+ v = v.lower()
1391
+ for s, m in self._d.items():
1392
+ if v[-self._keysz:] == s: # type: ignore
1393
+ return int(v[:-self._keysz]) * m # type: ignore
1394
+ return int(v) * self._default
1387
1395
 
1388
- def get_user(name: str) -> User:
1389
- return User(
1390
- name=name,
1391
- uid=(uid := name_to_uid(name)),
1392
- gid=gid_for_uid(uid),
1393
- )
1396
+
1397
+ parse_bytes_size = SuffixMultiplier({
1398
+ 'kb': 1024,
1399
+ 'mb': 1024 * 1024,
1400
+ 'gb': 1024 * 1024 * 1024,
1401
+ })
1402
+
1403
+
1404
+ #
1405
+
1406
+
1407
+ def parse_octal(arg: ta.Union[str, int]) -> int:
1408
+ if isinstance(arg, int):
1409
+ return arg
1410
+ try:
1411
+ return int(arg, 8)
1412
+ except (TypeError, ValueError):
1413
+ raise ValueError(f'{arg} can not be converted to an octal type') # noqa
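
A few illustrative calls to the parsing helpers above (values follow directly from the definitions):

    assert parse_bytes_size('50mb') == 50 * 1024 * 1024  # case-insensitive suffix match
    assert parse_bytes_size('512') == 512                 # no suffix -> default multiplier of 1
    assert parse_octal('022') == 0o22                     # strings are parsed base-8
    assert find_prefix_at_end(b'partial\x1b', b'\x1b[') == 1
    assert strip_escapes(b'\x1b[31mred\x1b[0m') == b'red'
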
1394
1414
 
1395
1415
 
1396
1416
  ########################################
@@ -2032,112 +2052,64 @@ def get_event_name_by_type(requested):
2032
2052
 
2033
2053
 
2034
2054
  ########################################
2035
- # ../setup.py
2036
-
2037
-
2038
- ##
2039
-
2040
-
2041
- SupervisorUser = ta.NewType('SupervisorUser', User)
2055
+ # ../utils/fds.py
2042
2056
 
2043
2057
 
2044
- ##
2058
+ class PipeFds(ta.NamedTuple):
2059
+ r: Fd
2060
+ w: Fd
2045
2061
 
2046
2062
 
2047
- class DaemonizeListener(abc.ABC): # noqa
2048
- def before_daemonize(self) -> None: # noqa
2049
- pass
2063
+ def make_pipe() -> PipeFds:
2064
+ return PipeFds(*os.pipe()) # type: ignore
2050
2065
 
2051
- def after_daemonize(self) -> None: # noqa
2052
- pass
2053
2066
 
2067
+ def read_fd(fd: Fd) -> bytes:
2068
+ try:
2069
+ data = os.read(fd, 2 << 16) # 128K
2070
+ except OSError as why:
2071
+ if why.args[0] not in (errno.EWOULDBLOCK, errno.EBADF, errno.EINTR):
2072
+ raise
2073
+ data = b''
2074
+ return data
2054
2075
 
2055
- DaemonizeListeners = ta.NewType('DaemonizeListeners', ta.Sequence[DaemonizeListener])
2056
2076
 
2077
+ def close_fd(fd: Fd) -> bool:
2078
+ try:
2079
+ os.close(fd)
2080
+ except OSError:
2081
+ return False
2082
+ return True
2057
2083
 
2058
- ##
2059
2084
 
2085
+ def is_fd_open(fd: Fd) -> bool:
2086
+ try:
2087
+ n = os.dup(fd)
2088
+ except OSError:
2089
+ return False
2090
+ os.close(n)
2091
+ return True
2060
2092
 
2061
- class SupervisorSetup(abc.ABC):
2062
- @abc.abstractmethod
2063
- def setup(self) -> None:
2064
- raise NotImplementedError
2065
2093
 
2066
- @abc.abstractmethod
2067
- def cleanup(self) -> None:
2068
- raise NotImplementedError
2094
+ def get_open_fds(limit: int) -> ta.FrozenSet[Fd]:
2095
+ return frozenset(fd for i in range(limit) if is_fd_open(fd := Fd(i)))
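
A short sketch of the fd helpers above in action (POSIX assumed, names in scope):

    import os

    r, w = make_pipe()              # PipeFds(r, w), both typed as Fd
    os.write(w, b'hello')
    assert read_fd(r) == b'hello'   # read_fd swallows EWOULDBLOCK/EBADF/EINTR and returns b''
    assert close_fd(w) and close_fd(r)
    assert not is_fd_open(w)        # os.dup on a closed fd fails, so this reports False
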
2069
2096
 
2070
2097
 
2071
2098
  ########################################
2072
- # ../utils.py
2073
-
2074
-
2075
- ##
2076
-
2077
-
2078
- def as_bytes(s: ta.Union[str, bytes], encoding: str = 'utf8') -> bytes:
2079
- if isinstance(s, bytes):
2080
- return s
2081
- else:
2082
- return s.encode(encoding)
2083
-
2084
-
2085
- def as_string(s: ta.Union[str, bytes], encoding: str = 'utf8') -> str:
2086
- if isinstance(s, str):
2087
- return s
2088
- else:
2089
- return s.decode(encoding)
2090
-
2091
-
2092
- def find_prefix_at_end(haystack: bytes, needle: bytes) -> int:
2093
- l = len(needle) - 1
2094
- while l and not haystack.endswith(needle[:l]):
2095
- l -= 1
2096
- return l
2099
+ # ../utils/os.py
2097
2100
 
2098
2101
 
2099
2102
  ##
2100
2103
 
2101
2104
 
2102
- def compact_traceback() -> ta.Tuple[
2103
- ta.Tuple[str, str, int],
2104
- ta.Type[BaseException],
2105
- BaseException,
2106
- types.TracebackType,
2107
- ]:
2108
- t, v, tb = sys.exc_info()
2109
- if not tb:
2110
- raise RuntimeError('No traceback')
2111
-
2112
- tbinfo = []
2113
- while tb:
2114
- tbinfo.append((
2115
- tb.tb_frame.f_code.co_filename,
2116
- tb.tb_frame.f_code.co_name,
2117
- str(tb.tb_lineno),
2118
- ))
2119
- tb = tb.tb_next
2120
-
2121
- # just to be safe
2122
- del tb
2123
-
2124
- file, function, line = tbinfo[-1]
2125
- info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo]) # noqa
2126
- return (file, function, line), t, v, info # type: ignore
2127
-
2128
-
2129
- class ExitNow(Exception): # noqa
2130
- pass
2131
-
2132
-
2133
- def real_exit(code: int) -> None:
2105
+ def real_exit(code: Rc) -> None:
2134
2106
  os._exit(code) # noqa
2135
2107
 
2136
2108
 
2137
2109
  ##
2138
2110
 
2139
2111
 
2140
- def decode_wait_status(sts: int) -> ta.Tuple[int, str]:
2112
+ def decode_wait_status(sts: int) -> ta.Tuple[Rc, str]:
2141
2113
  """
2142
2114
  Decode the status returned by wait() or waitpid().
2143
2115
 
@@ -2148,7 +2120,8 @@ def decode_wait_status(sts: int) -> ta.Tuple[int, str]:
2148
2120
  if os.WIFEXITED(sts):
2149
2121
  es = os.WEXITSTATUS(sts) & 0xffff
2150
2122
  msg = f'exit status {es}'
2151
- return es, msg
2123
+ return Rc(es), msg
2124
+
2152
2125
  elif os.WIFSIGNALED(sts):
2153
2126
  sig = os.WTERMSIG(sts)
2154
2127
  msg = f'terminated by {sig_name(sig)}'
@@ -2156,114 +2129,77 @@ def decode_wait_status(sts: int) -> ta.Tuple[int, str]:
2156
2129
  iscore = os.WCOREDUMP(sts)
2157
2130
  else:
2158
2131
  iscore = bool(sts & 0x80)
2159
- if iscore:
2160
- msg += ' (core dumped)'
2161
- return -1, msg
2162
- else:
2163
- msg = 'unknown termination cause 0x%04x' % sts # noqa
2164
- return -1, msg
2165
-
2166
-
2167
- ##
2168
-
2169
-
2170
- def read_fd(fd: int) -> bytes:
2171
- try:
2172
- data = os.read(fd, 2 << 16) # 128K
2173
- except OSError as why:
2174
- if why.args[0] not in (errno.EWOULDBLOCK, errno.EBADF, errno.EINTR):
2175
- raise
2176
- data = b''
2177
- return data
2178
-
2179
-
2180
- def try_unlink(path: str) -> bool:
2181
- try:
2182
- os.unlink(path)
2183
- except OSError:
2184
- return False
2185
- return True
2186
-
2187
-
2188
- def close_fd(fd: int) -> bool:
2189
- try:
2190
- os.close(fd)
2191
- except OSError:
2192
- return False
2193
- return True
2194
-
2195
-
2196
- def is_fd_open(fd: int) -> bool:
2197
- try:
2198
- n = os.dup(fd)
2199
- except OSError:
2200
- return False
2201
- os.close(n)
2202
- return True
2203
-
2204
-
2205
- def get_open_fds(limit: int) -> ta.FrozenSet[int]:
2206
- return frozenset(filter(is_fd_open, range(limit)))
2207
-
2208
-
2209
- def mktempfile(suffix: str, prefix: str, dir: str) -> str: # noqa
2210
- fd, filename = tempfile.mkstemp(suffix, prefix, dir)
2211
- os.close(fd)
2212
- return filename
2213
-
2132
+ if iscore:
2133
+ msg += ' (core dumped)'
2134
+ return Rc(-1), msg
2214
2135
 
2215
- ##
2136
+ else:
2137
+ msg = 'unknown termination cause 0x%04x' % sts # noqa
2138
+ return Rc(-1), msg
2216
2139
 
2217
2140
 
2218
- def get_path() -> ta.Sequence[str]:
2219
- """Return a list corresponding to $PATH, or a default."""
2141
+ ########################################
2142
+ # ../utils/users.py
2220
2143
 
2221
- path = ['/bin', '/usr/bin', '/usr/local/bin']
2222
- if 'PATH' in os.environ:
2223
- p = os.environ['PATH']
2224
- if p:
2225
- path = p.split(os.pathsep)
2226
- return path
2227
2144
 
2145
+ ##
2228
2146
 
2229
- def normalize_path(v: str) -> str:
2230
- return os.path.normpath(os.path.abspath(os.path.expanduser(v)))
2231
2147
 
2148
+ def name_to_uid(name: str) -> Uid:
2149
+ try:
2150
+ uid = int(name)
2151
+ except ValueError:
2152
+ try:
2153
+ pwdrec = pwd.getpwnam(name)
2154
+ except KeyError:
2155
+ raise ValueError(f'Invalid user name {name}') # noqa
2156
+ uid = pwdrec[2]
2157
+ else:
2158
+ try:
2159
+ pwd.getpwuid(uid) # check if uid is valid
2160
+ except KeyError:
2161
+ raise ValueError(f'Invalid user id {name}') # noqa
2162
+ return Uid(uid)
2232
2163
 
2233
- ##
2234
2164
 
2165
+ def name_to_gid(name: str) -> Gid:
2166
+ try:
2167
+ gid = int(name)
2168
+ except ValueError:
2169
+ try:
2170
+ grprec = grp.getgrnam(name)
2171
+ except KeyError:
2172
+ raise ValueError(f'Invalid group name {name}') # noqa
2173
+ gid = grprec[2]
2174
+ else:
2175
+ try:
2176
+ grp.getgrgid(gid) # check if gid is valid
2177
+ except KeyError:
2178
+ raise ValueError(f'Invalid group id {name}') # noqa
2179
+ return Gid(gid)
2235
2180
 
2236
- ANSI_ESCAPE_BEGIN = b'\x1b['
2237
- ANSI_TERMINATORS = (b'H', b'f', b'A', b'B', b'C', b'D', b'R', b's', b'u', b'J', b'K', b'h', b'l', b'p', b'm')
2238
2181
 
2182
+ def gid_for_uid(uid: Uid) -> Gid:
2183
+ pwrec = pwd.getpwuid(uid)
2184
+ return Gid(pwrec[3])
2239
2185
 
2240
- def strip_escapes(s: bytes) -> bytes:
2241
- """Remove all ANSI color escapes from the given string."""
2242
2186
 
2243
- result = b''
2244
- show = 1
2245
- i = 0
2246
- l = len(s)
2247
- while i < l:
2248
- if show == 0 and s[i:i + 1] in ANSI_TERMINATORS:
2249
- show = 1
2250
- elif show:
2251
- n = s.find(ANSI_ESCAPE_BEGIN, i)
2252
- if n == -1:
2253
- return result + s[i:]
2254
- else:
2255
- result = result + s[i:n]
2256
- i = n
2257
- show = 0
2258
- i += 1
2259
- return result
2187
+ ##
2260
2188
 
2261
2189
 
2262
- ##
2190
+ @dc.dataclass(frozen=True)
2191
+ class User:
2192
+ name: str
2193
+ uid: Uid
2194
+ gid: Gid
2263
2195
 
2264
2196
 
2265
- def timeslice(period: int, when: float) -> int:
2266
- return int(when - (when % period))
2197
+ def get_user(name: str) -> User:
2198
+ return User(
2199
+ name=name,
2200
+ uid=(uid := name_to_uid(name)),
2201
+ gid=gid_for_uid(uid),
2202
+ )
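
For example, resolving a user with the helpers above (assuming a conventional 'root' entry):

    u = get_user('root')
    assert u.uid == 0 and u.gid == gid_for_uid(u.uid)  # name_to_uid also accepts numeric strings
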
2267
2203
 
2268
2204
 
2269
2205
  ########################################
@@ -4288,19 +4224,19 @@ def build_config_named_children(
4288
4224
 
4289
4225
  @dc.dataclass(frozen=True)
4290
4226
  class ProcessPipes:
4291
- child_stdin: ta.Optional[int] = None
4292
- stdin: ta.Optional[int] = None
4227
+ child_stdin: ta.Optional[Fd] = None
4228
+ stdin: ta.Optional[Fd] = None
4293
4229
 
4294
- stdout: ta.Optional[int] = None
4295
- child_stdout: ta.Optional[int] = None
4230
+ stdout: ta.Optional[Fd] = None
4231
+ child_stdout: ta.Optional[Fd] = None
4296
4232
 
4297
- stderr: ta.Optional[int] = None
4298
- child_stderr: ta.Optional[int] = None
4233
+ stderr: ta.Optional[Fd] = None
4234
+ child_stderr: ta.Optional[Fd] = None
4299
4235
 
4300
- def child_fds(self) -> ta.List[int]:
4236
+ def child_fds(self) -> ta.List[Fd]:
4301
4237
  return [fd for fd in [self.child_stdin, self.child_stdout, self.child_stderr] if fd is not None]
4302
4238
 
4303
- def parent_fds(self) -> ta.List[int]:
4239
+ def parent_fds(self) -> ta.List[Fd]:
4304
4240
  return [fd for fd in [self.stdin, self.stdout, self.stderr] if fd is not None]
4305
4241
 
4306
4242
 
@@ -4310,56 +4246,268 @@ def make_process_pipes(stderr=True) -> ProcessPipes:
4310
4246
  read them in the mainloop without blocking. If stderr is False, don't create a pipe for stderr.
4311
4247
  """
4312
4248
 
4313
- pipes: ta.Dict[str, ta.Optional[int]] = {
4314
- 'child_stdin': None,
4315
- 'stdin': None,
4249
+ pipes: ta.Dict[str, ta.Optional[Fd]] = {
4250
+ 'child_stdin': None,
4251
+ 'stdin': None,
4252
+
4253
+ 'stdout': None,
4254
+ 'child_stdout': None,
4255
+
4256
+ 'stderr': None,
4257
+ 'child_stderr': None,
4258
+ }
4259
+
4260
+ try:
4261
+ pipes['child_stdin'], pipes['stdin'] = make_pipe()
4262
+ pipes['stdout'], pipes['child_stdout'] = make_pipe()
4263
+
4264
+ if stderr:
4265
+ pipes['stderr'], pipes['child_stderr'] = make_pipe()
4266
+
4267
+ for fd in (
4268
+ pipes['stdout'],
4269
+ pipes['stderr'],
4270
+ pipes['stdin'],
4271
+ ):
4272
+ if fd is not None:
4273
+ flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NDELAY
4274
+ fcntl.fcntl(fd, fcntl.F_SETFL, flags)
4275
+
4276
+ return ProcessPipes(**pipes)
4277
+
4278
+ except OSError:
4279
+ for fd in pipes.values():
4280
+ if fd is not None:
4281
+ close_fd(fd)
4282
+
4283
+ raise
4284
+
4285
+
4286
+ def close_pipes(pipes: ProcessPipes) -> None:
4287
+ close_parent_pipes(pipes)
4288
+ close_child_pipes(pipes)
4289
+
4290
+
4291
+ def close_parent_pipes(pipes: ProcessPipes) -> None:
4292
+ for fd in pipes.parent_fds():
4293
+ close_fd(fd)
4294
+
4295
+
4296
+ def close_child_pipes(pipes: ProcessPipes) -> None:
4297
+ for fd in pipes.child_fds():
4298
+ close_fd(fd)
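
Putting the pipe helpers together, a minimal sketch of the intended lifecycle:

    pipes = make_process_pipes(stderr=True)
    # parent ends (stdin/stdout/stderr) are set O_NDELAY above, so mainloop reads never block
    child_fds = pipes.child_fds()   # these would be dup2'd into the child after fork
    try:
        pass  # spawn the child and dispatch on pipes.stdout / pipes.stderr here
    finally:
        close_pipes(pipes)          # closes both parent and child ends
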
4299
+
4300
+
4301
+ ########################################
4302
+ # ../setup.py
4303
+
4304
+
4305
+ ##
4306
+
4307
+
4308
+ SupervisorUser = ta.NewType('SupervisorUser', User)
4309
+
4310
+
4311
+ ##
4312
+
4313
+
4314
+ class DaemonizeListener(abc.ABC): # noqa
4315
+ def before_daemonize(self) -> None: # noqa
4316
+ pass
4317
+
4318
+ def after_daemonize(self) -> None: # noqa
4319
+ pass
4320
+
4321
+
4322
+ DaemonizeListeners = ta.NewType('DaemonizeListeners', ta.Sequence[DaemonizeListener])
4323
+
4324
+
4325
+ ##
4326
+
4327
+
4328
+ class SupervisorSetup(abc.ABC):
4329
+ @abc.abstractmethod
4330
+ def setup(self) -> None:
4331
+ raise NotImplementedError
4332
+
4333
+ @abc.abstractmethod
4334
+ def cleanup(self) -> None:
4335
+ raise NotImplementedError
4336
+
4337
+
4338
+ ########################################
4339
+ # ../../../omlish/lite/http/handlers.py
4340
+
4341
+
4342
+ @dc.dataclass(frozen=True)
4343
+ class HttpHandlerRequest:
4344
+ client_address: SocketAddress
4345
+ method: str
4346
+ path: str
4347
+ headers: HttpHeaders
4348
+ data: ta.Optional[bytes]
4349
+
4350
+
4351
+ @dc.dataclass(frozen=True)
4352
+ class HttpHandlerResponse:
4353
+ status: ta.Union[http.HTTPStatus, int]
4354
+
4355
+ headers: ta.Optional[ta.Mapping[str, str]] = None
4356
+ data: ta.Optional[bytes] = None
4357
+ close_connection: ta.Optional[bool] = None
4358
+
4359
+
4360
+ class HttpHandlerError(Exception):
4361
+ pass
4362
+
4363
+
4364
+ class UnsupportedMethodHttpHandlerError(Exception):
4365
+ pass
4366
+
4367
+
4368
+ ########################################
4369
+ # ../configs.py
4370
+
4371
+
4372
+ ##
4373
+
4374
+
4375
+ class RestartWhenExitUnexpected:
4376
+ pass
4377
+
4378
+
4379
+ class RestartUnconditionally:
4380
+ pass
4381
+
4382
+
4383
+ ##
4384
+
4385
+
4386
+ @dc.dataclass(frozen=True)
4387
+ class ProcessConfig:
4388
+ name: str
4389
+ command: str
4390
+
4391
+ uid: ta.Optional[int] = None
4392
+ directory: ta.Optional[str] = None
4393
+ umask: ta.Optional[int] = None
4394
+ priority: int = 999
4395
+
4396
+ autostart: bool = True
4397
+ autorestart: str = 'unexpected'
4398
+
4399
+ startsecs: int = 1
4400
+ startretries: int = 3
4401
+
4402
+ numprocs: int = 1
4403
+ numprocs_start: int = 0
4404
+
4405
+ @dc.dataclass(frozen=True)
4406
+ class Log:
4407
+ file: ta.Optional[str] = None
4408
+ capture_maxbytes: ta.Optional[int] = None
4409
+ events_enabled: bool = False
4410
+ syslog: bool = False
4411
+ backups: ta.Optional[int] = None
4412
+ maxbytes: ta.Optional[int] = None
4413
+
4414
+ stdout: Log = Log()
4415
+ stderr: Log = Log()
4416
+
4417
+ stopsignal: int = signal.SIGTERM
4418
+ stopwaitsecs: int = 10
4419
+ stopasgroup: bool = False
4420
+
4421
+ killasgroup: bool = False
4422
+
4423
+ exitcodes: ta.Sequence[int] = (0,)
4424
+
4425
+ redirect_stderr: bool = False
4426
+
4427
+ environment: ta.Optional[ta.Mapping[str, str]] = None
4428
+
4429
+
4430
+ @dc.dataclass(frozen=True)
4431
+ class ProcessGroupConfig:
4432
+ name: str
4433
+
4434
+ priority: int = 999
4316
4435
 
4317
- 'stdout': None,
4318
- 'child_stdout': None,
4436
+ processes: ta.Optional[ta.Sequence[ProcessConfig]] = None
4319
4437
 
4320
- 'stderr': None,
4321
- 'child_stderr': None,
4322
- }
4323
4438
 
4324
- try:
4325
- pipes['child_stdin'], pipes['stdin'] = os.pipe()
4326
- pipes['stdout'], pipes['child_stdout'] = os.pipe()
4439
+ @dc.dataclass(frozen=True)
4440
+ class ServerConfig:
4441
+ user: ta.Optional[str] = None
4442
+ nodaemon: bool = False
4443
+ umask: int = 0o22
4444
+ directory: ta.Optional[str] = None
4445
+ logfile: str = 'supervisord.log'
4446
+ logfile_maxbytes: int = 50 * 1024 * 1024
4447
+ logfile_backups: int = 10
4448
+ loglevel: int = logging.INFO
4449
+ pidfile: str = 'supervisord.pid'
4450
+ identifier: str = 'supervisor'
4451
+ child_logdir: str = '/dev/null'
4452
+ minfds: int = 1024
4453
+ minprocs: int = 200
4454
+ nocleanup: bool = False
4455
+ strip_ansi: bool = False
4456
+ silent: bool = False
4327
4457
 
4328
- if stderr:
4329
- pipes['stderr'], pipes['child_stderr'] = os.pipe()
4458
+ groups: ta.Optional[ta.Sequence[ProcessGroupConfig]] = None
4330
4459
 
4331
- for fd in (
4332
- pipes['stdout'],
4333
- pipes['stderr'],
4334
- pipes['stdin'],
4335
- ):
4336
- if fd is not None:
4337
- flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NDELAY
4338
- fcntl.fcntl(fd, fcntl.F_SETFL, flags)
4460
+ @classmethod
4461
+ def new(
4462
+ cls,
4463
+ umask: ta.Union[int, str] = 0o22,
4464
+ directory: ta.Optional[str] = None,
4465
+ logfile: str = 'supervisord.log',
4466
+ logfile_maxbytes: ta.Union[int, str] = 50 * 1024 * 1024,
4467
+ loglevel: ta.Union[int, str] = logging.INFO,
4468
+ pidfile: str = 'supervisord.pid',
4469
+ child_logdir: ta.Optional[str] = None,
4470
+ **kwargs: ta.Any,
4471
+ ) -> 'ServerConfig':
4472
+ return cls(
4473
+ umask=parse_octal(umask),
4474
+ directory=check_existing_dir(directory) if directory is not None else None,
4475
+ logfile=check_path_with_existing_dir(logfile),
4476
+ logfile_maxbytes=parse_bytes_size(logfile_maxbytes),
4477
+ loglevel=parse_logging_level(loglevel),
4478
+ pidfile=check_path_with_existing_dir(pidfile),
4479
+ child_logdir=child_logdir if child_logdir else tempfile.gettempdir(),
4480
+ **kwargs,
4481
+ )
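
The `new()` classmethod exists so raw string settings can be coerced through the helpers above; an illustrative call:

    cfg = ServerConfig.new(
        umask='022',                     # parse_octal -> 0o22
        logfile='/tmp/supervisord.log',  # parent directory must exist
        logfile_maxbytes='50mb',         # parse_bytes_size -> 52428800
        loglevel='info',                 # parse_logging_level -> logging.INFO
    )
    assert cfg.umask == 0o22 and cfg.logfile_maxbytes == 50 * 1024 * 1024
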
4339
4482
 
4340
- return ProcessPipes(**pipes)
4341
4483
 
4342
- except OSError:
4343
- for fd in pipes.values():
4344
- if fd is not None:
4345
- close_fd(fd)
4484
+ ##
4346
4485
 
4347
- raise
4486
+
4487
+ def prepare_process_group_config(dct: ConfigMapping) -> ConfigMapping:
4488
+ out = dict(dct)
4489
+ out['processes'] = build_config_named_children(out.get('processes'))
4490
+ return out
4348
4491
 
4349
4492
 
4350
- def close_pipes(pipes: ProcessPipes) -> None:
4351
- close_parent_pipes(pipes)
4352
- close_child_pipes(pipes)
4493
+ def prepare_server_config(dct: ta.Mapping[str, ta.Any]) -> ta.Mapping[str, ta.Any]:
4494
+ out = dict(dct)
4495
+ group_dcts = build_config_named_children(out.get('groups'))
4496
+ out['groups'] = [prepare_process_group_config(group_dct) for group_dct in group_dcts or []]
4497
+ return out
4353
4498
 
4354
4499
 
4355
- def close_parent_pipes(pipes: ProcessPipes) -> None:
4356
- for fd in pipes.parent_fds():
4357
- close_fd(fd)
4500
+ ##
4358
4501
 
4359
4502
 
4360
- def close_child_pipes(pipes: ProcessPipes) -> None:
4361
- for fd in pipes.child_fds():
4362
- close_fd(fd)
4503
+ def parse_logging_level(value: ta.Union[str, int]) -> int:
4504
+ if isinstance(value, int):
4505
+ return value
4506
+ s = str(value).lower()
4507
+ level = logging.getLevelNamesMapping().get(s.upper())
4508
+ if level is None:
4509
+ raise ValueError(f'bad logging level name {value!r}')
4510
+ return level
4363
4511
 
4364
4512
 
4365
4513
  ########################################
@@ -4371,23 +4519,23 @@ class Poller(DaemonizeListener, abc.ABC):
4371
4519
  super().__init__()
4372
4520
 
4373
4521
  @abc.abstractmethod
4374
- def register_readable(self, fd: int) -> None:
4522
+ def register_readable(self, fd: Fd) -> None:
4375
4523
  raise NotImplementedError
4376
4524
 
4377
4525
  @abc.abstractmethod
4378
- def register_writable(self, fd: int) -> None:
4526
+ def register_writable(self, fd: Fd) -> None:
4379
4527
  raise NotImplementedError
4380
4528
 
4381
4529
  @abc.abstractmethod
4382
- def unregister_readable(self, fd: int) -> None:
4530
+ def unregister_readable(self, fd: Fd) -> None:
4383
4531
  raise NotImplementedError
4384
4532
 
4385
4533
  @abc.abstractmethod
4386
- def unregister_writable(self, fd: int) -> None:
4534
+ def unregister_writable(self, fd: Fd) -> None:
4387
4535
  raise NotImplementedError
4388
4536
 
4389
4537
  @abc.abstractmethod
4390
- def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[int], ta.List[int]]:
4538
+ def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
4391
4539
  raise NotImplementedError
4392
4540
 
4393
4541
  def before_daemonize(self) -> None: # noqa
@@ -4404,37 +4552,37 @@ class SelectPoller(Poller):
4404
4552
  def __init__(self) -> None:
4405
4553
  super().__init__()
4406
4554
 
4407
- self._readable: ta.Set[int] = set()
4408
- self._writable: ta.Set[int] = set()
4555
+ self._readable: ta.Set[Fd] = set()
4556
+ self._writable: ta.Set[Fd] = set()
4409
4557
 
4410
- def register_readable(self, fd: int) -> None:
4558
+ def register_readable(self, fd: Fd) -> None:
4411
4559
  self._readable.add(fd)
4412
4560
 
4413
- def register_writable(self, fd: int) -> None:
4561
+ def register_writable(self, fd: Fd) -> None:
4414
4562
  self._writable.add(fd)
4415
4563
 
4416
- def unregister_readable(self, fd: int) -> None:
4564
+ def unregister_readable(self, fd: Fd) -> None:
4417
4565
  self._readable.discard(fd)
4418
4566
 
4419
- def unregister_writable(self, fd: int) -> None:
4567
+ def unregister_writable(self, fd: Fd) -> None:
4420
4568
  self._writable.discard(fd)
4421
4569
 
4422
4570
  def unregister_all(self) -> None:
4423
4571
  self._readable.clear()
4424
4572
  self._writable.clear()
4425
4573
 
4426
- def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[int], ta.List[int]]:
4574
+ def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
4427
4575
  try:
4428
4576
  r, w, x = select.select(
4429
4577
  self._readable,
4430
4578
  self._writable,
4431
4579
  [], timeout,
4432
4580
  )
4433
- except OSError as err:
4434
- if err.args[0] == errno.EINTR:
4581
+ except OSError as exc:
4582
+ if exc.args[0] == errno.EINTR:
4435
4583
  log.debug('EINTR encountered in poll')
4436
4584
  return [], []
4437
- if err.args[0] == errno.EBADF:
4585
+ if exc.args[0] == errno.EBADF:
4438
4586
  log.debug('EBADF encountered in poll')
4439
4587
  self.unregister_all()
4440
4588
  return [], []
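
As a usage sketch, the poller interface above is driven by registering fds and polling with a timeout (assuming `make_pipe` is in scope):

    import os

    poller = SelectPoller()
    r, w = make_pipe()
    poller.register_readable(r)
    os.write(w, b'x')
    readable, writable = poller.poll(1.0)  # ([r], []) once data is available
    assert r in readable
    poller.unregister_readable(r)
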
@@ -4450,30 +4598,30 @@ class PollPoller(Poller):
4450
4598
  super().__init__()
4451
4599
 
4452
4600
  self._poller = select.poll()
4453
- self._readable: set[int] = set()
4454
- self._writable: set[int] = set()
4601
+ self._readable: set[Fd] = set()
4602
+ self._writable: set[Fd] = set()
4455
4603
 
4456
- def register_readable(self, fd: int) -> None:
4604
+ def register_readable(self, fd: Fd) -> None:
4457
4605
  self._poller.register(fd, self._READ)
4458
4606
  self._readable.add(fd)
4459
4607
 
4460
- def register_writable(self, fd: int) -> None:
4608
+ def register_writable(self, fd: Fd) -> None:
4461
4609
  self._poller.register(fd, self._WRITE)
4462
4610
  self._writable.add(fd)
4463
4611
 
4464
- def unregister_readable(self, fd: int) -> None:
4612
+ def unregister_readable(self, fd: Fd) -> None:
4465
4613
  self._readable.discard(fd)
4466
4614
  self._poller.unregister(fd)
4467
4615
  if fd in self._writable:
4468
4616
  self._poller.register(fd, self._WRITE)
4469
4617
 
4470
- def unregister_writable(self, fd: int) -> None:
4618
+ def unregister_writable(self, fd: Fd) -> None:
4471
4619
  self._writable.discard(fd)
4472
4620
  self._poller.unregister(fd)
4473
4621
  if fd in self._readable:
4474
4622
  self._poller.register(fd, self._READ)
4475
4623
 
4476
- def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[int], ta.List[int]]:
4624
+ def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
4477
4625
  fds = self._poll_fds(timeout) # type: ignore
4478
4626
  readable, writable = [], []
4479
4627
  for fd, eventmask in fds:
@@ -4485,16 +4633,16 @@ class PollPoller(Poller):
4485
4633
  writable.append(fd)
4486
4634
  return readable, writable
4487
4635
 
4488
- def _poll_fds(self, timeout: float) -> ta.List[ta.Tuple[int, int]]:
4636
+ def _poll_fds(self, timeout: float) -> ta.List[ta.Tuple[Fd, Fd]]:
4489
4637
  try:
4490
- return self._poller.poll(timeout * 1000)
4491
- except OSError as err:
4492
- if err.args[0] == errno.EINTR:
4638
+ return self._poller.poll(timeout * 1000) # type: ignore
4639
+ except OSError as exc:
4640
+ if exc.args[0] == errno.EINTR:
4493
4641
  log.debug('EINTR encountered in poll')
4494
4642
  return []
4495
4643
  raise
4496
4644
 
4497
- def _ignore_invalid(self, fd: int, eventmask: int) -> bool:
4645
+ def _ignore_invalid(self, fd: Fd, eventmask: int) -> bool:
4498
4646
  if eventmask & select.POLLNVAL:
4499
4647
  # POLLNVAL means `fd` value is invalid, not open. When a process quits it's `fd`s are closed so there is no
4500
4648
  # more reason to keep this `fd` registered If the process restarts it's `fd`s are registered again.
@@ -4513,30 +4661,30 @@ if sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
4513
4661
  super().__init__()
4514
4662
 
4515
4663
  self._kqueue: ta.Optional[ta.Any] = select.kqueue()
4516
- self._readable: set[int] = set()
4517
- self._writable: set[int] = set()
4664
+ self._readable: set[Fd] = set()
4665
+ self._writable: set[Fd] = set()
4518
4666
 
4519
- def register_readable(self, fd: int) -> None:
4667
+ def register_readable(self, fd: Fd) -> None:
4520
4668
  self._readable.add(fd)
4521
4669
  kevent = select.kevent(fd, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_ADD)
4522
4670
  self._kqueue_control(fd, kevent)
4523
4671
 
4524
- def register_writable(self, fd: int) -> None:
4672
+ def register_writable(self, fd: Fd) -> None:
4525
4673
  self._writable.add(fd)
4526
4674
  kevent = select.kevent(fd, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_ADD)
4527
4675
  self._kqueue_control(fd, kevent)
4528
4676
 
4529
- def unregister_readable(self, fd: int) -> None:
4677
+ def unregister_readable(self, fd: Fd) -> None:
4530
4678
  kevent = select.kevent(fd, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_DELETE)
4531
4679
  self._readable.discard(fd)
4532
4680
  self._kqueue_control(fd, kevent)
4533
4681
 
4534
- def unregister_writable(self, fd: int) -> None:
4682
+ def unregister_writable(self, fd: Fd) -> None:
4535
4683
  kevent = select.kevent(fd, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_DELETE)
4536
4684
  self._writable.discard(fd)
4537
4685
  self._kqueue_control(fd, kevent)
4538
4686
 
4539
- def _kqueue_control(self, fd: int, kevent: 'select.kevent') -> None:
4687
+ def _kqueue_control(self, fd: Fd, kevent: 'select.kevent') -> None:
4540
4688
  try:
4541
4689
  self._kqueue.control([kevent], 0) # type: ignore
4542
4690
  except OSError as error:
@@ -4545,7 +4693,7 @@ if sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
4545
4693
  else:
4546
4694
  raise
4547
4695
 
4548
- def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[int], ta.List[int]]:
4696
+ def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
4549
4697
  readable, writable = [], [] # type: ignore
4550
4698
 
4551
4699
  try:
@@ -4595,157 +4743,6 @@ def get_poller_impl() -> ta.Type[Poller]:
4595
4743
  return SelectPoller
4596
4744
 
4597
4745
 
4598
- ########################################
4599
- # ../../../omlish/lite/http/handlers.py
4600
-
4601
-
4602
- @dc.dataclass(frozen=True)
4603
- class HttpHandlerRequest:
4604
- client_address: SocketAddress
4605
- method: str
4606
- path: str
4607
- headers: HttpHeaders
4608
- data: ta.Optional[bytes]
4609
-
4610
-
4611
- @dc.dataclass(frozen=True)
4612
- class HttpHandlerResponse:
4613
- status: ta.Union[http.HTTPStatus, int]
4614
-
4615
- headers: ta.Optional[ta.Mapping[str, str]] = None
4616
- data: ta.Optional[bytes] = None
4617
- close_connection: ta.Optional[bool] = None
4618
-
4619
-
4620
- class HttpHandlerError(Exception):
4621
- pass
4622
-
4623
-
4624
- class UnsupportedMethodHttpHandlerError(Exception):
4625
- pass
4626
-
4627
-
4628
- ########################################
4629
- # ../configs.py
4630
-
4631
-
4632
- ##
4633
-
4634
-
4635
- @dc.dataclass(frozen=True)
4636
- class ProcessConfig:
4637
- name: str
4638
- command: str
4639
-
4640
- uid: ta.Optional[int] = None
4641
- directory: ta.Optional[str] = None
4642
- umask: ta.Optional[int] = None
4643
- priority: int = 999
4644
-
4645
- autostart: bool = True
4646
- autorestart: str = 'unexpected'
4647
-
4648
- startsecs: int = 1
4649
- startretries: int = 3
4650
-
4651
- numprocs: int = 1
4652
- numprocs_start: int = 0
4653
-
4654
- @dc.dataclass(frozen=True)
4655
- class Log:
4656
- file: ta.Optional[str] = None
4657
- capture_maxbytes: ta.Optional[int] = None
4658
- events_enabled: bool = False
4659
- syslog: bool = False
4660
- backups: ta.Optional[int] = None
4661
- maxbytes: ta.Optional[int] = None
4662
-
4663
- stdout: Log = Log()
4664
- stderr: Log = Log()
4665
-
4666
- stopsignal: int = signal.SIGTERM
4667
- stopwaitsecs: int = 10
4668
- stopasgroup: bool = False
4669
-
4670
- killasgroup: bool = False
4671
-
4672
- exitcodes: ta.Sequence[int] = (0,)
4673
-
4674
- redirect_stderr: bool = False
4675
-
4676
- environment: ta.Optional[ta.Mapping[str, str]] = None
4677
-
4678
-
4679
- @dc.dataclass(frozen=True)
4680
- class ProcessGroupConfig:
4681
- name: str
4682
-
4683
- priority: int = 999
4684
-
4685
- processes: ta.Optional[ta.Sequence[ProcessConfig]] = None
4686
-
4687
-
4688
- @dc.dataclass(frozen=True)
4689
- class ServerConfig:
4690
- user: ta.Optional[str] = None
4691
- nodaemon: bool = False
4692
- umask: int = 0o22
4693
- directory: ta.Optional[str] = None
4694
- logfile: str = 'supervisord.log'
4695
- logfile_maxbytes: int = 50 * 1024 * 1024
4696
- logfile_backups: int = 10
4697
- loglevel: int = logging.INFO
4698
- pidfile: str = 'supervisord.pid'
4699
- identifier: str = 'supervisor'
4700
- child_logdir: str = '/dev/null'
4701
- minfds: int = 1024
4702
- minprocs: int = 200
4703
- nocleanup: bool = False
4704
- strip_ansi: bool = False
4705
- silent: bool = False
4706
-
4707
- groups: ta.Optional[ta.Sequence[ProcessGroupConfig]] = None
4708
-
4709
- @classmethod
4710
- def new(
4711
- cls,
4712
- umask: ta.Union[int, str] = 0o22,
4713
- directory: ta.Optional[str] = None,
4714
- logfile: str = 'supervisord.log',
4715
- logfile_maxbytes: ta.Union[int, str] = 50 * 1024 * 1024,
4716
- loglevel: ta.Union[int, str] = logging.INFO,
4717
- pidfile: str = 'supervisord.pid',
4718
- child_logdir: ta.Optional[str] = None,
4719
- **kwargs: ta.Any,
4720
- ) -> 'ServerConfig':
4721
- return cls(
4722
- umask=octal_type(umask),
4723
- directory=existing_directory(directory) if directory is not None else None,
4724
- logfile=existing_dirpath(logfile),
4725
- logfile_maxbytes=byte_size(logfile_maxbytes),
4726
- loglevel=logging_level(loglevel),
4727
- pidfile=existing_dirpath(pidfile),
4728
- child_logdir=child_logdir if child_logdir else tempfile.gettempdir(),
4729
- **kwargs,
4730
- )
4731
-
4732
-
4733
- ##
4734
-
4735
-
4736
- def prepare_process_group_config(dct: ConfigMapping) -> ConfigMapping:
4737
- out = dict(dct)
4738
- out['processes'] = build_config_named_children(out.get('processes'))
4739
- return out
4740
-
4741
-
4742
- def prepare_server_config(dct: ta.Mapping[str, ta.Any]) -> ta.Mapping[str, ta.Any]:
4743
- out = dict(dct)
4744
- group_dcts = build_config_named_children(out.get('groups'))
4745
- out['groups'] = [prepare_process_group_config(group_dct) for group_dct in group_dcts or []]
4746
- return out
4747
-
4748
-
4749
4746
  ########################################
4750
4747
  # ../../../omlish/lite/http/coroserver.py
4751
4748
  # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
@@ -5351,11 +5348,6 @@ class ServerContext(abc.ABC):
5351
5348
  def set_state(self, state: SupervisorState) -> None:
5352
5349
  raise NotImplementedError
5353
5350
 
5354
- @property
5355
- @abc.abstractmethod
5356
- def pid_history(self) -> ta.Dict[int, 'Process']:
5357
- raise NotImplementedError
5358
-
5359
5351
 
5360
5352
  ##
5361
5353
 
@@ -5373,7 +5365,7 @@ class Dispatcher(abc.ABC):
5373
5365
 
5374
5366
  @property
5375
5367
  @abc.abstractmethod
5376
- def fd(self) -> int:
5368
+ def fd(self) -> Fd:
5377
5369
  raise NotImplementedError
5378
5370
 
5379
5371
  @property
@@ -5451,7 +5443,7 @@ class Process(ConfigPriorityOrdered, abc.ABC):
5451
5443
 
5452
5444
  @property
5453
5445
  @abc.abstractmethod
5454
- def pid(self) -> int:
5446
+ def pid(self) -> Pid:
5455
5447
  raise NotImplementedError
5456
5448
 
5457
5449
  #
@@ -5462,7 +5454,7 @@ class Process(ConfigPriorityOrdered, abc.ABC):
5462
5454
  raise NotImplementedError
5463
5455
 
5464
5456
  @abc.abstractmethod
5465
- def finish(self, sts: int) -> None:
5457
+ def finish(self, sts: Rc) -> None:
5466
5458
  raise NotImplementedError
5467
5459
 
5468
5460
  @abc.abstractmethod
@@ -5546,7 +5538,6 @@ class ServerContextImpl(ServerContext):
5546
5538
  self._poller = poller
5547
5539
  self._epoch = epoch
5548
5540
 
5549
- self._pid_history: ta.Dict[int, Process] = {}
5550
5541
  self._state: SupervisorState = SupervisorState.RUNNING
5551
5542
 
5552
5543
  @property
@@ -5568,13 +5559,9 @@ class ServerContextImpl(ServerContext):
5568
5559
  def set_state(self, state: SupervisorState) -> None:
5569
5560
  self._state = state
5570
5561
 
5571
- @property
5572
- def pid_history(self) -> ta.Dict[int, Process]:
5573
- return self._pid_history
5574
-
5575
5562
  #
5576
5563
 
5577
- def waitpid(self) -> ta.Tuple[ta.Optional[int], ta.Optional[int]]:
5564
+ def waitpid(self) -> ta.Tuple[ta.Optional[Pid], ta.Optional[Rc]]:
5578
5565
  # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
5579
5566
  # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
5580
5567
  # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
@@ -5590,7 +5577,7 @@ class ServerContextImpl(ServerContext):
5590
5577
  if code == errno.EINTR:
5591
5578
  log.debug('EINTR during reap')
5592
5579
  pid, sts = None, None
5593
- return pid, sts
5580
+ return pid, sts # type: ignore
5594
5581
 
5595
5582
  def get_auto_child_log_name(self, name: str, identifier: str, channel: str) -> str:
5596
5583
  prefix = f'{name}-{channel}---{identifier}-'
@@ -5606,8 +5593,8 @@ class ServerContextImpl(ServerContext):
5606
5593
  # ../dispatchers.py
5607
5594
 
5608
5595
 
5609
- class Dispatchers(KeyedCollection[int, Dispatcher]):
5610
- def _key(self, v: Dispatcher) -> int:
5596
+ class Dispatchers(KeyedCollection[Fd, Dispatcher]):
5597
+ def _key(self, v: Dispatcher) -> Fd:
5611
5598
  return v.fd
5612
5599
 
5613
5600
  #
@@ -5643,7 +5630,7 @@ class BaseDispatcherImpl(Dispatcher, abc.ABC):
5643
5630
  self,
5644
5631
  process: Process,
5645
5632
  channel: str,
5646
- fd: int,
5633
+ fd: Fd,
5647
5634
  *,
5648
5635
  event_callbacks: EventCallbacks,
5649
5636
  ) -> None:
@@ -5672,7 +5659,7 @@ class BaseDispatcherImpl(Dispatcher, abc.ABC):
5672
5659
  return self._channel
5673
5660
 
5674
5661
  @property
5675
- def fd(self) -> int:
5662
+ def fd(self) -> Fd:
5676
5663
  return self._fd
5677
5664
 
5678
5665
  @property
@@ -5706,7 +5693,7 @@ class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
5706
5693
  self,
5707
5694
  process: Process,
5708
5695
  event_type: ta.Type[ProcessCommunicationEvent],
5709
- fd: int,
5696
+ fd: Fd,
5710
5697
  *,
5711
5698
  event_callbacks: EventCallbacks,
5712
5699
  ) -> None:
@@ -5916,7 +5903,7 @@ class InputDispatcherImpl(BaseDispatcherImpl, InputDispatcher):
5916
5903
  self,
5917
5904
  process: Process,
5918
5905
  channel: str,
5919
- fd: int,
5906
+ fd: Fd,
5920
5907
  *,
5921
5908
  event_callbacks: EventCallbacks,
5922
5909
  ) -> None:
@@ -6110,7 +6097,7 @@ class ProcessGroupImpl(ProcessGroup):
6110
6097
 
6111
6098
 
6112
6099
  ########################################
6113
- # ../processes.py
6100
+ # ../process.py
6114
6101
 
6115
6102
 
6116
6103
  ##
@@ -6123,7 +6110,7 @@ class ProcessStateError(RuntimeError):
6123
6110
  ##
6124
6111
 
6125
6112
 
6126
- class PidHistory(ta.Dict[int, Process]):
6113
+ class PidHistory(ta.Dict[Pid, Process]):
6127
6114
  pass
6128
6115
 
6129
6116
 
@@ -6351,7 +6338,7 @@ class SupervisorSetupImpl(SupervisorSetup):
6351
6338
  if pid != 0:
6352
6339
  # Parent
6353
6340
  log.debug('supervisord forked; parent exiting')
6354
- real_exit(0)
6341
+ real_exit(Rc(0))
6355
6342
 
6356
6343
  # Child
6357
6344
  log.info('daemonizing the supervisord process')
@@ -6380,7 +6367,7 @@ class SupervisorSetupImpl(SupervisorSetup):
6380
6367
 
6381
6368
  @dc.dataclass(frozen=True)
6382
6369
  class SpawnedProcess:
6383
- pid: int
6370
+ pid: Pid
6384
6371
  pipes: ProcessPipes
6385
6372
  dispatchers: Dispatchers
6386
6373
 
@@ -6409,6 +6396,17 @@ class ProcessSpawning:
6409
6396
  ##
6410
6397
 
6411
6398
 
6399
+ class ExitNow(Exception): # noqa
6400
+ pass
6401
+
6402
+
6403
+ def timeslice(period: int, when: float) -> int:
6404
+ return int(when - (when % period))
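
`timeslice` just buckets a timestamp into fixed-width periods, for example:

    assert timeslice(5, 1003.7) == 1000  # 1003.7 falls in the [1000, 1005) bucket
    assert timeslice(5, 1005.0) == 1005  # the next bucket starts exactly at 1005
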
6405
+
6406
+
6407
+ ##
6408
+
6409
+
6412
6410
  class SignalHandler:
6413
6411
  def __init__(
6414
6412
  self,
@@ -6543,7 +6541,7 @@ class Supervisor:
6543
6541
  # throttle 'waiting for x to die' reports
6544
6542
  now = time.time()
6545
6543
  if now > (self._last_shutdown_report + 3): # every 3 secs
6546
- names = [as_string(p.config.name) for p in unstopped]
6544
+ names = [p.config.name for p in unstopped]
6547
6545
  namestr = ', '.join(names)
6548
6546
  log.info('waiting for %s to die', namestr)
6549
6547
  self._last_shutdown_report = now
@@ -6736,7 +6734,7 @@ class Supervisor:
6736
6734
 
6737
6735
 
6738
6736
  ########################################
6739
- # ../processesimpl.py
6737
+ # ../processimpl.py
6740
6738
 
6741
6739
 
6742
6740
  class ProcessSpawningFactory(Func1[Process, ProcessSpawning]):
@@ -6774,7 +6772,7 @@ class ProcessImpl(Process):
6774
6772
  self._pipes = ProcessPipes()
6775
6773
 
6776
6774
  self._state = ProcessState.STOPPED
6777
- self._pid = 0 # 0 when not running
6775
+ self._pid = Pid(0) # 0 when not running
6778
6776
 
6779
6777
  self._last_start = 0. # Last time the subprocess was started; 0 if never
6780
6778
  self._last_stop = 0. # Last time the subprocess was stopped; 0 if never
@@ -6788,7 +6786,7 @@ class ProcessImpl(Process):
6788
6786
 
6789
6787
  self._backoff = 0 # backoff counter (to startretries)
6790
6788
 
6791
- self._exitstatus: ta.Optional[int] = None # status attached to dead process by finish()
6789
+ self._exitstatus: ta.Optional[Rc] = None # status attached to dead process by finish()
6792
6790
  self._spawn_err: ta.Optional[str] = None # error message attached by spawn() if any
6793
6791
 
6794
6792
  #
@@ -6811,7 +6809,7 @@ class ProcessImpl(Process):
6811
6809
  return self._group
6812
6810
 
6813
6811
  @property
6814
- def pid(self) -> int:
6812
+ def pid(self) -> Pid:
6815
6813
  return self._pid
6816
6814
 
6817
6815
  #
@@ -6830,11 +6828,9 @@ class ProcessImpl(Process):
6830
6828
 
6831
6829
  #
6832
6830
 
6833
- def spawn(self) -> ta.Optional[int]:
6834
- process_name = as_string(self._config.name)
6835
-
6831
+ def spawn(self) -> ta.Optional[Pid]:
6836
6832
  if self.pid:
6837
- log.warning('process \'%s\' already running', process_name)
6833
+ log.warning('process \'%s\' already running', self.name)
6838
6834
  return None
6839
6835
 
6840
6836
  self.check_in_state(
@@ -6957,7 +6953,7 @@ class ProcessImpl(Process):
6957
6953
  self._check_and_adjust_for_system_clock_rollback(now)
6958
6954
 
6959
6955
  if now > (self._last_stop_report + 2): # every 2 seconds
6960
- log.info('waiting for %s to stop', as_string(self._config.name))
6956
+ log.info('waiting for %s to stop', self.name)
6961
6957
  self._last_stop_report = now
6962
6958
 
6963
6959
  def give_up(self) -> None:
@@ -6977,18 +6973,17 @@ class ProcessImpl(Process):
6977
6973
  """
6978
6974
  now = time.time()
6979
6975
 
6980
- process_name = as_string(self._config.name)
6981
6976
  # If the process is in BACKOFF and we want to stop or kill it, then BACKOFF -> STOPPED. This is needed because
6982
6977
  # if startretries is a large number and the process isn't starting successfully, the stop request would be
6983
6978
  # blocked for a long time waiting for the retries.
6984
6979
  if self._state == ProcessState.BACKOFF:
6985
- log.debug('Attempted to kill %s, which is in BACKOFF state.', process_name)
6980
+ log.debug('Attempted to kill %s, which is in BACKOFF state.', self.name)
6986
6981
  self.change_state(ProcessState.STOPPED)
6987
6982
  return None
6988
6983
 
6989
6984
  args: tuple
6990
6985
  if not self.pid:
6991
- fmt, args = "attempted to kill %s with sig %s but it wasn't running", (process_name, sig_name(sig))
6986
+ fmt, args = "attempted to kill %s with sig %s but it wasn't running", (self.name, sig_name(sig))
6992
6987
  log.debug(fmt, *args)
6993
6988
  return fmt % args
6994
6989
 
@@ -7002,7 +6997,7 @@ class ProcessImpl(Process):
7002
6997
  if killasgroup:
7003
6998
  as_group = 'process group '
7004
6999
 
7005
- log.debug('killing %s (pid %s) %s with signal %s', process_name, self.pid, as_group, sig_name(sig))
7000
+ log.debug('killing %s (pid %s) %s with signal %s', self.name, self.pid, as_group, sig_name(sig))
7006
7001
 
7007
7002
  # RUNNING/STARTING/STOPPING -> STOPPING
7008
7003
  self._killing = True
@@ -7011,24 +7006,24 @@ class ProcessImpl(Process):
7011
7006
  self.check_in_state(ProcessState.RUNNING, ProcessState.STARTING, ProcessState.STOPPING)
7012
7007
  self.change_state(ProcessState.STOPPING)
7013
7008
 
7014
- pid = self.pid
7009
+ kpid = int(self.pid)
7015
7010
  if killasgroup:
7016
7011
  # send to the whole process group instead
7017
- pid = -self.pid
7012
+ kpid = -kpid
7018
7013
 
7019
7014
  try:
7020
7015
  try:
7021
- os.kill(pid, sig)
7016
+ os.kill(kpid, sig)
7022
7017
  except OSError as exc:
7023
7018
  if exc.errno == errno.ESRCH:
7024
- log.debug('unable to signal %s (pid %s), it probably just exited on its own: %s', process_name, self.pid, str(exc)) # noqa
7019
+ log.debug('unable to signal %s (pid %s), it probably just exited on its own: %s', self.name, self.pid, str(exc)) # noqa
7025
7020
  # we could change the state here but we intentionally do not. we will do it during normal SIGCHLD
7026
7021
  # processing.
7027
7022
  return None
7028
7023
  raise
7029
7024
  except Exception: # noqa
7030
7025
  tb = traceback.format_exc()
7031
- fmt, args = 'unknown problem killing %s (%s):%s', (process_name, self.pid, tb)
7026
+ fmt, args = 'unknown problem killing %s (%s):%s', (self.name, self.pid, tb)
7032
7027
  log.critical(fmt, *args)
7033
7028
  self.change_state(ProcessState.UNKNOWN)
7034
7029
  self._killing = False
@@ -7044,14 +7039,13 @@ class ProcessImpl(Process):
7044
7039
  Return None if the signal was sent, or an error message string if an error occurred or if the subprocess is not
7045
7040
  running.
7046
7041
  """
7047
- process_name = as_string(self._config.name)
7048
7042
  args: tuple
7049
7043
  if not self.pid:
7050
- fmt, args = "attempted to send %s sig %s but it wasn't running", (process_name, sig_name(sig))
7044
+ fmt, args = "Attempted to send %s sig %s but it wasn't running", (self.name, sig_name(sig))
7051
7045
  log.debug(fmt, *args)
7052
7046
  return fmt % args
7053
7047
 
7054
- log.debug('sending %s (pid %s) sig %s', process_name, self.pid, sig_name(sig))
7048
+ log.debug('sending %s (pid %s) sig %s', self.name, self.pid, sig_name(sig))
7055
7049
 
7056
7050
  self.check_in_state(ProcessState.RUNNING, ProcessState.STARTING, ProcessState.STOPPING)
7057
7051
 
@@ -7062,7 +7056,7 @@ class ProcessImpl(Process):
7062
7056
  if exc.errno == errno.ESRCH:
7063
7057
  log.debug(
7064
7058
  'unable to signal %s (pid %s), it probably just now exited on its own: %s',
7065
- process_name,
7059
+ self.name,
7066
7060
  self.pid,
7067
7061
  str(exc),
7068
7062
  )
@@ -7072,14 +7066,14 @@ class ProcessImpl(Process):
7072
7066
  raise
7073
7067
  except Exception: # noqa
7074
7068
  tb = traceback.format_exc()
7075
- fmt, args = 'unknown problem sending sig %s (%s):%s', (process_name, self.pid, tb)
7069
+ fmt, args = 'unknown problem sending sig %s (%s):%s', (self.name, self.pid, tb)
7076
7070
  log.critical(fmt, *args)
7077
7071
  self.change_state(ProcessState.UNKNOWN)
7078
7072
  return fmt % args
7079
7073
 
7080
7074
  return None
7081
7075
 
7082
- def finish(self, sts: int) -> None:
7076
+ def finish(self, sts: Rc) -> None:
7083
7077
  """The process was reaped and we need to report and manage its state."""
7084
7078
 
7085
7079
  self._dispatchers.drain()
@@ -7091,7 +7085,6 @@ class ProcessImpl(Process):
7091
7085
  self._check_and_adjust_for_system_clock_rollback(now)
7092
7086
 
7093
7087
  self._last_stop = now
7094
- process_name = as_string(self._config.name)
7095
7088
 
7096
7089
  if now > self._last_start:
7097
7090
  too_quickly = now - self._last_start < self._config.startsecs
@@ -7100,7 +7093,7 @@ class ProcessImpl(Process):
7100
7093
  log.warning(
7101
7094
  "process '%s' (%s) last_start time is in the future, don't know how long process was running so "
7102
7095
  "assuming it did not exit too quickly",
7103
- process_name,
7096
+ self.name,
7104
7097
  self.pid,
7105
7098
  )
7106
7099
 
@@ -7110,9 +7103,9 @@ class ProcessImpl(Process):
7110
7103
  # likely the result of a stop request implies STOPPING -> STOPPED
7111
7104
  self._killing = False
7112
7105
  self._delay = 0
7113
- self._exitstatus = es
7106
+ self._exitstatus = Rc(es)
7114
7107
 
7115
- fmt, args = 'stopped: %s (%s)', (process_name, msg)
7108
+ fmt, args = 'stopped: %s (%s)', (self.name, msg)
7116
7109
  self.check_in_state(ProcessState.STOPPING)
7117
7110
  self.change_state(ProcessState.STOPPED)
7118
7111
  if exit_expected:
@@ -7126,7 +7119,7 @@ class ProcessImpl(Process):
7126
7119
  self._spawn_err = 'Exited too quickly (process log may have details)'
7127
7120
  self.check_in_state(ProcessState.STARTING)
7128
7121
  self.change_state(ProcessState.BACKOFF)
7129
- log.warning('exited: %s (%s)', process_name, msg + '; not expected')
7122
+ log.warning('exited: %s (%s)', self.name, msg + '; not expected')
7130
7123
 
7131
7124
  else:
7132
7125
  # this finish was not the result of a stop request, the program was in the RUNNING state but exited implies
@@ -7145,14 +7138,14 @@ class ProcessImpl(Process):
7145
7138
  if exit_expected:
7146
7139
  # expected exit code
7147
7140
  self.change_state(ProcessState.EXITED, expected=True)
7148
- log.info('exited: %s (%s)', process_name, msg + '; expected')
7141
+ log.info('exited: %s (%s)', self.name, msg + '; expected')
7149
7142
  else:
7150
7143
  # unexpected exit code
7151
7144
  self._spawn_err = f'Bad exit code {es}'
7152
7145
  self.change_state(ProcessState.EXITED, expected=False)
7153
- log.warning('exited: %s (%s)', process_name, msg + '; not expected')
7146
+ log.warning('exited: %s (%s)', self.name, msg + '; not expected')
7154
7147
 
7155
- self._pid = 0
7148
+ self._pid = Pid(0)
7156
7149
  close_parent_pipes(self._pipes)
7157
7150
  self._pipes = ProcessPipes()
7158
7151
  self._dispatchers = Dispatchers([])
@@ -7190,7 +7183,6 @@ class ProcessImpl(Process):
7190
7183
  # BACKOFF -> STARTING
7191
7184
  self.spawn()
7192
7185
 
7193
- process_name = as_string(self._config.name)
7194
7186
  if state == ProcessState.STARTING:
7195
7187
  if now - self._last_start > self._config.startsecs:
7196
7188
  # STARTING -> RUNNING if the proc has started successfully and it has stayed up for at least
@@ -7200,21 +7192,21 @@ class ProcessImpl(Process):
7200
7192
  self.check_in_state(ProcessState.STARTING)
7201
7193
  self.change_state(ProcessState.RUNNING)
7202
7194
  msg = ('entered RUNNING state, process has stayed up for > than %s seconds (startsecs)' % self._config.startsecs) # noqa
7203
- logger.info('success: %s %s', process_name, msg)
7195
+ logger.info('success: %s %s', self.name, msg)
7204
7196
 
7205
7197
  if state == ProcessState.BACKOFF:
7206
7198
  if self._backoff > self._config.startretries:
7207
7199
  # BACKOFF -> FATAL if the proc has exceeded its number of retries
7208
7200
  self.give_up()
7209
7201
  msg = ('entered FATAL state, too many start retries too quickly')
7210
- logger.info('gave up: %s %s', process_name, msg)
7202
+ logger.info('gave up: %s %s', self.name, msg)
7211
7203
 
7212
7204
  elif state == ProcessState.STOPPING:
7213
7205
  time_left = self._delay - now
7214
7206
  if time_left <= 0:
7215
7207
  # kill processes which are taking too long to stop with a final sigkill. if this doesn't kill it, the
7216
7208
  # process will be stuck in the STOPPING state forever.
7217
- log.warning('killing \'%s\' (%s) with SIGKILL', process_name, self.pid)
7209
+ log.warning('killing \'%s\' (%s) with SIGKILL', self.name, self.pid)
7218
7210
  self.kill(signal.SIGKILL)
7219
7211
 
7220
7212
  def after_setuid(self) -> None:
@@ -7233,15 +7225,15 @@ class ProcessImpl(Process):
7233
7225
  # ../spawningimpl.py
7234
7226
 
7235
7227
 
7236
- class OutputDispatcherFactory(Func3[Process, ta.Type[ProcessCommunicationEvent], int, OutputDispatcher]):
7228
+ class OutputDispatcherFactory(Func3[Process, ta.Type[ProcessCommunicationEvent], Fd, OutputDispatcher]):
7237
7229
  pass
7238
7230
 
7239
7231
 
7240
- class InputDispatcherFactory(Func3[Process, str, int, InputDispatcher]):
7232
+ class InputDispatcherFactory(Func3[Process, str, Fd, InputDispatcher]):
7241
7233
  pass
7242
7234
 
7243
7235
 
7244
- InheritedFds = ta.NewType('InheritedFds', ta.FrozenSet[int])
7236
+ InheritedFds = ta.NewType('InheritedFds', ta.FrozenSet[Fd])
7245
7237
 
7246
7238
 
7247
7239
  ##
@@ -7312,7 +7304,7 @@ class ProcessSpawningImpl(ProcessSpawning):
7312
7304
  raise ProcessSpawnError(f"Unknown error making dispatchers for '{self.process.name}': {exc}") from exc
7313
7305
 
7314
7306
  try:
7315
- pid = os.fork()
7307
+ pid = Pid(os.fork())
7316
7308
  except OSError as exc:
7317
7309
  code = exc.args[0]
7318
7310
  if code == errno.EAGAIN:
@@ -7491,7 +7483,7 @@ class ProcessSpawningImpl(ProcessSpawning):
7491
7483
 
7492
7484
  finally:
7493
7485
  os.write(2, as_bytes('supervisor: child process was not spawned\n'))
7494
- real_exit(127) # exit process with code for spawn failure
7486
+ real_exit(Rc(127)) # exit process with code for spawn failure
7495
7487
 
7496
7488
  raise RuntimeError('Unreachable')
7497
7489
 
@@ -7508,7 +7500,7 @@ class ProcessSpawningImpl(ProcessSpawning):
7508
7500
  for i in range(3, self._server_config.minfds):
7509
7501
  if i in self._inherited_fds:
7510
7502
  continue
7511
- close_fd(i)
7503
+ close_fd(Fd(i))
7512
7504
 
7513
7505
  def _set_uid(self) -> ta.Optional[str]:
7514
7506
  if self.config.uid is None: