ominfra 0.0.0.dev130__py3-none-any.whl → 0.0.0.dev132__py3-none-any.whl

@@ -90,50 +90,54 @@ if sys.version_info < (3, 8):
90
90
  ########################################
91
91
 
92
92
 
93
- # ../../../omdev/toml/parser.py
93
+ # ../../omdev/toml/parser.py
94
94
  TomlParseFloat = ta.Callable[[str], ta.Any]
95
95
  TomlKey = ta.Tuple[str, ...]
96
96
  TomlPos = int # ta.TypeAlias
97
97
 
98
- # ../utils/collections.py
98
+ # utils/collections.py
99
99
  K = ta.TypeVar('K')
100
100
  V = ta.TypeVar('V')
101
101
 
102
- # ../../../omlish/lite/cached.py
102
+ # ../../omlish/lite/cached.py
103
103
  T = ta.TypeVar('T')
104
104
 
105
- # ../../../omlish/lite/check.py
105
+ # ../../omlish/lite/check.py
106
106
  SizedT = ta.TypeVar('SizedT', bound=ta.Sized)
107
107
 
108
- # ../../../omlish/lite/socket.py
108
+ # ../../omlish/lite/socket.py
109
109
  SocketAddress = ta.Any
110
110
  SocketHandlerFactory = ta.Callable[[SocketAddress, ta.BinaryIO, ta.BinaryIO], 'SocketHandler']
111
111
 
112
- # ../../../omlish/lite/typing.py
112
+ # ../../omlish/lite/typing.py
113
113
  A0 = ta.TypeVar('A0')
114
114
  A1 = ta.TypeVar('A1')
115
115
  A2 = ta.TypeVar('A2')
116
116
 
117
- # ../events.py
117
+ # events.py
118
118
  EventCallback = ta.Callable[['Event'], None]
119
+ ProcessOutputChannel = ta.Literal['stdout', 'stderr'] # ta.TypeAlias
119
120
 
120
- # ../../../omlish/lite/http/parsing.py
121
+ # ../../omlish/lite/contextmanagers.py
122
+ ExitStackedT = ta.TypeVar('ExitStackedT', bound='ExitStacked')
123
+
124
+ # ../../omlish/lite/http/parsing.py
121
125
  HttpHeaders = http.client.HTTPMessage # ta.TypeAlias
122
126
 
123
- # ../../../omlish/lite/inject.py
127
+ # ../../omlish/lite/inject.py
124
128
  U = ta.TypeVar('U')
125
129
  InjectorKeyCls = ta.Union[type, ta.NewType]
126
130
  InjectorProviderFn = ta.Callable[['Injector'], ta.Any]
127
131
  InjectorProviderFnMap = ta.Mapping['InjectorKey', 'InjectorProviderFn']
128
132
  InjectorBindingOrBindings = ta.Union['InjectorBinding', 'InjectorBindings']
129
133
 
130
- # ../../configs.py
134
+ # ../configs.py
131
135
  ConfigMapping = ta.Mapping[str, ta.Any]
132
136
 
133
- # ../../../omlish/lite/http/handlers.py
137
+ # ../../omlish/lite/http/handlers.py
134
138
  HttpHandler = ta.Callable[['HttpHandlerRequest'], 'HttpHandlerResponse']
135
139
 
136
- # ../../../omlish/lite/http/coroserver.py
140
+ # ../../omlish/lite/http/coroserver.py
137
141
  CoroHttpServerFactory = ta.Callable[[SocketAddress], 'CoroHttpServer']
138
142
 
139
143
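
This hunk narrows several type aliases, most notably adding ProcessOutputChannel as a Literal of the two valid channel names instead of a bare str. A minimal illustration of what the Literal alias buys under a static type checker; the emit function below is hypothetical and not part of the package:

    import typing as ta

    ProcessOutputChannel = ta.Literal['stdout', 'stderr']  # ta.TypeAlias

    def emit(channel: ProcessOutputChannel, data: bytes) -> None:
        # mypy/pyright accept only 'stdout' or 'stderr' for `channel`
        print(channel, len(data))

    emit('stderr', b'oops')    # ok
    # emit('stdin', b'...')    # flagged by a type checker: not a Literal member
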
 
@@ -991,9 +995,8 @@ class NoPermissionError(ProcessError):
991
995
 
992
996
  def drop_privileges(user: ta.Union[int, str, None]) -> ta.Optional[str]:
993
997
  """
994
- Drop privileges to become the specified user, which may be a username or uid. Called for supervisord startup
995
- and when spawning subprocesses. Returns None on success or a string error message if privileges could not be
996
- dropped.
998
+ Drop privileges to become the specified user, which may be a username or uid. Called for supervisord startup and
999
+ when spawning subprocesses. Returns None on success or a string error message if privileges could not be dropped.
997
1000
  """
998
1001
 
999
1002
  if user is None:
@@ -1017,9 +1020,8 @@ def drop_privileges(user: ta.Union[int, str, None]) -> ta.Optional[str]:
1017
1020
  current_uid = os.getuid()
1018
1021
 
1019
1022
  if current_uid == uid:
1020
- # do nothing and return successfully if the uid is already the current one. this allows a supervisord
1021
- # running as an unprivileged user "foo" to start a process where the config has "user=foo" (same user) in
1022
- # it.
1023
+ # do nothing and return successfully if the uid is already the current one. this allows a supervisord running as
1024
+ # an unprivileged user "foo" to start a process where the config has "user=foo" (same user) in it.
1023
1025
  return None
1024
1026
 
1025
1027
  if current_uid != 0:
@@ -1079,6 +1081,17 @@ class ProcessState(enum.IntEnum):
1079
1081
  return self in SIGNALABLE_STATES
1080
1082
 
1081
1083
 
1084
+ # http://supervisord.org/subprocess.html
1085
+ STATE_TRANSITIONS = {
1086
+ ProcessState.STOPPED: (ProcessState.STARTING,),
1087
+ ProcessState.STARTING: (ProcessState.RUNNING, ProcessState.BACKOFF, ProcessState.STOPPING),
1088
+ ProcessState.RUNNING: (ProcessState.STOPPING, ProcessState.EXITED),
1089
+ ProcessState.BACKOFF: (ProcessState.STARTING, ProcessState.FATAL),
1090
+ ProcessState.STOPPING: (ProcessState.STOPPED,),
1091
+ ProcessState.EXITED: (ProcessState.STARTING,),
1092
+ ProcessState.FATAL: (ProcessState.STARTING,),
1093
+ }
1094
+
1082
1095
  STOPPED_STATES = (
1083
1096
  ProcessState.STOPPED,
1084
1097
  ProcessState.EXITED,
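
This hunk adds an explicit table of the legal supervisord state transitions. The diff does not show how the table is consumed, so the check below is only a guess at the kind of validation such a mapping enables; can_transition is hypothetical and the enum member values are placeholders:

    import enum

    class ProcessState(enum.IntEnum):
        # member values are placeholders; only the names matter for this sketch
        STOPPED = enum.auto()
        STARTING = enum.auto()
        RUNNING = enum.auto()
        BACKOFF = enum.auto()
        STOPPING = enum.auto()
        EXITED = enum.auto()
        FATAL = enum.auto()

    STATE_TRANSITIONS = {
        ProcessState.STOPPED: (ProcessState.STARTING,),
        ProcessState.STARTING: (ProcessState.RUNNING, ProcessState.BACKOFF, ProcessState.STOPPING),
        ProcessState.RUNNING: (ProcessState.STOPPING, ProcessState.EXITED),
        ProcessState.BACKOFF: (ProcessState.STARTING, ProcessState.FATAL),
        ProcessState.STOPPING: (ProcessState.STOPPED,),
        ProcessState.EXITED: (ProcessState.STARTING,),
        ProcessState.FATAL: (ProcessState.STARTING,),
    }

    def can_transition(old: ProcessState, new: ProcessState) -> bool:
        # True iff the state chart at supervisord.org/subprocess.html permits old -> new
        return new in STATE_TRANSITIONS.get(old, ())

    assert can_transition(ProcessState.STARTING, ProcessState.RUNNING)
    assert not can_transition(ProcessState.STOPPED, ProcessState.RUNNING)
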
@@ -1373,8 +1386,8 @@ def strip_escapes(s: bytes) -> bytes:
1373
1386
 
1374
1387
 
1375
1388
  class SuffixMultiplier:
1376
- # d is a dictionary of suffixes to integer multipliers. If no suffixes match, default is the multiplier. Matches
1377
- # are case insensitive. Return values are in the fundamental unit.
1389
+ # d is a dictionary of suffixes to integer multipliers. If no suffixes match, default is the multiplier. Matches are
1390
+ # case insensitive. Return values are in the fundamental unit.
1378
1391
  def __init__(self, d, default=1):
1379
1392
  super().__init__()
1380
1393
  self._d = d
@@ -1578,16 +1591,16 @@ class FdIoPoller(abc.ABC):
1578
1591
  def register_readable(self, fd: int) -> bool:
1579
1592
  if fd in self._readable:
1580
1593
  return False
1581
- self._readable.add(fd)
1582
1594
  self._register_readable(fd)
1595
+ self._readable.add(fd)
1583
1596
  return True
1584
1597
 
1585
1598
  @ta.final
1586
1599
  def register_writable(self, fd: int) -> bool:
1587
1600
  if fd in self._writable:
1588
1601
  return False
1589
- self._writable.add(fd)
1590
1602
  self._register_writable(fd)
1603
+ self._writable.add(fd)
1591
1604
  return True
1592
1605
 
1593
1606
  @ta.final
@@ -1691,24 +1704,24 @@ if hasattr(select, 'poll'):
1691
1704
 
1692
1705
  #
1693
1706
 
1694
- _READ = select.POLLIN | select.POLLPRI | select.POLLHUP
1695
- _WRITE = select.POLLOUT
1696
-
1697
1707
  def _register_readable(self, fd: int) -> None:
1698
- self._update_registration(fd)
1708
+ self._update_registration(fd, r=True, w=fd in self._writable)
1699
1709
 
1700
1710
  def _register_writable(self, fd: int) -> None:
1701
- self._update_registration(fd)
1711
+ self._update_registration(fd, r=fd in self._readable, w=True)
1702
1712
 
1703
1713
  def _unregister_readable(self, fd: int) -> None:
1704
- self._update_registration(fd)
1714
+ self._update_registration(fd, r=False, w=False)
1705
1715
 
1706
1716
  def _unregister_writable(self, fd: int) -> None:
1707
- self._update_registration(fd)
1717
+ self._update_registration(fd, r=fd in self._readable, w=False)
1708
1718
 
1709
- def _update_registration(self, fd: int) -> None:
1710
- r = fd in self._readable
1711
- w = fd in self._writable
1719
+ #
1720
+
1721
+ _READ = select.POLLIN | select.POLLPRI | select.POLLHUP
1722
+ _WRITE = select.POLLOUT
1723
+
1724
+ def _update_registration(self, fd: int, *, r: bool, w: bool) -> None:
1712
1725
  if r or w:
1713
1726
  self._poller.register(fd, (self._READ if r else 0) | (self._WRITE if w else 0))
1714
1727
  else:
@@ -2121,7 +2134,7 @@ class EventCallbacks:
2121
2134
 
2122
2135
 
2123
2136
  class ProcessLogEvent(Event, abc.ABC):
2124
- channel: ta.Optional[str] = None
2137
+ channel: ta.ClassVar[ProcessOutputChannel]
2125
2138
 
2126
2139
  def __init__(self, process, pid, data):
2127
2140
  super().__init__()
@@ -2146,7 +2159,7 @@ class ProcessCommunicationEvent(Event, abc.ABC):
2146
2159
  BEGIN_TOKEN = b'<!--XSUPERVISOR:BEGIN-->'
2147
2160
  END_TOKEN = b'<!--XSUPERVISOR:END-->'
2148
2161
 
2149
- channel: ta.ClassVar[str]
2162
+ channel: ta.ClassVar[ProcessOutputChannel]
2150
2163
 
2151
2164
  def __init__(self, process, pid, data):
2152
2165
  super().__init__()
@@ -2430,7 +2443,7 @@ def decode_wait_status(sts: int) -> ta.Tuple[Rc, str]:
2430
2443
  Decode the status returned by wait() or waitpid().
2431
2444
 
2432
2445
  Return a tuple (exitstatus, message) where exitstatus is the exit status, or -1 if the process was killed by a
2433
- signal; and message is a message telling what happened. It is the caller's responsibility to display the message.
2446
+ signal; and message is a message telling what happened. It is the caller's responsibility to display the message.
2434
2447
  """
2435
2448
 
2436
2449
  if os.WIFEXITED(sts):
@@ -2518,6 +2531,64 @@ def get_user(name: str) -> User:
2518
2531
  )
2519
2532
 
2520
2533
 
2534
+ ########################################
2535
+ # ../../../omlish/lite/contextmanagers.py
2536
+
2537
+
2538
+ ##
2539
+
2540
+
2541
+ class ExitStacked:
2542
+ _exit_stack: ta.Optional[contextlib.ExitStack] = None
2543
+
2544
+ def __enter__(self: ExitStackedT) -> ExitStackedT:
2545
+ check_state(self._exit_stack is None)
2546
+ es = self._exit_stack = contextlib.ExitStack()
2547
+ es.__enter__()
2548
+ return self
2549
+
2550
+ def __exit__(self, exc_type, exc_val, exc_tb):
2551
+ if (es := self._exit_stack) is None:
2552
+ return None
2553
+ self._exit_contexts()
2554
+ return es.__exit__(exc_type, exc_val, exc_tb)
2555
+
2556
+ def _exit_contexts(self) -> None:
2557
+ pass
2558
+
2559
+ def _enter_context(self, cm: ta.ContextManager[T]) -> T:
2560
+ es = check_not_none(self._exit_stack)
2561
+ return es.enter_context(cm)
2562
+
2563
+
2564
+ ##
2565
+
2566
+
2567
+ @contextlib.contextmanager
2568
+ def defer(fn: ta.Callable) -> ta.Generator[ta.Callable, None, None]:
2569
+ try:
2570
+ yield fn
2571
+ finally:
2572
+ fn()
2573
+
2574
+
2575
+ @contextlib.contextmanager
2576
+ def attr_setting(obj, attr, val, *, default=None): # noqa
2577
+ not_set = object()
2578
+ orig = getattr(obj, attr, not_set)
2579
+ try:
2580
+ setattr(obj, attr, val)
2581
+ if orig is not not_set:
2582
+ yield orig
2583
+ else:
2584
+ yield default
2585
+ finally:
2586
+ if orig is not_set:
2587
+ delattr(obj, attr)
2588
+ else:
2589
+ setattr(obj, attr, orig)
2590
+
2591
+
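
A short usage sketch of the defer helper added above, paired with a stock contextlib.ExitStack the way the new HttpServer wiring uses it later in this diff. The defer copy is reproduced inline so the snippet runs standalone, and the Resource class is made up for illustration:

    import contextlib
    import typing as ta

    @contextlib.contextmanager
    def defer(fn: ta.Callable) -> ta.Generator[ta.Callable, None, None]:
        # run fn when the context exits, whether or not the body raised
        try:
            yield fn
        finally:
            fn()

    class Resource:  # stand-in for e.g. a listening server socket
        def close(self) -> None:
            print('closed')

    with contextlib.ExitStack() as es:
        r = Resource()
        # same pattern as exit_stack.enter_context(defer(self._server.close)) below
        es.enter_context(defer(r.close))
        print('working')
    # prints 'working' then 'closed'
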
2521
2592
  ########################################
2522
2593
  # ../../../omlish/lite/fdio/handlers.py
2523
2594
 
@@ -2626,19 +2697,40 @@ if sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
2626
2697
  #
2627
2698
 
2628
2699
  def _register_readable(self, fd: int) -> None:
2629
- self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_ADD)
2700
+ self._update_registration(fd, 'read', 'add')
2630
2701
 
2631
2702
  def _register_writable(self, fd: int) -> None:
2632
- self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD)
2703
+ self._update_registration(fd, 'write', 'add')
2633
2704
 
2634
2705
  def _unregister_readable(self, fd: int) -> None:
2635
- self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE)
2706
+ self._update_registration(fd, 'read', 'del')
2636
2707
 
2637
2708
  def _unregister_writable(self, fd: int) -> None:
2638
- self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)
2709
+ self._update_registration(fd, 'write', 'del')
2710
+
2711
+ #
2639
2712
 
2640
- def _control(self, fd: int, filter: int, flags: int) -> None: # noqa
2641
- ke = select.kevent(fd, filter=filter, flags=flags)
2713
+ _CONTROL_FILTER_BY_READ_OR_WRITE: ta.ClassVar[ta.Mapping[ta.Literal['read', 'write'], int]] = {
2714
+ 'read': select.KQ_FILTER_READ,
2715
+ 'write': select.KQ_FILTER_WRITE,
2716
+ }
2717
+
2718
+ _CONTROL_FLAGS_BY_ADD_OR_DEL: ta.ClassVar[ta.Mapping[ta.Literal['add', 'del'], int]] = {
2719
+ 'add': select.KQ_EV_ADD,
2720
+ 'del': select.KQ_EV_DELETE,
2721
+ }
2722
+
2723
+ def _update_registration(
2724
+ self,
2725
+ fd: int,
2726
+ read_or_write: ta.Literal['read', 'write'],
2727
+ add_or_del: ta.Literal['add', 'del'],
2728
+ ) -> None: # noqa
2729
+ ke = select.kevent(
2730
+ fd,
2731
+ filter=self._CONTROL_FILTER_BY_READ_OR_WRITE[read_or_write],
2732
+ flags=self._CONTROL_FLAGS_BY_ADD_OR_DEL[add_or_del],
2733
+ )
2642
2734
  kq = self._get_kqueue()
2643
2735
  try:
2644
2736
  kq.control([ke], 0)
@@ -2649,7 +2741,8 @@ if sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
2649
2741
  pass
2650
2742
  elif exc.errno == errno.ENOENT:
2651
2743
  # Can happen when trying to remove an already closed socket
2652
- pass
2744
+ if add_or_del == 'add':
2745
+ raise
2653
2746
  else:
2654
2747
  raise
2655
2748
 
@@ -5068,8 +5161,8 @@ class ProcessPipes:
5068
5161
 
5069
5162
  def make_process_pipes(stderr=True) -> ProcessPipes:
5070
5163
  """
5071
- Create pipes for parent to child stdin/stdout/stderr communications. Open fd in non-blocking mode so we can
5072
- read them in the mainloop without blocking. If stderr is False, don't create a pipe for stderr.
5164
+ Create pipes for parent to child stdin/stdout/stderr communications. Open fd in non-blocking mode so we can read
5165
+ them in the mainloop without blocking. If stderr is False, don't create a pipe for stderr.
5073
5166
  """
5074
5167
 
5075
5168
  pipes: ta.Dict[str, ta.Optional[Fd]] = {
@@ -5219,32 +5312,32 @@ class ProcessConfig:
5219
5312
  umask: ta.Optional[int] = None
5220
5313
  priority: int = 999
5221
5314
 
5222
- autostart: bool = True
5223
- autorestart: str = 'unexpected'
5315
+ auto_start: bool = True
5316
+ auto_restart: str = 'unexpected'
5224
5317
 
5225
- startsecs: int = 1
5226
- startretries: int = 3
5318
+ start_secs: int = 1
5319
+ start_retries: int = 3
5227
5320
 
5228
- numprocs: int = 1
5229
- numprocs_start: int = 0
5321
+ num_procs: int = 1
5322
+ num_procs_start: int = 0
5230
5323
 
5231
5324
  @dc.dataclass(frozen=True)
5232
5325
  class Log:
5233
5326
  file: ta.Optional[str] = None
5234
- capture_maxbytes: ta.Optional[int] = None
5327
+ capture_max_bytes: ta.Optional[int] = None
5235
5328
  events_enabled: bool = False
5236
5329
  syslog: bool = False
5237
5330
  backups: ta.Optional[int] = None
5238
- maxbytes: ta.Optional[int] = None
5331
+ max_bytes: ta.Optional[int] = None
5239
5332
 
5240
5333
  stdout: Log = Log()
5241
5334
  stderr: Log = Log()
5242
5335
 
5243
- stopsignal: int = signal.SIGTERM
5244
- stopwaitsecs: int = 10
5245
- stopasgroup: bool = False
5336
+ stop_signal: int = signal.SIGTERM
5337
+ stop_wait_secs: int = 10
5338
+ stop_as_group: bool = False
5246
5339
 
5247
- killasgroup: bool = False
5340
+ kill_as_group: bool = False
5248
5341
 
5249
5342
  exitcodes: ta.Sequence[int] = (0,)
5250
5343
 
@@ -5269,14 +5362,14 @@ class ServerConfig:
5269
5362
  umask: int = 0o22
5270
5363
  directory: ta.Optional[str] = None
5271
5364
  logfile: str = 'supervisord.log'
5272
- logfile_maxbytes: int = 50 * 1024 * 1024
5365
+ logfile_max_bytes: int = 50 * 1024 * 1024
5273
5366
  logfile_backups: int = 10
5274
5367
  loglevel: int = logging.INFO
5275
5368
  pidfile: str = 'supervisord.pid'
5276
5369
  identifier: str = 'supervisor'
5277
5370
  child_logdir: str = '/dev/null'
5278
- minfds: int = 1024
5279
- minprocs: int = 200
5371
+ min_fds: int = 1024
5372
+ min_procs: int = 200
5280
5373
  nocleanup: bool = False
5281
5374
  strip_ansi: bool = False
5282
5375
  silent: bool = False
@@ -5289,7 +5382,7 @@ class ServerConfig:
5289
5382
  umask: ta.Union[int, str] = 0o22,
5290
5383
  directory: ta.Optional[str] = None,
5291
5384
  logfile: str = 'supervisord.log',
5292
- logfile_maxbytes: ta.Union[int, str] = 50 * 1024 * 1024,
5385
+ logfile_max_bytes: ta.Union[int, str] = 50 * 1024 * 1024,
5293
5386
  loglevel: ta.Union[int, str] = logging.INFO,
5294
5387
  pidfile: str = 'supervisord.pid',
5295
5388
  child_logdir: ta.Optional[str] = None,
@@ -5299,7 +5392,7 @@ class ServerConfig:
5299
5392
  umask=parse_octal(umask),
5300
5393
  directory=check_existing_dir(directory) if directory is not None else None,
5301
5394
  logfile=check_path_with_existing_dir(logfile),
5302
- logfile_maxbytes=parse_bytes_size(logfile_maxbytes),
5395
+ logfile_max_bytes=parse_bytes_size(logfile_max_bytes),
5303
5396
  loglevel=parse_logging_level(loglevel),
5304
5397
  pidfile=check_path_with_existing_dir(pidfile),
5305
5398
  child_logdir=child_logdir if child_logdir else tempfile.gettempdir(),
@@ -5953,7 +6046,7 @@ class HasDispatchers(abc.ABC):
5953
6046
  class ProcessDispatcher(FdIoHandler, abc.ABC):
5954
6047
  @property
5955
6048
  @abc.abstractmethod
5956
- def channel(self) -> str:
6049
+ def channel(self) -> ProcessOutputChannel:
5957
6050
  raise NotImplementedError
5958
6051
 
5959
6052
  @property
@@ -6242,7 +6335,7 @@ class BaseProcessDispatcherImpl(ProcessDispatcher, abc.ABC):
6242
6335
  def __init__(
6243
6336
  self,
6244
6337
  process: Process,
6245
- channel: str,
6338
+ channel: ProcessOutputChannel,
6246
6339
  fd: Fd,
6247
6340
  *,
6248
6341
  event_callbacks: EventCallbacks,
@@ -6270,7 +6363,7 @@ class BaseProcessDispatcherImpl(ProcessDispatcher, abc.ABC):
6270
6363
  return self._process
6271
6364
 
6272
6365
  @property
6273
- def channel(self) -> str:
6366
+ def channel(self) -> ProcessOutputChannel:
6274
6367
  return self._channel
6275
6368
 
6276
6369
  def fd(self) -> Fd:
@@ -6360,7 +6453,7 @@ class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispat
6360
6453
  channel = self._channel # noqa
6361
6454
 
6362
6455
  logfile = self._lc.file
6363
- maxbytes = self._lc.maxbytes # noqa
6456
+ max_bytes = self._lc.max_bytes # noqa
6364
6457
  backups = self._lc.backups # noqa
6365
6458
  to_syslog = self._lc.syslog
6366
6459
 
@@ -6372,8 +6465,8 @@ class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispat
6372
6465
  # self.normal_log,
6373
6466
  # filename=logfile,
6374
6467
  # fmt='%(message)s',
6375
- # rotating=bool(maxbytes), # optimization
6376
- # maxbytes=maxbytes,
6468
+ # rotating=bool(max_bytes), # optimization
6469
+ # max_bytes=max_bytes,
6377
6470
  # backups=backups,
6378
6471
  # )
6379
6472
 
@@ -6385,17 +6478,17 @@ class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispat
6385
6478
 
6386
6479
  def _init_capture_log(self) -> None:
6387
6480
  """
6388
- Configure the capture log for this process. This log is used to temporarily capture output when special output
6481
+ Configure the capture log for this process. This log is used to temporarily capture output when special output
6389
6482
  is detected. Sets self.capture_log if capturing is enabled.
6390
6483
  """
6391
6484
 
6392
- capture_maxbytes = self._lc.capture_maxbytes
6393
- if capture_maxbytes:
6485
+ capture_max_bytes = self._lc.capture_max_bytes
6486
+ if capture_max_bytes:
6394
6487
  self._capture_log = logging.getLogger(__name__)
6395
6488
  # loggers.handle_boundIO(
6396
6489
  # self._capture_log,
6397
6490
  # fmt='%(message)s',
6398
- # maxbytes=capture_maxbytes,
6491
+ # max_bytes=capture_max_bytes,
6399
6492
  # )
6400
6493
 
6401
6494
  def remove_logs(self) -> None:
@@ -6508,7 +6601,7 @@ class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispat
6508
6601
  self._output_buffer += data
6509
6602
  self.record_output()
6510
6603
  if not data:
6511
- # if we get no data back from the pipe, it means that the child process has ended. See
6604
+ # if we get no data back from the pipe, it means that the child process has ended. See
6512
6605
  # mail.python.org/pipermail/python-dev/2004-August/046850.html
6513
6606
  self.close()
6514
6607
 
@@ -6517,7 +6610,7 @@ class ProcessInputDispatcherImpl(BaseProcessDispatcherImpl, ProcessInputDispatch
6517
6610
  def __init__(
6518
6611
  self,
6519
6612
  process: Process,
6520
- channel: str,
6613
+ channel: ProcessOutputChannel,
6521
6614
  fd: Fd,
6522
6615
  *,
6523
6616
  event_callbacks: EventCallbacks,
@@ -6717,21 +6810,21 @@ class SupervisorSetupImpl(SupervisorSetup):
6717
6810
  def _cleanup_fds(self) -> None:
6718
6811
  # try to close any leaked file descriptors (for reload)
6719
6812
  start = 5
6720
- os.closerange(start, self._config.minfds)
6813
+ os.closerange(start, self._config.min_fds)
6721
6814
 
6722
6815
  #
6723
6816
 
6724
6817
  def _set_uid_or_exit(self) -> None:
6725
6818
  """
6726
- Set the uid of the supervisord process. Called during supervisord startup only. No return value. Exits the
6819
+ Set the uid of the supervisord process. Called during supervisord startup only. No return value. Exits the
6727
6820
  process via usage() if privileges could not be dropped.
6728
6821
  """
6729
6822
 
6730
6823
  if self._user is None:
6731
6824
  if os.getuid() == 0:
6732
6825
  warnings.warn(
6733
- 'Supervisor is running as root. Privileges were not dropped because no user is specified in the '
6734
- 'config file. If you intend to run as root, you can set user=root in the config file to avoid '
6826
+ 'Supervisor is running as root. Privileges were not dropped because no user is specified in the '
6827
+ 'config file. If you intend to run as root, you can set user=root in the config file to avoid '
6735
6828
  'this message.',
6736
6829
  )
6737
6830
  else:
@@ -6745,8 +6838,8 @@ class SupervisorSetupImpl(SupervisorSetup):
6745
6838
 
6746
6839
  def _set_rlimits_or_exit(self) -> None:
6747
6840
  """
6748
- Set the rlimits of the supervisord process. Called during supervisord startup only. No return value. Exits
6749
- the process via usage() if any rlimits could not be set.
6841
+ Set the rlimits of the supervisord process. Called during supervisord startup only. No return value. Exits the
6842
+ process via usage() if any rlimits could not be set.
6750
6843
  """
6751
6844
 
6752
6845
  limits = []
@@ -6755,12 +6848,12 @@ class SupervisorSetupImpl(SupervisorSetup):
6755
6848
  limits.append({
6756
6849
  'msg': (
6757
6850
  'The minimum number of file descriptors required to run this process is %(min_limit)s as per the '
6758
- '"minfds" command-line argument or config file setting. The current environment will only allow '
6759
- 'you to open %(hard)s file descriptors. Either raise the number of usable file descriptors in '
6760
- 'your environment (see README.rst) or lower the minfds setting in the config file to allow the '
6851
+ '"min_fds" command-line argument or config file setting. The current environment will only allow '
6852
+ 'you to open %(hard)s file descriptors. Either raise the number of usable file descriptors in '
6853
+ 'your environment (see README.rst) or lower the min_fds setting in the config file to allow the '
6761
6854
  'process to start.'
6762
6855
  ),
6763
- 'min': self._config.minfds,
6856
+ 'min': self._config.min_fds,
6764
6857
  'resource': resource.RLIMIT_NOFILE,
6765
6858
  'name': 'RLIMIT_NOFILE',
6766
6859
  })
@@ -6770,11 +6863,11 @@ class SupervisorSetupImpl(SupervisorSetup):
6770
6863
  'msg': (
6771
6864
  'The minimum number of available processes required to run this program is %(min_limit)s as per '
6772
6865
  'the "minprocs" command-line argument or config file setting. The current environment will only '
6773
- 'allow you to open %(hard)s processes. Either raise the number of usable processes in your '
6866
+ 'allow you to open %(hard)s processes. Either raise the number of usable processes in your '
6774
6867
  'environment (see README.rst) or lower the minprocs setting in the config file to allow the '
6775
6868
  'program to start.'
6776
6869
  ),
6777
- 'min': self._config.minprocs,
6870
+ 'min': self._config.min_procs,
6778
6871
  'resource': resource.RLIMIT_NPROC,
6779
6872
  'name': 'RLIMIT_NPROC',
6780
6873
  })
@@ -6860,11 +6953,11 @@ class SupervisorSetupImpl(SupervisorSetup):
6860
6953
  dl.after_daemonize()
6861
6954
 
6862
6955
  def _do_daemonize(self) -> None:
6863
- # To daemonize, we need to become the leader of our own session (process) group. If we do not, signals sent to
6864
- # our parent process will also be sent to us. This might be bad because signals such as SIGINT can be sent to
6956
+ # To daemonize, we need to become the leader of our own session (process) group. If we do not, signals sent to
6957
+ # our parent process will also be sent to us. This might be bad because signals such as SIGINT can be sent to
6865
6958
  # our parent process during normal (uninteresting) operations such as when we press Ctrl-C in the parent
6866
6959
  # terminal window to escape from a logtail command. To disassociate ourselves from our parent's session group we
6867
- # use os.setsid. It means "set session id", which has the effect of disassociating a process from is current
6960
+ # use os.setsid. It means "set session id", which has the effect of disassociating a process from is current
6868
6961
  # session and process group and setting itself up as a new session leader.
6869
6962
  #
6870
6963
  # Unfortunately we cannot call setsid if we're already a session group leader, so we use "fork" to make a copy
@@ -6896,7 +6989,7 @@ class SupervisorSetupImpl(SupervisorSetup):
6896
6989
  os.dup2(2, os.open('/dev/null', os.O_WRONLY))
6897
6990
 
6898
6991
  # XXX Stevens, in his Advanced Unix book, section 13.3 (page 417) recommends calling umask(0) and closing unused
6899
- # file descriptors. In his Network Programming book, he additionally recommends ignoring SIGHUP and forking
6992
+ # file descriptors. In his Network Programming book, he additionally recommends ignoring SIGHUP and forking
6900
6993
  # again after the setsid() call, for obscure SVR4 reasons.
6901
6994
  os.setsid()
6902
6995
  os.umask(self._config.umask)
@@ -7026,9 +7119,9 @@ class IoManager(HasDispatchers):
7026
7119
  )
7027
7120
 
7028
7121
  timeout = 1 # this cannot be fewer than the smallest TickEvent (5)
7029
- log.info(f'Polling: {timeout=}') # noqa
7122
+
7030
7123
  polled = self._poller.poll(timeout)
7031
- log.info(f'Polled: {polled=}') # noqa
7124
+
7032
7125
  if polled.msg is not None:
7033
7126
  log.error(polled.msg)
7034
7127
  if polled.exc is not None:
@@ -7153,6 +7246,8 @@ class HttpServer(HasDispatchers):
7153
7246
  self,
7154
7247
  handler: Handler,
7155
7248
  addr: Address = Address(('localhost', 8000)),
7249
+ *,
7250
+ exit_stack: contextlib.ExitStack,
7156
7251
  ) -> None:
7157
7252
  super().__init__()
7158
7253
 
@@ -7163,6 +7258,8 @@ class HttpServer(HasDispatchers):
7163
7258
 
7164
7259
  self._conns: ta.List[CoroHttpServerConnectionFdIoHandler] = []
7165
7260
 
7261
+ exit_stack.enter_context(defer(self._server.close)) # noqa
7262
+
7166
7263
  def get_dispatchers(self) -> Dispatchers:
7167
7264
  l = []
7168
7265
  for c in self._conns:
@@ -7207,6 +7304,7 @@ class SupervisorHttpHandler:
7207
7304
  'processes': {
7208
7305
  p.name: {
7209
7306
  'pid': p.pid,
7307
+ 'state': p.state.name,
7210
7308
  }
7211
7309
  for p in g
7212
7310
  },
@@ -7275,7 +7373,7 @@ class ProcessImpl(Process):
7275
7373
 
7276
7374
  self._killing = False # true if we are trying to kill this process
7277
7375
 
7278
- self._backoff = 0 # backoff counter (to startretries)
7376
+ self._backoff = 0 # backoff counter (to start_retries)
7279
7377
 
7280
7378
  self._exitstatus: ta.Optional[Rc] = None # status attached to dead process by finish()
7281
7379
  self._spawn_err: ta.Optional[str] = None # error message attached by spawn() if any
@@ -7352,7 +7450,7 @@ class ProcessImpl(Process):
7352
7450
  self._pipes = sp.pipes
7353
7451
  self._dispatchers = sp.dispatchers
7354
7452
 
7355
- self._delay = time.time() + self.config.startsecs
7453
+ self._delay = time.time() + self.config.start_secs
7356
7454
 
7357
7455
  return sp.pid
7358
7456
 
@@ -7410,17 +7508,17 @@ class ProcessImpl(Process):
7410
7508
 
7411
7509
  if self._state == ProcessState.STARTING:
7412
7510
  self._last_start = min(test_time, self._last_start)
7413
- if self._delay > 0 and test_time < (self._delay - self._config.startsecs):
7414
- self._delay = test_time + self._config.startsecs
7511
+ if self._delay > 0 and test_time < (self._delay - self._config.start_secs):
7512
+ self._delay = test_time + self._config.start_secs
7415
7513
 
7416
7514
  elif self._state == ProcessState.RUNNING:
7417
- if test_time > self._last_start and test_time < (self._last_start + self._config.startsecs):
7418
- self._last_start = test_time - self._config.startsecs
7515
+ if test_time > self._last_start and test_time < (self._last_start + self._config.start_secs):
7516
+ self._last_start = test_time - self._config.start_secs
7419
7517
 
7420
7518
  elif self._state == ProcessState.STOPPING:
7421
7519
  self._last_stop_report = min(test_time, self._last_stop_report)
7422
- if self._delay > 0 and test_time < (self._delay - self._config.stopwaitsecs):
7423
- self._delay = test_time + self._config.stopwaitsecs
7520
+ if self._delay > 0 and test_time < (self._delay - self._config.stop_wait_secs):
7521
+ self._delay = test_time + self._config.stop_wait_secs
7424
7522
 
7425
7523
  elif self._state == ProcessState.BACKOFF:
7426
7524
  if self._delay > 0 and test_time < (self._delay - self._backoff):
@@ -7429,7 +7527,7 @@ class ProcessImpl(Process):
7429
7527
  def stop(self) -> ta.Optional[str]:
7430
7528
  self._administrative_stop = True
7431
7529
  self._last_stop_report = 0
7432
- return self.kill(self._config.stopsignal)
7530
+ return self.kill(self._config.stop_signal)
7433
7531
 
7434
7532
  def stop_report(self) -> None:
7435
7533
  """Log a 'waiting for x to stop' message with throttling."""
@@ -7452,7 +7550,7 @@ class ProcessImpl(Process):
7452
7550
 
7453
7551
  def kill(self, sig: int) -> ta.Optional[str]:
7454
7552
  """
7455
- Send a signal to the subprocess with the intention to kill it (to make it exit). This may or may not actually
7553
+ Send a signal to the subprocess with the intention to kill it (to make it exit). This may or may not actually
7456
7554
  kill it.
7457
7555
 
7458
7556
  Return None if the signal was sent, or an error message string if an error occurred or if the subprocess is not
@@ -7460,8 +7558,8 @@ class ProcessImpl(Process):
7460
7558
  """
7461
7559
  now = time.time()
7462
7560
 
7463
- # If the process is in BACKOFF and we want to stop or kill it, then BACKOFF -> STOPPED. This is needed because
7464
- # if startretries is a large number and the process isn't starting successfully, the stop request would be
7561
+ # If the process is in BACKOFF and we want to stop or kill it, then BACKOFF -> STOPPED. This is needed because
7562
+ # if start_retries is a large number and the process isn't starting successfully, the stop request would be
7465
7563
  # blocked for a long time waiting for the retries.
7466
7564
  if self._state == ProcessState.BACKOFF:
7467
7565
  log.debug('Attempted to kill %s, which is in BACKOFF state.', self.name)
@@ -7476,25 +7574,25 @@ class ProcessImpl(Process):
7476
7574
 
7477
7575
  # If we're in the stopping state, then we've already sent the stop signal and this is the kill signal
7478
7576
  if self._state == ProcessState.STOPPING:
7479
- killasgroup = self._config.killasgroup
7577
+ kill_as_group = self._config.kill_as_group
7480
7578
  else:
7481
- killasgroup = self._config.stopasgroup
7579
+ kill_as_group = self._config.stop_as_group
7482
7580
 
7483
7581
  as_group = ''
7484
- if killasgroup:
7582
+ if kill_as_group:
7485
7583
  as_group = 'process group '
7486
7584
 
7487
7585
  log.debug('killing %s (pid %s) %s with signal %s', self.name, self.pid, as_group, sig_name(sig))
7488
7586
 
7489
7587
  # RUNNING/STARTING/STOPPING -> STOPPING
7490
7588
  self._killing = True
7491
- self._delay = now + self._config.stopwaitsecs
7492
- # we will already be in the STOPPING state if we're doing a SIGKILL as a result of overrunning stopwaitsecs
7589
+ self._delay = now + self._config.stop_wait_secs
7590
+ # we will already be in the STOPPING state if we're doing a SIGKILL as a result of overrunning stop_wait_secs
7493
7591
  self.check_in_state(ProcessState.RUNNING, ProcessState.STARTING, ProcessState.STOPPING)
7494
7592
  self.change_state(ProcessState.STOPPING)
7495
7593
 
7496
7594
  kpid = int(self.pid)
7497
- if killasgroup:
7595
+ if kill_as_group:
7498
7596
  # send to the whole process group instead
7499
7597
  kpid = -kpid
7500
7598
 
@@ -7504,7 +7602,7 @@ class ProcessImpl(Process):
7504
7602
  except OSError as exc:
7505
7603
  if exc.errno == errno.ESRCH:
7506
7604
  log.debug('unable to signal %s (pid %s), it probably just exited on its own: %s', self.name, self.pid, str(exc)) # noqa
7507
- # we could change the state here but we intentionally do not. we will do it during normal SIGCHLD
7605
+ # we could change the state here but we intentionally do not. we will do it during normal SIGCHLD
7508
7606
  # processing.
7509
7607
  return None
7510
7608
  raise
@@ -7547,7 +7645,7 @@ class ProcessImpl(Process):
7547
7645
  self.pid,
7548
7646
  str(exc),
7549
7647
  )
7550
- # we could change the state here but we intentionally do not. we will do it during normal SIGCHLD
7648
+ # we could change the state here but we intentionally do not. we will do it during normal SIGCHLD
7551
7649
  # processing.
7552
7650
  return None
7553
7651
  raise
@@ -7574,8 +7672,7 @@ class ProcessImpl(Process):
7574
7672
  self._last_stop = now
7575
7673
 
7576
7674
  if now > self._last_start:
7577
- log.info(f'{now - self._last_start=}') # noqa
7578
- too_quickly = now - self._last_start < self._config.startsecs
7675
+ too_quickly = now - self._last_start < self._config.start_secs
7579
7676
  else:
7580
7677
  too_quickly = False
7581
7678
  log.warning(
@@ -7647,8 +7744,8 @@ class ProcessImpl(Process):
7647
7744
  if self._supervisor_states.state > SupervisorState.RESTARTING:
7648
7745
  # dont start any processes if supervisor is shutting down
7649
7746
  if state == ProcessState.EXITED:
7650
- if self._config.autorestart:
7651
- if self._config.autorestart is RestartUnconditionally:
7747
+ if self._config.auto_restart:
7748
+ if self._config.auto_restart is RestartUnconditionally:
7652
7749
  # EXITED -> STARTING
7653
7750
  self.spawn()
7654
7751
  elif self._exitstatus not in self._config.exitcodes:
@@ -7656,29 +7753,29 @@ class ProcessImpl(Process):
7656
7753
  self.spawn()
7657
7754
 
7658
7755
  elif state == ProcessState.STOPPED and not self._last_start:
7659
- if self._config.autostart:
7756
+ if self._config.auto_start:
7660
7757
  # STOPPED -> STARTING
7661
7758
  self.spawn()
7662
7759
 
7663
7760
  elif state == ProcessState.BACKOFF:
7664
- if self._backoff <= self._config.startretries:
7761
+ if self._backoff <= self._config.start_retries:
7665
7762
  if now > self._delay:
7666
7763
  # BACKOFF -> STARTING
7667
7764
  self.spawn()
7668
7765
 
7669
7766
  if state == ProcessState.STARTING:
7670
- if now - self._last_start > self._config.startsecs:
7767
+ if now - self._last_start > self._config.start_secs:
7671
7768
  # STARTING -> RUNNING if the proc has started successfully and it has stayed up for at least
7672
- # proc.config.startsecs,
7769
+ # proc.config.start_secs,
7673
7770
  self._delay = 0
7674
7771
  self._backoff = 0
7675
7772
  self.check_in_state(ProcessState.STARTING)
7676
7773
  self.change_state(ProcessState.RUNNING)
7677
- msg = ('entered RUNNING state, process has stayed up for > than %s seconds (startsecs)' % self._config.startsecs) # noqa
7774
+ msg = ('entered RUNNING state, process has stayed up for > than %s seconds (start_secs)' % self._config.start_secs) # noqa
7678
7775
  log.info('success: %s %s', self.name, msg)
7679
7776
 
7680
7777
  if state == ProcessState.BACKOFF:
7681
- if self._backoff > self._config.startretries:
7778
+ if self._backoff > self._config.start_retries:
7682
7779
  # BACKOFF -> FATAL if the proc has exceeded its number of retries
7683
7780
  self.give_up()
7684
7781
  msg = ('entered FATAL state, too many start retries too quickly')
@@ -7687,7 +7784,7 @@ class ProcessImpl(Process):
7687
7784
  elif state == ProcessState.STOPPING:
7688
7785
  time_left = self._delay - now
7689
7786
  if time_left <= 0:
7690
- # kill processes which are taking too long to stop with a final sigkill. if this doesn't kill it, the
7787
+ # kill processes which are taking too long to stop with a final sigkill. if this doesn't kill it, the
7691
7788
  # process will be stuck in the STOPPING state forever.
7692
7789
  log.warning('killing \'%s\' (%s) with SIGKILL', self.name, self.pid)
7693
7790
  self.kill(signal.SIGKILL)
@@ -8039,7 +8136,7 @@ class ProcessSpawningImpl(ProcessSpawning):
8039
8136
  else:
8040
8137
  os.dup2(check_not_none(pipes.child_stderr), 2)
8041
8138
 
8042
- for i in range(3, self._server_config.minfds):
8139
+ for i in range(3, self._server_config.min_fds):
8043
8140
  if i in self._inherited_fds:
8044
8141
  continue
8045
8142
  close_fd(Fd(i))
@@ -8229,11 +8326,29 @@ class Supervisor:
8229
8326
  #
8230
8327
 
8231
8328
  def _run_once(self) -> None:
8232
- now = time.time()
8233
- self._poll()
8234
- log.info(f'Poll took {time.time() - now}') # noqa
8329
+ if self._states.state < SupervisorState.RUNNING:
8330
+ if not self._stopping:
8331
+ # first time, set the stopping flag, do a notification and set stop_groups
8332
+ self._stopping = True
8333
+ self._stop_groups = sorted(self._process_groups)
8334
+ self._event_callbacks.notify(SupervisorStoppingEvent())
8335
+
8336
+ self._ordered_stop_groups_phase_1()
8337
+
8338
+ if not self.shutdown_report():
8339
+ # if there are no unstopped processes (we're done killing everything), it's OK to shutdown or reload
8340
+ raise ExitNow
8341
+
8342
+ self._io.poll()
8343
+
8344
+ for group in sorted(self._process_groups):
8345
+ for process in group:
8346
+ process.transition()
8347
+
8235
8348
  self._reap()
8349
+
8236
8350
  self._signal_handler.handle_signals()
8351
+
8237
8352
  self._tick()
8238
8353
 
8239
8354
  if self._states.state < SupervisorState.RUNNING:
@@ -8255,38 +8370,18 @@ class Supervisor:
8255
8370
  # down, so push it back on to the end of the stop group queue
8256
8371
  self._stop_groups.append(group)
8257
8372
 
8258
- def _poll(self) -> None:
8259
- sorted_groups = list(self._process_groups)
8260
- sorted_groups.sort()
8261
-
8262
- if self._states.state < SupervisorState.RUNNING:
8263
- if not self._stopping:
8264
- # first time, set the stopping flag, do a notification and set stop_groups
8265
- self._stopping = True
8266
- self._stop_groups = sorted_groups[:]
8267
- self._event_callbacks.notify(SupervisorStoppingEvent())
8268
-
8269
- self._ordered_stop_groups_phase_1()
8270
-
8271
- if not self.shutdown_report():
8272
- # if there are no unstopped processes (we're done killing everything), it's OK to shutdown or reload
8273
- raise ExitNow
8274
-
8275
- self._io.poll()
8276
-
8277
- for group in sorted_groups:
8278
- for process in group:
8279
- process.transition()
8373
+ #
8280
8374
 
8281
8375
  def _reap(self, *, once: bool = False, depth: int = 0) -> None:
8282
8376
  if depth >= 100:
8283
8377
  return
8284
8378
 
8285
8379
  wp = waitpid()
8286
- log.info(f'Waited pid: {wp}') # noqa
8380
+
8287
8381
  if wp is None or not wp.pid:
8288
8382
  return
8289
8383
 
8384
+ log.info(f'Waited pid: {wp}') # noqa
8290
8385
  process = self._pid_history.get(wp.pid, None)
8291
8386
  if process is None:
8292
8387
  _, msg = decode_wait_status(wp.sts)
@@ -8299,6 +8394,8 @@ class Supervisor:
8299
8394
  # keep reaping until no more kids to reap, but don't recurse infinitely
8300
8395
  self._reap(once=False, depth=depth + 1)
8301
8396
 
8397
+ #
8398
+
8302
8399
  def _tick(self, now: ta.Optional[float] = None) -> None:
8303
8400
  """Send one or more 'tick' events when the timeslice related to the period for the event type rolls over"""
8304
8401
 
@@ -8329,7 +8426,7 @@ class WaitedPid(ta.NamedTuple):
8329
8426
 
8330
8427
 
8331
8428
  def waitpid() -> ta.Optional[WaitedPid]:
8332
- # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
8429
+ # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
8333
8430
  # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
8334
8431
  # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
8335
8432
  # normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
@@ -8364,6 +8461,7 @@ class _FdIoPollerDaemonizeListener(DaemonizeListener):
8364
8461
 
8365
8462
 
8366
8463
  def bind_server(
8464
+ exit_stack: contextlib.ExitStack,
8367
8465
  config: ServerConfig,
8368
8466
  *,
8369
8467
  server_epoch: ta.Optional[ServerEpoch] = None,
@@ -8372,6 +8470,8 @@ def bind_server(
8372
8470
  lst: ta.List[InjectorBindingOrBindings] = [
8373
8471
  inj.bind(config),
8374
8472
 
8473
+ inj.bind(exit_stack),
8474
+
8375
8475
  inj.bind_array(DaemonizeListener),
8376
8476
  inj.bind_array_type(DaemonizeListener, DaemonizeListeners),
8377
8477
 
@@ -8427,8 +8527,10 @@ def bind_server(
8427
8527
  PollFdIoPoller,
8428
8528
  SelectFdIoPoller,
8429
8529
  ]))
8430
- lst.append(inj.bind(poller_impl, key=FdIoPoller, singleton=True))
8431
- inj.bind(_FdIoPollerDaemonizeListener, array=True, singleton=True)
8530
+ lst.extend([
8531
+ inj.bind(poller_impl, key=FdIoPoller, singleton=True),
8532
+ inj.bind(_FdIoPollerDaemonizeListener, array=True, singleton=True),
8533
+ ])
8432
8534
 
8433
8535
  #
8434
8536
 
@@ -8497,18 +8599,20 @@ def main(
8497
8599
  prepare=prepare_server_config,
8498
8600
  )
8499
8601
 
8500
- injector = inj.create_injector(bind_server(
8501
- config,
8502
- server_epoch=ServerEpoch(epoch),
8503
- inherited_fds=inherited_fds,
8504
- ))
8602
+ with contextlib.ExitStack() as es:
8603
+ injector = inj.create_injector(bind_server(
8604
+ es,
8605
+ config,
8606
+ server_epoch=ServerEpoch(epoch),
8607
+ inherited_fds=inherited_fds,
8608
+ ))
8505
8609
 
8506
- supervisor = injector[Supervisor]
8610
+ supervisor = injector[Supervisor]
8507
8611
 
8508
- try:
8509
- supervisor.main()
8510
- except ExitNow:
8511
- pass
8612
+ try:
8613
+ supervisor.main()
8614
+ except ExitNow:
8615
+ pass
8512
8616
 
8513
8617
  if supervisor.state < SupervisorState.RESTARTING:
8514
8618
  break
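
main() now owns a contextlib.ExitStack that is bound into the injector, so objects built during provisioning (such as the HttpServer above) can hang their cleanup on it. A minimal sketch of that pattern with made-up names (Server, build_server), not the package's actual API:

    import contextlib

    class Server:
        def close(self) -> None:
            print('server closed')

    def build_server(es: contextlib.ExitStack) -> Server:
        s = Server()
        es.callback(s.close)  # cleanup runs when main()'s ExitStack unwinds
        return s

    def main() -> None:
        with contextlib.ExitStack() as es:
            server = build_server(es)
            print('running', type(server).__name__)
        # leaving the with-block closes the server

    main()
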