ominfra 0.0.0.dev129__py3-none-any.whl → 0.0.0.dev131__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ominfra/deploy/_executor.py +16 -1
- ominfra/deploy/poly/_main.py +2 -2
- ominfra/pyremote/_runcommands.py +16 -1
- ominfra/scripts/journald2aws.py +117 -5
- ominfra/scripts/supervisor.py +1450 -703
- ominfra/supervisor/configs.py +17 -17
- ominfra/supervisor/dispatchers.py +7 -6
- ominfra/supervisor/dispatchersimpl.py +11 -15
- ominfra/supervisor/groups.py +16 -1
- ominfra/supervisor/http.py +130 -0
- ominfra/supervisor/inject.py +44 -5
- ominfra/supervisor/io.py +39 -24
- ominfra/supervisor/pipes.py +2 -2
- ominfra/supervisor/privileges.py +4 -6
- ominfra/supervisor/processimpl.py +33 -34
- ominfra/supervisor/setupimpl.py +16 -16
- ominfra/supervisor/spawningimpl.py +3 -3
- ominfra/supervisor/supervisor.py +6 -3
- ominfra/supervisor/types.py +6 -56
- ominfra/supervisor/utils/os.py +1 -1
- ominfra/supervisor/utils/strings.py +2 -2
- {ominfra-0.0.0.dev129.dist-info → ominfra-0.0.0.dev131.dist-info}/METADATA +3 -3
- {ominfra-0.0.0.dev129.dist-info → ominfra-0.0.0.dev131.dist-info}/RECORD +27 -27
- ominfra/supervisor/poller.py +0 -240
- {ominfra-0.0.0.dev129.dist-info → ominfra-0.0.0.dev131.dist-info}/LICENSE +0 -0
- {ominfra-0.0.0.dev129.dist-info → ominfra-0.0.0.dev131.dist-info}/WHEEL +0 -0
- {ominfra-0.0.0.dev129.dist-info → ominfra-0.0.0.dev131.dist-info}/entry_points.txt +0 -0
- {ominfra-0.0.0.dev129.dist-info → ominfra-0.0.0.dev131.dist-info}/top_level.txt +0 -0
ominfra/scripts/supervisor.py
CHANGED
@@ -3,7 +3,7 @@
 # @omlish-lite
 # @omlish-script
 # @omlish-amalg-output ../supervisor/main.py
-# ruff: noqa: N802 UP006 UP007 UP012 UP036
+# ruff: noqa: N802 U006 UP006 UP007 UP012 UP036
 # Supervisor is licensed under the following license:
 #
 # A copyright notice accompanies this license document that identifies the copyright holders.
@@ -90,47 +90,50 @@ if sys.version_info < (3, 8):
 ########################################
 
 
-#
+# ../../omdev/toml/parser.py
 TomlParseFloat = ta.Callable[[str], ta.Any]
 TomlKey = ta.Tuple[str, ...]
 TomlPos = int # ta.TypeAlias
 
-#
+# utils/collections.py
 K = ta.TypeVar('K')
 V = ta.TypeVar('V')
 
-#
+# ../../omlish/lite/cached.py
 T = ta.TypeVar('T')
 
-#
+# ../../omlish/lite/check.py
+SizedT = ta.TypeVar('SizedT', bound=ta.Sized)
+
+# ../../omlish/lite/socket.py
 SocketAddress = ta.Any
 SocketHandlerFactory = ta.Callable[[SocketAddress, ta.BinaryIO, ta.BinaryIO], 'SocketHandler']
 
-#
+# ../../omlish/lite/typing.py
 A0 = ta.TypeVar('A0')
 A1 = ta.TypeVar('A1')
 A2 = ta.TypeVar('A2')
 
-#
+# events.py
 EventCallback = ta.Callable[['Event'], None]
 
-#
+# ../../omlish/lite/http/parsing.py
 HttpHeaders = http.client.HTTPMessage # ta.TypeAlias
 
-#
+# ../../omlish/lite/inject.py
 U = ta.TypeVar('U')
 InjectorKeyCls = ta.Union[type, ta.NewType]
 InjectorProviderFn = ta.Callable[['Injector'], ta.Any]
 InjectorProviderFnMap = ta.Mapping['InjectorKey', 'InjectorProviderFn']
 InjectorBindingOrBindings = ta.Union['InjectorBinding', 'InjectorBindings']
 
-#
+# ../configs.py
 ConfigMapping = ta.Mapping[str, ta.Any]
 
-#
+# ../../omlish/lite/http/handlers.py
 HttpHandler = ta.Callable[['HttpHandlerRequest'], 'HttpHandlerResponse']
 
-#
+# ../../omlish/lite/http/coroserver.py
 CoroHttpServerFactory = ta.Callable[[SocketAddress], 'CoroHttpServer']
 
 
@@ -988,9 +991,8 @@ class NoPermissionError(ProcessError):
 
 def drop_privileges(user: ta.Union[int, str, None]) -> ta.Optional[str]:
     """
-    Drop privileges to become the specified user, which may be a username or uid.
-
-    dropped.
+    Drop privileges to become the specified user, which may be a username or uid. Called for supervisord startup and
+    when spawning subprocesses. Returns None on success or a string error message if privileges could not be dropped.
     """
 
     if user is None:
@@ -1014,9 +1016,8 @@ def drop_privileges(user: ta.Union[int, str, None]) -> ta.Optional[str]:
     current_uid = os.getuid()
 
     if current_uid == uid:
-        # do nothing and return successfully if the uid is already the current one.
-        #
-        # it.
+        # do nothing and return successfully if the uid is already the current one. this allows a supervisord running as
+        # an unprivileged user "foo" to start a process where the config has "user=foo" (same user) in it.
         return None
 
     if current_uid != 0:
@@ -1370,8 +1371,8 @@ def strip_escapes(s: bytes) -> bytes:
 
 
 class SuffixMultiplier:
-    # d is a dictionary of suffixes to integer multipliers.
-    #
+    # d is a dictionary of suffixes to integer multipliers. If no suffixes match, default is the multiplier. Matches are
+    # case insensitive. Return values are in the fundamental unit.
     def __init__(self, d, default=1):
        super().__init__()
        self._d = d
@@ -1523,6 +1524,228 @@ def check_single(vs: ta.Iterable[T]) -> T:
|
|
1523
1524
|
return v
|
1524
1525
|
|
1525
1526
|
|
1527
|
+
def check_empty(v: SizedT) -> SizedT:
|
1528
|
+
if len(v):
|
1529
|
+
raise ValueError(v)
|
1530
|
+
return v
|
1531
|
+
|
1532
|
+
|
1533
|
+
def check_non_empty(v: SizedT) -> SizedT:
|
1534
|
+
if not len(v):
|
1535
|
+
raise ValueError(v)
|
1536
|
+
return v
|
1537
|
+
|
1538
|
+
|
1539
|
+
########################################
|
1540
|
+
# ../../../omlish/lite/fdio/pollers.py
|
1541
|
+
|
1542
|
+
|
1543
|
+
##
|
1544
|
+
|
1545
|
+
|
1546
|
+
class FdIoPoller(abc.ABC):
|
1547
|
+
def __init__(self) -> None:
|
1548
|
+
super().__init__()
|
1549
|
+
|
1550
|
+
self._readable: ta.Set[int] = set()
|
1551
|
+
self._writable: ta.Set[int] = set()
|
1552
|
+
|
1553
|
+
#
|
1554
|
+
|
1555
|
+
def close(self) -> None: # noqa
|
1556
|
+
pass
|
1557
|
+
|
1558
|
+
def reopen(self) -> None: # noqa
|
1559
|
+
pass
|
1560
|
+
|
1561
|
+
#
|
1562
|
+
|
1563
|
+
@property
|
1564
|
+
@ta.final
|
1565
|
+
def readable(self) -> ta.AbstractSet[int]:
|
1566
|
+
return self._readable
|
1567
|
+
|
1568
|
+
@property
|
1569
|
+
@ta.final
|
1570
|
+
def writable(self) -> ta.AbstractSet[int]:
|
1571
|
+
return self._writable
|
1572
|
+
|
1573
|
+
#
|
1574
|
+
|
1575
|
+
@ta.final
|
1576
|
+
def register_readable(self, fd: int) -> bool:
|
1577
|
+
if fd in self._readable:
|
1578
|
+
return False
|
1579
|
+
self._readable.add(fd)
|
1580
|
+
self._register_readable(fd)
|
1581
|
+
return True
|
1582
|
+
|
1583
|
+
@ta.final
|
1584
|
+
def register_writable(self, fd: int) -> bool:
|
1585
|
+
if fd in self._writable:
|
1586
|
+
return False
|
1587
|
+
self._writable.add(fd)
|
1588
|
+
self._register_writable(fd)
|
1589
|
+
return True
|
1590
|
+
|
1591
|
+
@ta.final
|
1592
|
+
def unregister_readable(self, fd: int) -> bool:
|
1593
|
+
if fd not in self._readable:
|
1594
|
+
return False
|
1595
|
+
self._readable.discard(fd)
|
1596
|
+
self._unregister_readable(fd)
|
1597
|
+
return True
|
1598
|
+
|
1599
|
+
@ta.final
|
1600
|
+
def unregister_writable(self, fd: int) -> bool:
|
1601
|
+
if fd not in self._writable:
|
1602
|
+
return False
|
1603
|
+
self._writable.discard(fd)
|
1604
|
+
self._unregister_writable(fd)
|
1605
|
+
return True
|
1606
|
+
|
1607
|
+
#
|
1608
|
+
|
1609
|
+
def _register_readable(self, fd: int) -> None: # noqa
|
1610
|
+
pass
|
1611
|
+
|
1612
|
+
def _register_writable(self, fd: int) -> None: # noqa
|
1613
|
+
pass
|
1614
|
+
|
1615
|
+
def _unregister_readable(self, fd: int) -> None: # noqa
|
1616
|
+
pass
|
1617
|
+
|
1618
|
+
def _unregister_writable(self, fd: int) -> None: # noqa
|
1619
|
+
pass
|
1620
|
+
|
1621
|
+
#
|
1622
|
+
|
1623
|
+
def update(
|
1624
|
+
self,
|
1625
|
+
r: ta.AbstractSet[int],
|
1626
|
+
w: ta.AbstractSet[int],
|
1627
|
+
) -> None:
|
1628
|
+
for f in r - self._readable:
|
1629
|
+
self.register_readable(f)
|
1630
|
+
for f in w - self._writable:
|
1631
|
+
self.register_writable(f)
|
1632
|
+
for f in self._readable - r:
|
1633
|
+
self.unregister_readable(f)
|
1634
|
+
for f in self._writable - w:
|
1635
|
+
self.unregister_writable(f)
|
1636
|
+
|
1637
|
+
#
|
1638
|
+
|
1639
|
+
@dc.dataclass(frozen=True)
|
1640
|
+
class PollResult:
|
1641
|
+
r: ta.Sequence[int] = ()
|
1642
|
+
w: ta.Sequence[int] = ()
|
1643
|
+
|
1644
|
+
inv: ta.Sequence[int] = ()
|
1645
|
+
|
1646
|
+
msg: ta.Optional[str] = None
|
1647
|
+
exc: ta.Optional[BaseException] = None
|
1648
|
+
|
1649
|
+
@abc.abstractmethod
|
1650
|
+
def poll(self, timeout: ta.Optional[float]) -> PollResult:
|
1651
|
+
raise NotImplementedError
|
1652
|
+
|
1653
|
+
|
1654
|
+
##
|
1655
|
+
|
1656
|
+
|
1657
|
+
class SelectFdIoPoller(FdIoPoller):
|
1658
|
+
def poll(self, timeout: ta.Optional[float]) -> FdIoPoller.PollResult:
|
1659
|
+
try:
|
1660
|
+
r, w, x = select.select(
|
1661
|
+
self._readable,
|
1662
|
+
self._writable,
|
1663
|
+
[],
|
1664
|
+
timeout,
|
1665
|
+
)
|
1666
|
+
|
1667
|
+
except OSError as exc:
|
1668
|
+
if exc.errno == errno.EINTR:
|
1669
|
+
return FdIoPoller.PollResult(msg='EINTR encountered in poll', exc=exc)
|
1670
|
+
elif exc.errno == errno.EBADF:
|
1671
|
+
return FdIoPoller.PollResult(msg='EBADF encountered in poll', exc=exc)
|
1672
|
+
else:
|
1673
|
+
raise
|
1674
|
+
|
1675
|
+
return FdIoPoller.PollResult(r, w)
|
1676
|
+
|
1677
|
+
|
1678
|
+
##
|
1679
|
+
|
1680
|
+
|
1681
|
+
PollFdIoPoller: ta.Optional[ta.Type[FdIoPoller]]
|
1682
|
+
if hasattr(select, 'poll'):
|
1683
|
+
|
1684
|
+
class _PollFdIoPoller(FdIoPoller):
|
1685
|
+
def __init__(self) -> None:
|
1686
|
+
super().__init__()
|
1687
|
+
|
1688
|
+
self._poller = select.poll()
|
1689
|
+
|
1690
|
+
#
|
1691
|
+
|
1692
|
+
_READ = select.POLLIN | select.POLLPRI | select.POLLHUP
|
1693
|
+
_WRITE = select.POLLOUT
|
1694
|
+
|
1695
|
+
def _register_readable(self, fd: int) -> None:
|
1696
|
+
self._update_registration(fd)
|
1697
|
+
|
1698
|
+
def _register_writable(self, fd: int) -> None:
|
1699
|
+
self._update_registration(fd)
|
1700
|
+
|
1701
|
+
def _unregister_readable(self, fd: int) -> None:
|
1702
|
+
self._update_registration(fd)
|
1703
|
+
|
1704
|
+
def _unregister_writable(self, fd: int) -> None:
|
1705
|
+
self._update_registration(fd)
|
1706
|
+
|
1707
|
+
def _update_registration(self, fd: int) -> None:
|
1708
|
+
r = fd in self._readable
|
1709
|
+
w = fd in self._writable
|
1710
|
+
if r or w:
|
1711
|
+
self._poller.register(fd, (self._READ if r else 0) | (self._WRITE if w else 0))
|
1712
|
+
else:
|
1713
|
+
self._poller.unregister(fd)
|
1714
|
+
|
1715
|
+
#
|
1716
|
+
|
1717
|
+
def poll(self, timeout: ta.Optional[float]) -> FdIoPoller.PollResult:
|
1718
|
+
polled: ta.List[ta.Tuple[int, int]]
|
1719
|
+
try:
|
1720
|
+
polled = self._poller.poll(timeout * 1000 if timeout is not None else None)
|
1721
|
+
|
1722
|
+
except OSError as exc:
|
1723
|
+
if exc.errno == errno.EINTR:
|
1724
|
+
return FdIoPoller.PollResult(msg='EINTR encountered in poll', exc=exc)
|
1725
|
+
else:
|
1726
|
+
raise
|
1727
|
+
|
1728
|
+
r: ta.List[int] = []
|
1729
|
+
w: ta.List[int] = []
|
1730
|
+
inv: ta.List[int] = []
|
1731
|
+
for fd, mask in polled:
|
1732
|
+
if mask & select.POLLNVAL:
|
1733
|
+
self._poller.unregister(fd)
|
1734
|
+
self._readable.discard(fd)
|
1735
|
+
self._writable.discard(fd)
|
1736
|
+
inv.append(fd)
|
1737
|
+
continue
|
1738
|
+
if mask & self._READ:
|
1739
|
+
r.append(fd)
|
1740
|
+
if mask & self._WRITE:
|
1741
|
+
w.append(fd)
|
1742
|
+
return FdIoPoller.PollResult(r, w, inv=inv)
|
1743
|
+
|
1744
|
+
PollFdIoPoller = _PollFdIoPoller
|
1745
|
+
else:
|
1746
|
+
PollFdIoPoller = None
|
1747
|
+
|
1748
|
+
|
1526
1749
|
########################################
|
1527
1750
|
# ../../../omlish/lite/http/versions.py
|
1528
1751
|
|
@@ -1742,6 +1965,73 @@ class SocketHandler(abc.ABC):
         raise NotImplementedError
 
 
+########################################
+# ../../../omlish/lite/strings.py
+
+
+##
+
+
+def camel_case(name: str, lower: bool = False) -> str:
+    if not name:
+        return ''
+    s = ''.join(map(str.capitalize, name.split('_'))) # noqa
+    if lower:
+        s = s[0].lower() + s[1:]
+    return s
+
+
+def snake_case(name: str) -> str:
+    uppers: list[int | None] = [i for i, c in enumerate(name) if c.isupper()]
+    return '_'.join([name[l:r].lower() for l, r in zip([None, *uppers], [*uppers, None])]).strip('_')
+
+
+##
+
+
+def is_dunder(name: str) -> bool:
+    return (
+        name[:2] == name[-2:] == '__' and
+        name[2:3] != '_' and
+        name[-3:-2] != '_' and
+        len(name) > 4
+    )
+
+
+def is_sunder(name: str) -> bool:
+    return (
+        name[0] == name[-1] == '_' and
+        name[1:2] != '_' and
+        name[-2:-1] != '_' and
+        len(name) > 2
+    )
+
+
+##
+
+
+def attr_repr(obj: ta.Any, *attrs: str) -> str:
+    return f'{type(obj).__name__}({", ".join(f"{attr}={getattr(obj, attr)!r}" for attr in attrs)})'
+
+
+##
+
+
+FORMAT_NUM_BYTES_SUFFIXES: ta.Sequence[str] = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB']
+
+
+def format_num_bytes(num_bytes: int) -> str:
+    for i, suffix in enumerate(FORMAT_NUM_BYTES_SUFFIXES):
+        value = num_bytes / 1024 ** i
+        if num_bytes < 1024 ** (i + 1):
+            if value.is_integer():
+                return f'{int(value)}{suffix}'
+            else:
+                return f'{value:.2f}{suffix}'
+
+    return f'{num_bytes / 1024 ** (len(FORMAT_NUM_BYTES_SUFFIXES) - 1):.2f}{FORMAT_NUM_BYTES_SUFFIXES[-1]}'
+
+
 ########################################
 # ../../../omlish/lite/typing.py
 
@@ -2138,7 +2428,7 @@ def decode_wait_status(sts: int) -> ta.Tuple[Rc, str]:
     Decode the status returned by wait() or waitpid().
 
     Return a tuple (exitstatus, message) where exitstatus is the exit status, or -1 if the process was killed by a
-    signal; and message is a message telling what happened.
+    signal; and message is a message telling what happened. It is the caller's responsibility to display the message.
     """
 
     if os.WIFEXITED(sts):
@@ -2227,87 +2517,250 @@ def get_user(name: str) -> User:
|
|
2227
2517
|
|
2228
2518
|
|
2229
2519
|
########################################
|
2230
|
-
# ../../../omlish/lite/
|
2231
|
-
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
|
2232
|
-
# --------------------------------------------
|
2233
|
-
#
|
2234
|
-
# 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization
|
2235
|
-
# ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated
|
2236
|
-
# documentation.
|
2237
|
-
#
|
2238
|
-
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive,
|
2239
|
-
# royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative
|
2240
|
-
# works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License
|
2241
|
-
# Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
|
2242
|
-
# 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights Reserved" are retained in Python
|
2243
|
-
# alone or in any derivative version prepared by Licensee.
|
2244
|
-
#
|
2245
|
-
# 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and
|
2246
|
-
# wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in
|
2247
|
-
# any such work a brief summary of the changes made to Python.
|
2248
|
-
#
|
2249
|
-
# 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES,
|
2250
|
-
# EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY
|
2251
|
-
# OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY
|
2252
|
-
# RIGHTS.
|
2253
|
-
#
|
2254
|
-
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL
|
2255
|
-
# DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF
|
2256
|
-
# ADVISED OF THE POSSIBILITY THEREOF.
|
2257
|
-
#
|
2258
|
-
# 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
|
2259
|
-
#
|
2260
|
-
# 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint
|
2261
|
-
# venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade
|
2262
|
-
# name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
|
2263
|
-
#
|
2264
|
-
# 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this
|
2265
|
-
# License Agreement.
|
2520
|
+
# ../../../omlish/lite/fdio/handlers.py
|
2266
2521
|
|
2267
2522
|
|
2268
|
-
|
2523
|
+
class FdIoHandler(abc.ABC):
|
2524
|
+
@abc.abstractmethod
|
2525
|
+
def fd(self) -> int:
|
2526
|
+
raise NotImplementedError
|
2269
2527
|
|
2528
|
+
#
|
2270
2529
|
|
2271
|
-
|
2272
|
-
|
2273
|
-
|
2274
|
-
|
2275
|
-
'request_version',
|
2276
|
-
'version',
|
2277
|
-
'headers',
|
2278
|
-
'close_connection',
|
2279
|
-
)
|
2530
|
+
@property
|
2531
|
+
@abc.abstractmethod
|
2532
|
+
def closed(self) -> bool:
|
2533
|
+
raise NotImplementedError
|
2280
2534
|
|
2281
|
-
|
2282
|
-
|
2283
|
-
|
2284
|
-
server_version: HttpProtocolVersion,
|
2285
|
-
request_line: str,
|
2286
|
-
request_version: HttpProtocolVersion,
|
2287
|
-
version: HttpProtocolVersion,
|
2288
|
-
headers: ta.Optional[HttpHeaders],
|
2289
|
-
close_connection: bool,
|
2290
|
-
) -> None:
|
2291
|
-
super().__init__()
|
2535
|
+
@abc.abstractmethod
|
2536
|
+
def close(self) -> None:
|
2537
|
+
raise NotImplementedError
|
2292
2538
|
|
2293
|
-
|
2294
|
-
self.request_line = request_line
|
2295
|
-
self.request_version = request_version
|
2296
|
-
self.version = version
|
2297
|
-
self.headers = headers
|
2298
|
-
self.close_connection = close_connection
|
2539
|
+
#
|
2299
2540
|
|
2300
|
-
def
|
2301
|
-
return
|
2541
|
+
def readable(self) -> bool:
|
2542
|
+
return False
|
2302
2543
|
|
2544
|
+
def writable(self) -> bool:
|
2545
|
+
return False
|
2303
2546
|
|
2304
|
-
|
2305
|
-
pass
|
2547
|
+
#
|
2306
2548
|
|
2549
|
+
def on_readable(self) -> None:
|
2550
|
+
raise TypeError
|
2307
2551
|
|
2308
|
-
|
2309
|
-
|
2310
|
-
|
2552
|
+
def on_writable(self) -> None:
|
2553
|
+
raise TypeError
|
2554
|
+
|
2555
|
+
def on_error(self, exc: ta.Optional[BaseException] = None) -> None: # noqa
|
2556
|
+
pass
|
2557
|
+
|
2558
|
+
|
2559
|
+
class SocketFdIoHandler(FdIoHandler, abc.ABC):
|
2560
|
+
def __init__(
|
2561
|
+
self,
|
2562
|
+
addr: SocketAddress,
|
2563
|
+
sock: socket.socket,
|
2564
|
+
) -> None:
|
2565
|
+
super().__init__()
|
2566
|
+
|
2567
|
+
self._addr = addr
|
2568
|
+
self._sock: ta.Optional[socket.socket] = sock
|
2569
|
+
|
2570
|
+
def fd(self) -> int:
|
2571
|
+
return check_not_none(self._sock).fileno()
|
2572
|
+
|
2573
|
+
@property
|
2574
|
+
def closed(self) -> bool:
|
2575
|
+
return self._sock is None
|
2576
|
+
|
2577
|
+
def close(self) -> None:
|
2578
|
+
if self._sock is not None:
|
2579
|
+
self._sock.close()
|
2580
|
+
self._sock = None
|
2581
|
+
|
2582
|
+
|
2583
|
+
########################################
|
2584
|
+
# ../../../omlish/lite/fdio/kqueue.py
|
2585
|
+
|
2586
|
+
|
2587
|
+
KqueueFdIoPoller: ta.Optional[ta.Type[FdIoPoller]]
|
2588
|
+
if sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
|
2589
|
+
|
2590
|
+
class _KqueueFdIoPoller(FdIoPoller):
|
2591
|
+
DEFAULT_MAX_EVENTS = 1000
|
2592
|
+
|
2593
|
+
def __init__(
|
2594
|
+
self,
|
2595
|
+
*,
|
2596
|
+
max_events: int = DEFAULT_MAX_EVENTS,
|
2597
|
+
) -> None:
|
2598
|
+
super().__init__()
|
2599
|
+
|
2600
|
+
self._max_events = max_events
|
2601
|
+
|
2602
|
+
self._kqueue: ta.Optional[ta.Any] = None
|
2603
|
+
|
2604
|
+
#
|
2605
|
+
|
2606
|
+
def _get_kqueue(self) -> 'select.kqueue':
|
2607
|
+
if (kq := self._kqueue) is not None:
|
2608
|
+
return kq
|
2609
|
+
kq = select.kqueue()
|
2610
|
+
self._kqueue = kq
|
2611
|
+
return kq
|
2612
|
+
|
2613
|
+
def close(self) -> None:
|
2614
|
+
if self._kqueue is not None:
|
2615
|
+
self._kqueue.close()
|
2616
|
+
self._kqueue = None
|
2617
|
+
|
2618
|
+
def reopen(self) -> None:
|
2619
|
+
for fd in self._readable:
|
2620
|
+
self._register_readable(fd)
|
2621
|
+
for fd in self._writable:
|
2622
|
+
self._register_writable(fd)
|
2623
|
+
|
2624
|
+
#
|
2625
|
+
|
2626
|
+
def _register_readable(self, fd: int) -> None:
|
2627
|
+
self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_ADD)
|
2628
|
+
|
2629
|
+
def _register_writable(self, fd: int) -> None:
|
2630
|
+
self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD)
|
2631
|
+
|
2632
|
+
def _unregister_readable(self, fd: int) -> None:
|
2633
|
+
self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE)
|
2634
|
+
|
2635
|
+
def _unregister_writable(self, fd: int) -> None:
|
2636
|
+
self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)
|
2637
|
+
|
2638
|
+
def _control(self, fd: int, filter: int, flags: int) -> None: # noqa
|
2639
|
+
ke = select.kevent(fd, filter=filter, flags=flags)
|
2640
|
+
kq = self._get_kqueue()
|
2641
|
+
try:
|
2642
|
+
kq.control([ke], 0)
|
2643
|
+
|
2644
|
+
except OSError as exc:
|
2645
|
+
if exc.errno == errno.EBADF:
|
2646
|
+
# log.debug('EBADF encountered in kqueue. Invalid file descriptor %s', ke.ident)
|
2647
|
+
pass
|
2648
|
+
elif exc.errno == errno.ENOENT:
|
2649
|
+
# Can happen when trying to remove an already closed socket
|
2650
|
+
pass
|
2651
|
+
else:
|
2652
|
+
raise
|
2653
|
+
|
2654
|
+
#
|
2655
|
+
|
2656
|
+
def poll(self, timeout: ta.Optional[float]) -> FdIoPoller.PollResult:
|
2657
|
+
kq = self._get_kqueue()
|
2658
|
+
try:
|
2659
|
+
kes = kq.control(None, self._max_events, timeout)
|
2660
|
+
|
2661
|
+
except OSError as exc:
|
2662
|
+
if exc.errno == errno.EINTR:
|
2663
|
+
return FdIoPoller.PollResult(msg='EINTR encountered in poll', exc=exc)
|
2664
|
+
else:
|
2665
|
+
raise
|
2666
|
+
|
2667
|
+
r: ta.List[int] = []
|
2668
|
+
w: ta.List[int] = []
|
2669
|
+
for ke in kes:
|
2670
|
+
if ke.filter == select.KQ_FILTER_READ:
|
2671
|
+
r.append(ke.ident)
|
2672
|
+
if ke.filter == select.KQ_FILTER_WRITE:
|
2673
|
+
w.append(ke.ident)
|
2674
|
+
|
2675
|
+
return FdIoPoller.PollResult(r, w)
|
2676
|
+
|
2677
|
+
KqueueFdIoPoller = _KqueueFdIoPoller
|
2678
|
+
else:
|
2679
|
+
KqueueFdIoPoller = None
|
2680
|
+
|
2681
|
+
|
2682
|
+
########################################
|
2683
|
+
# ../../../omlish/lite/http/parsing.py
|
2684
|
+
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
|
2685
|
+
# --------------------------------------------
|
2686
|
+
#
|
2687
|
+
# 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization
|
2688
|
+
# ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated
|
2689
|
+
# documentation.
|
2690
|
+
#
|
2691
|
+
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive,
|
2692
|
+
# royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative
|
2693
|
+
# works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License
|
2694
|
+
# Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
|
2695
|
+
# 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights Reserved" are retained in Python
|
2696
|
+
# alone or in any derivative version prepared by Licensee.
|
2697
|
+
#
|
2698
|
+
# 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and
|
2699
|
+
# wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in
|
2700
|
+
# any such work a brief summary of the changes made to Python.
|
2701
|
+
#
|
2702
|
+
# 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES,
|
2703
|
+
# EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY
|
2704
|
+
# OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY
|
2705
|
+
# RIGHTS.
|
2706
|
+
#
|
2707
|
+
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL
|
2708
|
+
# DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF
|
2709
|
+
# ADVISED OF THE POSSIBILITY THEREOF.
|
2710
|
+
#
|
2711
|
+
# 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
|
2712
|
+
#
|
2713
|
+
# 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint
|
2714
|
+
# venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade
|
2715
|
+
# name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
|
2716
|
+
#
|
2717
|
+
# 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this
|
2718
|
+
# License Agreement.
|
2719
|
+
|
2720
|
+
|
2721
|
+
##
|
2722
|
+
|
2723
|
+
|
2724
|
+
class ParseHttpRequestResult(abc.ABC): # noqa
|
2725
|
+
__slots__ = (
|
2726
|
+
'server_version',
|
2727
|
+
'request_line',
|
2728
|
+
'request_version',
|
2729
|
+
'version',
|
2730
|
+
'headers',
|
2731
|
+
'close_connection',
|
2732
|
+
)
|
2733
|
+
|
2734
|
+
def __init__(
|
2735
|
+
self,
|
2736
|
+
*,
|
2737
|
+
server_version: HttpProtocolVersion,
|
2738
|
+
request_line: str,
|
2739
|
+
request_version: HttpProtocolVersion,
|
2740
|
+
version: HttpProtocolVersion,
|
2741
|
+
headers: ta.Optional[HttpHeaders],
|
2742
|
+
close_connection: bool,
|
2743
|
+
) -> None:
|
2744
|
+
super().__init__()
|
2745
|
+
|
2746
|
+
self.server_version = server_version
|
2747
|
+
self.request_line = request_line
|
2748
|
+
self.request_version = request_version
|
2749
|
+
self.version = version
|
2750
|
+
self.headers = headers
|
2751
|
+
self.close_connection = close_connection
|
2752
|
+
|
2753
|
+
def __repr__(self) -> str:
|
2754
|
+
return f'{self.__class__.__name__}({", ".join(f"{a}={getattr(self, a)!r}" for a in self.__slots__)})'
|
2755
|
+
|
2756
|
+
|
2757
|
+
class EmptyParsedHttpResult(ParseHttpRequestResult):
|
2758
|
+
pass
|
2759
|
+
|
2760
|
+
|
2761
|
+
class ParseHttpRequestError(ParseHttpRequestResult):
|
2762
|
+
__slots__ = (
|
2763
|
+
'code',
|
2311
2764
|
'message',
|
2312
2765
|
*ParseHttpRequestResult.__slots__,
|
2313
2766
|
)
|
@@ -2628,11 +3081,6 @@ class HttpRequestParser:
 
 ########################################
 # ../../../omlish/lite/inject.py
-"""
-TODO:
- - recursion detection
- - bind empty array
-"""
 
 
 ###
@@ -2925,7 +3373,11 @@ def build_injector_provider_map(bs: InjectorBindings) -> ta.Mapping[InjectorKey,
 
     for b in bs.bindings():
         if b.key.array:
-            am.setdefault(b.key, [])
+            al = am.setdefault(b.key, [])
+            if isinstance(b.provider, ArrayInjectorProvider):
+                al.extend(b.provider.ps)
+            else:
+                al.append(b.provider)
         else:
             if b.key in pm:
                 raise KeyError(b.key)
@@ -3073,6 +3525,14 @@ def build_injection_kwargs_target(
 _INJECTOR_INJECTOR_KEY: InjectorKey[Injector] = InjectorKey(Injector)
 
 
+@dc.dataclass(frozen=True)
+class _InjectorEager:
+    key: InjectorKey
+
+
+_INJECTOR_EAGER_ARRAY_KEY: InjectorKey[_InjectorEager] = InjectorKey(_InjectorEager, array=True)
+
+
 class _Injector(Injector):
     def __init__(self, bs: InjectorBindings, p: ta.Optional[Injector] = None) -> None:
         super().__init__()
@@ -3087,6 +3547,10 @@ class _Injector(Injector):
 
         self.__cur_req: ta.Optional[_Injector._Request] = None
 
+        if _INJECTOR_EAGER_ARRAY_KEY in self._pfm:
+            for e in self.provide(_INJECTOR_EAGER_ARRAY_KEY):
+                self.provide(e.key)
+
     class _Request:
         def __init__(self, injector: '_Injector') -> None:
             super().__init__()
@@ -3244,6 +3708,8 @@ class InjectorBinder:
             to_key: ta.Any = None,
 
             singleton: bool = False,
+
+            eager: bool = False,
     ) -> InjectorBindingOrBindings:
         if obj is None or obj is inspect.Parameter.empty:
             raise TypeError(obj)
@@ -3317,13 +3783,21 @@
         if singleton:
             provider = SingletonInjectorProvider(provider)
 
+        binding = InjectorBinding(key, provider)
+
         ##
 
-
+        extras: ta.List[InjectorBinding] = []
+
+        if eager:
+            extras.append(bind_injector_eager_key(key))
 
         ##
 
-
+        if extras:
+            return as_injector_bindings(binding, *extras)
+        else:
+            return binding
 
 
 ###
@@ -3346,6 +3820,26 @@ def make_injector_factory(
     return outer
 
 
+def bind_injector_array(
+        obj: ta.Any = None,
+        *,
+        tag: ta.Any = None,
+) -> InjectorBindingOrBindings:
+    key = as_injector_key(obj)
+    if tag is not None:
+        if key.tag is not None:
+            raise ValueError('Must not specify multiple tags')
+        key = dc.replace(key, tag=tag)
+
+    if key.array:
+        raise ValueError('Key must not be array')
+
+    return InjectorBinding(
+        dc.replace(key, array=True),
+        ArrayInjectorProvider([]),
+    )
+
+
 def make_injector_array_type(
         ele: ta.Union[InjectorKey, InjectorKeyCls],
         cls: U,
@@ -3367,6 +3861,10 @@ def make_injector_array_type(
     return inner
 
 
+def bind_injector_eager_key(key: ta.Any) -> InjectorBinding:
+    return InjectorBinding(_INJECTOR_EAGER_ARRAY_KEY, ConstInjectorProvider(_InjectorEager(as_injector_key(key))))
+
+
 ##
 
 
@@ -3421,6 +3919,8 @@ class Injection:
             to_key: ta.Any = None,
 
             singleton: bool = False,
+
+            eager: bool = False,
     ) -> InjectorBindingOrBindings:
         return InjectorBinder.bind(
             obj,
@@ -3435,6 +3935,8 @@ class Injection:
             to_key=to_key,
 
            singleton=singleton,
+
+            eager=eager,
         )
 
     # helpers
@@ -3448,6 +3950,15 @@ class Injection:
     ) -> InjectorBindingOrBindings:
         return cls.bind(make_injector_factory(fn, cls_, ann))
 
+    @classmethod
+    def bind_array(
+            cls,
+            obj: ta.Any = None,
+            *,
+            tag: ta.Any = None,
+    ) -> InjectorBindingOrBindings:
+        return bind_injector_array(obj, tag=tag)
+
     @classmethod
     def bind_array_type(
             cls,
@@ -3462,98 +3973,320 @@ inj = Injection
|
|
3462
3973
|
|
3463
3974
|
|
3464
3975
|
########################################
|
3465
|
-
# ../../../omlish/lite/
|
3976
|
+
# ../../../omlish/lite/io.py
|
3466
3977
|
|
3467
3978
|
|
3468
|
-
|
3979
|
+
class DelimitingBuffer:
|
3980
|
+
"""
|
3981
|
+
https://github.com/python-trio/trio/issues/796 :|
|
3982
|
+
"""
|
3469
3983
|
|
3984
|
+
#
|
3470
3985
|
|
3471
|
-
class
|
3472
|
-
|
3986
|
+
class Error(Exception):
|
3987
|
+
def __init__(self, buffer: 'DelimitingBuffer') -> None:
|
3988
|
+
super().__init__(buffer)
|
3989
|
+
self.buffer = buffer
|
3473
3990
|
|
3991
|
+
def __repr__(self) -> str:
|
3992
|
+
return attr_repr(self, 'buffer')
|
3474
3993
|
|
3475
|
-
|
3476
|
-
|
3477
|
-
('iov_len', ct.c_size_t), # Length of data.
|
3478
|
-
]
|
3994
|
+
class ClosedError(Error):
|
3995
|
+
pass
|
3479
3996
|
|
3997
|
+
#
|
3480
3998
|
|
3481
|
-
|
3999
|
+
DEFAULT_DELIMITERS: bytes = b'\n'
|
4000
|
+
|
4001
|
+
def __init__(
|
4002
|
+
self,
|
4003
|
+
delimiters: ta.Iterable[int] = DEFAULT_DELIMITERS,
|
4004
|
+
*,
|
4005
|
+
keep_ends: bool = False,
|
4006
|
+
max_size: ta.Optional[int] = None,
|
4007
|
+
) -> None:
|
4008
|
+
super().__init__()
|
3482
4009
|
|
4010
|
+
self._delimiters = frozenset(check_isinstance(d, int) for d in delimiters)
|
4011
|
+
self._keep_ends = keep_ends
|
4012
|
+
self._max_size = max_size
|
3483
4013
|
|
3484
|
-
|
3485
|
-
def sd_libsystemd() -> ta.Any:
|
3486
|
-
lib = ct.CDLL('libsystemd.so.0')
|
4014
|
+
self._buf: ta.Optional[io.BytesIO] = io.BytesIO()
|
3487
4015
|
|
3488
|
-
|
3489
|
-
lib.sd_journal_sendv.argtypes = [ct.POINTER(sd_iovec), ct.c_int]
|
4016
|
+
#
|
3490
4017
|
|
3491
|
-
|
4018
|
+
@property
|
4019
|
+
def is_closed(self) -> bool:
|
4020
|
+
return self._buf is None
|
4021
|
+
|
4022
|
+
def tell(self) -> int:
|
4023
|
+
if (buf := self._buf) is None:
|
4024
|
+
raise self.ClosedError(self)
|
4025
|
+
return buf.tell()
|
4026
|
+
|
4027
|
+
def peek(self) -> bytes:
|
4028
|
+
if (buf := self._buf) is None:
|
4029
|
+
raise self.ClosedError(self)
|
4030
|
+
return buf.getvalue()
|
4031
|
+
|
4032
|
+
def _find_delim(self, data: ta.Union[bytes, bytearray], i: int) -> ta.Optional[int]:
|
4033
|
+
r = None # type: int | None
|
4034
|
+
for d in self._delimiters:
|
4035
|
+
if (p := data.find(d, i)) >= 0:
|
4036
|
+
if r is None or p < r:
|
4037
|
+
r = p
|
4038
|
+
return r
|
4039
|
+
|
4040
|
+
def _append_and_reset(self, chunk: bytes) -> bytes:
|
4041
|
+
buf = check_not_none(self._buf)
|
4042
|
+
if not buf.tell():
|
4043
|
+
return chunk
|
4044
|
+
|
4045
|
+
buf.write(chunk)
|
4046
|
+
ret = buf.getvalue()
|
4047
|
+
buf.seek(0)
|
4048
|
+
buf.truncate()
|
4049
|
+
return ret
|
3492
4050
|
|
4051
|
+
class Incomplete(ta.NamedTuple):
|
4052
|
+
b: bytes
|
3493
4053
|
|
3494
|
-
|
3495
|
-
|
3496
|
-
|
3497
|
-
return sd_libsystemd()
|
3498
|
-
except OSError: # noqa
|
3499
|
-
return None
|
4054
|
+
def feed(self, data: ta.Union[bytes, bytearray]) -> ta.Generator[ta.Union[bytes, Incomplete], None, None]:
|
4055
|
+
if (buf := self._buf) is None:
|
4056
|
+
raise self.ClosedError(self)
|
3500
4057
|
|
4058
|
+
if not data:
|
4059
|
+
self._buf = None
|
3501
4060
|
|
3502
|
-
|
4061
|
+
if buf.tell():
|
4062
|
+
yield self.Incomplete(buf.getvalue())
|
3503
4063
|
|
4064
|
+
return
|
3504
4065
|
|
3505
|
-
|
3506
|
-
|
4066
|
+
l = len(data)
|
4067
|
+
i = 0
|
4068
|
+
while i < l:
|
4069
|
+
if (p := self._find_delim(data, i)) is None:
|
4070
|
+
break
|
3507
4071
|
|
3508
|
-
|
3509
|
-
|
3510
|
-
|
3511
|
-
]
|
4072
|
+
n = p + 1
|
4073
|
+
if self._keep_ends:
|
4074
|
+
p = n
|
3512
4075
|
|
3513
|
-
|
3514
|
-
cl = (ct.c_char_p * len(msgs))() # noqa
|
3515
|
-
for i in range(len(msgs)):
|
3516
|
-
vec[i].iov_base = ct.cast(ct.c_char_p(msgs[i]), ct.c_void_p)
|
3517
|
-
vec[i].iov_len = len(msgs[i]) - 1
|
4076
|
+
yield self._append_and_reset(data[i:p])
|
3518
4077
|
|
3519
|
-
|
4078
|
+
i = n
|
3520
4079
|
|
4080
|
+
if i >= l:
|
4081
|
+
return
|
3521
4082
|
|
3522
|
-
|
4083
|
+
if self._max_size is None:
|
4084
|
+
buf.write(data[i:])
|
4085
|
+
return
|
3523
4086
|
|
4087
|
+
while i < l:
|
4088
|
+
remaining_data_len = l - i
|
4089
|
+
remaining_buf_capacity = self._max_size - buf.tell()
|
3524
4090
|
|
3525
|
-
|
3526
|
-
|
3527
|
-
|
3528
|
-
logging.CRITICAL: syslog.LOG_CRIT,
|
3529
|
-
logging.ERROR: syslog.LOG_ERR,
|
3530
|
-
logging.WARNING: syslog.LOG_WARNING,
|
3531
|
-
# LOG_NOTICE ? # normal but significant condition
|
3532
|
-
logging.INFO: syslog.LOG_INFO,
|
3533
|
-
logging.DEBUG: syslog.LOG_DEBUG,
|
3534
|
-
}
|
4091
|
+
if remaining_data_len < remaining_buf_capacity:
|
4092
|
+
buf.write(data[i:])
|
4093
|
+
return
|
3535
4094
|
|
4095
|
+
p = i + remaining_buf_capacity
|
4096
|
+
yield self.Incomplete(self._append_and_reset(data[i:p]))
|
4097
|
+
i = p
|
3536
4098
|
|
3537
|
-
class JournaldLogHandler(logging.Handler):
|
3538
|
-
"""
|
3539
|
-
TODO:
|
3540
|
-
- fallback handler for when this barfs
|
3541
|
-
"""
|
3542
4099
|
|
3543
|
-
|
3544
|
-
|
3545
|
-
*,
|
3546
|
-
use_formatter_output: bool = False,
|
3547
|
-
) -> None:
|
4100
|
+
class ReadableListBuffer:
|
4101
|
+
def __init__(self) -> None:
|
3548
4102
|
super().__init__()
|
4103
|
+
self._lst: list[bytes] = []
|
3549
4104
|
|
3550
|
-
|
4105
|
+
def feed(self, d: bytes) -> None:
|
4106
|
+
if d:
|
4107
|
+
self._lst.append(d)
|
3551
4108
|
|
3552
|
-
|
4109
|
+
def _chop(self, i: int, e: int) -> bytes:
|
4110
|
+
lst = self._lst
|
4111
|
+
d = lst[i]
|
3553
4112
|
|
3554
|
-
|
4113
|
+
o = b''.join([
|
4114
|
+
*lst[:i],
|
4115
|
+
d[:e],
|
4116
|
+
])
|
3555
4117
|
|
3556
|
-
|
4118
|
+
self._lst = [
|
4119
|
+
*([d[e:]] if e < len(d) else []),
|
4120
|
+
*lst[i + 1:],
|
4121
|
+
]
|
4122
|
+
|
4123
|
+
return o
|
4124
|
+
|
4125
|
+
def read(self, n: ta.Optional[int] = None) -> ta.Optional[bytes]:
|
4126
|
+
if n is None:
|
4127
|
+
o = b''.join(self._lst)
|
4128
|
+
self._lst = []
|
4129
|
+
return o
|
4130
|
+
|
4131
|
+
if not (lst := self._lst):
|
4132
|
+
return None
|
4133
|
+
|
4134
|
+
c = 0
|
4135
|
+
for i, d in enumerate(lst):
|
4136
|
+
r = n - c
|
4137
|
+
if (l := len(d)) >= r:
|
4138
|
+
return self._chop(i, r)
|
4139
|
+
c += l
|
4140
|
+
|
4141
|
+
return None
|
4142
|
+
|
4143
|
+
def read_until(self, delim: bytes = b'\n') -> ta.Optional[bytes]:
|
4144
|
+
if not (lst := self._lst):
|
4145
|
+
return None
|
4146
|
+
|
4147
|
+
for i, d in enumerate(lst):
|
4148
|
+
if (p := d.find(delim)) >= 0:
|
4149
|
+
return self._chop(i, p + len(delim))
|
4150
|
+
|
4151
|
+
return None
|
4152
|
+
|
4153
|
+
|
4154
|
+
class IncrementalWriteBuffer:
|
4155
|
+
def __init__(
|
4156
|
+
self,
|
4157
|
+
data: bytes,
|
4158
|
+
*,
|
4159
|
+
write_size: int = 0x10000,
|
4160
|
+
) -> None:
|
4161
|
+
super().__init__()
|
4162
|
+
|
4163
|
+
check_non_empty(data)
|
4164
|
+
self._len = len(data)
|
4165
|
+
self._write_size = write_size
|
4166
|
+
|
4167
|
+
self._lst = [
|
4168
|
+
data[i:i + write_size]
|
4169
|
+
for i in range(0, len(data), write_size)
|
4170
|
+
]
|
4171
|
+
self._pos = 0
|
4172
|
+
|
4173
|
+
@property
|
4174
|
+
def rem(self) -> int:
|
4175
|
+
return self._len - self._pos
|
4176
|
+
|
4177
|
+
def write(self, fn: ta.Callable[[bytes], int]) -> int:
|
4178
|
+
lst = check_non_empty(self._lst)
|
4179
|
+
|
4180
|
+
t = 0
|
4181
|
+
for i, d in enumerate(lst): # noqa
|
4182
|
+
n = fn(check_non_empty(d))
|
4183
|
+
if not n:
|
4184
|
+
break
|
4185
|
+
t += n
|
4186
|
+
|
4187
|
+
if t:
|
4188
|
+
self._lst = [
|
4189
|
+
*([d[n:]] if n < len(d) else []),
|
4190
|
+
*lst[i + 1:],
|
4191
|
+
]
|
4192
|
+
self._pos += t
|
4193
|
+
|
4194
|
+
return t
|
4195
|
+
|
4196
|
+
|
4197
|
+
########################################
|
4198
|
+
# ../../../omlish/lite/journald.py
|
4199
|
+
|
4200
|
+
|
4201
|
+
##
|
4202
|
+
|
4203
|
+
|
4204
|
+
class sd_iovec(ct.Structure): # noqa
|
4205
|
+
pass
|
4206
|
+
|
4207
|
+
|
4208
|
+
sd_iovec._fields_ = [
|
4209
|
+
('iov_base', ct.c_void_p), # Pointer to data.
|
4210
|
+
('iov_len', ct.c_size_t), # Length of data.
|
4211
|
+
]
|
4212
|
+
|
4213
|
+
|
4214
|
+
##
|
4215
|
+
|
4216
|
+
|
4217
|
+
@cached_nullary
|
4218
|
+
def sd_libsystemd() -> ta.Any:
|
4219
|
+
lib = ct.CDLL('libsystemd.so.0')
|
4220
|
+
|
4221
|
+
lib.sd_journal_sendv.restype = ct.c_int
|
4222
|
+
lib.sd_journal_sendv.argtypes = [ct.POINTER(sd_iovec), ct.c_int]
|
4223
|
+
|
4224
|
+
return lib
|
4225
|
+
|
4226
|
+
|
4227
|
+
@cached_nullary
|
4228
|
+
def sd_try_libsystemd() -> ta.Optional[ta.Any]:
|
4229
|
+
try:
|
4230
|
+
return sd_libsystemd()
|
4231
|
+
except OSError: # noqa
|
4232
|
+
return None
|
4233
|
+
|
4234
|
+
|
4235
|
+
##
|
4236
|
+
|
4237
|
+
|
4238
|
+
def sd_journald_send(**fields: str) -> int:
|
4239
|
+
lib = sd_libsystemd()
|
4240
|
+
|
4241
|
+
msgs = [
|
4242
|
+
f'{k.upper()}={v}\0'.encode('utf-8')
|
4243
|
+
for k, v in fields.items()
|
4244
|
+
]
|
4245
|
+
|
4246
|
+
vec = (sd_iovec * len(msgs))()
|
4247
|
+
cl = (ct.c_char_p * len(msgs))() # noqa
|
4248
|
+
for i in range(len(msgs)):
|
4249
|
+
vec[i].iov_base = ct.cast(ct.c_char_p(msgs[i]), ct.c_void_p)
|
4250
|
+
vec[i].iov_len = len(msgs[i]) - 1
|
4251
|
+
|
4252
|
+
return lib.sd_journal_sendv(vec, len(msgs))
|
4253
|
+
|
4254
|
+
|
4255
|
+
##
|
4256
|
+
|
4257
|
+
|
4258
|
+
SD_LOG_LEVEL_MAP: ta.Mapping[int, int] = {
|
4259
|
+
logging.FATAL: syslog.LOG_EMERG, # system is unusable
|
4260
|
+
# LOG_ALERT ? # action must be taken immediately
|
4261
|
+
logging.CRITICAL: syslog.LOG_CRIT,
|
4262
|
+
logging.ERROR: syslog.LOG_ERR,
|
4263
|
+
logging.WARNING: syslog.LOG_WARNING,
|
4264
|
+
# LOG_NOTICE ? # normal but significant condition
|
4265
|
+
logging.INFO: syslog.LOG_INFO,
|
4266
|
+
logging.DEBUG: syslog.LOG_DEBUG,
|
4267
|
+
}
|
4268
|
+
|
4269
|
+
|
4270
|
+
class JournaldLogHandler(logging.Handler):
|
4271
|
+
"""
|
4272
|
+
TODO:
|
4273
|
+
- fallback handler for when this barfs
|
4274
|
+
"""
|
4275
|
+
|
4276
|
+
def __init__(
|
4277
|
+
self,
|
4278
|
+
*,
|
4279
|
+
use_formatter_output: bool = False,
|
4280
|
+
) -> None:
|
4281
|
+
super().__init__()
|
4282
|
+
|
4283
|
+
sd_libsystemd()
|
4284
|
+
|
4285
|
+
self._use_formatter_output = use_formatter_output
|
4286
|
+
|
4287
|
+
#
|
4288
|
+
|
4289
|
+
EXTRA_RECORD_ATTRS_BY_JOURNALD_FIELD: ta.ClassVar[ta.Mapping[str, str]] = {
|
3557
4290
|
'name': 'name',
|
3558
4291
|
'module': 'module',
|
3559
4292
|
'exception': 'exc_text',
|
@@ -4333,8 +5066,8 @@ class ProcessPipes:
 
 def make_process_pipes(stderr=True) -> ProcessPipes:
     """
-    Create pipes for parent to child stdin/stdout/stderr communications.
-
+    Create pipes for parent to child stdin/stdout/stderr communications. Open fd in non-blocking mode so we can read
+    them in the mainloop without blocking. If stderr is False, don't create a pipe for stderr.
     """
 
     pipes: ta.Dict[str, ta.Optional[Fd]] = {
@@ -4484,32 +5217,32 @@ class ProcessConfig:
     umask: ta.Optional[int] = None
     priority: int = 999
 
-
-
+    auto_start: bool = True
+    auto_restart: str = 'unexpected'
 
-
-
+    start_secs: int = 1
+    start_retries: int = 3
 
-
-
+    num_procs: int = 1
+    num_procs_start: int = 0
 
     @dc.dataclass(frozen=True)
     class Log:
         file: ta.Optional[str] = None
-
+        capture_max_bytes: ta.Optional[int] = None
         events_enabled: bool = False
         syslog: bool = False
         backups: ta.Optional[int] = None
-
+        max_bytes: ta.Optional[int] = None
 
     stdout: Log = Log()
     stderr: Log = Log()
 
-
-
-
+    stop_signal: int = signal.SIGTERM
+    stop_wait_secs: int = 10
+    stop_as_group: bool = False
 
-
+    kill_as_group: bool = False
 
     exitcodes: ta.Sequence[int] = (0,)
 
@@ -4534,14 +5267,14 @@ class ServerConfig:
     umask: int = 0o22
     directory: ta.Optional[str] = None
     logfile: str = 'supervisord.log'
-
+    logfile_max_bytes: int = 50 * 1024 * 1024
     logfile_backups: int = 10
     loglevel: int = logging.INFO
     pidfile: str = 'supervisord.pid'
     identifier: str = 'supervisor'
     child_logdir: str = '/dev/null'
-
-
+    min_fds: int = 1024
+    min_procs: int = 200
     nocleanup: bool = False
     strip_ansi: bool = False
     silent: bool = False
@@ -4554,7 +5287,7 @@ class ServerConfig:
             umask: ta.Union[int, str] = 0o22,
             directory: ta.Optional[str] = None,
             logfile: str = 'supervisord.log',
-
+            logfile_max_bytes: ta.Union[int, str] = 50 * 1024 * 1024,
             loglevel: ta.Union[int, str] = logging.INFO,
             pidfile: str = 'supervisord.pid',
             child_logdir: ta.Optional[str] = None,
@@ -4564,7 +5297,7 @@ class ServerConfig:
             umask=parse_octal(umask),
             directory=check_existing_dir(directory) if directory is not None else None,
             logfile=check_path_with_existing_dir(logfile),
-
+            logfile_max_bytes=parse_bytes_size(logfile_max_bytes),
             loglevel=parse_logging_level(loglevel),
             pidfile=check_path_with_existing_dir(pidfile),
             child_logdir=child_logdir if child_logdir else tempfile.gettempdir(),
@@ -4601,239 +5334,6 @@ def parse_logging_level(value: ta.Union[str, int]) -> int:
|
|
4601
5334
|
return level
|
4602
5335
|
|
4603
5336
|
|
4604
|
-
########################################
|
4605
|
-
# ../poller.py
|
4606
|
-
|
4607
|
-
|
4608
|
-
class Poller(DaemonizeListener, abc.ABC):
|
4609
|
-
def __init__(self) -> None:
|
4610
|
-
super().__init__()
|
4611
|
-
|
4612
|
-
@abc.abstractmethod
|
4613
|
-
def register_readable(self, fd: Fd) -> None:
|
4614
|
-
raise NotImplementedError
|
4615
|
-
|
4616
|
-
@abc.abstractmethod
|
4617
|
-
def register_writable(self, fd: Fd) -> None:
|
4618
|
-
raise NotImplementedError
|
4619
|
-
|
4620
|
-
@abc.abstractmethod
|
4621
|
-
def unregister_readable(self, fd: Fd) -> None:
|
4622
|
-
raise NotImplementedError
|
4623
|
-
|
4624
|
-
@abc.abstractmethod
|
4625
|
-
def unregister_writable(self, fd: Fd) -> None:
|
4626
|
-
raise NotImplementedError
|
4627
|
-
|
4628
|
-
@abc.abstractmethod
|
4629
|
-
def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
|
4630
|
-
raise NotImplementedError
|
4631
|
-
|
4632
|
-
def before_daemonize(self) -> None: # noqa
|
4633
|
-
pass
|
4634
|
-
|
4635
|
-
def after_daemonize(self) -> None: # noqa
|
4636
|
-
pass
|
4637
|
-
|
4638
|
-
def close(self) -> None: # noqa
|
4639
|
-
pass
|
4640
|
-
|
4641
|
-
|
4642
|
-
class SelectPoller(Poller):
|
4643
|
-
def __init__(self) -> None:
|
4644
|
-
super().__init__()
|
4645
|
-
|
4646
|
-
self._readable: ta.Set[Fd] = set()
|
4647
|
-
self._writable: ta.Set[Fd] = set()
|
4648
|
-
|
4649
|
-
def register_readable(self, fd: Fd) -> None:
|
4650
|
-
self._readable.add(fd)
|
4651
|
-
|
4652
|
-
def register_writable(self, fd: Fd) -> None:
|
4653
|
-
self._writable.add(fd)
|
4654
|
-
|
4655
|
-
def unregister_readable(self, fd: Fd) -> None:
|
4656
|
-
self._readable.discard(fd)
|
4657
|
-
|
4658
|
-
def unregister_writable(self, fd: Fd) -> None:
|
4659
|
-
self._writable.discard(fd)
|
4660
|
-
|
4661
|
-
def unregister_all(self) -> None:
|
4662
|
-
self._readable.clear()
|
4663
|
-
self._writable.clear()
|
4664
|
-
|
4665
|
-
def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
|
4666
|
-
try:
|
4667
|
-
r, w, x = select.select(
|
4668
|
-
self._readable,
|
4669
|
-
self._writable,
|
4670
|
-
[], timeout,
|
4671
|
-
)
|
4672
|
-
except OSError as exc:
|
4673
|
-
if exc.args[0] == errno.EINTR:
|
4674
|
-
log.debug('EINTR encountered in poll')
|
4675
|
-
return [], []
|
4676
|
-
if exc.args[0] == errno.EBADF:
|
4677
|
-
log.debug('EBADF encountered in poll')
|
4678
|
-
self.unregister_all()
|
4679
|
-
return [], []
|
4680
|
-
raise
|
4681
|
-
return r, w
|
4682
|
-
|
4683
|
-
|
4684
|
-
class PollPoller(Poller):
|
4685
|
-
_READ = select.POLLIN | select.POLLPRI | select.POLLHUP
|
4686
|
-
_WRITE = select.POLLOUT
|
4687
|
-
|
4688
|
-
def __init__(self) -> None:
|
4689
|
-
super().__init__()
|
4690
|
-
|
4691
|
-
self._poller = select.poll()
|
4692
|
-
self._readable: set[Fd] = set()
|
4693
|
-
self._writable: set[Fd] = set()
|
4694
|
-
|
4695
|
-
def register_readable(self, fd: Fd) -> None:
|
4696
|
-
self._poller.register(fd, self._READ)
|
4697
|
-
self._readable.add(fd)
|
4698
|
-
|
4699
|
-
def register_writable(self, fd: Fd) -> None:
|
4700
|
-
self._poller.register(fd, self._WRITE)
|
4701
|
-
self._writable.add(fd)
|
4702
|
-
|
4703
|
-
def unregister_readable(self, fd: Fd) -> None:
|
4704
|
-
self._readable.discard(fd)
|
4705
|
-
self._poller.unregister(fd)
|
4706
|
-
if fd in self._writable:
|
4707
|
-
self._poller.register(fd, self._WRITE)
|
4708
|
-
|
4709
|
-
def unregister_writable(self, fd: Fd) -> None:
|
4710
|
-
self._writable.discard(fd)
|
4711
|
-
self._poller.unregister(fd)
|
4712
|
-
if fd in self._readable:
|
4713
|
-
self._poller.register(fd, self._READ)
|
4714
|
-
|
4715
|
-
def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
|
4716
|
-
fds = self._poll_fds(timeout) # type: ignore
|
4717
|
-
readable, writable = [], []
|
4718
|
-
for fd, eventmask in fds:
|
4719
|
-
if self._ignore_invalid(fd, eventmask):
|
4720
|
-
continue
|
4721
|
-
if eventmask & self._READ:
|
4722
|
-
readable.append(fd)
|
4723
|
-
if eventmask & self._WRITE:
|
4724
|
-
writable.append(fd)
|
4725
|
-
return readable, writable
|
4726
|
-
|
4727
|
-
def _poll_fds(self, timeout: float) -> ta.List[ta.Tuple[Fd, Fd]]:
|
4728
|
-
try:
|
4729
|
-
return self._poller.poll(timeout * 1000) # type: ignore
|
4730
|
-
except OSError as exc:
|
4731
|
-
if exc.args[0] == errno.EINTR:
|
4732
|
-
log.debug('EINTR encountered in poll')
|
4733
|
-
return []
|
4734
|
-
raise
|
4735
|
-
|
4736
|
-
def _ignore_invalid(self, fd: Fd, eventmask: int) -> bool:
|
4737
|
-
if eventmask & select.POLLNVAL:
|
4738
|
-
# POLLNVAL means `fd` value is invalid, not open. When a process quits it's `fd`s are closed so there is no
|
4739
|
-
# more reason to keep this `fd` registered If the process restarts it's `fd`s are registered again.
|
4740
|
-
self._poller.unregister(fd)
|
4741
|
-
self._readable.discard(fd)
|
4742
|
-
self._writable.discard(fd)
|
4743
|
-
return True
|
4744
|
-
return False
|
4745
|
-
|
4746
|
-
|
4747
|
-
if sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
|
4748
|
-
class KqueuePoller(Poller):
|
4749
|
-
max_events = 1000
|
4750
|
-
|
4751
|
-
def __init__(self) -> None:
|
4752
|
-
super().__init__()
|
4753
|
-
|
4754
|
-
self._kqueue: ta.Optional[ta.Any] = select.kqueue()
|
4755
|
-
self._readable: set[Fd] = set()
|
4756
|
-
self._writable: set[Fd] = set()
|
4757
|
-
|
4758
|
-
def register_readable(self, fd: Fd) -> None:
|
4759
|
-
self._readable.add(fd)
|
4760
|
-
kevent = select.kevent(fd, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_ADD)
|
4761
|
-
self._kqueue_control(fd, kevent)
|
4762
|
-
|
4763
|
-
def register_writable(self, fd: Fd) -> None:
|
4764
|
-
self._writable.add(fd)
|
4765
|
-
kevent = select.kevent(fd, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_ADD)
|
4766
|
-
self._kqueue_control(fd, kevent)
|
4767
|
-
|
4768
|
-
def unregister_readable(self, fd: Fd) -> None:
|
4769
|
-
kevent = select.kevent(fd, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_DELETE)
|
4770
|
-
self._readable.discard(fd)
|
4771
|
-
self._kqueue_control(fd, kevent)
|
4772
|
-
|
4773
|
-
def unregister_writable(self, fd: Fd) -> None:
|
4774
|
-
kevent = select.kevent(fd, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_DELETE)
|
4775
|
-
self._writable.discard(fd)
|
4776
|
-
self._kqueue_control(fd, kevent)
|
4777
|
-
|
4778
|
-
def _kqueue_control(self, fd: Fd, kevent: 'select.kevent') -> None:
|
4779
|
-
try:
|
4780
|
-
self._kqueue.control([kevent], 0) # type: ignore
|
4781
|
-
except OSError as error:
|
4782
|
-
if error.errno == errno.EBADF:
|
4783
|
-
log.debug('EBADF encountered in kqueue. Invalid file descriptor %s', fd)
|
4784
|
-
else:
|
4785
|
-
raise
|
4786
|
-
|
4787
|
-
def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
|
4788
|
-
readable, writable = [], [] # type: ignore
|
4789
|
-
|
4790
|
-
try:
|
4791
|
-
kevents = self._kqueue.control(None, self.max_events, timeout) # type: ignore
|
4792
|
-
except OSError as error:
|
4793
|
-
if error.errno == errno.EINTR:
|
4794
|
-
log.debug('EINTR encountered in poll')
|
4795
|
-
return readable, writable
|
4796
|
-
raise
|
4797
|
-
|
4798
|
-
for kevent in kevents:
|
4799
|
-
if kevent.filter == select.KQ_FILTER_READ:
|
4800
|
-
readable.append(kevent.ident)
|
4801
|
-
if kevent.filter == select.KQ_FILTER_WRITE:
|
4802
|
-
writable.append(kevent.ident)
|
4803
|
-
|
4804
|
-
return readable, writable
|
4805
|
-
|
4806
|
-
def before_daemonize(self) -> None:
|
4807
|
-
self.close()
|
4808
|
-
|
4809
|
-
def after_daemonize(self) -> None:
|
4810
|
-
self._kqueue = select.kqueue()
|
4811
|
-
for fd in self._readable:
|
4812
|
-
self.register_readable(fd)
|
4813
|
-
for fd in self._writable:
|
4814
|
-
self.register_writable(fd)
|
4815
|
-
|
4816
|
-
def close(self) -> None:
|
4817
|
-
self._kqueue.close() # type: ignore
|
4818
|
-
self._kqueue = None
|
4819
|
-
|
4820
|
-
else:
|
4821
|
-
KqueuePoller = None
|
4822
|
-
|
4823
|
-
|
4824
|
-
def get_poller_impl() -> ta.Type[Poller]:
|
4825
|
-
if (
|
4826
|
-
(sys.platform == 'darwin' or sys.platform.startswith('freebsd')) and
|
4827
|
-
hasattr(select, 'kqueue') and
|
4828
|
-
KqueuePoller is not None
|
4829
|
-
):
|
4830
|
-
return KqueuePoller
|
4831
|
-
elif hasattr(select, 'poll'):
|
4832
|
-
return PollPoller
|
4833
|
-
else:
|
4834
|
-
return SelectPoller
|
4835
|
-
|
4836
|
-
|
4837
5337
|
########################################
|
4838
5338
|
# ../../../omlish/lite/http/coroserver.py
|
4839
5339
|
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
|
@@ -5442,86 +5942,35 @@ class SupervisorStateManager(abc.ABC):
 ##


-class
-    @property
+class HasDispatchers(abc.ABC):
     @abc.abstractmethod
-    def
+    def get_dispatchers(self) -> 'Dispatchers':
         raise NotImplementedError

+
+class ProcessDispatcher(FdIoHandler, abc.ABC):
     @property
     @abc.abstractmethod
-    def
+    def channel(self) -> str:
         raise NotImplementedError

     @property
     @abc.abstractmethod
-    def
+    def process(self) -> 'Process':
         raise NotImplementedError

-    #

+class ProcessOutputDispatcher(ProcessDispatcher, abc.ABC):
     @abc.abstractmethod
-    def
+    def remove_logs(self) -> None:
         raise NotImplementedError

     @abc.abstractmethod
-    def
+    def reopen_logs(self) -> None:
         raise NotImplementedError

-    #

-
-    def readable(self) -> bool:
-        raise NotImplementedError
-
-    @abc.abstractmethod
-    def writable(self) -> bool:
-        raise NotImplementedError
-
-    #
-
-    def handle_read_event(self) -> None:
-        raise TypeError
-
-    def handle_write_event(self) -> None:
-        raise TypeError
-
-    #
-
-    def handle_connect(self) -> None:
-        raise TypeError
-
-    def handle_close(self) -> None:
-        raise TypeError
-
-    def handle_accepted(self, sock, addr) -> None:
-        raise TypeError
-
-
-class HasDispatchers(abc.ABC):
-    @abc.abstractmethod
-    def get_dispatchers(self) -> 'Dispatchers':
-        raise NotImplementedError
-
-
-class ProcessDispatcher(Dispatcher, abc.ABC):
-    @property
-    @abc.abstractmethod
-    def process(self) -> 'Process':
-        raise NotImplementedError
-
-
-class ProcessOutputDispatcher(ProcessDispatcher, abc.ABC):
-    @abc.abstractmethod
-    def remove_logs(self) -> None:
-        raise NotImplementedError
-
-    @abc.abstractmethod
-    def reopen_logs(self) -> None:
-        raise NotImplementedError
-
-
-class ProcessInputDispatcher(ProcessDispatcher, abc.ABC):
+class ProcessInputDispatcher(ProcessDispatcher, abc.ABC):
     @abc.abstractmethod
     def write(self, chars: ta.Union[bytes, str]) -> None:
         raise NotImplementedError
@@ -5625,13 +6074,139 @@ class ProcessGroup(
         raise NotImplementedError


+########################################
+# ../../../omlish/lite/fdio/corohttp.py
+
+
+class CoroHttpServerConnectionFdIoHandler(SocketFdIoHandler):
+    def __init__(
+            self,
+            addr: SocketAddress,
+            sock: socket.socket,
+            handler: HttpHandler,
+            *,
+            read_size: int = 0x10000,
+            write_size: int = 0x10000,
+    ) -> None:
+        check_state(not sock.getblocking())
+
+        super().__init__(addr, sock)
+
+        self._handler = handler
+        self._read_size = read_size
+        self._write_size = write_size
+
+        self._read_buf = ReadableListBuffer()
+        self._write_buf: IncrementalWriteBuffer | None = None
+
+        self._coro_srv = CoroHttpServer(
+            addr,
+            handler=self._handler,
+        )
+        self._srv_coro: ta.Optional[ta.Generator[CoroHttpServer.Io, ta.Optional[bytes], None]] = self._coro_srv.coro_handle()  # noqa
+
+        self._cur_io: CoroHttpServer.Io | None = None
+        self._next_io()
+
+    #
+
+    def _next_io(self) -> None:  # noqa
+        coro = check_not_none(self._srv_coro)
+
+        d: bytes | None = None
+        o = self._cur_io
+        while True:
+            if o is None:
+                try:
+                    if d is not None:
+                        o = coro.send(d)
+                        d = None
+                    else:
+                        o = next(coro)
+                except StopIteration:
+                    self.close()
+                    o = None
+                    break
+
+            if isinstance(o, CoroHttpServer.AnyLogIo):
+                print(o)
+                o = None
+
+            elif isinstance(o, CoroHttpServer.ReadIo):
+                if (d := self._read_buf.read(o.sz)) is None:
+                    break
+                o = None
+
+            elif isinstance(o, CoroHttpServer.ReadLineIo):
+                if (d := self._read_buf.read_until(b'\n')) is None:
+                    break
+                o = None
+
+            elif isinstance(o, CoroHttpServer.WriteIo):
+                check_none(self._write_buf)
+                self._write_buf = IncrementalWriteBuffer(o.data, write_size=self._write_size)
+                break
+
+            else:
+                raise TypeError(o)
+
+        self._cur_io = o
+
+    #
+
+    def readable(self) -> bool:
+        return True
+
+    def writable(self) -> bool:
+        return self._write_buf is not None
+
+    #
+
+    def on_readable(self) -> None:
+        try:
+            buf = check_not_none(self._sock).recv(self._read_size)
+        except BlockingIOError:
+            return
+        except ConnectionResetError:
+            self.close()
+            return
+        if not buf:
+            self.close()
+            return
+
+        self._read_buf.feed(buf)
+
+        if isinstance(self._cur_io, CoroHttpServer.AnyReadIo):
+            self._next_io()
+
+    def on_writable(self) -> None:
+        check_isinstance(self._cur_io, CoroHttpServer.WriteIo)
+        wb = check_not_none(self._write_buf)
+        while wb.rem > 0:
+            def send(d: bytes) -> int:
+                try:
+                    return check_not_none(self._sock).send(d)
+                except ConnectionResetError:
+                    self.close()
+                    return 0
+                except BlockingIOError:
+                    return 0
+            if not wb.write(send):
+                break
+
+        if wb.rem < 1:
+            self._write_buf = None
+            self._cur_io = None
+            self._next_io()
+
+
 ########################################
 # ../dispatchers.py


-class Dispatchers(KeyedCollection[Fd,
-    def _key(self, v:
-        return v.fd
+class Dispatchers(KeyedCollection[Fd, FdIoHandler]):
+    def _key(self, v: FdIoHandler) -> Fd:
+        return Fd(v.fd())

 #

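The CoroHttpServerConnectionFdIoHandler added above drives omlish's coroutine-based HTTP server from non-blocking fd callbacks: the server coroutine yields read/write IO requests, and the handler fulfills them as socket readiness events arrive. A minimal sketch of that driving pattern, with hypothetical names rather than the real omlish API:

# Illustrative sketch of a coroutine-driven IO loop (hypothetical names, not the omlish API).
import typing as ta


class ReadIo(ta.NamedTuple):
    sz: int


class WriteIo(ta.NamedTuple):
    data: bytes


def echo_once() -> ta.Generator[ta.Union[ReadIo, WriteIo], ta.Optional[bytes], None]:
    data = yield ReadIo(1024)   # ask the driver for up to 1024 bytes
    yield WriteIo(data or b'')  # then ask it to write them back


def drive(coro, recv, send) -> None:
    d: ta.Optional[bytes] = None
    try:
        while True:
            o = coro.send(d) if d is not None else next(coro)
            d = None
            if isinstance(o, ReadIo):
                d = recv(o.sz)      # driver fulfills the read request
            elif isinstance(o, WriteIo):
                send(o.data)        # driver fulfills the write request
    except StopIteration:
        pass


# Usage with in-memory callables standing in for a non-blocking socket:
buf = [b'hello']
out: list = []
drive(echo_once(), lambda sz: buf.pop(0), out.append)
assert out == [b'hello']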
@@ -5640,9 +6215,9 @@ class Dispatchers(KeyedCollection[Fd, Dispatcher]):
             # note that we *must* call readable() for every dispatcher, as it may have side effects for a given
             # dispatcher (eg. call handle_listener_state_change for event listener processes)
             if d.readable():
-                d.
+                d.on_readable()
             if d.writable():
-                d.
+                d.on_writable()

     #

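This hunk moves the dispatcher callbacks onto the FdIoHandler names: readiness is still queried via readable()/writable(), but events are now delivered through on_readable()/on_writable(). A simplified, illustrative sketch of that drain pass:

# Minimal sketch of the readable/writable drain pattern (simplified, illustrative only).
import typing as ta


class FakeHandler:
    def __init__(self, data: bytes) -> None:
        self._data = data
        self.seen: ta.List[bytes] = []

    def readable(self) -> bool:
        return bool(self._data)

    def writable(self) -> bool:
        return False

    def on_readable(self) -> None:
        self.seen.append(self._data)
        self._data = b''

    def on_writable(self) -> None:
        pass


def drain(handlers: ta.Iterable[FakeHandler]) -> None:
    # readable() is consulted for every handler on every pass, as noted in the diff above
    for h in handlers:
        if h.readable():
            h.on_readable()
        if h.writable():
            h.on_writable()


h = FakeHandler(b'x')
drain([h])
assert h.seen == [b'x'] and not h.readable()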
@@ -5696,7 +6271,6 @@ class BaseProcessDispatcherImpl(ProcessDispatcher, abc.ABC):
     def channel(self) -> str:
         return self._channel

-    @property
     def fd(self) -> Fd:
         return self._fd

@@ -5711,7 +6285,7 @@ class BaseProcessDispatcherImpl(ProcessDispatcher, abc.ABC):
         log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
         self._closed = True

-    def
+    def on_error(self, exc: ta.Optional[BaseException] = None) -> None:
         nil, t, v, tbinfo = compact_traceback()

         log.critical('uncaptured python exception, closing channel %s (%s:%s %s)', repr(self), t, v, tbinfo)
@@ -5784,7 +6358,7 @@ class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispat
         channel = self._channel  # noqa

         logfile = self._lc.file
-
+        max_bytes = self._lc.max_bytes  # noqa
         backups = self._lc.backups  # noqa
         to_syslog = self._lc.syslog

@@ -5796,8 +6370,8 @@ class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispat
         # self.normal_log,
         # filename=logfile,
         # fmt='%(message)s',
-        # rotating=bool(
-        #
+        # rotating=bool(max_bytes),  # optimization
+        # max_bytes=max_bytes,
         # backups=backups,
         # )

@@ -5809,17 +6383,17 @@ class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispat

    def _init_capture_log(self) -> None:
        """
-        Configure the capture log for this process.
+        Configure the capture log for this process. This log is used to temporarily capture output when special output
        is detected. Sets self.capture_log if capturing is enabled.
        """

-
-        if
+        capture_max_bytes = self._lc.capture_max_bytes
+        if capture_max_bytes:
            self._capture_log = logging.getLogger(__name__)
            # loggers.handle_boundIO(
            #     self._capture_log,
            #     fmt='%(message)s',
-            #
+            #     max_bytes=capture_max_bytes,
            # )

    def remove_logs(self) -> None:
@@ -5927,12 +6501,12 @@ class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispat
                return False
        return True

-    def
+    def on_readable(self) -> None:
        data = read_fd(self._fd)
        self._output_buffer += data
        self.record_output()
        if not data:
-            # if we get no data back from the pipe, it means that the child process has ended.
+            # if we get no data back from the pipe, it means that the child process has ended. See
            # mail.python.org/pipermail/python-dev/2004-August/046850.html
            self.close()

@@ -5965,15 +6539,12 @@ class ProcessInputDispatcherImpl(BaseProcessDispatcherImpl, ProcessInputDispatch
            return True
        return False

-    def readable(self) -> bool:
-        return False
-
    def flush(self) -> None:
        # other code depends on this raising EPIPE if the pipe is closed
        sent = os.write(self._fd, as_bytes(self._input_buffer))
        self._input_buffer = self._input_buffer[sent:]

-    def
+    def on_writable(self) -> None:
        if self._input_buffer:
            try:
                self.flush()
@@ -5985,79 +6556,6 @@ class ProcessInputDispatcherImpl(BaseProcessDispatcherImpl, ProcessInputDispatch
                raise


-########################################
-# ../groups.py
-
-
-class ProcessGroupManager(KeyedCollectionAccessors[str, ProcessGroup]):
-    def __init__(
-            self,
-            *,
-            event_callbacks: EventCallbacks,
-    ) -> None:
-        super().__init__()
-
-        self._event_callbacks = event_callbacks
-
-        self._by_name: ta.Dict[str, ProcessGroup] = {}
-
-    @property
-    def _by_key(self) -> ta.Mapping[str, ProcessGroup]:
-        return self._by_name
-
-    #
-
-    def all_processes(self) -> ta.Iterator[Process]:
-        for g in self:
-            yield from g
-
-    #
-
-    def add(self, group: ProcessGroup) -> None:
-        if (name := group.name) in self._by_name:
-            raise KeyError(f'Process group already exists: {name}')
-
-        self._by_name[name] = group
-
-        self._event_callbacks.notify(ProcessGroupAddedEvent(name))
-
-    def remove(self, name: str) -> None:
-        group = self._by_name[name]
-
-        group.before_remove()
-
-        del self._by_name[name]
-
-        self._event_callbacks.notify(ProcessGroupRemovedEvent(name))
-
-    def clear(self) -> None:
-        # FIXME: events?
-        self._by_name.clear()
-
-    #
-
-    class Diff(ta.NamedTuple):
-        added: ta.List[ProcessGroupConfig]
-        changed: ta.List[ProcessGroupConfig]
-        removed: ta.List[ProcessGroupConfig]
-
-    def diff(self, new: ta.Sequence[ProcessGroupConfig]) -> Diff:
-        cur = [group.config for group in self]
-
-        cur_by_name = {cfg.name: cfg for cfg in cur}
-        new_by_name = {cfg.name: cfg for cfg in new}
-
-        added = [cand for cand in new if cand.name not in cur_by_name]
-        removed = [cand for cand in cur if cand.name not in new_by_name]
-        changed = [cand for cand in new if cand != cur_by_name.get(cand.name, cand)]
-
-        return ProcessGroupManager.Diff(
-            added,
-            changed,
-            removed,
-        )
-
-
 ########################################
 # ../groupsimpl.py

@@ -6217,21 +6715,21 @@ class SupervisorSetupImpl(SupervisorSetup):
    def _cleanup_fds(self) -> None:
        # try to close any leaked file descriptors (for reload)
        start = 5
-        os.closerange(start, self._config.
+        os.closerange(start, self._config.min_fds)

    #

    def _set_uid_or_exit(self) -> None:
        """
-        Set the uid of the supervisord process.
+        Set the uid of the supervisord process. Called during supervisord startup only. No return value. Exits the
        process via usage() if privileges could not be dropped.
        """

        if self._user is None:
            if os.getuid() == 0:
                warnings.warn(
-                    'Supervisor is running as root.
-                    'config file.
+                    'Supervisor is running as root. Privileges were not dropped because no user is specified in the '
+                    'config file. If you intend to run as root, you can set user=root in the config file to avoid '
                    'this message.',
                )
            else:
@@ -6245,8 +6743,8 @@ class SupervisorSetupImpl(SupervisorSetup):

    def _set_rlimits_or_exit(self) -> None:
        """
-        Set the rlimits of the supervisord process.
-
+        Set the rlimits of the supervisord process. Called during supervisord startup only. No return value. Exits the
+        process via usage() if any rlimits could not be set.
        """

        limits = []
@@ -6255,12 +6753,12 @@ class SupervisorSetupImpl(SupervisorSetup):
            limits.append({
                'msg': (
                    'The minimum number of file descriptors required to run this process is %(min_limit)s as per the '
-                    '"
-                    'you to open %(hard)s file descriptors.
-                    'your environment (see README.rst) or lower the
+                    '"min_fds" command-line argument or config file setting. The current environment will only allow '
+                    'you to open %(hard)s file descriptors. Either raise the number of usable file descriptors in '
+                    'your environment (see README.rst) or lower the min_fds setting in the config file to allow the '
                    'process to start.'
                ),
-                'min': self._config.
+                'min': self._config.min_fds,
                'resource': resource.RLIMIT_NOFILE,
                'name': 'RLIMIT_NOFILE',
            })
@@ -6270,11 +6768,11 @@ class SupervisorSetupImpl(SupervisorSetup):
                'msg': (
                    'The minimum number of available processes required to run this program is %(min_limit)s as per '
                    'the "minprocs" command-line argument or config file setting. The current environment will only '
-                    'allow you to open %(hard)s processes.
+                    'allow you to open %(hard)s processes. Either raise the number of usable processes in your '
                    'environment (see README.rst) or lower the minprocs setting in the config file to allow the '
                    'program to start.'
                ),
-                'min': self._config.
+                'min': self._config.min_procs,
                'resource': resource.RLIMIT_NPROC,
                'name': 'RLIMIT_NPROC',
            })
@@ -6360,11 +6858,11 @@ class SupervisorSetupImpl(SupervisorSetup):
            dl.after_daemonize()

    def _do_daemonize(self) -> None:
-        # To daemonize, we need to become the leader of our own session (process) group.
-        # our parent process will also be sent to us.
+        # To daemonize, we need to become the leader of our own session (process) group. If we do not, signals sent to
+        # our parent process will also be sent to us. This might be bad because signals such as SIGINT can be sent to
        # our parent process during normal (uninteresting) operations such as when we press Ctrl-C in the parent
        # terminal window to escape from a logtail command. To disassociate ourselves from our parent's session group we
-        # use os.setsid.
+        # use os.setsid. It means "set session id", which has the effect of disassociating a process from is current
        # session and process group and setting itself up as a new session leader.
        #
        # Unfortunately we cannot call setsid if we're already a session group leader, so we use "fork" to make a copy
@@ -6396,12 +6894,98 @@ class SupervisorSetupImpl(SupervisorSetup):
        os.dup2(2, os.open('/dev/null', os.O_WRONLY))

        # XXX Stevens, in his Advanced Unix book, section 13.3 (page 417) recommends calling umask(0) and closing unused
-        # file descriptors.
+        # file descriptors. In his Network Programming book, he additionally recommends ignoring SIGHUP and forking
        # again after the setsid() call, for obscure SVR4 reasons.
        os.setsid()
        os.umask(self._config.umask)


+########################################
+# ../groups.py
+
+
+class ProcessGroupManager(
+    KeyedCollectionAccessors[str, ProcessGroup],
+    HasDispatchers,
+):
+    def __init__(
+            self,
+            *,
+            event_callbacks: EventCallbacks,
+    ) -> None:
+        super().__init__()
+
+        self._event_callbacks = event_callbacks
+
+        self._by_name: ta.Dict[str, ProcessGroup] = {}
+
+    @property
+    def _by_key(self) -> ta.Mapping[str, ProcessGroup]:
+        return self._by_name
+
+    #
+
+    def all_processes(self) -> ta.Iterator[Process]:
+        for g in self:
+            yield from g
+
+    #
+
+    def get_dispatchers(self) -> Dispatchers:
+        return Dispatchers(
+            d
+            for g in self
+            for p in g
+            for d in p.get_dispatchers()
+        )
+
+    #
+
+    def add(self, group: ProcessGroup) -> None:
+        if (name := group.name) in self._by_name:
+            raise KeyError(f'Process group already exists: {name}')
+
+        self._by_name[name] = group
+
+        self._event_callbacks.notify(ProcessGroupAddedEvent(name))
+
+    def remove(self, name: str) -> None:
+        group = self._by_name[name]
+
+        group.before_remove()
+
+        del self._by_name[name]
+
+        self._event_callbacks.notify(ProcessGroupRemovedEvent(name))
+
+    def clear(self) -> None:
+        # FIXME: events?
+        self._by_name.clear()
+
+    #
+
+    class Diff(ta.NamedTuple):
+        added: ta.List[ProcessGroupConfig]
+        changed: ta.List[ProcessGroupConfig]
+        removed: ta.List[ProcessGroupConfig]
+
+    def diff(self, new: ta.Sequence[ProcessGroupConfig]) -> Diff:
+        cur = [group.config for group in self]
+
+        cur_by_name = {cfg.name: cfg for cfg in cur}
+        new_by_name = {cfg.name: cfg for cfg in new}
+
+        added = [cand for cand in new if cand.name not in cur_by_name]
+        removed = [cand for cand in cur if cand.name not in new_by_name]
+        changed = [cand for cand in new if cand != cur_by_name.get(cand.name, cand)]
+
+        return ProcessGroupManager.Diff(
+            added,
+            changed,
+            removed,
+        )
+
+
 ########################################
 # ../io.py

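ProcessGroupManager now also implements HasDispatchers, and its diff() computes added/changed/removed group configs keyed by name. The comparison logic, sketched on plain dicts (illustrative only, not the package's types):

# Sketch of the added/changed/removed diff computation used by ProcessGroupManager.diff (illustrative).
import typing as ta


def diff_configs(
        cur: ta.Sequence[ta.Mapping],
        new: ta.Sequence[ta.Mapping],
) -> ta.Tuple[list, list, list]:
    cur_by_name = {c['name']: c for c in cur}
    new_by_name = {c['name']: c for c in new}

    added = [c for c in new if c['name'] not in cur_by_name]
    removed = [c for c in cur if c['name'] not in new_by_name]
    # "changed" compares each new config against the current one of the same name
    changed = [c for c in new if c != cur_by_name.get(c['name'], c)]

    return added, changed, removed


cur = [{'name': 'a', 'cmd': 'x'}, {'name': 'b', 'cmd': 'y'}]
new = [{'name': 'b', 'cmd': 'z'}, {'name': 'c', 'cmd': 'w'}]
added, changed, removed = diff_configs(cur, new)
assert [c['name'] for c in added] == ['c']
assert [c['name'] for c in changed] == ['b']
assert [c['name'] for c in removed] == ['a']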
@@ -6409,49 +6993,59 @@ class SupervisorSetupImpl(SupervisorSetup):
 ##


-
+HasDispatchersList = ta.NewType('HasDispatchersList', ta.Sequence[HasDispatchers])
+
+
+class IoManager(HasDispatchers):
     def __init__(
             self,
             *,
-            poller:
-
+            poller: FdIoPoller,
+            has_dispatchers_list: HasDispatchersList,
     ) -> None:
         super().__init__()

         self._poller = poller
-        self.
+        self._has_dispatchers_list = has_dispatchers_list

     def get_dispatchers(self) -> Dispatchers:
         return Dispatchers(
             d
-            for
-            for d in
+            for hd in self._has_dispatchers_list
+            for d in hd.get_dispatchers()
         )

     def poll(self) -> None:
         dispatchers = self.get_dispatchers()

-
-        if
-
-
-                self._poller.register_writable(fd)
+        self._poller.update(
+            {fd for fd, d in dispatchers.items() if d.readable()},
+            {fd for fd, d in dispatchers.items() if d.writable()},
+        )

         timeout = 1  # this cannot be fewer than the smallest TickEvent (5)
-
-
-
+        log.info(f'Polling: {timeout=}')  # noqa
+        polled = self._poller.poll(timeout)
+        log.info(f'Polled: {polled=}')  # noqa
+        if polled.msg is not None:
+            log.error(polled.msg)
+        if polled.exc is not None:
+            log.error('Poll exception: %r', polled.exc)
+
+        for r in polled.r:
+            fd = Fd(r)
             if fd in dispatchers:
+                dispatcher = dispatchers[fd]
                 try:
-                    dispatcher = dispatchers[fd]
                     log.debug('read event caused by %r', dispatcher)
-                    dispatcher.
+                    dispatcher.on_readable()
                     if not dispatcher.readable():
                         self._poller.unregister_readable(fd)
                 except ExitNow:
                     raise
-                except Exception:  # noqa
-
+                except Exception as exc:  # noqa
+                    log.exception('Error in dispatcher: %r', dispatcher)
+                    dispatcher.on_error(exc)
             else:
                 # if the fd is not in combined map, we should unregister it. otherwise, it will be polled every
                 # time, which may cause 100% cpu usage
@@ -6461,18 +7055,20 @@ class IoManager:
                 except Exception:  # noqa
                     pass

-        for
+        for w in polled.w:
+            fd = Fd(w)
             if fd in dispatchers:
+                dispatcher = dispatchers[fd]
                 try:
-                    dispatcher = dispatchers[fd]
                     log.debug('write event caused by %r', dispatcher)
-                    dispatcher.
+                    dispatcher.on_writable()
                     if not dispatcher.writable():
                         self._poller.unregister_writable(fd)
                 except ExitNow:
                     raise
-                except Exception:  # noqa
-
+                except Exception as exc:  # noqa
+                    log.exception('Error in dispatcher: %r', dispatcher)
+                    dispatcher.on_error(exc)
             else:
                 log.debug('unexpected write event from fd %r', fd)
                 try:
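IoManager now hands the poller explicit readable/writable fd interest sets via update() and consumes a polled result carrying r/w lists. A rough sketch of that update()/poll() shape using select (illustrative; the real FdIoPoller API may differ):

# Sketch of the update()/poll() shape the new FdIoPoller-based loop relies on (illustrative, not the omlish API).
import os
import select
import typing as ta


class TinySelectPoller:
    def __init__(self) -> None:
        self._r: ta.Set[int] = set()
        self._w: ta.Set[int] = set()

    def update(self, r: ta.AbstractSet[int], w: ta.AbstractSet[int]) -> None:
        # the IoManager recomputes the interest sets from dispatcher.readable()/writable() each pass
        self._r, self._w = set(r), set(w)

    def poll(self, timeout: float) -> ta.Tuple[ta.List[int], ta.List[int]]:
        if not self._r and not self._w:
            return [], []
        r, w, _ = select.select(self._r, self._w, [], timeout)
        return list(r), list(w)


# Usage with a pipe standing in for process fds:
rd, wr = os.pipe()
p = TinySelectPoller()
p.update({rd}, {wr})
os.write(wr, b'x')
r, w = p.poll(0)
assert rd in r and wr in w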
@@ -6481,65 +7077,6 @@ class IoManager:
                    pass


-########################################
-# ../signals.py
-
-
-class SignalHandler:
-    def __init__(
-            self,
-            *,
-            states: SupervisorStateManager,
-            signal_receiver: SignalReceiver,
-            process_groups: ProcessGroupManager,
-    ) -> None:
-        super().__init__()
-
-        self._states = states
-        self._signal_receiver = signal_receiver
-        self._process_groups = process_groups
-
-    def set_signals(self) -> None:
-        self._signal_receiver.install(
-            signal.SIGTERM,
-            signal.SIGINT,
-            signal.SIGQUIT,
-            signal.SIGHUP,
-            signal.SIGCHLD,
-            signal.SIGUSR2,
-        )
-
-    def handle_signals(self) -> None:
-        sig = self._signal_receiver.get_signal()
-        if not sig:
-            return
-
-        if sig in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
-            log.warning('received %s indicating exit request', sig_name(sig))
-            self._states.set_state(SupervisorState.SHUTDOWN)
-
-        elif sig == signal.SIGHUP:
-            if self._states.state == SupervisorState.SHUTDOWN:
-                log.warning('ignored %s indicating restart request (shutdown in progress)', sig_name(sig))  # noqa
-            else:
-                log.warning('received %s indicating restart request', sig_name(sig))  # noqa
-                self._states.set_state(SupervisorState.RESTARTING)
-
-        elif sig == signal.SIGCHLD:
-            log.debug('received %s indicating a child quit', sig_name(sig))
-
-        elif sig == signal.SIGUSR2:
-            log.info('received %s indicating log reopen request', sig_name(sig))
-
-            for p in self._process_groups.all_processes():
-                for d in p.get_dispatchers():
-                    if isinstance(d, ProcessOutputDispatcher):
-                        d.reopen_logs()
-
-        else:
-            log.debug('received %s indicating nothing', sig_name(sig))
-
-
 ########################################
 # ../spawning.py

@@ -6568,6 +7105,123 @@ class ProcessSpawning:
        raise NotImplementedError


+########################################
+# ../http.py
+
+
+##
+
+
+class SocketServerFdIoHandler(SocketFdIoHandler):
+    def __init__(
+            self,
+            addr: SocketAddress,
+            on_connect: ta.Callable[[socket.socket, SocketAddress], None],
+    ) -> None:
+        sock = socket.create_server(addr)
+        sock.setblocking(False)
+
+        super().__init__(addr, sock)
+
+        self._on_connect = on_connect
+
+        sock.listen(1)
+
+    def readable(self) -> bool:
+        return True
+
+    def on_readable(self) -> None:
+        cli_sock, cli_addr = check_not_none(self._sock).accept()
+        cli_sock.setblocking(False)
+
+        self._on_connect(cli_sock, cli_addr)
+
+
+##
+
+
+class HttpServer(HasDispatchers):
+    class Address(ta.NamedTuple):
+        a: SocketAddress
+
+    class Handler(ta.NamedTuple):
+        h: HttpHandler
+
+    def __init__(
+            self,
+            handler: Handler,
+            addr: Address = Address(('localhost', 8000)),
+    ) -> None:
+        super().__init__()
+
+        self._handler = handler.h
+        self._addr = addr.a
+
+        self._server = SocketServerFdIoHandler(self._addr, self._on_connect)
+
+        self._conns: ta.List[CoroHttpServerConnectionFdIoHandler] = []
+
+    def get_dispatchers(self) -> Dispatchers:
+        l = []
+        for c in self._conns:
+            if not c.closed:
+                l.append(c)
+        self._conns = l
+        return Dispatchers([
+            self._server,
+            *l,
+        ])
+
+    def _on_connect(self, sock: socket.socket, addr: SocketAddress) -> None:
+        conn = CoroHttpServerConnectionFdIoHandler(
+            addr,
+            sock,
+            self._handler,
+        )
+
+        self._conns.append(conn)
+
+
+##
+
+
+class SupervisorHttpHandler:
+    def __init__(
+            self,
+            *,
+            groups: ProcessGroupManager,
+    ) -> None:
+        super().__init__()
+
+        self._groups = groups
+
+    def handle(self, req: HttpHandlerRequest) -> HttpHandlerResponse:
+        dct = {
+            'method': req.method,
+            'path': req.path,
+            'data': len(req.data or b''),
+            'groups': {
+                g.name: {
+                    'processes': {
+                        p.name: {
+                            'pid': p.pid,
+                        }
+                        for p in g
+                    },
+                }
+                for g in self._groups
+            },
+        }
+
+        return HttpHandlerResponse(
+            200,
+            data=json.dumps(dct, **JSON_PRETTY_KWARGS).encode('utf-8') + b'\n',
+            headers={
+                'Content-Type': 'application/json',
+            },
+        )
+
+
 ########################################
 # ../processimpl.py

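The new HttpServer listens on localhost:8000 by default, and SupervisorHttpHandler answers every request with a JSON snapshot of groups and process pids. A hypothetical client-side sketch of reading that status (assumes the supervisor is running locally with the HTTP server enabled):

# Hypothetical usage sketch: querying the status endpoint the new HttpServer exposes (default localhost:8000).
import json
import urllib.request

with urllib.request.urlopen('http://localhost:8000/') as resp:  # assumes a locally running supervisor
    status = json.loads(resp.read().decode('utf-8'))

# 'groups' maps group name -> {'processes': {process name: {'pid': ...}}}
for group_name, group in status['groups'].items():
    for proc_name, proc in group['processes'].items():
        print(group_name, proc_name, proc['pid'])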
@@ -6619,7 +7273,7 @@ class ProcessImpl(Process):

        self._killing = False  # true if we are trying to kill this process

-        self._backoff = 0  # backoff counter (to
+        self._backoff = 0  # backoff counter (to start_retries)

        self._exitstatus: ta.Optional[Rc] = None  # status attached to dead process by finish()
        self._spawn_err: ta.Optional[str] = None  # error message attached by spawn() if any
@@ -6696,7 +7350,7 @@ class ProcessImpl(Process):
        self._pipes = sp.pipes
        self._dispatchers = sp.dispatchers

-        self._delay = time.time() + self.config.
+        self._delay = time.time() + self.config.start_secs

        return sp.pid

@@ -6754,17 +7408,17 @@ class ProcessImpl(Process):

        if self._state == ProcessState.STARTING:
            self._last_start = min(test_time, self._last_start)
-            if self._delay > 0 and test_time < (self._delay - self._config.
-                self._delay = test_time + self._config.
+            if self._delay > 0 and test_time < (self._delay - self._config.start_secs):
+                self._delay = test_time + self._config.start_secs

        elif self._state == ProcessState.RUNNING:
-            if test_time > self._last_start and test_time < (self._last_start + self._config.
-                self._last_start = test_time - self._config.
+            if test_time > self._last_start and test_time < (self._last_start + self._config.start_secs):
+                self._last_start = test_time - self._config.start_secs

        elif self._state == ProcessState.STOPPING:
            self._last_stop_report = min(test_time, self._last_stop_report)
-            if self._delay > 0 and test_time < (self._delay - self._config.
-                self._delay = test_time + self._config.
+            if self._delay > 0 and test_time < (self._delay - self._config.stop_wait_secs):
+                self._delay = test_time + self._config.stop_wait_secs

        elif self._state == ProcessState.BACKOFF:
            if self._delay > 0 and test_time < (self._delay - self._backoff):
@@ -6773,7 +7427,7 @@ class ProcessImpl(Process):
    def stop(self) -> ta.Optional[str]:
        self._administrative_stop = True
        self._last_stop_report = 0
-        return self.kill(self._config.
+        return self.kill(self._config.stop_signal)

    def stop_report(self) -> None:
        """Log a 'waiting for x to stop' message with throttling."""
@@ -6796,7 +7450,7 @@ class ProcessImpl(Process):

    def kill(self, sig: int) -> ta.Optional[str]:
        """
-        Send a signal to the subprocess with the intention to kill it (to make it exit).
+        Send a signal to the subprocess with the intention to kill it (to make it exit). This may or may not actually
        kill it.

        Return None if the signal was sent, or an error message string if an error occurred or if the subprocess is not
@@ -6804,8 +7458,8 @@ class ProcessImpl(Process):
        """
        now = time.time()

-        # If the process is in BACKOFF and we want to stop or kill it, then BACKOFF -> STOPPED.
-        # if
+        # If the process is in BACKOFF and we want to stop or kill it, then BACKOFF -> STOPPED. This is needed because
+        # if start_retries is a large number and the process isn't starting successfully, the stop request would be
        # blocked for a long time waiting for the retries.
        if self._state == ProcessState.BACKOFF:
            log.debug('Attempted to kill %s, which is in BACKOFF state.', self.name)
@@ -6820,25 +7474,25 @@ class ProcessImpl(Process):

        # If we're in the stopping state, then we've already sent the stop signal and this is the kill signal
        if self._state == ProcessState.STOPPING:
-
+            kill_as_group = self._config.kill_as_group
        else:
-
+            kill_as_group = self._config.stop_as_group

        as_group = ''
-        if
+        if kill_as_group:
            as_group = 'process group '

        log.debug('killing %s (pid %s) %s with signal %s', self.name, self.pid, as_group, sig_name(sig))

        # RUNNING/STARTING/STOPPING -> STOPPING
        self._killing = True
-        self._delay = now + self._config.
-        # we will already be in the STOPPING state if we're doing a SIGKILL as a result of overrunning
+        self._delay = now + self._config.stop_wait_secs
+        # we will already be in the STOPPING state if we're doing a SIGKILL as a result of overrunning stop_wait_secs
        self.check_in_state(ProcessState.RUNNING, ProcessState.STARTING, ProcessState.STOPPING)
        self.change_state(ProcessState.STOPPING)

        kpid = int(self.pid)
-        if
+        if kill_as_group:
            # send to the whole process group instead
            kpid = -kpid

@@ -6848,7 +7502,7 @@ class ProcessImpl(Process):
        except OSError as exc:
            if exc.errno == errno.ESRCH:
                log.debug('unable to signal %s (pid %s), it probably just exited on its own: %s', self.name, self.pid, str(exc))  # noqa
-                # we could change the state here but we intentionally do not.
+                # we could change the state here but we intentionally do not. we will do it during normal SIGCHLD
                # processing.
                return None
            raise
@@ -6891,7 +7545,7 @@ class ProcessImpl(Process):
                    self.pid,
                    str(exc),
                )
-                # we could change the state here but we intentionally do not.
+                # we could change the state here but we intentionally do not. we will do it during normal SIGCHLD
                # processing.
                return None
            raise
@@ -6918,7 +7572,8 @@ class ProcessImpl(Process):
        self._last_stop = now

        if now > self._last_start:
-
+            log.info(f'{now - self._last_start=}')  # noqa
+            too_quickly = now - self._last_start < self._config.start_secs
        else:
            too_quickly = False
            log.warning(
@@ -6987,13 +7642,11 @@ class ProcessImpl(Process):

        self._check_and_adjust_for_system_clock_rollback(now)

-        logger = log
-
        if self._supervisor_states.state > SupervisorState.RESTARTING:
            # dont start any processes if supervisor is shutting down
            if state == ProcessState.EXITED:
-                if self._config.
-                    if self._config.
+                if self._config.auto_restart:
+                    if self._config.auto_restart is RestartUnconditionally:
                        # EXITED -> STARTING
                        self.spawn()
                    elif self._exitstatus not in self._config.exitcodes:
@@ -7001,38 +7654,38 @@ class ProcessImpl(Process):
                        self.spawn()

            elif state == ProcessState.STOPPED and not self._last_start:
-                if self._config.
+                if self._config.auto_start:
                    # STOPPED -> STARTING
                    self.spawn()

            elif state == ProcessState.BACKOFF:
-                if self._backoff <= self._config.
+                if self._backoff <= self._config.start_retries:
                    if now > self._delay:
                        # BACKOFF -> STARTING
                        self.spawn()

        if state == ProcessState.STARTING:
-            if now - self._last_start > self._config.
+            if now - self._last_start > self._config.start_secs:
                # STARTING -> RUNNING if the proc has started successfully and it has stayed up for at least
-                # proc.config.
+                # proc.config.start_secs,
                self._delay = 0
                self._backoff = 0
                self.check_in_state(ProcessState.STARTING)
                self.change_state(ProcessState.RUNNING)
-                msg = ('entered RUNNING state, process has stayed up for > than %s seconds (
-
+                msg = ('entered RUNNING state, process has stayed up for > than %s seconds (start_secs)' % self._config.start_secs)  # noqa
+                log.info('success: %s %s', self.name, msg)

        if state == ProcessState.BACKOFF:
-            if self._backoff > self._config.
+            if self._backoff > self._config.start_retries:
                # BACKOFF -> FATAL if the proc has exceeded its number of retries
                self.give_up()
                msg = ('entered FATAL state, too many start retries too quickly')
-
+                log.info('gave up: %s %s', self.name, msg)

        elif state == ProcessState.STOPPING:
            time_left = self._delay - now
            if time_left <= 0:
-                # kill processes which are taking too long to stop with a final sigkill.
+                # kill processes which are taking too long to stop with a final sigkill. if this doesn't kill it, the
                # process will be stuck in the STOPPING state forever.
                log.warning('killing \'%s\' (%s) with SIGKILL', self.name, self.pid)
                self.kill(signal.SIGKILL)
@@ -7049,6 +7702,65 @@ class ProcessImpl(Process):
            pass


+########################################
+# ../signals.py
+
+
+class SignalHandler:
+    def __init__(
+            self,
+            *,
+            states: SupervisorStateManager,
+            signal_receiver: SignalReceiver,
+            process_groups: ProcessGroupManager,
+    ) -> None:
+        super().__init__()
+
+        self._states = states
+        self._signal_receiver = signal_receiver
+        self._process_groups = process_groups
+
+    def set_signals(self) -> None:
+        self._signal_receiver.install(
+            signal.SIGTERM,
+            signal.SIGINT,
+            signal.SIGQUIT,
+            signal.SIGHUP,
+            signal.SIGCHLD,
+            signal.SIGUSR2,
+        )
+
+    def handle_signals(self) -> None:
+        sig = self._signal_receiver.get_signal()
+        if not sig:
+            return
+
+        if sig in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
+            log.warning('received %s indicating exit request', sig_name(sig))
+            self._states.set_state(SupervisorState.SHUTDOWN)
+
+        elif sig == signal.SIGHUP:
+            if self._states.state == SupervisorState.SHUTDOWN:
+                log.warning('ignored %s indicating restart request (shutdown in progress)', sig_name(sig))  # noqa
+            else:
+                log.warning('received %s indicating restart request', sig_name(sig))  # noqa
+                self._states.set_state(SupervisorState.RESTARTING)
+
+        elif sig == signal.SIGCHLD:
+            log.debug('received %s indicating a child quit', sig_name(sig))
+
+        elif sig == signal.SIGUSR2:
+            log.info('received %s indicating log reopen request', sig_name(sig))
+
+            for p in self._process_groups.all_processes():
+                for d in p.get_dispatchers():
+                    if isinstance(d, ProcessOutputDispatcher):
+                        d.reopen_logs()
+
+        else:
+            log.debug('received %s indicating nothing', sig_name(sig))
+
+
 ########################################
 # ../spawningimpl.py

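SignalHandler (moved here otherwise unchanged) relies on the deferred-signal pattern: the OS-level handler only records the signal, and handle_signals() acts on it from the main loop. A simplified sketch of that pattern (illustrative, not the SignalReceiver implementation):

# Sketch of the deferred-signal pattern: the OS handler only queues, the main loop drains the queue.
import os
import signal
import typing as ta


class TinySignalReceiver:
    def __init__(self) -> None:
        self._pending: ta.List[int] = []

    def install(self, *sigs: int) -> None:
        for s in sigs:
            signal.signal(s, self._handle)

    def _handle(self, sig: int, frame: ta.Any) -> None:
        # keep the handler trivial: just record the signal, act on it later in the main loop
        self._pending.append(sig)

    def get_signal(self) -> ta.Optional[int]:
        return self._pending.pop(0) if self._pending else None


# Usage (Unix only):
rcv = TinySignalReceiver()
rcv.install(signal.SIGUSR2)
os.kill(os.getpid(), signal.SIGUSR2)
assert rcv.get_signal() == signal.SIGUSR2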
@@ -7209,7 +7921,7 @@ class ProcessSpawningImpl(ProcessSpawning):
        return exe, args

    def _make_dispatchers(self, pipes: ProcessPipes) -> Dispatchers:
-        dispatchers: ta.List[
+        dispatchers: ta.List[FdIoHandler] = []

        if pipes.stdout is not None:
            dispatchers.append(check_isinstance(self._output_dispatcher_factory(
@@ -7325,7 +8037,7 @@ class ProcessSpawningImpl(ProcessSpawning):
        else:
            os.dup2(check_not_none(pipes.child_stderr), 2)

-        for i in range(3, self._server_config.
+        for i in range(3, self._server_config.min_fds):
            if i in self._inherited_fds:
                continue
            close_fd(Fd(i))
@@ -7399,7 +8111,7 @@ class Supervisor:
            self,
            *,
            config: ServerConfig,
-            poller:
+            poller: FdIoPoller,
            process_groups: ProcessGroupManager,
            signal_handler: SignalHandler,
            event_callbacks: EventCallbacks,
@@ -7515,7 +8227,9 @@ class Supervisor:
    #

    def _run_once(self) -> None:
+        now = time.time()
        self._poll()
+        log.info(f'Poll took {time.time() - now}')  # noqa
        self._reap()
        self._signal_handler.handle_signals()
        self._tick()
@@ -7567,6 +8281,7 @@ class Supervisor:
            return

        wp = waitpid()
+        log.info(f'Waited pid: {wp}')  # noqa
        if wp is None or not wp.pid:
            return

@@ -7612,7 +8327,7 @@ class WaitedPid(ta.NamedTuple):


 def waitpid() -> ta.Optional[WaitedPid]:
-    # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4.
+    # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
    # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
    # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
    # normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
@@ -7635,6 +8350,17 @@ def waitpid() -> ta.Optional[WaitedPid]:
 # ../inject.py


+@dc.dataclass(frozen=True)
+class _FdIoPollerDaemonizeListener(DaemonizeListener):
+    _poller: FdIoPoller
+
+    def before_daemonize(self) -> None:
+        self._poller.close()
+
+    def after_daemonize(self) -> None:
+        self._poller.reopen()
+
+
 def bind_server(
     config: ServerConfig,
     *,
@@ -7644,22 +8370,24 @@ def bind_server(
    lst: ta.List[InjectorBindingOrBindings] = [
        inj.bind(config),

+        inj.bind_array(DaemonizeListener),
        inj.bind_array_type(DaemonizeListener, DaemonizeListeners),

        inj.bind(SupervisorSetupImpl, singleton=True),
        inj.bind(SupervisorSetup, to_key=SupervisorSetupImpl),

-        inj.bind(DaemonizeListener, array=True, to_key=Poller),
-
        inj.bind(EventCallbacks, singleton=True),

        inj.bind(SignalReceiver, singleton=True),

        inj.bind(IoManager, singleton=True),
+        inj.bind_array(HasDispatchers),
+        inj.bind_array_type(HasDispatchers, HasDispatchersList),

        inj.bind(SignalHandler, singleton=True),

        inj.bind(ProcessGroupManager, singleton=True),
+        inj.bind(HasDispatchers, array=True, to_key=ProcessGroupManager),

        inj.bind(Supervisor, singleton=True),

@@ -7692,7 +8420,26 @@ def bind_server(

    #

-
+    poller_impl = next(filter(None, [
+        KqueueFdIoPoller,
+        PollFdIoPoller,
+        SelectFdIoPoller,
+    ]))
+    lst.append(inj.bind(poller_impl, key=FdIoPoller, singleton=True))
+    inj.bind(_FdIoPollerDaemonizeListener, array=True, singleton=True)
+
+    #
+
+    def _provide_http_handler(s: SupervisorHttpHandler) -> HttpServer.Handler:
+        return HttpServer.Handler(s.handle)
+
+    lst.extend([
+        inj.bind(HttpServer, singleton=True, eager=True),
+        inj.bind(HasDispatchers, array=True, to_key=HttpServer),
+
+        inj.bind(SupervisorHttpHandler, singleton=True),
+        inj.bind(_provide_http_handler),
+    ])

    #

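The poller binding above picks the first non-None entry from [KqueueFdIoPoller, PollFdIoPoller, SelectFdIoPoller]; presumably the platform-specific classes are defined as None where the underlying select facility is missing. A tiny sketch of that selection idiom, using stand-in objects rather than the real poller classes:

# Sketch of the "first available implementation" selection used for the FdIoPoller binding (illustrative).
import select

KqueueImpl = object() if hasattr(select, 'kqueue') else None  # stand-ins for the real poller classes
PollImpl = object() if hasattr(select, 'poll') else None
SelectImpl = object()

chosen = next(filter(None, [KqueueImpl, PollImpl, SelectImpl]))
assert chosen is not None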