ominfra-0.0.0.dev128-py3-none-any.whl → ominfra-0.0.0.dev130-py3-none-any.whl
- ominfra/deploy/_executor.py +39 -0
- ominfra/pyremote/_runcommands.py +39 -0
- ominfra/scripts/journald2aws.py +136 -0
- ominfra/scripts/supervisor.py +1732 -880
- ominfra/supervisor/dispatchers.py +10 -9
- ominfra/supervisor/dispatchersimpl.py +20 -18
- ominfra/supervisor/groups.py +16 -1
- ominfra/supervisor/groupsimpl.py +2 -2
- ominfra/supervisor/http.py +130 -0
- ominfra/supervisor/inject.py +61 -17
- ominfra/supervisor/io.py +97 -0
- ominfra/supervisor/main.py +5 -6
- ominfra/supervisor/processimpl.py +10 -18
- ominfra/supervisor/signals.py +66 -0
- ominfra/supervisor/spawningimpl.py +11 -11
- ominfra/supervisor/supervisor.py +70 -137
- ominfra/supervisor/types.py +21 -58
- {ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev130.dist-info}/METADATA +3 -3
- {ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev130.dist-info}/RECORD +23 -22
- ominfra/supervisor/context.py +0 -80
- ominfra/supervisor/poller.py +0 -240
- {ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev130.dist-info}/LICENSE +0 -0
- {ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev130.dist-info}/WHEEL +0 -0
- {ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev130.dist-info}/entry_points.txt +0 -0
- {ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev130.dist-info}/top_level.txt +0 -0
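The headline change in this release is that the old `ominfra/supervisor/poller.py` and `ominfra/supervisor/context.py` are removed, and the amalgamated `ominfra/scripts/supervisor.py` now bundles the new `omlish/lite/fdio` poller and handler classes instead. A minimal usage sketch of the new poller API follows, assuming the amalgamated module exposes `SelectFdIoPoller` at module level (the import path is an assumption, not part of this diff):

```python
import os

# Assumed import path: the amalgamated script shipped in this wheel.
from ominfra.scripts.supervisor import SelectFdIoPoller

# Watch the read end of a pipe.
r_fd, w_fd = os.pipe()
poller = SelectFdIoPoller()
poller.register_readable(r_fd)

os.write(w_fd, b'x')
res = poller.poll(1.0)   # FdIoPoller.PollResult with .r / .w fd lists
assert r_fd in res.r

poller.unregister_readable(r_fd)
os.close(r_fd)
os.close(w_fd)
```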
ominfra/scripts/supervisor.py
CHANGED
@@ -3,7 +3,7 @@
 # @omlish-lite
 # @omlish-script
 # @omlish-amalg-output ../supervisor/main.py
-# ruff: noqa: N802 UP006 UP007 UP012 UP036
+# ruff: noqa: N802 U006 UP006 UP007 UP012 UP036
 # Supervisor is licensed under the following license:
 #
 # A copyright notice accompanies this license document that identifies the copyright holders.
@@ -102,6 +102,9 @@ V = ta.TypeVar('V')
 # ../../../omlish/lite/cached.py
 T = ta.TypeVar('T')

+# ../../../omlish/lite/check.py
+SizedT = ta.TypeVar('SizedT', bound=ta.Sized)
+
 # ../../../omlish/lite/socket.py
 SocketAddress = ta.Any
 SocketHandlerFactory = ta.Callable[[SocketAddress, ta.BinaryIO, ta.BinaryIO], 'SocketHandler']
@@ -1494,11 +1497,257 @@ def check_not_equal(l: T, r: T) -> T:
     return l


+def check_is(l: T, r: T) -> T:
+    if l is not r:
+        raise ValueError(l, r)
+    return l
+
+
+def check_is_not(l: T, r: ta.Any) -> T:
+    if l is r:
+        raise ValueError(l, r)
+    return l
+
+
+def check_in(v: T, c: ta.Container[T]) -> T:
+    if v not in c:
+        raise ValueError(v, c)
+    return v
+
+
+def check_not_in(v: T, c: ta.Container[T]) -> T:
+    if v in c:
+        raise ValueError(v, c)
+    return v
+
+
 def check_single(vs: ta.Iterable[T]) -> T:
     [v] = vs
     return v


+def check_empty(v: SizedT) -> SizedT:
+    if len(v):
+        raise ValueError(v)
+    return v
+
+
+def check_non_empty(v: SizedT) -> SizedT:
+    if not len(v):
+        raise ValueError(v)
+    return v
+
+
+########################################
+# ../../../omlish/lite/fdio/pollers.py
+
+
+##
+
+
+class FdIoPoller(abc.ABC):
+    def __init__(self) -> None:
+        super().__init__()
+
+        self._readable: ta.Set[int] = set()
+        self._writable: ta.Set[int] = set()
+
+    #
+
+    def close(self) -> None:  # noqa
+        pass
+
+    def reopen(self) -> None:  # noqa
+        pass
+
+    #
+
+    @property
+    @ta.final
+    def readable(self) -> ta.AbstractSet[int]:
+        return self._readable
+
+    @property
+    @ta.final
+    def writable(self) -> ta.AbstractSet[int]:
+        return self._writable
+
+    #
+
+    @ta.final
+    def register_readable(self, fd: int) -> bool:
+        if fd in self._readable:
+            return False
+        self._readable.add(fd)
+        self._register_readable(fd)
+        return True
+
+    @ta.final
+    def register_writable(self, fd: int) -> bool:
+        if fd in self._writable:
+            return False
+        self._writable.add(fd)
+        self._register_writable(fd)
+        return True
+
+    @ta.final
+    def unregister_readable(self, fd: int) -> bool:
+        if fd not in self._readable:
+            return False
+        self._readable.discard(fd)
+        self._unregister_readable(fd)
+        return True
+
+    @ta.final
+    def unregister_writable(self, fd: int) -> bool:
+        if fd not in self._writable:
+            return False
+        self._writable.discard(fd)
+        self._unregister_writable(fd)
+        return True
+
+    #
+
+    def _register_readable(self, fd: int) -> None:  # noqa
+        pass
+
+    def _register_writable(self, fd: int) -> None:  # noqa
+        pass
+
+    def _unregister_readable(self, fd: int) -> None:  # noqa
+        pass
+
+    def _unregister_writable(self, fd: int) -> None:  # noqa
+        pass
+
+    #
+
+    def update(
+            self,
+            r: ta.AbstractSet[int],
+            w: ta.AbstractSet[int],
+    ) -> None:
+        for f in r - self._readable:
+            self.register_readable(f)
+        for f in w - self._writable:
+            self.register_writable(f)
+        for f in self._readable - r:
+            self.unregister_readable(f)
+        for f in self._writable - w:
+            self.unregister_writable(f)
+
+    #
+
+    @dc.dataclass(frozen=True)
+    class PollResult:
+        r: ta.Sequence[int] = ()
+        w: ta.Sequence[int] = ()
+
+        inv: ta.Sequence[int] = ()
+
+        msg: ta.Optional[str] = None
+        exc: ta.Optional[BaseException] = None
+
+    @abc.abstractmethod
+    def poll(self, timeout: ta.Optional[float]) -> PollResult:
+        raise NotImplementedError
+
+
+##
+
+
+class SelectFdIoPoller(FdIoPoller):
+    def poll(self, timeout: ta.Optional[float]) -> FdIoPoller.PollResult:
+        try:
+            r, w, x = select.select(
+                self._readable,
+                self._writable,
+                [],
+                timeout,
+            )
+
+        except OSError as exc:
+            if exc.errno == errno.EINTR:
+                return FdIoPoller.PollResult(msg='EINTR encountered in poll', exc=exc)
+            elif exc.errno == errno.EBADF:
+                return FdIoPoller.PollResult(msg='EBADF encountered in poll', exc=exc)
+            else:
+                raise
+
+        return FdIoPoller.PollResult(r, w)
+
+
+##
+
+
+PollFdIoPoller: ta.Optional[ta.Type[FdIoPoller]]
+if hasattr(select, 'poll'):
+
+    class _PollFdIoPoller(FdIoPoller):
+        def __init__(self) -> None:
+            super().__init__()
+
+            self._poller = select.poll()
+
+        #
+
+        _READ = select.POLLIN | select.POLLPRI | select.POLLHUP
+        _WRITE = select.POLLOUT
+
+        def _register_readable(self, fd: int) -> None:
+            self._update_registration(fd)
+
+        def _register_writable(self, fd: int) -> None:
+            self._update_registration(fd)
+
+        def _unregister_readable(self, fd: int) -> None:
+            self._update_registration(fd)
+
+        def _unregister_writable(self, fd: int) -> None:
+            self._update_registration(fd)
+
+        def _update_registration(self, fd: int) -> None:
+            r = fd in self._readable
+            w = fd in self._writable
+            if r or w:
+                self._poller.register(fd, (self._READ if r else 0) | (self._WRITE if w else 0))
+            else:
+                self._poller.unregister(fd)
+
+        #
+
+        def poll(self, timeout: ta.Optional[float]) -> FdIoPoller.PollResult:
+            polled: ta.List[ta.Tuple[int, int]]
+            try:
+                polled = self._poller.poll(timeout * 1000 if timeout is not None else None)
+
+            except OSError as exc:
+                if exc.errno == errno.EINTR:
+                    return FdIoPoller.PollResult(msg='EINTR encountered in poll', exc=exc)
+                else:
+                    raise
+
+            r: ta.List[int] = []
+            w: ta.List[int] = []
+            inv: ta.List[int] = []
+            for fd, mask in polled:
+                if mask & select.POLLNVAL:
+                    self._poller.unregister(fd)
+                    self._readable.discard(fd)
+                    self._writable.discard(fd)
+                    inv.append(fd)
+                    continue
+                if mask & self._READ:
+                    r.append(fd)
+                if mask & self._WRITE:
+                    w.append(fd)
+            return FdIoPoller.PollResult(r, w, inv=inv)
+
+    PollFdIoPoller = _PollFdIoPoller
+else:
+    PollFdIoPoller = None
+
+
 ########################################
 # ../../../omlish/lite/http/versions.py

@@ -1718,6 +1967,73 @@ class SocketHandler(abc.ABC):
         raise NotImplementedError


+########################################
+# ../../../omlish/lite/strings.py
+
+
+##
+
+
+def camel_case(name: str, lower: bool = False) -> str:
+    if not name:
+        return ''
+    s = ''.join(map(str.capitalize, name.split('_')))  # noqa
+    if lower:
+        s = s[0].lower() + s[1:]
+    return s
+
+
+def snake_case(name: str) -> str:
+    uppers: list[int | None] = [i for i, c in enumerate(name) if c.isupper()]
+    return '_'.join([name[l:r].lower() for l, r in zip([None, *uppers], [*uppers, None])]).strip('_')
+
+
+##
+
+
+def is_dunder(name: str) -> bool:
+    return (
+        name[:2] == name[-2:] == '__' and
+        name[2:3] != '_' and
+        name[-3:-2] != '_' and
+        len(name) > 4
+    )
+
+
+def is_sunder(name: str) -> bool:
+    return (
+        name[0] == name[-1] == '_' and
+        name[1:2] != '_' and
+        name[-2:-1] != '_' and
+        len(name) > 2
+    )
+
+
+##
+
+
+def attr_repr(obj: ta.Any, *attrs: str) -> str:
+    return f'{type(obj).__name__}({", ".join(f"{attr}={getattr(obj, attr)!r}" for attr in attrs)})'
+
+
+##
+
+
+FORMAT_NUM_BYTES_SUFFIXES: ta.Sequence[str] = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB']
+
+
+def format_num_bytes(num_bytes: int) -> str:
+    for i, suffix in enumerate(FORMAT_NUM_BYTES_SUFFIXES):
+        value = num_bytes / 1024 ** i
+        if num_bytes < 1024 ** (i + 1):
+            if value.is_integer():
+                return f'{int(value)}{suffix}'
+            else:
+                return f'{value:.2f}{suffix}'
+
+    return f'{num_bytes / 1024 ** (len(FORMAT_NUM_BYTES_SUFFIXES) - 1):.2f}{FORMAT_NUM_BYTES_SUFFIXES[-1]}'
+
+
 ########################################
 # ../../../omlish/lite/typing.py

@@ -2203,82 +2519,245 @@ def get_user(name: str) -> User:


 ########################################
-# ../../../omlish/lite/
-# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
-# --------------------------------------------
-#
-# 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization
-# ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated
-# documentation.
-#
-# 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive,
-# royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative
-# works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License
-# Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
-# 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights Reserved" are retained in Python
-# alone or in any derivative version prepared by Licensee.
-#
-# 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and
-# wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in
-# any such work a brief summary of the changes made to Python.
-#
-# 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES,
-# EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY
-# OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY
-# RIGHTS.
-#
-# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL
-# DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF
-# ADVISED OF THE POSSIBILITY THEREOF.
-#
-# 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
-#
-# 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint
-# venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade
-# name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
-#
-# 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this
-# License Agreement.
+# ../../../omlish/lite/fdio/handlers.py


-
+class FdIoHandler(abc.ABC):
+    @abc.abstractmethod
+    def fd(self) -> int:
+        raise NotImplementedError

+    #

-
-
-
-
-        'request_version',
-        'version',
-        'headers',
-        'close_connection',
-    )
+    @property
+    @abc.abstractmethod
+    def closed(self) -> bool:
+        raise NotImplementedError

-
-
-
-            server_version: HttpProtocolVersion,
-            request_line: str,
-            request_version: HttpProtocolVersion,
-            version: HttpProtocolVersion,
-            headers: ta.Optional[HttpHeaders],
-            close_connection: bool,
-    ) -> None:
-        super().__init__()
+    @abc.abstractmethod
+    def close(self) -> None:
+        raise NotImplementedError

-
-        self.request_line = request_line
-        self.request_version = request_version
-        self.version = version
-        self.headers = headers
-        self.close_connection = close_connection
+    #

-    def
-        return
+    def readable(self) -> bool:
+        return False

+    def writable(self) -> bool:
+        return False

-
-
+    #
+
+    def on_readable(self) -> None:
+        raise TypeError
+
+    def on_writable(self) -> None:
+        raise TypeError
+
+    def on_error(self, exc: ta.Optional[BaseException] = None) -> None:  # noqa
+        pass
+
+
+class SocketFdIoHandler(FdIoHandler, abc.ABC):
+    def __init__(
+            self,
+            addr: SocketAddress,
+            sock: socket.socket,
+    ) -> None:
+        super().__init__()
+
+        self._addr = addr
+        self._sock: ta.Optional[socket.socket] = sock
+
+    def fd(self) -> int:
+        return check_not_none(self._sock).fileno()
+
+    @property
+    def closed(self) -> bool:
+        return self._sock is None
+
+    def close(self) -> None:
+        if self._sock is not None:
+            self._sock.close()
+            self._sock = None
+
+
+########################################
+# ../../../omlish/lite/fdio/kqueue.py
+
+
+KqueueFdIoPoller: ta.Optional[ta.Type[FdIoPoller]]
+if sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
+
+    class _KqueueFdIoPoller(FdIoPoller):
+        DEFAULT_MAX_EVENTS = 1000
+
+        def __init__(
+                self,
+                *,
+                max_events: int = DEFAULT_MAX_EVENTS,
+        ) -> None:
+            super().__init__()
+
+            self._max_events = max_events
+
+            self._kqueue: ta.Optional[ta.Any] = None
+
+        #
+
+        def _get_kqueue(self) -> 'select.kqueue':
+            if (kq := self._kqueue) is not None:
+                return kq
+            kq = select.kqueue()
+            self._kqueue = kq
+            return kq
+
+        def close(self) -> None:
+            if self._kqueue is not None:
+                self._kqueue.close()
+                self._kqueue = None
+
+        def reopen(self) -> None:
+            for fd in self._readable:
+                self._register_readable(fd)
+            for fd in self._writable:
+                self._register_writable(fd)
+
+        #
+
+        def _register_readable(self, fd: int) -> None:
+            self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_ADD)
+
+        def _register_writable(self, fd: int) -> None:
+            self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD)
+
+        def _unregister_readable(self, fd: int) -> None:
+            self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE)
+
+        def _unregister_writable(self, fd: int) -> None:
+            self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)
+
+        def _control(self, fd: int, filter: int, flags: int) -> None:  # noqa
+            ke = select.kevent(fd, filter=filter, flags=flags)
+            kq = self._get_kqueue()
+            try:
+                kq.control([ke], 0)
+
+            except OSError as exc:
+                if exc.errno == errno.EBADF:
+                    # log.debug('EBADF encountered in kqueue. Invalid file descriptor %s', ke.ident)
+                    pass
+                elif exc.errno == errno.ENOENT:
+                    # Can happen when trying to remove an already closed socket
+                    pass
+                else:
+                    raise
+
+        #
+
+        def poll(self, timeout: ta.Optional[float]) -> FdIoPoller.PollResult:
+            kq = self._get_kqueue()
+            try:
+                kes = kq.control(None, self._max_events, timeout)
+
+            except OSError as exc:
+                if exc.errno == errno.EINTR:
+                    return FdIoPoller.PollResult(msg='EINTR encountered in poll', exc=exc)
+                else:
+                    raise
+
+            r: ta.List[int] = []
+            w: ta.List[int] = []
+            for ke in kes:
+                if ke.filter == select.KQ_FILTER_READ:
+                    r.append(ke.ident)
+                if ke.filter == select.KQ_FILTER_WRITE:
+                    w.append(ke.ident)
+
+            return FdIoPoller.PollResult(r, w)
+
+    KqueueFdIoPoller = _KqueueFdIoPoller
+else:
+    KqueueFdIoPoller = None
+
+
+########################################
+# ../../../omlish/lite/http/parsing.py
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and the Individual or Organization
+# ("Licensee") accessing and otherwise using this software ("Python") in source or binary form and its associated
+# documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive,
+# royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative
+# works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License
+# Agreement and PSF's notice of copyright, i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
+# 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software Foundation; All Rights Reserved" are retained in Python
+# alone or in any derivative version prepared by Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and
+# wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in
+# any such work a brief summary of the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS" basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES,
+# EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY
+# OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT INFRINGE ANY THIRD PARTY
+# RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL
+# DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, OR ANY DERIVATIVE THEREOF, EVEN IF
+# ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint
+# venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade
+# name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this
+# License Agreement.
+
+
+##
+
+
+class ParseHttpRequestResult(abc.ABC):  # noqa
+    __slots__ = (
+        'server_version',
+        'request_line',
+        'request_version',
+        'version',
+        'headers',
+        'close_connection',
+    )
+
+    def __init__(
+            self,
+            *,
+            server_version: HttpProtocolVersion,
+            request_line: str,
+            request_version: HttpProtocolVersion,
+            version: HttpProtocolVersion,
+            headers: ta.Optional[HttpHeaders],
+            close_connection: bool,
+    ) -> None:
+        super().__init__()
+
+        self.server_version = server_version
+        self.request_line = request_line
+        self.request_version = request_version
+        self.version = version
+        self.headers = headers
+        self.close_connection = close_connection
+
+    def __repr__(self) -> str:
+        return f'{self.__class__.__name__}({", ".join(f"{a}={getattr(self, a)!r}" for a in self.__slots__)})'
+
+
+class EmptyParsedHttpResult(ParseHttpRequestResult):
+    pass


 class ParseHttpRequestError(ParseHttpRequestResult):
@@ -2703,7 +3182,7 @@ class InjectorError(Exception):
     pass


-@dc.dataclass(
+@dc.dataclass()
 class InjectorKeyError(InjectorError):
     key: InjectorKey

@@ -2711,16 +3190,18 @@ class InjectorKeyError(InjectorError):
     name: ta.Optional[str] = None


-@dc.dataclass(frozen=True)
 class UnboundInjectorKeyError(InjectorKeyError):
     pass


-@dc.dataclass(frozen=True)
 class DuplicateInjectorKeyError(InjectorKeyError):
     pass


+class CyclicDependencyInjectorKeyError(InjectorKeyError):
+    pass
+
+
 ###
 # keys

@@ -2894,7 +3375,11 @@ def build_injector_provider_map(bs: InjectorBindings) -> ta.Mapping[InjectorKey,

     for b in bs.bindings():
         if b.key.array:
-            am.setdefault(b.key, [])
+            al = am.setdefault(b.key, [])
+            if isinstance(b.provider, ArrayInjectorProvider):
+                al.extend(b.provider.ps)
+            else:
+                al.append(b.provider)
         else:
             if b.key in pm:
                 raise KeyError(b.key)
@@ -3042,6 +3527,14 @@ def build_injection_kwargs_target(
 _INJECTOR_INJECTOR_KEY: InjectorKey[Injector] = InjectorKey(Injector)


+@dc.dataclass(frozen=True)
+class _InjectorEager:
+    key: InjectorKey
+
+
+_INJECTOR_EAGER_ARRAY_KEY: InjectorKey[_InjectorEager] = InjectorKey(_InjectorEager, array=True)
+
+
 class _Injector(Injector):
     def __init__(self, bs: InjectorBindings, p: ta.Optional[Injector] = None) -> None:
         super().__init__()
@@ -3054,22 +3547,69 @@ class _Injector(Injector):
         if _INJECTOR_INJECTOR_KEY in self._pfm:
             raise DuplicateInjectorKeyError(_INJECTOR_INJECTOR_KEY)

+        self.__cur_req: ta.Optional[_Injector._Request] = None
+
+        if _INJECTOR_EAGER_ARRAY_KEY in self._pfm:
+            for e in self.provide(_INJECTOR_EAGER_ARRAY_KEY):
+                self.provide(e.key)
+
+    class _Request:
+        def __init__(self, injector: '_Injector') -> None:
+            super().__init__()
+            self._injector = injector
+            self._provisions: ta.Dict[InjectorKey, Maybe] = {}
+            self._seen_keys: ta.Set[InjectorKey] = set()
+
+        def handle_key(self, key: InjectorKey) -> Maybe[Maybe]:
+            try:
+                return Maybe.just(self._provisions[key])
+            except KeyError:
+                pass
+            if key in self._seen_keys:
+                raise CyclicDependencyInjectorKeyError(key)
+            self._seen_keys.add(key)
+            return Maybe.empty()
+
+        def handle_provision(self, key: InjectorKey, mv: Maybe) -> Maybe:
+            check_in(key, self._seen_keys)
+            check_not_in(key, self._provisions)
+            self._provisions[key] = mv
+            return mv
+
+    @contextlib.contextmanager
+    def _current_request(self) -> ta.Generator[_Request, None, None]:
+        if (cr := self.__cur_req) is not None:
+            yield cr
+            return
+
+        cr = self._Request(self)
+        try:
+            self.__cur_req = cr
+            yield cr
+        finally:
+            self.__cur_req = None
+
     def try_provide(self, key: ta.Any) -> Maybe[ta.Any]:
         key = as_injector_key(key)

-
-
+        cr: _Injector._Request
+        with self._current_request() as cr:
+            if (rv := cr.handle_key(key)).present:
+                return rv.must()

-
-
-            return Maybe.just(fn(self))
+            if key == _INJECTOR_INJECTOR_KEY:
+                return cr.handle_provision(key, Maybe.just(self))

-
-
-
-        return Maybe.empty()
+            fn = self._pfm.get(key)
+            if fn is not None:
+                return cr.handle_provision(key, Maybe.just(fn(self)))

-
+            if self._p is not None:
+                pv = self._p.try_provide(key)
+                if pv is not None:
+                    return cr.handle_provision(key, Maybe.empty())
+
+            return cr.handle_provision(key, Maybe.empty())

     def provide(self, key: ta.Any) -> ta.Any:
         v = self.try_provide(key)
@@ -3170,6 +3710,8 @@ class InjectorBinder:
             to_key: ta.Any = None,

             singleton: bool = False,
+
+            eager: bool = False,
     ) -> InjectorBindingOrBindings:
         if obj is None or obj is inspect.Parameter.empty:
             raise TypeError(obj)
@@ -3243,13 +3785,21 @@ class InjectorBinder:
         if singleton:
             provider = SingletonInjectorProvider(provider)

+        binding = InjectorBinding(key, provider)
+
         ##

-
+        extras: ta.List[InjectorBinding] = []
+
+        if eager:
+            extras.append(bind_injector_eager_key(key))

         ##

-
+        if extras:
+            return as_injector_bindings(binding, *extras)
+        else:
+            return binding


 ###
@@ -3272,6 +3822,26 @@ def make_injector_factory(
     return outer


+def bind_injector_array(
+        obj: ta.Any = None,
+        *,
+        tag: ta.Any = None,
+) -> InjectorBindingOrBindings:
+    key = as_injector_key(obj)
+    if tag is not None:
+        if key.tag is not None:
+            raise ValueError('Must not specify multiple tags')
+        key = dc.replace(key, tag=tag)
+
+    if key.array:
+        raise ValueError('Key must not be array')
+
+    return InjectorBinding(
+        dc.replace(key, array=True),
+        ArrayInjectorProvider([]),
+    )
+
+
 def make_injector_array_type(
         ele: ta.Union[InjectorKey, InjectorKeyCls],
         cls: U,
|
|
3293
3863
|
return inner
|
3294
3864
|
|
3295
3865
|
|
3866
|
+
def bind_injector_eager_key(key: ta.Any) -> InjectorBinding:
|
3867
|
+
return InjectorBinding(_INJECTOR_EAGER_ARRAY_KEY, ConstInjectorProvider(_InjectorEager(as_injector_key(key))))
|
3868
|
+
|
3869
|
+
|
3296
3870
|
##
|
3297
3871
|
|
3298
3872
|
|
@@ -3347,6 +3921,8 @@ class Injection:
             to_key: ta.Any = None,

             singleton: bool = False,
+
+            eager: bool = False,
     ) -> InjectorBindingOrBindings:
         return InjectorBinder.bind(
             obj,
@@ -3361,6 +3937,8 @@ class Injection:
             to_key=to_key,

             singleton=singleton,
+
+            eager=eager,
         )

     # helpers
@@ -3374,6 +3952,15 @@ class Injection:
     ) -> InjectorBindingOrBindings:
         return cls.bind(make_injector_factory(fn, cls_, ann))

+    @classmethod
+    def bind_array(
+            cls,
+            obj: ta.Any = None,
+            *,
+            tag: ta.Any = None,
+    ) -> InjectorBindingOrBindings:
+        return bind_injector_array(obj, tag=tag)
+
     @classmethod
     def bind_array_type(
             cls,
@@ -3388,81 +3975,303 @@ inj = Injection


 ########################################
-# ../../../omlish/lite/
+# ../../../omlish/lite/io.py


-
+class DelimitingBuffer:
+    """
+    https://github.com/python-trio/trio/issues/796 :|
+    """

+    #

-class
-
+    class Error(Exception):
+        def __init__(self, buffer: 'DelimitingBuffer') -> None:
+            super().__init__(buffer)
+            self.buffer = buffer

+        def __repr__(self) -> str:
+            return attr_repr(self, 'buffer')

-
-
-        ('iov_len', ct.c_size_t),  # Length of data.
-    ]
+    class ClosedError(Error):
+        pass

+    #

-
+    DEFAULT_DELIMITERS: bytes = b'\n'

+    def __init__(
+            self,
+            delimiters: ta.Iterable[int] = DEFAULT_DELIMITERS,
+            *,
+            keep_ends: bool = False,
+            max_size: ta.Optional[int] = None,
+    ) -> None:
+        super().__init__()

-
-
-
+        self._delimiters = frozenset(check_isinstance(d, int) for d in delimiters)
+        self._keep_ends = keep_ends
+        self._max_size = max_size

-
-    lib.sd_journal_sendv.argtypes = [ct.POINTER(sd_iovec), ct.c_int]
+        self._buf: ta.Optional[io.BytesIO] = io.BytesIO()

-
+    #

+    @property
+    def is_closed(self) -> bool:
+        return self._buf is None
+
+    def tell(self) -> int:
+        if (buf := self._buf) is None:
+            raise self.ClosedError(self)
+        return buf.tell()
+
+    def peek(self) -> bytes:
+        if (buf := self._buf) is None:
+            raise self.ClosedError(self)
+        return buf.getvalue()
+
+    def _find_delim(self, data: ta.Union[bytes, bytearray], i: int) -> ta.Optional[int]:
+        r = None  # type: int | None
+        for d in self._delimiters:
+            if (p := data.find(d, i)) >= 0:
+                if r is None or p < r:
+                    r = p
+        return r
+
+    def _append_and_reset(self, chunk: bytes) -> bytes:
+        buf = check_not_none(self._buf)
+        if not buf.tell():
+            return chunk
+
+        buf.write(chunk)
+        ret = buf.getvalue()
+        buf.seek(0)
+        buf.truncate()
+        return ret

-
-
-    try:
-        return sd_libsystemd()
-    except OSError:  # noqa
-        return None
+    class Incomplete(ta.NamedTuple):
+        b: bytes

+    def feed(self, data: ta.Union[bytes, bytearray]) -> ta.Generator[ta.Union[bytes, Incomplete], None, None]:
+        if (buf := self._buf) is None:
+            raise self.ClosedError(self)

-
+        if not data:
+            self._buf = None

+            if buf.tell():
+                yield self.Incomplete(buf.getvalue())

-
-    lib = sd_libsystemd()
+            return

-
-
-
-
+        l = len(data)
+        i = 0
+        while i < l:
+            if (p := self._find_delim(data, i)) is None:
+                break

-
-
-
-        vec[i].iov_base = ct.cast(ct.c_char_p(msgs[i]), ct.c_void_p)
-        vec[i].iov_len = len(msgs[i]) - 1
+            n = p + 1
+            if self._keep_ends:
+                p = n

-
+            yield self._append_and_reset(data[i:p])

+            i = n

-
+        if i >= l:
+            return

+        if self._max_size is None:
+            buf.write(data[i:])
+            return

-
-
-
-    logging.CRITICAL: syslog.LOG_CRIT,
-    logging.ERROR: syslog.LOG_ERR,
-    logging.WARNING: syslog.LOG_WARNING,
-    # LOG_NOTICE ?  # normal but significant condition
-    logging.INFO: syslog.LOG_INFO,
-    logging.DEBUG: syslog.LOG_DEBUG,
-}
+        while i < l:
+            remaining_data_len = l - i
+            remaining_buf_capacity = self._max_size - buf.tell()

+            if remaining_data_len < remaining_buf_capacity:
+                buf.write(data[i:])
+                return

-
-
-
+            p = i + remaining_buf_capacity
+            yield self.Incomplete(self._append_and_reset(data[i:p]))
+            i = p
+
+
+class ReadableListBuffer:
+    def __init__(self) -> None:
+        super().__init__()
+        self._lst: list[bytes] = []
+
+    def feed(self, d: bytes) -> None:
+        if d:
+            self._lst.append(d)
+
+    def _chop(self, i: int, e: int) -> bytes:
+        lst = self._lst
+        d = lst[i]
+
+        o = b''.join([
+            *lst[:i],
+            d[:e],
+        ])
+
+        self._lst = [
+            *([d[e:]] if e < len(d) else []),
+            *lst[i + 1:],
+        ]
+
+        return o
+
+    def read(self, n: ta.Optional[int] = None) -> ta.Optional[bytes]:
+        if n is None:
+            o = b''.join(self._lst)
+            self._lst = []
+            return o
+
+        if not (lst := self._lst):
+            return None
+
+        c = 0
+        for i, d in enumerate(lst):
+            r = n - c
+            if (l := len(d)) >= r:
+                return self._chop(i, r)
+            c += l
+
+        return None
+
+    def read_until(self, delim: bytes = b'\n') -> ta.Optional[bytes]:
+        if not (lst := self._lst):
+            return None
+
+        for i, d in enumerate(lst):
+            if (p := d.find(delim)) >= 0:
+                return self._chop(i, p + len(delim))
+
+        return None
+
+
+class IncrementalWriteBuffer:
+    def __init__(
+            self,
+            data: bytes,
+            *,
+            write_size: int = 0x10000,
+    ) -> None:
+        super().__init__()
+
+        check_non_empty(data)
+        self._len = len(data)
+        self._write_size = write_size
+
+        self._lst = [
+            data[i:i + write_size]
+            for i in range(0, len(data), write_size)
+        ]
+        self._pos = 0
+
+    @property
+    def rem(self) -> int:
+        return self._len - self._pos
+
+    def write(self, fn: ta.Callable[[bytes], int]) -> int:
+        lst = check_non_empty(self._lst)
+
+        t = 0
+        for i, d in enumerate(lst):  # noqa
+            n = fn(check_non_empty(d))
+            if not n:
+                break
+            t += n
+
+        if t:
+            self._lst = [
+                *([d[n:]] if n < len(d) else []),
+                *lst[i + 1:],
+            ]
+            self._pos += t
+
+        return t
+
+
+########################################
+# ../../../omlish/lite/journald.py
+
+
+##
+
+
+class sd_iovec(ct.Structure):  # noqa
+    pass
+
+
+sd_iovec._fields_ = [
+    ('iov_base', ct.c_void_p),  # Pointer to data.
+    ('iov_len', ct.c_size_t),  # Length of data.
+]
+
+
+##
+
+
+@cached_nullary
+def sd_libsystemd() -> ta.Any:
+    lib = ct.CDLL('libsystemd.so.0')
+
+    lib.sd_journal_sendv.restype = ct.c_int
+    lib.sd_journal_sendv.argtypes = [ct.POINTER(sd_iovec), ct.c_int]
+
+    return lib
+
+
+@cached_nullary
+def sd_try_libsystemd() -> ta.Optional[ta.Any]:
+    try:
+        return sd_libsystemd()
+    except OSError:  # noqa
+        return None
+
+
+##
+
+
+def sd_journald_send(**fields: str) -> int:
+    lib = sd_libsystemd()
+
+    msgs = [
+        f'{k.upper()}={v}\0'.encode('utf-8')
+        for k, v in fields.items()
+    ]
+
+    vec = (sd_iovec * len(msgs))()
+    cl = (ct.c_char_p * len(msgs))()  # noqa
+    for i in range(len(msgs)):
+        vec[i].iov_base = ct.cast(ct.c_char_p(msgs[i]), ct.c_void_p)
+        vec[i].iov_len = len(msgs[i]) - 1
+
+    return lib.sd_journal_sendv(vec, len(msgs))
+
+
+##
+
+
+SD_LOG_LEVEL_MAP: ta.Mapping[int, int] = {
+    logging.FATAL: syslog.LOG_EMERG,  # system is unusable
+    # LOG_ALERT ?  # action must be taken immediately
+    logging.CRITICAL: syslog.LOG_CRIT,
+    logging.ERROR: syslog.LOG_ERR,
+    logging.WARNING: syslog.LOG_WARNING,
+    # LOG_NOTICE ?  # normal but significant condition
+    logging.INFO: syslog.LOG_INFO,
+    logging.DEBUG: syslog.LOG_DEBUG,
+}
+
+
+class JournaldLogHandler(logging.Handler):
+    """
+    TODO:
     - fallback handler for when this barfs
     """

@@ -4158,6 +4967,23 @@ def unmarshal_obj(o: ta.Any, ty: ta.Union[ta.Type[T], ta.Any]) -> T:
     return get_obj_marshaler(ty).unmarshal(o)


+########################################
+# ../../../omlish/lite/runtime.py
+
+
+@cached_nullary
+def is_debugger_attached() -> bool:
+    return any(frame[1].endswith('pydevd.py') for frame in inspect.stack())
+
+
+REQUIRED_PYTHON_VERSION = (3, 8)
+
+
+def check_runtime_version() -> None:
+    if sys.version_info < REQUIRED_PYTHON_VERSION:
+        raise OSError(f'Requires python {REQUIRED_PYTHON_VERSION}, got {sys.version_info} from {sys.executable}')  # noqa
+
+
 ########################################
 # ../../configs.py

|
@@ -4510,239 +5336,6 @@ def parse_logging_level(value: ta.Union[str, int]) -> int:
|
|
4510
5336
|
return level
|
4511
5337
|
|
4512
5338
|
|
4513
|
-
########################################
|
4514
|
-
# ../poller.py
|
4515
|
-
|
4516
|
-
|
4517
|
-
class Poller(DaemonizeListener, abc.ABC):
|
4518
|
-
def __init__(self) -> None:
|
4519
|
-
super().__init__()
|
4520
|
-
|
4521
|
-
@abc.abstractmethod
|
4522
|
-
def register_readable(self, fd: Fd) -> None:
|
4523
|
-
raise NotImplementedError
|
4524
|
-
|
4525
|
-
@abc.abstractmethod
|
4526
|
-
def register_writable(self, fd: Fd) -> None:
|
4527
|
-
raise NotImplementedError
|
4528
|
-
|
4529
|
-
@abc.abstractmethod
|
4530
|
-
def unregister_readable(self, fd: Fd) -> None:
|
4531
|
-
raise NotImplementedError
|
4532
|
-
|
4533
|
-
@abc.abstractmethod
|
4534
|
-
def unregister_writable(self, fd: Fd) -> None:
|
4535
|
-
raise NotImplementedError
|
4536
|
-
|
4537
|
-
@abc.abstractmethod
|
4538
|
-
def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
|
4539
|
-
raise NotImplementedError
|
4540
|
-
|
4541
|
-
def before_daemonize(self) -> None: # noqa
|
4542
|
-
pass
|
4543
|
-
|
4544
|
-
def after_daemonize(self) -> None: # noqa
|
4545
|
-
pass
|
4546
|
-
|
4547
|
-
def close(self) -> None: # noqa
|
4548
|
-
pass
|
4549
|
-
|
4550
|
-
|
4551
|
-
class SelectPoller(Poller):
|
4552
|
-
def __init__(self) -> None:
|
4553
|
-
super().__init__()
|
4554
|
-
|
4555
|
-
self._readable: ta.Set[Fd] = set()
|
4556
|
-
self._writable: ta.Set[Fd] = set()
|
4557
|
-
|
4558
|
-
def register_readable(self, fd: Fd) -> None:
|
4559
|
-
self._readable.add(fd)
|
4560
|
-
|
4561
|
-
def register_writable(self, fd: Fd) -> None:
|
4562
|
-
self._writable.add(fd)
|
4563
|
-
|
4564
|
-
def unregister_readable(self, fd: Fd) -> None:
|
4565
|
-
self._readable.discard(fd)
|
4566
|
-
|
4567
|
-
def unregister_writable(self, fd: Fd) -> None:
|
4568
|
-
self._writable.discard(fd)
|
4569
|
-
|
4570
|
-
def unregister_all(self) -> None:
|
4571
|
-
self._readable.clear()
|
4572
|
-
self._writable.clear()
|
4573
|
-
|
4574
|
-
def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
|
4575
|
-
try:
|
4576
|
-
r, w, x = select.select(
|
4577
|
-
self._readable,
|
4578
|
-
self._writable,
|
4579
|
-
[], timeout,
|
4580
|
-
)
|
4581
|
-
except OSError as exc:
|
4582
|
-
if exc.args[0] == errno.EINTR:
|
4583
|
-
log.debug('EINTR encountered in poll')
|
4584
|
-
return [], []
|
4585
|
-
if exc.args[0] == errno.EBADF:
|
4586
|
-
log.debug('EBADF encountered in poll')
|
4587
|
-
self.unregister_all()
|
4588
|
-
return [], []
|
4589
|
-
raise
|
4590
|
-
return r, w
|
4591
|
-
|
4592
|
-
|
4593
|
-
class PollPoller(Poller):
|
4594
|
-
_READ = select.POLLIN | select.POLLPRI | select.POLLHUP
|
4595
|
-
_WRITE = select.POLLOUT
|
4596
|
-
|
4597
|
-
def __init__(self) -> None:
|
4598
|
-
super().__init__()
|
4599
|
-
|
4600
|
-
self._poller = select.poll()
|
4601
|
-
self._readable: set[Fd] = set()
|
4602
|
-
self._writable: set[Fd] = set()
|
4603
|
-
|
4604
|
-
def register_readable(self, fd: Fd) -> None:
|
4605
|
-
self._poller.register(fd, self._READ)
|
4606
|
-
self._readable.add(fd)
|
4607
|
-
|
4608
|
-
def register_writable(self, fd: Fd) -> None:
|
4609
|
-
self._poller.register(fd, self._WRITE)
|
4610
|
-
self._writable.add(fd)
|
4611
|
-
|
4612
|
-
def unregister_readable(self, fd: Fd) -> None:
|
4613
|
-
self._readable.discard(fd)
|
4614
|
-
self._poller.unregister(fd)
|
4615
|
-
if fd in self._writable:
|
4616
|
-
self._poller.register(fd, self._WRITE)
|
4617
|
-
|
4618
|
-
def unregister_writable(self, fd: Fd) -> None:
|
4619
|
-
self._writable.discard(fd)
|
4620
|
-
self._poller.unregister(fd)
|
4621
|
-
if fd in self._readable:
|
4622
|
-
self._poller.register(fd, self._READ)
|
4623
|
-
|
4624
|
-
def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
|
4625
|
-
fds = self._poll_fds(timeout) # type: ignore
|
4626
|
-
readable, writable = [], []
|
4627
|
-
for fd, eventmask in fds:
|
4628
|
-
if self._ignore_invalid(fd, eventmask):
|
4629
|
-
continue
|
4630
|
-
if eventmask & self._READ:
|
4631
|
-
readable.append(fd)
|
4632
|
-
if eventmask & self._WRITE:
|
4633
|
-
writable.append(fd)
|
4634
|
-
return readable, writable
|
4635
|
-
|
4636
|
-
def _poll_fds(self, timeout: float) -> ta.List[ta.Tuple[Fd, Fd]]:
|
4637
|
-
try:
|
4638
|
-
return self._poller.poll(timeout * 1000) # type: ignore
|
4639
|
-
except OSError as exc:
|
4640
|
-
if exc.args[0] == errno.EINTR:
|
4641
|
-
log.debug('EINTR encountered in poll')
|
4642
|
-
return []
|
4643
|
-
raise
|
4644
|
-
|
4645
|
-
def _ignore_invalid(self, fd: Fd, eventmask: int) -> bool:
|
4646
|
-
if eventmask & select.POLLNVAL:
|
4647
|
-
# POLLNVAL means `fd` value is invalid, not open. When a process quits it's `fd`s are closed so there is no
|
4648
|
-
# more reason to keep this `fd` registered If the process restarts it's `fd`s are registered again.
|
4649
|
-
self._poller.unregister(fd)
|
4650
|
-
self._readable.discard(fd)
|
4651
|
-
self._writable.discard(fd)
|
4652
|
-
return True
|
4653
|
-
return False
|
4654
|
-
|
4655
|
-
|
4656
|
-
if sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
|
4657
|
-
class KqueuePoller(Poller):
|
4658
|
-
max_events = 1000
|
4659
|
-
|
4660
|
-
def __init__(self) -> None:
|
4661
|
-
super().__init__()
|
4662
|
-
|
4663
|
-
self._kqueue: ta.Optional[ta.Any] = select.kqueue()
|
4664
|
-
self._readable: set[Fd] = set()
|
4665
|
-
self._writable: set[Fd] = set()
|
4666
|
-
|
4667
|
-
def register_readable(self, fd: Fd) -> None:
|
4668
|
-
self._readable.add(fd)
|
4669
|
-
kevent = select.kevent(fd, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_ADD)
|
4670
|
-
self._kqueue_control(fd, kevent)
|
4671
|
-
|
4672
|
-
def register_writable(self, fd: Fd) -> None:
|
4673
|
-
self._writable.add(fd)
|
4674
|
-
kevent = select.kevent(fd, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_ADD)
|
4675
|
-
self._kqueue_control(fd, kevent)
|
4676
|
-
|
4677
|
-
def unregister_readable(self, fd: Fd) -> None:
|
4678
|
-
kevent = select.kevent(fd, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_DELETE)
|
4679
|
-
self._readable.discard(fd)
|
4680
|
-
self._kqueue_control(fd, kevent)
|
4681
|
-
|
4682
|
-
def unregister_writable(self, fd: Fd) -> None:
|
4683
|
-
kevent = select.kevent(fd, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_DELETE)
|
4684
|
-
self._writable.discard(fd)
|
4685
|
-
self._kqueue_control(fd, kevent)
|
4686
|
-
|
4687
|
-
def _kqueue_control(self, fd: Fd, kevent: 'select.kevent') -> None:
|
4688
|
-
try:
|
4689
|
-
self._kqueue.control([kevent], 0) # type: ignore
|
4690
|
-
except OSError as error:
|
4691
|
-
if error.errno == errno.EBADF:
|
4692
|
-
log.debug('EBADF encountered in kqueue. Invalid file descriptor %s', fd)
|
4693
|
-
else:
|
4694
|
-
raise
|
4695
|
-
|
4696
|
-
def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
|
4697
|
-
readable, writable = [], [] # type: ignore
|
4698
|
-
|
4699
|
-
try:
|
4700
|
-
kevents = self._kqueue.control(None, self.max_events, timeout) # type: ignore
|
4701
|
-
except OSError as error:
|
4702
|
-
if error.errno == errno.EINTR:
|
4703
|
-
log.debug('EINTR encountered in poll')
|
4704
|
-
return readable, writable
|
4705
|
-
raise
|
4706
|
-
|
4707
|
-
for kevent in kevents:
|
4708
|
-
if kevent.filter == select.KQ_FILTER_READ:
|
4709
|
-
readable.append(kevent.ident)
|
4710
|
-
if kevent.filter == select.KQ_FILTER_WRITE:
|
4711
|
-
writable.append(kevent.ident)
|
4712
|
-
|
4713
|
-
return readable, writable
|
4714
|
-
|
4715
|
-
def before_daemonize(self) -> None:
|
4716
|
-
self.close()
|
4717
|
-
|
4718
|
-
def after_daemonize(self) -> None:
|
4719
|
-
self._kqueue = select.kqueue()
|
4720
|
-
for fd in self._readable:
|
4721
|
-
self.register_readable(fd)
|
4722
|
-
for fd in self._writable:
|
4723
|
-
self.register_writable(fd)
|
4724
|
-
|
4725
|
-
def close(self) -> None:
|
4726
|
-
self._kqueue.close() # type: ignore
|
4727
|
-
self._kqueue = None
|
4728
|
-
|
4729
|
-
else:
|
4730
|
-
KqueuePoller = None
|
4731
|
-
|
4732
|
-
|
4733
|
-
def get_poller_impl() -> ta.Type[Poller]:
|
4734
|
-
if (
|
4735
|
-
(sys.platform == 'darwin' or sys.platform.startswith('freebsd')) and
|
4736
|
-
hasattr(select, 'kqueue') and
|
4737
|
-
KqueuePoller is not None
|
4738
|
-
):
|
4739
|
-
return KqueuePoller
|
4740
|
-
elif hasattr(select, 'poll'):
|
4741
|
-
return PollPoller
|
4742
|
-
else:
|
4743
|
-
return SelectPoller
|
4744
|
-
|
4745
|
-
|
4746
5339
|
########################################
|
4747
5340
|
# ../../../omlish/lite/http/coroserver.py
|
4748
5341
|
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
|
@@ -5310,6 +5903,10 @@ class CoroHttpServerSocketHandler(SocketHandler):
 ##


+class ExitNow(Exception):  # noqa
+    pass
+
+
 ServerEpoch = ta.NewType('ServerEpoch', int)

@@ -5333,12 +5930,7 @@ class ConfigPriorityOrdered(abc.ABC):
 ##


-class
-    @property
-    @abc.abstractmethod
-    def config(self) -> ServerConfig:
-        raise NotImplementedError
-
+class SupervisorStateManager(abc.ABC):
     @property
     @abc.abstractmethod
     def state(self) -> SupervisorState:
@@ -5352,12 +5944,13 @@ class ServerContext(abc.ABC):
 ##


-class
-    @property
+class HasDispatchers(abc.ABC):
     @abc.abstractmethod
-    def
+    def get_dispatchers(self) -> 'Dispatchers':
         raise NotImplementedError

+
+class ProcessDispatcher(FdIoHandler, abc.ABC):
     @property
     @abc.abstractmethod
     def channel(self) -> str:
@@ -5365,44 +5958,11 @@ class Dispatcher(abc.ABC):

     @property
     @abc.abstractmethod
-    def
+    def process(self) -> 'Process':
         raise NotImplementedError

-    @property
-    @abc.abstractmethod
-    def closed(self) -> bool:
-        raise NotImplementedError
-
-    #
-
-    @abc.abstractmethod
-    def close(self) -> None:
-        raise NotImplementedError
-
-    @abc.abstractmethod
-    def handle_error(self) -> None:
-        raise NotImplementedError
-
-    #
-
-    @abc.abstractmethod
-    def readable(self) -> bool:
-        raise NotImplementedError
-
-    @abc.abstractmethod
-    def writable(self) -> bool:
-        raise NotImplementedError
-
-    #

-
-        raise TypeError
-
-    def handle_write_event(self) -> None:
-        raise TypeError
-
-
-class OutputDispatcher(Dispatcher, abc.ABC):
+class ProcessOutputDispatcher(ProcessDispatcher, abc.ABC):
     @abc.abstractmethod
     def remove_logs(self) -> None:
         raise NotImplementedError
@@ -5412,7 +5972,7 @@ class OutputDispatcher(Dispatcher, abc.ABC):
         raise NotImplementedError


-class
+class ProcessInputDispatcher(ProcessDispatcher, abc.ABC):
     @abc.abstractmethod
     def write(self, chars: ta.Union[bytes, str]) -> None:
         raise NotImplementedError
@@ -5425,7 +5985,11 @@ class InputDispatcher(Dispatcher, abc.ABC):
 ##


-class Process(
+class Process(
+        ConfigPriorityOrdered,
+        HasDispatchers,
+        abc.ABC,
+):
     @property
     @abc.abstractmethod
     def name(self) -> str:
@@ -5448,11 +6012,6 @@ class Process(ConfigPriorityOrdered, abc.ABC):

     #

-    @property
-    @abc.abstractmethod
-    def context(self) -> ServerContext:
-        raise NotImplementedError
-
     @abc.abstractmethod
     def finish(self, sts: Rc) -> None:
         raise NotImplementedError
@@ -5469,18 +6028,15 @@ class Process(ConfigPriorityOrdered, abc.ABC):
     def transition(self) -> None:
         raise NotImplementedError

+    @property
     @abc.abstractmethod
-    def
+    def state(self) -> ProcessState:
         raise NotImplementedError

     @abc.abstractmethod
     def after_setuid(self) -> None:
         raise NotImplementedError

-    @abc.abstractmethod
-    def get_dispatchers(self) -> 'Dispatchers':
-        raise NotImplementedError
-

 ##

@@ -5521,81 +6077,138 @@ class ProcessGroup(


 ########################################
-#
+# ../../../omlish/lite/fdio/corohttp.py


-class
+class CoroHttpServerConnectionFdIoHandler(SocketFdIoHandler):
     def __init__(
             self,
-
-
+            addr: SocketAddress,
+            sock: socket.socket,
+            handler: HttpHandler,
             *,
-
+            read_size: int = 0x10000,
+            write_size: int = 0x10000,
     ) -> None:
-
+        check_state(not sock.getblocking())

-
-        self._poller = poller
-        self._epoch = epoch
+        super().__init__(addr, sock)

-        self.
+        self._handler = handler
+        self._read_size = read_size
+        self._write_size = write_size

-
-
-        return self._config
+        self._read_buf = ReadableListBuffer()
+        self._write_buf: IncrementalWriteBuffer | None = None

-
-
-
+        self._coro_srv = CoroHttpServer(
+            addr,
+            handler=self._handler,
+        )
+        self._srv_coro: ta.Optional[ta.Generator[CoroHttpServer.Io, ta.Optional[bytes], None]] = self._coro_srv.coro_handle()  # noqa

-
-
-        return not self._epoch
+        self._cur_io: CoroHttpServer.Io | None = None
+        self._next_io()

-
-    def state(self) -> SupervisorState:
-        return self._state
+    #

-    def
-
+    def _next_io(self) -> None:  # noqa
+        coro = check_not_none(self._srv_coro)
+
+        d: bytes | None = None
+        o = self._cur_io
+        while True:
+            if o is None:
+                try:
+                    if d is not None:
+                        o = coro.send(d)
+                        d = None
+                    else:
+                        o = next(coro)
+                except StopIteration:
+                    self.close()
+                    o = None
+                    break
+
+            if isinstance(o, CoroHttpServer.AnyLogIo):
+                print(o)
+                o = None
+
+            elif isinstance(o, CoroHttpServer.ReadIo):
+                if (d := self._read_buf.read(o.sz)) is None:
+                    break
+                o = None
+
+            elif isinstance(o, CoroHttpServer.ReadLineIo):
+                if (d := self._read_buf.read_until(b'\n')) is None:
+                    break
+                o = None
+
+            elif isinstance(o, CoroHttpServer.WriteIo):
+                check_none(self._write_buf)
+                self._write_buf = IncrementalWriteBuffer(o.data, write_size=self._write_size)
+                break
+
+            else:
+                raise TypeError(o)
+
+        self._cur_io = o
+
+    #
+
+    def readable(self) -> bool:
+        return True
+
+    def writable(self) -> bool:
+        return self._write_buf is not None

     #

-    def
-        # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
-        # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
-        # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
-        # normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
-        # call. At least on Linux, this appears to be true, or at least stopping 50 processes at once never left zombies
|
5570
|
-
# lying around.
|
6167
|
+
def on_readable(self) -> None:
|
5571
6168
|
try:
|
5572
|
-
|
5573
|
-
except
|
5574
|
-
|
5575
|
-
|
5576
|
-
|
5577
|
-
|
5578
|
-
|
5579
|
-
|
5580
|
-
|
5581
|
-
|
5582
|
-
|
5583
|
-
|
5584
|
-
|
5585
|
-
|
5586
|
-
|
5587
|
-
|
5588
|
-
)
|
5589
|
-
|
6169
|
+
buf = check_not_none(self._sock).recv(self._read_size)
|
6170
|
+
except BlockingIOError:
|
6171
|
+
return
|
6172
|
+
except ConnectionResetError:
|
6173
|
+
self.close()
|
6174
|
+
return
|
6175
|
+
if not buf:
|
6176
|
+
self.close()
|
6177
|
+
return
|
6178
|
+
|
6179
|
+
self._read_buf.feed(buf)
|
6180
|
+
|
6181
|
+
if isinstance(self._cur_io, CoroHttpServer.AnyReadIo):
|
6182
|
+
self._next_io()
|
6183
|
+
|
6184
|
+
def on_writable(self) -> None:
|
6185
|
+
check_isinstance(self._cur_io, CoroHttpServer.WriteIo)
|
6186
|
+
wb = check_not_none(self._write_buf)
|
6187
|
+
while wb.rem > 0:
|
6188
|
+
def send(d: bytes) -> int:
|
6189
|
+
try:
|
6190
|
+
return check_not_none(self._sock).send(d)
|
6191
|
+
except ConnectionResetError:
|
6192
|
+
self.close()
|
6193
|
+
return 0
|
6194
|
+
except BlockingIOError:
|
6195
|
+
return 0
|
6196
|
+
if not wb.write(send):
|
6197
|
+
break
|
6198
|
+
|
6199
|
+
if wb.rem < 1:
|
6200
|
+
self._write_buf = None
|
6201
|
+
self._cur_io = None
|
6202
|
+
self._next_io()
|
5590
6203
|
|
5591
6204
|
|
5592
6205
|
########################################
|
5593
6206
|
# ../dispatchers.py
|
5594
6207
|
|
5595
6208
|
|
5596
|
-
class Dispatchers(KeyedCollection[Fd,
|
5597
|
-
def _key(self, v:
|
5598
|
-
return v.fd
|
6209
|
+
class Dispatchers(KeyedCollection[Fd, FdIoHandler]):
|
6210
|
+
def _key(self, v: FdIoHandler) -> Fd:
|
6211
|
+
return Fd(v.fd())
|
5599
6212
|
|
5600
6213
|
#
|
5601
6214
|
|
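
The CoroHttpServerConnectionFdIoHandler added above drives a generator-based HTTP server: _next_io repeatedly steps the coroutine and services each yielded Io request (read, read-line, write, log) from non-blocking socket buffers, stopping whenever data is not yet available. A toy version of that pump, under the assumption of synchronous recv/send callbacks (ReadIo/WriteIo/echo_once/pump are illustrative names only):

    import typing as ta


    class ReadIo(ta.NamedTuple):
        sz: int


    class WriteIo(ta.NamedTuple):
        data: bytes


    def echo_once() -> ta.Generator[ta.Union[ReadIo, WriteIo], ta.Optional[bytes], None]:
        # toy protocol: ask for one chunk, then ask for it to be written back
        data = yield ReadIo(1024)
        yield WriteIo(data or b'')


    def pump(
            coro: ta.Generator[ta.Union[ReadIo, WriteIo], ta.Optional[bytes], None],
            recv: ta.Callable[[int], bytes],
            send: ta.Callable[[bytes], None],
    ) -> None:
        # same shape as _next_io: step the coroutine and satisfy each yielded io request
        reply: ta.Optional[bytes] = None
        while True:
            try:
                io = coro.send(reply) if reply is not None else next(coro)
            except StopIteration:
                return
            reply = None
            if isinstance(io, ReadIo):
                reply = recv(io.sz)
            else:
                send(io.data)


    pump(echo_once(), recv=lambda n: b'hello', send=print)  # prints b'hello'
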
@@ -5604,20 +6217,20 @@ class Dispatchers(KeyedCollection[Fd, Dispatcher]):
|
|
5604
6217
|
# note that we *must* call readable() for every dispatcher, as it may have side effects for a given
|
5605
6218
|
# dispatcher (eg. call handle_listener_state_change for event listener processes)
|
5606
6219
|
if d.readable():
|
5607
|
-
d.
|
6220
|
+
d.on_readable()
|
5608
6221
|
if d.writable():
|
5609
|
-
d.
|
6222
|
+
d.on_writable()
|
5610
6223
|
|
5611
6224
|
#
|
5612
6225
|
|
5613
6226
|
def remove_logs(self) -> None:
|
5614
6227
|
for d in self:
|
5615
|
-
if isinstance(d,
|
6228
|
+
if isinstance(d, ProcessOutputDispatcher):
|
5616
6229
|
d.remove_logs()
|
5617
6230
|
|
5618
6231
|
def reopen_logs(self) -> None:
|
5619
6232
|
for d in self:
|
5620
|
-
if isinstance(d,
|
6233
|
+
if isinstance(d, ProcessOutputDispatcher):
|
5621
6234
|
d.reopen_logs()
|
5622
6235
|
|
5623
6236
|
|
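
The Dispatchers collection shown above is now keyed by fd over generic FdIoHandlers; its loop always consults readable()/writable() (they may have side effects) and then calls the renamed on_readable()/on_writable() hooks, while log maintenance filters by isinstance on ProcessOutputDispatcher. A small sketch of that shape (Handler/LogHandler are illustrative stand-ins):

    import typing as ta


    class Handler:
        def readable(self) -> bool: return False
        def writable(self) -> bool: return False
        def on_readable(self) -> None: pass
        def on_writable(self) -> None: pass


    class LogHandler(Handler):
        def reopen_logs(self) -> None:
            print('reopening')


    def drain_once(handlers: ta.Iterable[Handler]) -> None:
        # readable()/writable() are always consulted, as in the loop above
        for h in handlers:
            if h.readable():
                h.on_readable()
            if h.writable():
                h.on_writable()


    def reopen_all(handlers: ta.Iterable[Handler]) -> None:
        for h in handlers:
            if isinstance(h, LogHandler):  # the real code filters on ProcessOutputDispatcher
                h.reopen_logs()
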
@@ -5625,7 +6238,7 @@ class Dispatchers(KeyedCollection[Fd, Dispatcher]):
|
|
5625
6238
|
# ../dispatchersimpl.py
|
5626
6239
|
|
5627
6240
|
|
5628
|
-
class
|
6241
|
+
class BaseProcessDispatcherImpl(ProcessDispatcher, abc.ABC):
|
5629
6242
|
def __init__(
|
5630
6243
|
self,
|
5631
6244
|
process: Process,
|
@@ -5633,6 +6246,7 @@ class BaseDispatcherImpl(Dispatcher, abc.ABC):
|
|
5633
6246
|
fd: Fd,
|
5634
6247
|
*,
|
5635
6248
|
event_callbacks: EventCallbacks,
|
6249
|
+
server_config: ServerConfig,
|
5636
6250
|
) -> None:
|
5637
6251
|
super().__init__()
|
5638
6252
|
|
@@ -5640,6 +6254,7 @@ class BaseDispatcherImpl(Dispatcher, abc.ABC):
|
|
5640
6254
|
self._channel = channel # 'stderr' or 'stdout'
|
5641
6255
|
self._fd = fd
|
5642
6256
|
self._event_callbacks = event_callbacks
|
6257
|
+
self._server_config = server_config
|
5643
6258
|
|
5644
6259
|
self._closed = False # True if close() has been called
|
5645
6260
|
|
@@ -5658,7 +6273,6 @@ class BaseDispatcherImpl(Dispatcher, abc.ABC):
|
|
5658
6273
|
def channel(self) -> str:
|
5659
6274
|
return self._channel
|
5660
6275
|
|
5661
|
-
@property
|
5662
6276
|
def fd(self) -> Fd:
|
5663
6277
|
return self._fd
|
5664
6278
|
|
@@ -5673,14 +6287,14 @@ class BaseDispatcherImpl(Dispatcher, abc.ABC):
|
|
5673
6287
|
log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
|
5674
6288
|
self._closed = True
|
5675
6289
|
|
5676
|
-
def
|
6290
|
+
def on_error(self, exc: ta.Optional[BaseException] = None) -> None:
|
5677
6291
|
nil, t, v, tbinfo = compact_traceback()
|
5678
6292
|
|
5679
6293
|
log.critical('uncaptured python exception, closing channel %s (%s:%s %s)', repr(self), t, v, tbinfo)
|
5680
6294
|
self.close()
|
5681
6295
|
|
5682
6296
|
|
5683
|
-
class
|
6297
|
+
class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispatcher):
|
5684
6298
|
"""
|
5685
6299
|
Dispatcher for one channel (stdout or stderr) of one process. Serves several purposes:
|
5686
6300
|
|
@@ -5696,12 +6310,14 @@ class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
|
|
5696
6310
|
fd: Fd,
|
5697
6311
|
*,
|
5698
6312
|
event_callbacks: EventCallbacks,
|
6313
|
+
server_config: ServerConfig,
|
5699
6314
|
) -> None:
|
5700
6315
|
super().__init__(
|
5701
6316
|
process,
|
5702
6317
|
event_type.channel,
|
5703
6318
|
fd,
|
5704
6319
|
event_callbacks=event_callbacks,
|
6320
|
+
server_config=server_config,
|
5705
6321
|
)
|
5706
6322
|
|
5707
6323
|
self._event_type = event_type
|
@@ -5725,11 +6341,10 @@ class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
|
|
5725
6341
|
|
5726
6342
|
self._main_log_level = logging.DEBUG
|
5727
6343
|
|
5728
|
-
self._log_to_main_log =
|
6344
|
+
self._log_to_main_log = self._server_config.loglevel <= self._main_log_level
|
5729
6345
|
|
5730
|
-
|
5731
|
-
self.
|
5732
|
-
self._stderr_events_enabled = config.stderr.events_enabled
|
6346
|
+
self._stdout_events_enabled = self._process.config.stdout.events_enabled
|
6347
|
+
self._stderr_events_enabled = self._process.config.stderr.events_enabled
|
5733
6348
|
|
5734
6349
|
_child_log: ta.Optional[logging.Logger] = None # the current logger (normal_log or capture_log)
|
5735
6350
|
_normal_log: ta.Optional[logging.Logger] = None # the "normal" (non-capture) logger
|
@@ -5800,7 +6415,7 @@ class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
|
|
5800
6415
|
if not data:
|
5801
6416
|
return
|
5802
6417
|
|
5803
|
-
if self.
|
6418
|
+
if self._server_config.strip_ansi:
|
5804
6419
|
data = strip_escapes(as_bytes(data))
|
5805
6420
|
|
5806
6421
|
if self._child_log:
|
@@ -5888,7 +6503,7 @@ class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
|
|
5888
6503
|
return False
|
5889
6504
|
return True
|
5890
6505
|
|
5891
|
-
def
|
6506
|
+
def on_readable(self) -> None:
|
5892
6507
|
data = read_fd(self._fd)
|
5893
6508
|
self._output_buffer += data
|
5894
6509
|
self.record_output()
|
@@ -5898,7 +6513,7 @@ class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
|
|
5898
6513
|
self.close()
|
5899
6514
|
|
5900
6515
|
|
5901
|
-
class
|
6516
|
+
class ProcessInputDispatcherImpl(BaseProcessDispatcherImpl, ProcessInputDispatcher):
|
5902
6517
|
def __init__(
|
5903
6518
|
self,
|
5904
6519
|
process: Process,
|
@@ -5906,12 +6521,14 @@ class InputDispatcherImpl(BaseDispatcherImpl, InputDispatcher):
|
|
5906
6521
|
fd: Fd,
|
5907
6522
|
*,
|
5908
6523
|
event_callbacks: EventCallbacks,
|
6524
|
+
server_config: ServerConfig,
|
5909
6525
|
) -> None:
|
5910
6526
|
super().__init__(
|
5911
6527
|
process,
|
5912
6528
|
channel,
|
5913
6529
|
fd,
|
5914
6530
|
event_callbacks=event_callbacks,
|
6531
|
+
server_config=server_config,
|
5915
6532
|
)
|
5916
6533
|
|
5917
6534
|
self._input_buffer = b''
|
@@ -5924,15 +6541,12 @@ class InputDispatcherImpl(BaseDispatcherImpl, InputDispatcher):
|
|
5924
6541
|
return True
|
5925
6542
|
return False
|
5926
6543
|
|
5927
|
-
def readable(self) -> bool:
|
5928
|
-
return False
|
5929
|
-
|
5930
6544
|
def flush(self) -> None:
|
5931
6545
|
# other code depends on this raising EPIPE if the pipe is closed
|
5932
6546
|
sent = os.write(self._fd, as_bytes(self._input_buffer))
|
5933
6547
|
self._input_buffer = self._input_buffer[sent:]
|
5934
6548
|
|
5935
|
-
def
|
6549
|
+
def on_writable(self) -> None:
|
5936
6550
|
if self._input_buffer:
|
5937
6551
|
try:
|
5938
6552
|
self.flush()
|
@@ -5944,79 +6558,6 @@ class InputDispatcherImpl(BaseDispatcherImpl, InputDispatcher):
|
|
5944
6558
|
raise
|
5945
6559
|
|
5946
6560
|
|
5947
|
-
########################################
|
5948
|
-
# ../groups.py
|
5949
|
-
|
5950
|
-
|
5951
|
-
class ProcessGroupManager(KeyedCollectionAccessors[str, ProcessGroup]):
|
5952
|
-
def __init__(
|
5953
|
-
self,
|
5954
|
-
*,
|
5955
|
-
event_callbacks: EventCallbacks,
|
5956
|
-
) -> None:
|
5957
|
-
super().__init__()
|
5958
|
-
|
5959
|
-
self._event_callbacks = event_callbacks
|
5960
|
-
|
5961
|
-
self._by_name: ta.Dict[str, ProcessGroup] = {}
|
5962
|
-
|
5963
|
-
@property
|
5964
|
-
def _by_key(self) -> ta.Mapping[str, ProcessGroup]:
|
5965
|
-
return self._by_name
|
5966
|
-
|
5967
|
-
#
|
5968
|
-
|
5969
|
-
def all_processes(self) -> ta.Iterator[Process]:
|
5970
|
-
for g in self:
|
5971
|
-
yield from g
|
5972
|
-
|
5973
|
-
#
|
5974
|
-
|
5975
|
-
def add(self, group: ProcessGroup) -> None:
|
5976
|
-
if (name := group.name) in self._by_name:
|
5977
|
-
raise KeyError(f'Process group already exists: {name}')
|
5978
|
-
|
5979
|
-
self._by_name[name] = group
|
5980
|
-
|
5981
|
-
self._event_callbacks.notify(ProcessGroupAddedEvent(name))
|
5982
|
-
|
5983
|
-
def remove(self, name: str) -> None:
|
5984
|
-
group = self._by_name[name]
|
5985
|
-
|
5986
|
-
group.before_remove()
|
5987
|
-
|
5988
|
-
del self._by_name[name]
|
5989
|
-
|
5990
|
-
self._event_callbacks.notify(ProcessGroupRemovedEvent(name))
|
5991
|
-
|
5992
|
-
def clear(self) -> None:
|
5993
|
-
# FIXME: events?
|
5994
|
-
self._by_name.clear()
|
5995
|
-
|
5996
|
-
#
|
5997
|
-
|
5998
|
-
class Diff(ta.NamedTuple):
|
5999
|
-
added: ta.List[ProcessGroupConfig]
|
6000
|
-
changed: ta.List[ProcessGroupConfig]
|
6001
|
-
removed: ta.List[ProcessGroupConfig]
|
6002
|
-
|
6003
|
-
def diff(self, new: ta.Sequence[ProcessGroupConfig]) -> Diff:
|
6004
|
-
cur = [group.config for group in self]
|
6005
|
-
|
6006
|
-
cur_by_name = {cfg.name: cfg for cfg in cur}
|
6007
|
-
new_by_name = {cfg.name: cfg for cfg in new}
|
6008
|
-
|
6009
|
-
added = [cand for cand in new if cand.name not in cur_by_name]
|
6010
|
-
removed = [cand for cand in cur if cand.name not in new_by_name]
|
6011
|
-
changed = [cand for cand in new if cand != cur_by_name.get(cand.name, cand)]
|
6012
|
-
|
6013
|
-
return ProcessGroupManager.Diff(
|
6014
|
-
added,
|
6015
|
-
changed,
|
6016
|
-
removed,
|
6017
|
-
)
|
6018
|
-
|
6019
|
-
|
6020
6561
|
########################################
|
6021
6562
|
# ../groupsimpl.py
|
6022
6563
|
|
@@ -6071,7 +6612,7 @@ class ProcessGroupImpl(ProcessGroup):
     #
 
     def get_unstopped_processes(self) -> ta.List[Process]:
-        return [x for x in self if not x.
+        return [x for x in self if not x.state.stopped]
 
     def stop_all(self) -> None:
         processes = list(self._by_name.values())
|
@@ -6079,7 +6620,7 @@ class ProcessGroupImpl(ProcessGroup):
         processes.reverse()  # stop in desc priority order
 
         for proc in processes:
-            state = proc.
+            state = proc.state
             if state == ProcessState.RUNNING:
                 # RUNNING -> STOPPING
                 proc.stop()
|
@@ -6362,304 +6903,151 @@ class SupervisorSetupImpl(SupervisorSetup):
|
|
6362
6903
|
|
6363
6904
|
|
6364
6905
|
########################################
|
6365
|
-
# ../
|
6366
|
-
|
6906
|
+
# ../groups.py
|
6367
6907
|
|
6368
|
-
@dc.dataclass(frozen=True)
|
6369
|
-
class SpawnedProcess:
|
6370
|
-
pid: Pid
|
6371
|
-
pipes: ProcessPipes
|
6372
|
-
dispatchers: Dispatchers
|
6373
6908
|
|
6909
|
+
class ProcessGroupManager(
|
6910
|
+
KeyedCollectionAccessors[str, ProcessGroup],
|
6911
|
+
HasDispatchers,
|
6912
|
+
):
|
6913
|
+
def __init__(
|
6914
|
+
self,
|
6915
|
+
*,
|
6916
|
+
event_callbacks: EventCallbacks,
|
6917
|
+
) -> None:
|
6918
|
+
super().__init__()
|
6374
6919
|
|
6375
|
-
|
6376
|
-
pass
|
6920
|
+
self._event_callbacks = event_callbacks
|
6377
6921
|
|
6922
|
+
self._by_name: ta.Dict[str, ProcessGroup] = {}
|
6378
6923
|
|
6379
|
-
class ProcessSpawning:
|
6380
6924
|
@property
|
6381
|
-
|
6382
|
-
|
6383
|
-
raise NotImplementedError
|
6925
|
+
def _by_key(self) -> ta.Mapping[str, ProcessGroup]:
|
6926
|
+
return self._by_name
|
6384
6927
|
|
6385
6928
|
#
|
6386
6929
|
|
6387
|
-
|
6388
|
-
|
6389
|
-
|
6930
|
+
def all_processes(self) -> ta.Iterator[Process]:
|
6931
|
+
for g in self:
|
6932
|
+
yield from g
|
6390
6933
|
|
6934
|
+
#
|
6391
6935
|
|
6392
|
-
|
6393
|
-
|
6936
|
+
def get_dispatchers(self) -> Dispatchers:
|
6937
|
+
return Dispatchers(
|
6938
|
+
d
|
6939
|
+
for g in self
|
6940
|
+
for p in g
|
6941
|
+
for d in p.get_dispatchers()
|
6942
|
+
)
|
6394
6943
|
|
6944
|
+
#
|
6395
6945
|
|
6396
|
-
|
6946
|
+
def add(self, group: ProcessGroup) -> None:
|
6947
|
+
if (name := group.name) in self._by_name:
|
6948
|
+
raise KeyError(f'Process group already exists: {name}')
|
6397
6949
|
|
6950
|
+
self._by_name[name] = group
|
6398
6951
|
|
6399
|
-
|
6400
|
-
pass
|
6952
|
+
self._event_callbacks.notify(ProcessGroupAddedEvent(name))
|
6401
6953
|
|
6954
|
+
def remove(self, name: str) -> None:
|
6955
|
+
group = self._by_name[name]
|
6402
6956
|
|
6403
|
-
|
6404
|
-
return int(when - (when % period))
|
6957
|
+
group.before_remove()
|
6405
6958
|
|
6959
|
+
del self._by_name[name]
|
6406
6960
|
|
6407
|
-
|
6961
|
+
self._event_callbacks.notify(ProcessGroupRemovedEvent(name))
|
6408
6962
|
|
6963
|
+
def clear(self) -> None:
|
6964
|
+
# FIXME: events?
|
6965
|
+
self._by_name.clear()
|
6409
6966
|
|
6410
|
-
|
6411
|
-
def __init__(
|
6412
|
-
self,
|
6413
|
-
*,
|
6414
|
-
context: ServerContextImpl,
|
6415
|
-
signal_receiver: SignalReceiver,
|
6416
|
-
process_groups: ProcessGroupManager,
|
6417
|
-
) -> None:
|
6418
|
-
super().__init__()
|
6967
|
+
#
|
6419
6968
|
|
6420
|
-
|
6421
|
-
|
6422
|
-
|
6423
|
-
|
6424
|
-
def set_signals(self) -> None:
|
6425
|
-
self._signal_receiver.install(
|
6426
|
-
signal.SIGTERM,
|
6427
|
-
signal.SIGINT,
|
6428
|
-
signal.SIGQUIT,
|
6429
|
-
signal.SIGHUP,
|
6430
|
-
signal.SIGCHLD,
|
6431
|
-
signal.SIGUSR2,
|
6432
|
-
)
|
6433
|
-
|
6434
|
-
def handle_signals(self) -> None:
|
6435
|
-
sig = self._signal_receiver.get_signal()
|
6436
|
-
if not sig:
|
6437
|
-
return
|
6969
|
+
class Diff(ta.NamedTuple):
|
6970
|
+
added: ta.List[ProcessGroupConfig]
|
6971
|
+
changed: ta.List[ProcessGroupConfig]
|
6972
|
+
removed: ta.List[ProcessGroupConfig]
|
6438
6973
|
|
6439
|
-
|
6440
|
-
|
6441
|
-
self._context.set_state(SupervisorState.SHUTDOWN)
|
6974
|
+
def diff(self, new: ta.Sequence[ProcessGroupConfig]) -> Diff:
|
6975
|
+
cur = [group.config for group in self]
|
6442
6976
|
|
6443
|
-
|
6444
|
-
|
6445
|
-
log.warning('ignored %s indicating restart request (shutdown in progress)', sig_name(sig)) # noqa
|
6446
|
-
else:
|
6447
|
-
log.warning('received %s indicating restart request', sig_name(sig)) # noqa
|
6448
|
-
self._context.set_state(SupervisorState.RESTARTING)
|
6977
|
+
cur_by_name = {cfg.name: cfg for cfg in cur}
|
6978
|
+
new_by_name = {cfg.name: cfg for cfg in new}
|
6449
6979
|
|
6450
|
-
|
6451
|
-
|
6980
|
+
added = [cand for cand in new if cand.name not in cur_by_name]
|
6981
|
+
removed = [cand for cand in cur if cand.name not in new_by_name]
|
6982
|
+
changed = [cand for cand in new if cand != cur_by_name.get(cand.name, cand)]
|
6452
6983
|
|
6453
|
-
|
6454
|
-
|
6984
|
+
return ProcessGroupManager.Diff(
|
6985
|
+
added,
|
6986
|
+
changed,
|
6987
|
+
removed,
|
6988
|
+
)
|
6455
6989
|
|
6456
|
-
for p in self._process_groups.all_processes():
|
6457
|
-
for d in p.get_dispatchers():
|
6458
|
-
if isinstance(d, OutputDispatcher):
|
6459
|
-
d.reopen_logs()
|
6460
6990
|
|
6461
|
-
|
6462
|
-
|
6991
|
+
########################################
|
6992
|
+
# ../io.py
|
6463
6993
|
|
6464
6994
|
|
6465
6995
|
##
|
6466
6996
|
|
6467
6997
|
|
6468
|
-
|
6469
|
-
pass
|
6998
|
+
HasDispatchersList = ta.NewType('HasDispatchersList', ta.Sequence[HasDispatchers])
|
6470
6999
|
|
6471
7000
|
|
6472
|
-
class
|
7001
|
+
class IoManager(HasDispatchers):
|
6473
7002
|
def __init__(
|
6474
7003
|
self,
|
6475
7004
|
*,
|
6476
|
-
|
6477
|
-
|
6478
|
-
process_groups: ProcessGroupManager,
|
6479
|
-
signal_handler: SignalHandler,
|
6480
|
-
event_callbacks: EventCallbacks,
|
6481
|
-
process_group_factory: ProcessGroupFactory,
|
6482
|
-
pid_history: PidHistory,
|
6483
|
-
setup: SupervisorSetup,
|
7005
|
+
poller: FdIoPoller,
|
7006
|
+
has_dispatchers_list: HasDispatchersList,
|
6484
7007
|
) -> None:
|
6485
7008
|
super().__init__()
|
6486
7009
|
|
6487
|
-
self._context = context
|
6488
7010
|
self._poller = poller
|
6489
|
-
self.
|
6490
|
-
self._signal_handler = signal_handler
|
6491
|
-
self._event_callbacks = event_callbacks
|
6492
|
-
self._process_group_factory = process_group_factory
|
6493
|
-
self._pid_history = pid_history
|
6494
|
-
self._setup = setup
|
6495
|
-
|
6496
|
-
self._ticks: ta.Dict[int, float] = {}
|
6497
|
-
self._stop_groups: ta.Optional[ta.List[ProcessGroup]] = None # list used for priority ordered shutdown
|
6498
|
-
self._stopping = False # set after we detect that we are handling a stop request
|
6499
|
-
self._last_shutdown_report = 0. # throttle for delayed process error reports at stop
|
6500
|
-
|
6501
|
-
#
|
6502
|
-
|
6503
|
-
@property
|
6504
|
-
def context(self) -> ServerContextImpl:
|
6505
|
-
return self._context
|
6506
|
-
|
6507
|
-
def get_state(self) -> SupervisorState:
|
6508
|
-
return self._context.state
|
6509
|
-
|
6510
|
-
#
|
6511
|
-
|
6512
|
-
def add_process_group(self, config: ProcessGroupConfig) -> bool:
|
6513
|
-
if self._process_groups.get(config.name) is not None:
|
6514
|
-
return False
|
6515
|
-
|
6516
|
-
group = check_isinstance(self._process_group_factory(config), ProcessGroup)
|
6517
|
-
for process in group:
|
6518
|
-
process.after_setuid()
|
6519
|
-
|
6520
|
-
self._process_groups.add(group)
|
6521
|
-
|
6522
|
-
return True
|
6523
|
-
|
6524
|
-
def remove_process_group(self, name: str) -> bool:
|
6525
|
-
if self._process_groups[name].get_unstopped_processes():
|
6526
|
-
return False
|
6527
|
-
|
6528
|
-
self._process_groups.remove(name)
|
6529
|
-
|
6530
|
-
return True
|
6531
|
-
|
6532
|
-
#
|
6533
|
-
|
6534
|
-
def shutdown_report(self) -> ta.List[Process]:
|
6535
|
-
unstopped: ta.List[Process] = []
|
6536
|
-
|
6537
|
-
for group in self._process_groups:
|
6538
|
-
unstopped.extend(group.get_unstopped_processes())
|
6539
|
-
|
6540
|
-
if unstopped:
|
6541
|
-
# throttle 'waiting for x to die' reports
|
6542
|
-
now = time.time()
|
6543
|
-
if now > (self._last_shutdown_report + 3): # every 3 secs
|
6544
|
-
names = [p.config.name for p in unstopped]
|
6545
|
-
namestr = ', '.join(names)
|
6546
|
-
log.info('waiting for %s to die', namestr)
|
6547
|
-
self._last_shutdown_report = now
|
6548
|
-
for proc in unstopped:
|
6549
|
-
log.debug('%s state: %s', proc.config.name, proc.get_state().name)
|
6550
|
-
|
6551
|
-
return unstopped
|
6552
|
-
|
6553
|
-
#
|
6554
|
-
|
6555
|
-
def main(self, **kwargs: ta.Any) -> None:
|
6556
|
-
self._setup.setup()
|
6557
|
-
try:
|
6558
|
-
self.run(**kwargs)
|
6559
|
-
finally:
|
6560
|
-
self._setup.cleanup()
|
6561
|
-
|
6562
|
-
def run(
|
6563
|
-
self,
|
6564
|
-
*,
|
6565
|
-
callback: ta.Optional[ta.Callable[['Supervisor'], bool]] = None,
|
6566
|
-
) -> None:
|
6567
|
-
self._process_groups.clear()
|
6568
|
-
self._stop_groups = None # clear
|
6569
|
-
|
6570
|
-
self._event_callbacks.clear()
|
6571
|
-
|
6572
|
-
try:
|
6573
|
-
for config in self._context.config.groups or []:
|
6574
|
-
self.add_process_group(config)
|
6575
|
-
|
6576
|
-
self._signal_handler.set_signals()
|
6577
|
-
|
6578
|
-
self._event_callbacks.notify(SupervisorRunningEvent())
|
6579
|
-
|
6580
|
-
while True:
|
6581
|
-
if callback is not None and not callback(self):
|
6582
|
-
break
|
6583
|
-
|
6584
|
-
self._run_once()
|
6585
|
-
|
6586
|
-
finally:
|
6587
|
-
self._poller.close()
|
6588
|
-
|
6589
|
-
#
|
6590
|
-
|
6591
|
-
def _run_once(self) -> None:
|
6592
|
-
self._poll()
|
6593
|
-
self._reap()
|
6594
|
-
self._signal_handler.handle_signals()
|
6595
|
-
self._tick()
|
6596
|
-
|
6597
|
-
if self._context.state < SupervisorState.RUNNING:
|
6598
|
-
self._ordered_stop_groups_phase_2()
|
6599
|
-
|
6600
|
-
def _ordered_stop_groups_phase_1(self) -> None:
|
6601
|
-
if self._stop_groups:
|
6602
|
-
# stop the last group (the one with the "highest" priority)
|
6603
|
-
self._stop_groups[-1].stop_all()
|
6604
|
-
|
6605
|
-
def _ordered_stop_groups_phase_2(self) -> None:
|
6606
|
-
# after phase 1 we've transitioned and reaped, let's see if we can remove the group we stopped from the
|
6607
|
-
# stop_groups queue.
|
6608
|
-
if self._stop_groups:
|
6609
|
-
# pop the last group (the one with the "highest" priority)
|
6610
|
-
group = self._stop_groups.pop()
|
6611
|
-
if group.get_unstopped_processes():
|
6612
|
-
# if any processes in the group aren't yet in a stopped state, we're not yet done shutting this group
|
6613
|
-
# down, so push it back on to the end of the stop group queue
|
6614
|
-
self._stop_groups.append(group)
|
7011
|
+
self._has_dispatchers_list = has_dispatchers_list
|
6615
7012
|
|
6616
7013
|
def get_dispatchers(self) -> Dispatchers:
|
6617
7014
|
return Dispatchers(
|
6618
7015
|
d
|
6619
|
-
for
|
6620
|
-
for d in
|
7016
|
+
for hd in self._has_dispatchers_list
|
7017
|
+
for d in hd.get_dispatchers()
|
6621
7018
|
)
|
6622
7019
|
|
6623
|
-
def
|
7020
|
+
def poll(self) -> None:
|
6624
7021
|
dispatchers = self.get_dispatchers()
|
6625
7022
|
|
6626
|
-
|
6627
|
-
|
6628
|
-
|
6629
|
-
|
6630
|
-
if not self._stopping:
|
6631
|
-
# first time, set the stopping flag, do a notification and set stop_groups
|
6632
|
-
self._stopping = True
|
6633
|
-
self._stop_groups = sorted_groups[:]
|
6634
|
-
self._event_callbacks.notify(SupervisorStoppingEvent())
|
6635
|
-
|
6636
|
-
self._ordered_stop_groups_phase_1()
|
6637
|
-
|
6638
|
-
if not self.shutdown_report():
|
6639
|
-
# if there are no unstopped processes (we're done killing everything), it's OK to shutdown or reload
|
6640
|
-
raise ExitNow
|
6641
|
-
|
6642
|
-
for fd, dispatcher in dispatchers.items():
|
6643
|
-
if dispatcher.readable():
|
6644
|
-
self._poller.register_readable(fd)
|
6645
|
-
if dispatcher.writable():
|
6646
|
-
self._poller.register_writable(fd)
|
7023
|
+
self._poller.update(
|
7024
|
+
{fd for fd, d in dispatchers.items() if d.readable()},
|
7025
|
+
{fd for fd, d in dispatchers.items() if d.writable()},
|
7026
|
+
)
|
6647
7027
|
|
6648
7028
|
timeout = 1 # this cannot be fewer than the smallest TickEvent (5)
|
6649
|
-
|
6650
|
-
|
6651
|
-
|
7029
|
+
log.info(f'Polling: {timeout=}') # noqa
|
7030
|
+
polled = self._poller.poll(timeout)
|
7031
|
+
log.info(f'Polled: {polled=}') # noqa
|
7032
|
+
if polled.msg is not None:
|
7033
|
+
log.error(polled.msg)
|
7034
|
+
if polled.exc is not None:
|
7035
|
+
log.error('Poll exception: %r', polled.exc)
|
7036
|
+
|
7037
|
+
for r in polled.r:
|
7038
|
+
fd = Fd(r)
|
6652
7039
|
if fd in dispatchers:
|
7040
|
+
dispatcher = dispatchers[fd]
|
6653
7041
|
try:
|
6654
|
-
dispatcher = dispatchers[fd]
|
6655
7042
|
log.debug('read event caused by %r', dispatcher)
|
6656
|
-
dispatcher.
|
7043
|
+
dispatcher.on_readable()
|
6657
7044
|
if not dispatcher.readable():
|
6658
7045
|
self._poller.unregister_readable(fd)
|
6659
7046
|
except ExitNow:
|
6660
7047
|
raise
|
6661
|
-
except Exception: # noqa
|
6662
|
-
|
7048
|
+
except Exception as exc: # noqa
|
7049
|
+
log.exception('Error in dispatcher: %r', dispatcher)
|
7050
|
+
dispatcher.on_error(exc)
|
6663
7051
|
else:
|
6664
7052
|
# if the fd is not in combined map, we should unregister it. otherwise, it will be polled every
|
6665
7053
|
# time, which may cause 100% cpu usage
|
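
The large hunk above moves ProcessGroupManager later in the amalgamation, makes it a HasDispatchers (flattening group -> process -> dispatchers), and keeps its Diff of added/changed/removed group configs; it also introduces IoManager, which aggregates dispatchers from an injected HasDispatchersList. The diff computation itself is simple set bookkeeping; a self-contained sketch (GroupConfig here is a stand-in for ProcessGroupConfig):

    import typing as ta


    class GroupConfig(ta.NamedTuple):  # stand-in for ProcessGroupConfig
        name: str
        command: str


    def diff_configs(
            cur: ta.Sequence[GroupConfig],
            new: ta.Sequence[GroupConfig],
    ) -> ta.Tuple[ta.List[GroupConfig], ta.List[GroupConfig], ta.List[GroupConfig]]:
        # same added/changed/removed split as ProcessGroupManager.diff
        cur_by_name = {c.name: c for c in cur}
        new_by_name = {c.name: c for c in new}
        added = [c for c in new if c.name not in cur_by_name]
        removed = [c for c in cur if c.name not in new_by_name]
        changed = [c for c in new if c != cur_by_name.get(c.name, c)]
        return added, changed, removed


    print(diff_configs(
        [GroupConfig('a', 'sleep 1'), GroupConfig('b', 'sleep 2')],
        [GroupConfig('b', 'sleep 3'), GroupConfig('c', 'sleep 4')],
    ))  # added: [c], changed: [b], removed: [a]
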
@@ -6669,18 +7057,20 @@ class Supervisor:
|
|
6669
7057
|
except Exception: # noqa
|
6670
7058
|
pass
|
6671
7059
|
|
6672
|
-
for
|
7060
|
+
for w in polled.w:
|
7061
|
+
fd = Fd(w)
|
6673
7062
|
if fd in dispatchers:
|
7063
|
+
dispatcher = dispatchers[fd]
|
6674
7064
|
try:
|
6675
|
-
dispatcher = dispatchers[fd]
|
6676
7065
|
log.debug('write event caused by %r', dispatcher)
|
6677
|
-
dispatcher.
|
7066
|
+
dispatcher.on_writable()
|
6678
7067
|
if not dispatcher.writable():
|
6679
7068
|
self._poller.unregister_writable(fd)
|
6680
7069
|
except ExitNow:
|
6681
7070
|
raise
|
6682
|
-
except Exception: # noqa
|
6683
|
-
|
7071
|
+
except Exception as exc: # noqa
|
7072
|
+
log.exception('Error in dispatcher: %r', dispatcher)
|
7073
|
+
dispatcher.on_error(exc)
|
6684
7074
|
else:
|
6685
7075
|
log.debug('unexpected write event from fd %r', fd)
|
6686
7076
|
try:
|
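
The poll path above now builds the readable and writable fd sets from every dispatcher, hands them to the FdIoPoller, and routes each polled fd back to its handler via on_readable()/on_writable(), unregistering fds whose handler stops being interested. A compact select-based sketch of that cycle, assuming handler objects with the same four methods as the dispatchers above (not the package's FdIoPoller API):

    import select
    import typing as ta

    HandlersByFd = ta.Dict[int, ta.Any]


    def poll_once(handlers: HandlersByFd, timeout: float = 1.0) -> None:
        r_fds = [fd for fd, h in handlers.items() if h.readable()]
        w_fds = [fd for fd, h in handlers.items() if h.writable()]
        r, w, _ = select.select(r_fds, w_fds, [], timeout)
        for fd in r:
            handlers[fd].on_readable()
        for fd in w:
            handlers[fd].on_writable()
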
@@ -6688,49 +7078,150 @@ class Supervisor:
|
|
6688
7078
|
except Exception: # noqa
|
6689
7079
|
pass
|
6690
7080
|
|
6691
|
-
for group in sorted_groups:
|
6692
|
-
for process in group:
|
6693
|
-
process.transition()
|
6694
7081
|
|
6695
|
-
|
6696
|
-
|
6697
|
-
return
|
7082
|
+
########################################
|
7083
|
+
# ../spawning.py
|
6698
7084
|
|
6699
|
-
pid, sts = self._context.waitpid()
|
6700
|
-
if not pid:
|
6701
|
-
return
|
6702
7085
|
|
6703
|
-
|
6704
|
-
|
6705
|
-
|
6706
|
-
|
6707
|
-
|
6708
|
-
process.finish(check_not_none(sts))
|
6709
|
-
del self._pid_history[pid]
|
7086
|
+
@dc.dataclass(frozen=True)
|
7087
|
+
class SpawnedProcess:
|
7088
|
+
pid: Pid
|
7089
|
+
pipes: ProcessPipes
|
7090
|
+
dispatchers: Dispatchers
|
6710
7091
|
|
6711
|
-
if not once:
|
6712
|
-
# keep reaping until no more kids to reap, but don't recurse infinitely
|
6713
|
-
self._reap(once=False, depth=depth + 1)
|
6714
7092
|
|
6715
|
-
|
6716
|
-
|
7093
|
+
class ProcessSpawnError(RuntimeError):
|
7094
|
+
pass
|
6717
7095
|
|
6718
|
-
if now is None:
|
6719
|
-
# now won't be None in unit tests
|
6720
|
-
now = time.time()
|
6721
7096
|
|
6722
|
-
|
6723
|
-
|
7097
|
+
class ProcessSpawning:
|
7098
|
+
@property
|
7099
|
+
@abc.abstractmethod
|
7100
|
+
def process(self) -> Process:
|
7101
|
+
raise NotImplementedError
|
6724
7102
|
|
6725
|
-
|
6726
|
-
|
6727
|
-
|
6728
|
-
|
7103
|
+
#
|
7104
|
+
|
7105
|
+
@abc.abstractmethod
|
7106
|
+
def spawn(self) -> SpawnedProcess: # Raises[ProcessSpawnError]
|
7107
|
+
raise NotImplementedError
|
7108
|
+
|
7109
|
+
|
7110
|
+
########################################
|
7111
|
+
# ../http.py
|
7112
|
+
|
7113
|
+
|
7114
|
+
##
|
7115
|
+
|
7116
|
+
|
7117
|
+
class SocketServerFdIoHandler(SocketFdIoHandler):
|
7118
|
+
def __init__(
|
7119
|
+
self,
|
7120
|
+
addr: SocketAddress,
|
7121
|
+
on_connect: ta.Callable[[socket.socket, SocketAddress], None],
|
7122
|
+
) -> None:
|
7123
|
+
sock = socket.create_server(addr)
|
7124
|
+
sock.setblocking(False)
|
7125
|
+
|
7126
|
+
super().__init__(addr, sock)
|
7127
|
+
|
7128
|
+
self._on_connect = on_connect
|
7129
|
+
|
7130
|
+
sock.listen(1)
|
7131
|
+
|
7132
|
+
def readable(self) -> bool:
|
7133
|
+
return True
|
7134
|
+
|
7135
|
+
def on_readable(self) -> None:
|
7136
|
+
cli_sock, cli_addr = check_not_none(self._sock).accept()
|
7137
|
+
cli_sock.setblocking(False)
|
7138
|
+
|
7139
|
+
self._on_connect(cli_sock, cli_addr)
|
6729
7140
|
|
6730
|
-
|
6731
|
-
|
6732
|
-
|
6733
|
-
|
7141
|
+
|
7142
|
+
##
|
7143
|
+
|
7144
|
+
|
7145
|
+
class HttpServer(HasDispatchers):
|
7146
|
+
class Address(ta.NamedTuple):
|
7147
|
+
a: SocketAddress
|
7148
|
+
|
7149
|
+
class Handler(ta.NamedTuple):
|
7150
|
+
h: HttpHandler
|
7151
|
+
|
7152
|
+
def __init__(
|
7153
|
+
self,
|
7154
|
+
handler: Handler,
|
7155
|
+
addr: Address = Address(('localhost', 8000)),
|
7156
|
+
) -> None:
|
7157
|
+
super().__init__()
|
7158
|
+
|
7159
|
+
self._handler = handler.h
|
7160
|
+
self._addr = addr.a
|
7161
|
+
|
7162
|
+
self._server = SocketServerFdIoHandler(self._addr, self._on_connect)
|
7163
|
+
|
7164
|
+
self._conns: ta.List[CoroHttpServerConnectionFdIoHandler] = []
|
7165
|
+
|
7166
|
+
def get_dispatchers(self) -> Dispatchers:
|
7167
|
+
l = []
|
7168
|
+
for c in self._conns:
|
7169
|
+
if not c.closed:
|
7170
|
+
l.append(c)
|
7171
|
+
self._conns = l
|
7172
|
+
return Dispatchers([
|
7173
|
+
self._server,
|
7174
|
+
*l,
|
7175
|
+
])
|
7176
|
+
|
7177
|
+
def _on_connect(self, sock: socket.socket, addr: SocketAddress) -> None:
|
7178
|
+
conn = CoroHttpServerConnectionFdIoHandler(
|
7179
|
+
addr,
|
7180
|
+
sock,
|
7181
|
+
self._handler,
|
7182
|
+
)
|
7183
|
+
|
7184
|
+
self._conns.append(conn)
|
7185
|
+
|
7186
|
+
|
7187
|
+
##
|
7188
|
+
|
7189
|
+
|
7190
|
+
class SupervisorHttpHandler:
|
7191
|
+
def __init__(
|
7192
|
+
self,
|
7193
|
+
*,
|
7194
|
+
groups: ProcessGroupManager,
|
7195
|
+
) -> None:
|
7196
|
+
super().__init__()
|
7197
|
+
|
7198
|
+
self._groups = groups
|
7199
|
+
|
7200
|
+
def handle(self, req: HttpHandlerRequest) -> HttpHandlerResponse:
|
7201
|
+
dct = {
|
7202
|
+
'method': req.method,
|
7203
|
+
'path': req.path,
|
7204
|
+
'data': len(req.data or b''),
|
7205
|
+
'groups': {
|
7206
|
+
g.name: {
|
7207
|
+
'processes': {
|
7208
|
+
p.name: {
|
7209
|
+
'pid': p.pid,
|
7210
|
+
}
|
7211
|
+
for p in g
|
7212
|
+
},
|
7213
|
+
}
|
7214
|
+
for g in self._groups
|
7215
|
+
},
|
7216
|
+
}
|
7217
|
+
|
7218
|
+
return HttpHandlerResponse(
|
7219
|
+
200,
|
7220
|
+
data=json.dumps(dct, **JSON_PRETTY_KWARGS).encode('utf-8') + b'\n',
|
7221
|
+
headers={
|
7222
|
+
'Content-Type': 'application/json',
|
7223
|
+
},
|
7224
|
+
)
|
6734
7225
|
|
6735
7226
|
|
6736
7227
|
########################################
|
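
The new http.py block above wraps the HttpServer's injected dependencies in single-field NamedTuples (HttpServer.Address, HttpServer.Handler) so the injector can distinguish them from any other SocketAddress or HttpHandler binding, and prunes closed connections each time get_dispatchers() is called. A tiny sketch of the wrapper pattern only (TinyServer and RawAddress are illustrative):

    import typing as ta

    RawAddress = ta.Tuple[str, int]


    class Address(ta.NamedTuple):
        a: RawAddress


    class Handler(ta.NamedTuple):
        h: ta.Callable[[str], str]


    class TinyServer:
        # mirrors HttpServer.__init__: unwrap the typed wrappers at the boundary
        def __init__(self, handler: Handler, addr: Address = Address(('localhost', 8000))) -> None:
            self._handler = handler.h
            self._addr = addr.a


    srv = TinyServer(Handler(lambda path: f'hello {path}'))
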
@@ -6752,7 +7243,7 @@ class ProcessImpl(Process):
|
|
6752
7243
|
config: ProcessConfig,
|
6753
7244
|
group: ProcessGroup,
|
6754
7245
|
*,
|
6755
|
-
|
7246
|
+
supervisor_states: SupervisorStateManager,
|
6756
7247
|
event_callbacks: EventCallbacks,
|
6757
7248
|
process_spawning_factory: ProcessSpawningFactory,
|
6758
7249
|
) -> None:
|
@@ -6761,7 +7252,7 @@ class ProcessImpl(Process):
|
|
6761
7252
|
self._config = config
|
6762
7253
|
self._group = group
|
6763
7254
|
|
6764
|
-
self.
|
7255
|
+
self._supervisor_states = supervisor_states
|
6765
7256
|
self._event_callbacks = event_callbacks
|
6766
7257
|
|
6767
7258
|
self._spawning = process_spawning_factory(self)
|
@@ -6792,7 +7283,7 @@ class ProcessImpl(Process):
|
|
6792
7283
|
#
|
6793
7284
|
|
6794
7285
|
def __repr__(self) -> str:
|
6795
|
-
return f'<Subprocess at {id(self)} with name {self._config.name} in state {self.
|
7286
|
+
return f'<Subprocess at {id(self)} with name {self._config.name} in state {self._state.name}>'
|
6796
7287
|
|
6797
7288
|
#
|
6798
7289
|
|
@@ -6814,10 +7305,6 @@ class ProcessImpl(Process):
|
|
6814
7305
|
|
6815
7306
|
#
|
6816
7307
|
|
6817
|
-
@property
|
6818
|
-
def context(self) -> ServerContext:
|
6819
|
-
return self._context
|
6820
|
-
|
6821
7308
|
@property
|
6822
7309
|
def state(self) -> ProcessState:
|
6823
7310
|
return self._state
|
@@ -6880,7 +7367,7 @@ class ProcessImpl(Process):
|
|
6880
7367
|
if stdin_fd is None:
|
6881
7368
|
raise OSError(errno.EPIPE, 'Process has no stdin channel')
|
6882
7369
|
|
6883
|
-
dispatcher = check_isinstance(self._dispatchers[stdin_fd],
|
7370
|
+
dispatcher = check_isinstance(self._dispatchers[stdin_fd], ProcessInputDispatcher)
|
6884
7371
|
if dispatcher.closed:
|
6885
7372
|
raise OSError(errno.EPIPE, "Process' stdin channel is closed")
|
6886
7373
|
|
@@ -7087,6 +7574,7 @@ class ProcessImpl(Process):
|
|
7087
7574
|
self._last_stop = now
|
7088
7575
|
|
7089
7576
|
if now > self._last_start:
|
7577
|
+
log.info(f'{now - self._last_start=}') # noqa
|
7090
7578
|
too_quickly = now - self._last_start < self._config.startsecs
|
7091
7579
|
else:
|
7092
7580
|
too_quickly = False
|
@@ -7150,18 +7638,13 @@ class ProcessImpl(Process):
|
|
7150
7638
|
self._pipes = ProcessPipes()
|
7151
7639
|
self._dispatchers = Dispatchers([])
|
7152
7640
|
|
7153
|
-
def get_state(self) -> ProcessState:
|
7154
|
-
return self._state
|
7155
|
-
|
7156
7641
|
def transition(self) -> None:
|
7157
7642
|
now = time.time()
|
7158
7643
|
state = self._state
|
7159
7644
|
|
7160
7645
|
self._check_and_adjust_for_system_clock_rollback(now)
|
7161
7646
|
|
7162
|
-
|
7163
|
-
|
7164
|
-
if self.context.state > SupervisorState.RESTARTING:
|
7647
|
+
if self._supervisor_states.state > SupervisorState.RESTARTING:
|
7165
7648
|
# dont start any processes if supervisor is shutting down
|
7166
7649
|
if state == ProcessState.EXITED:
|
7167
7650
|
if self._config.autorestart:
|
@@ -7192,14 +7675,14 @@ class ProcessImpl(Process):
|
|
7192
7675
|
self.check_in_state(ProcessState.STARTING)
|
7193
7676
|
self.change_state(ProcessState.RUNNING)
|
7194
7677
|
msg = ('entered RUNNING state, process has stayed up for > than %s seconds (startsecs)' % self._config.startsecs) # noqa
|
7195
|
-
|
7678
|
+
log.info('success: %s %s', self.name, msg)
|
7196
7679
|
|
7197
7680
|
if state == ProcessState.BACKOFF:
|
7198
7681
|
if self._backoff > self._config.startretries:
|
7199
7682
|
# BACKOFF -> FATAL if the proc has exceeded its number of retries
|
7200
7683
|
self.give_up()
|
7201
7684
|
msg = ('entered FATAL state, too many start retries too quickly')
|
7202
|
-
|
7685
|
+
log.info('gave up: %s %s', self.name, msg)
|
7203
7686
|
|
7204
7687
|
elif state == ProcessState.STOPPING:
|
7205
7688
|
time_left = self._delay - now
|
@@ -7221,15 +7704,74 @@ class ProcessImpl(Process):
             pass
 
 
+########################################
+# ../signals.py
+
+
+class SignalHandler:
+    def __init__(
+            self,
+            *,
+            states: SupervisorStateManager,
+            signal_receiver: SignalReceiver,
+            process_groups: ProcessGroupManager,
+    ) -> None:
+        super().__init__()
+
+        self._states = states
+        self._signal_receiver = signal_receiver
+        self._process_groups = process_groups
+
+    def set_signals(self) -> None:
+        self._signal_receiver.install(
+            signal.SIGTERM,
+            signal.SIGINT,
+            signal.SIGQUIT,
+            signal.SIGHUP,
+            signal.SIGCHLD,
+            signal.SIGUSR2,
+        )
+
+    def handle_signals(self) -> None:
+        sig = self._signal_receiver.get_signal()
+        if not sig:
+            return
+
+        if sig in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
+            log.warning('received %s indicating exit request', sig_name(sig))
+            self._states.set_state(SupervisorState.SHUTDOWN)
+
+        elif sig == signal.SIGHUP:
+            if self._states.state == SupervisorState.SHUTDOWN:
+                log.warning('ignored %s indicating restart request (shutdown in progress)', sig_name(sig))  # noqa
+            else:
+                log.warning('received %s indicating restart request', sig_name(sig))  # noqa
+                self._states.set_state(SupervisorState.RESTARTING)
+
+        elif sig == signal.SIGCHLD:
+            log.debug('received %s indicating a child quit', sig_name(sig))
+
+        elif sig == signal.SIGUSR2:
+            log.info('received %s indicating log reopen request', sig_name(sig))
+
+            for p in self._process_groups.all_processes():
+                for d in p.get_dispatchers():
+                    if isinstance(d, ProcessOutputDispatcher):
+                        d.reopen_logs()
+
+        else:
+            log.debug('received %s indicating nothing', sig_name(sig))
+
+
 ########################################
 # ../spawningimpl.py
 
 
-class
+class ProcessOutputDispatcherFactory(Func3[Process, ta.Type[ProcessCommunicationEvent], Fd, ProcessOutputDispatcher]):
     pass
 
 
-class
+class ProcessInputDispatcherFactory(Func3[Process, str, Fd, ProcessInputDispatcher]):
     pass
 
 
7777
|
|
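
The signals.py block above keeps real work out of the signal handler: a SignalReceiver only records which signal arrived, and the main loop later asks for it and translates it into a state change or a log reopen. A minimal sketch of that record-then-poll pattern (this SignalReceiver is a simplified stand-in, not the one used by the script):

    import signal
    import types
    import typing as ta


    class SignalReceiver:
        def __init__(self) -> None:
            self._signals: ta.List[int] = []

        def install(self, *sigs: int) -> None:
            for sig in sigs:
                signal.signal(sig, self._handle)

        def _handle(self, sig: int, frame: ta.Optional[types.FrameType]) -> None:
            # do as little as possible here; the main loop drains the queue
            self._signals.append(sig)

        def get_signal(self) -> ta.Optional[int]:
            return self._signals.pop(0) if self._signals else None
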
@@ -7247,8 +7789,8 @@ class ProcessSpawningImpl(ProcessSpawning):
|
|
7247
7789
|
server_config: ServerConfig,
|
7248
7790
|
pid_history: PidHistory,
|
7249
7791
|
|
7250
|
-
output_dispatcher_factory:
|
7251
|
-
input_dispatcher_factory:
|
7792
|
+
output_dispatcher_factory: ProcessOutputDispatcherFactory,
|
7793
|
+
input_dispatcher_factory: ProcessInputDispatcherFactory,
|
7252
7794
|
|
7253
7795
|
inherited_fds: ta.Optional[InheritedFds] = None,
|
7254
7796
|
) -> None:
|
@@ -7381,28 +7923,28 @@ class ProcessSpawningImpl(ProcessSpawning):
|
|
7381
7923
|
return exe, args
|
7382
7924
|
|
7383
7925
|
def _make_dispatchers(self, pipes: ProcessPipes) -> Dispatchers:
|
7384
|
-
dispatchers: ta.List[
|
7926
|
+
dispatchers: ta.List[FdIoHandler] = []
|
7385
7927
|
|
7386
7928
|
if pipes.stdout is not None:
|
7387
7929
|
dispatchers.append(check_isinstance(self._output_dispatcher_factory(
|
7388
7930
|
self.process,
|
7389
7931
|
ProcessCommunicationStdoutEvent,
|
7390
7932
|
pipes.stdout,
|
7391
|
-
),
|
7933
|
+
), ProcessOutputDispatcher))
|
7392
7934
|
|
7393
7935
|
if pipes.stderr is not None:
|
7394
7936
|
dispatchers.append(check_isinstance(self._output_dispatcher_factory(
|
7395
7937
|
self.process,
|
7396
7938
|
ProcessCommunicationStderrEvent,
|
7397
7939
|
pipes.stderr,
|
7398
|
-
),
|
7940
|
+
), ProcessOutputDispatcher))
|
7399
7941
|
|
7400
7942
|
if pipes.stdin is not None:
|
7401
7943
|
dispatchers.append(check_isinstance(self._input_dispatcher_factory(
|
7402
7944
|
self.process,
|
7403
7945
|
'stdin',
|
7404
7946
|
pipes.stdin,
|
7405
|
-
),
|
7947
|
+
), ProcessInputDispatcher))
|
7406
7948
|
|
7407
7949
|
return Dispatchers(dispatchers)
|
7408
7950
|
|
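
In _make_dispatchers above, the injected dispatcher factories are plain callables, and each result is immediately run through check_isinstance against the expected ABC so a mis-bound factory fails loudly rather than producing the wrong dispatcher type. A small sketch of that guard (check_isinstance's contract is assumed from its usage; OutputDispatcher is an illustrative stand-in):

    import typing as ta

    T = ta.TypeVar('T')


    def check_isinstance(v: ta.Any, ty: ta.Type[T]) -> T:
        # same contract as the lite check helper used above
        if not isinstance(v, ty):
            raise TypeError(v)
        return v


    class OutputDispatcher:  # stand-in for ProcessOutputDispatcher
        pass


    factory: ta.Callable[[int], ta.Any] = lambda fd: OutputDispatcher()
    dispatcher = check_isinstance(factory(1), OutputDispatcher)
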
@@ -7531,10 +8073,296 @@ def check_execv_args(
|
|
7531
8073
|
raise NoPermissionError(f'No permission to run command {exe!r}')
|
7532
8074
|
|
7533
8075
|
|
8076
|
+
########################################
|
8077
|
+
# ../supervisor.py
|
8078
|
+
|
8079
|
+
|
8080
|
+
##
|
8081
|
+
|
8082
|
+
|
8083
|
+
def timeslice(period: int, when: float) -> int:
|
8084
|
+
return int(when - (when % period))
|
8085
|
+
|
8086
|
+
|
8087
|
+
##
|
8088
|
+
|
8089
|
+
|
8090
|
+
class SupervisorStateManagerImpl(SupervisorStateManager):
|
8091
|
+
def __init__(self) -> None:
|
8092
|
+
super().__init__()
|
8093
|
+
|
8094
|
+
self._state: SupervisorState = SupervisorState.RUNNING
|
8095
|
+
|
8096
|
+
@property
|
8097
|
+
def state(self) -> SupervisorState:
|
8098
|
+
return self._state
|
8099
|
+
|
8100
|
+
def set_state(self, state: SupervisorState) -> None:
|
8101
|
+
self._state = state
|
8102
|
+
|
8103
|
+
|
8104
|
+
##
|
8105
|
+
|
8106
|
+
|
8107
|
+
class ProcessGroupFactory(Func1[ProcessGroupConfig, ProcessGroup]):
|
8108
|
+
pass
|
8109
|
+
|
8110
|
+
|
8111
|
+
class Supervisor:
|
8112
|
+
def __init__(
|
8113
|
+
self,
|
8114
|
+
*,
|
8115
|
+
config: ServerConfig,
|
8116
|
+
poller: FdIoPoller,
|
8117
|
+
process_groups: ProcessGroupManager,
|
8118
|
+
signal_handler: SignalHandler,
|
8119
|
+
event_callbacks: EventCallbacks,
|
8120
|
+
process_group_factory: ProcessGroupFactory,
|
8121
|
+
pid_history: PidHistory,
|
8122
|
+
setup: SupervisorSetup,
|
8123
|
+
states: SupervisorStateManager,
|
8124
|
+
io: IoManager,
|
8125
|
+
) -> None:
|
8126
|
+
super().__init__()
|
8127
|
+
|
8128
|
+
self._config = config
|
8129
|
+
self._poller = poller
|
8130
|
+
self._process_groups = process_groups
|
8131
|
+
self._signal_handler = signal_handler
|
8132
|
+
self._event_callbacks = event_callbacks
|
8133
|
+
self._process_group_factory = process_group_factory
|
8134
|
+
self._pid_history = pid_history
|
8135
|
+
self._setup = setup
|
8136
|
+
self._states = states
|
8137
|
+
self._io = io
|
8138
|
+
|
8139
|
+
self._ticks: ta.Dict[int, float] = {}
|
8140
|
+
self._stop_groups: ta.Optional[ta.List[ProcessGroup]] = None # list used for priority ordered shutdown
|
8141
|
+
self._stopping = False # set after we detect that we are handling a stop request
|
8142
|
+
self._last_shutdown_report = 0. # throttle for delayed process error reports at stop
|
8143
|
+
|
8144
|
+
#
|
8145
|
+
|
8146
|
+
@property
|
8147
|
+
def state(self) -> SupervisorState:
|
8148
|
+
return self._states.state
|
8149
|
+
|
8150
|
+
#
|
8151
|
+
|
8152
|
+
def add_process_group(self, config: ProcessGroupConfig) -> bool:
|
8153
|
+
if self._process_groups.get(config.name) is not None:
|
8154
|
+
return False
|
8155
|
+
|
8156
|
+
group = check_isinstance(self._process_group_factory(config), ProcessGroup)
|
8157
|
+
for process in group:
|
8158
|
+
process.after_setuid()
|
8159
|
+
|
8160
|
+
self._process_groups.add(group)
|
8161
|
+
|
8162
|
+
return True
|
8163
|
+
|
8164
|
+
def remove_process_group(self, name: str) -> bool:
|
8165
|
+
if self._process_groups[name].get_unstopped_processes():
|
8166
|
+
return False
|
8167
|
+
|
8168
|
+
self._process_groups.remove(name)
|
8169
|
+
|
8170
|
+
return True
|
8171
|
+
|
8172
|
+
#
|
8173
|
+
|
8174
|
+
def shutdown_report(self) -> ta.List[Process]:
|
8175
|
+
unstopped: ta.List[Process] = []
|
8176
|
+
|
8177
|
+
for group in self._process_groups:
|
8178
|
+
unstopped.extend(group.get_unstopped_processes())
|
8179
|
+
|
8180
|
+
if unstopped:
|
8181
|
+
# throttle 'waiting for x to die' reports
|
8182
|
+
now = time.time()
|
8183
|
+
if now > (self._last_shutdown_report + 3): # every 3 secs
|
8184
|
+
names = [p.config.name for p in unstopped]
|
8185
|
+
namestr = ', '.join(names)
|
8186
|
+
log.info('waiting for %s to die', namestr)
|
8187
|
+
self._last_shutdown_report = now
|
8188
|
+
for proc in unstopped:
|
8189
|
+
log.debug('%s state: %s', proc.config.name, proc.state.name)
|
8190
|
+
|
8191
|
+
return unstopped
|
8192
|
+
|
8193
|
+
#
|
8194
|
+
|
8195
|
+
def main(self, **kwargs: ta.Any) -> None:
|
8196
|
+
self._setup.setup()
|
8197
|
+
try:
|
8198
|
+
self.run(**kwargs)
|
8199
|
+
finally:
|
8200
|
+
self._setup.cleanup()
|
8201
|
+
|
8202
|
+
def run(
|
8203
|
+
self,
|
8204
|
+
*,
|
8205
|
+
callback: ta.Optional[ta.Callable[['Supervisor'], bool]] = None,
|
8206
|
+
) -> None:
|
8207
|
+
self._process_groups.clear()
|
8208
|
+
self._stop_groups = None # clear
|
8209
|
+
|
8210
|
+
self._event_callbacks.clear()
|
8211
|
+
|
8212
|
+
try:
|
8213
|
+
for config in self._config.groups or []:
|
8214
|
+
self.add_process_group(config)
|
8215
|
+
|
8216
|
+
self._signal_handler.set_signals()
|
8217
|
+
|
8218
|
+
self._event_callbacks.notify(SupervisorRunningEvent())
|
8219
|
+
|
8220
|
+
while True:
|
8221
|
+
if callback is not None and not callback(self):
|
8222
|
+
break
|
8223
|
+
|
8224
|
+
self._run_once()
|
8225
|
+
|
8226
|
+
finally:
|
8227
|
+
self._poller.close()
|
8228
|
+
|
8229
|
+
#
|
8230
|
+
|
8231
|
+
def _run_once(self) -> None:
|
8232
|
+
now = time.time()
|
8233
|
+
self._poll()
|
8234
|
+
log.info(f'Poll took {time.time() - now}') # noqa
|
8235
|
+
self._reap()
|
8236
|
+
self._signal_handler.handle_signals()
|
8237
|
+
self._tick()
|
8238
|
+
|
8239
|
+
if self._states.state < SupervisorState.RUNNING:
|
8240
|
+
self._ordered_stop_groups_phase_2()
|
8241
|
+
|
8242
|
+
def _ordered_stop_groups_phase_1(self) -> None:
|
8243
|
+
if self._stop_groups:
|
8244
|
+
# stop the last group (the one with the "highest" priority)
|
8245
|
+
self._stop_groups[-1].stop_all()
|
8246
|
+
|
8247
|
+
def _ordered_stop_groups_phase_2(self) -> None:
|
8248
|
+
# after phase 1 we've transitioned and reaped, let's see if we can remove the group we stopped from the
|
8249
|
+
# stop_groups queue.
|
8250
|
+
if self._stop_groups:
|
8251
|
+
# pop the last group (the one with the "highest" priority)
|
8252
|
+
group = self._stop_groups.pop()
|
8253
|
+
if group.get_unstopped_processes():
|
8254
|
+
# if any processes in the group aren't yet in a stopped state, we're not yet done shutting this group
|
8255
|
+
# down, so push it back on to the end of the stop group queue
|
8256
|
+
self._stop_groups.append(group)
|
8257
|
+
|
8258
|
+
def _poll(self) -> None:
|
8259
|
+
sorted_groups = list(self._process_groups)
|
8260
|
+
sorted_groups.sort()
|
8261
|
+
|
8262
|
+
if self._states.state < SupervisorState.RUNNING:
|
8263
|
+
if not self._stopping:
|
8264
|
+
# first time, set the stopping flag, do a notification and set stop_groups
|
8265
|
+
self._stopping = True
|
8266
|
+
self._stop_groups = sorted_groups[:]
|
8267
|
+
self._event_callbacks.notify(SupervisorStoppingEvent())
|
8268
|
+
|
8269
|
+
self._ordered_stop_groups_phase_1()
|
8270
|
+
|
8271
|
+
if not self.shutdown_report():
|
8272
|
+
# if there are no unstopped processes (we're done killing everything), it's OK to shutdown or reload
|
8273
|
+
raise ExitNow
|
8274
|
+
|
8275
|
+
self._io.poll()
|
8276
|
+
|
8277
|
+
for group in sorted_groups:
|
8278
|
+
for process in group:
|
8279
|
+
process.transition()
|
8280
|
+
|
8281
|
+
def _reap(self, *, once: bool = False, depth: int = 0) -> None:
|
8282
|
+
if depth >= 100:
|
8283
|
+
return
|
8284
|
+
|
8285
|
+
wp = waitpid()
|
8286
|
+
log.info(f'Waited pid: {wp}') # noqa
|
8287
|
+
if wp is None or not wp.pid:
|
8288
|
+
return
|
8289
|
+
|
8290
|
+
process = self._pid_history.get(wp.pid, None)
|
8291
|
+
if process is None:
|
8292
|
+
_, msg = decode_wait_status(wp.sts)
|
8293
|
+
log.info('reaped unknown pid %s (%s)', wp.pid, msg)
|
8294
|
+
else:
|
8295
|
+
process.finish(wp.sts)
|
8296
|
+
del self._pid_history[wp.pid]
|
8297
|
+
|
8298
|
+
if not once:
|
8299
|
+
# keep reaping until no more kids to reap, but don't recurse infinitely
|
8300
|
+
self._reap(once=False, depth=depth + 1)
|
8301
|
+
|
8302
|
+
def _tick(self, now: ta.Optional[float] = None) -> None:
|
8303
|
+
"""Send one or more 'tick' events when the timeslice related to the period for the event type rolls over"""
|
8304
|
+
|
8305
|
+
if now is None:
|
8306
|
+
# now won't be None in unit tests
|
8307
|
+
now = time.time()
|
8308
|
+
|
8309
|
+
for event in TICK_EVENTS:
|
8310
|
+
period = event.period
|
8311
|
+
|
8312
|
+
last_tick = self._ticks.get(period)
|
8313
|
+
if last_tick is None:
|
8314
|
+
# we just started up
|
8315
|
+
last_tick = self._ticks[period] = timeslice(period, now)
|
8316
|
+
|
8317
|
+
this_tick = timeslice(period, now)
|
8318
|
+
if this_tick != last_tick:
|
8319
|
+
self._ticks[period] = this_tick
|
8320
|
+
self._event_callbacks.notify(event(this_tick, self))
|
8321
|
+
|
8322
|
+
|
8323
|
+
##
|
8324
|
+
|
8325
|
+
|
8326
|
+
class WaitedPid(ta.NamedTuple):
|
8327
|
+
pid: Pid
|
8328
|
+
sts: Rc
|
8329
|
+
|
8330
|
+
|
8331
|
+
def waitpid() -> ta.Optional[WaitedPid]:
|
8332
|
+
# Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
|
8333
|
+
# still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
|
8334
|
+
# waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
|
8335
|
+
# normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
|
8336
|
+
# call. At least on Linux, this appears to be true, or at least stopping 50 processes at once never left zombies
|
8337
|
+
# lying around.
|
8338
|
+
try:
|
8339
|
+
pid, sts = os.waitpid(-1, os.WNOHANG)
|
8340
|
+
except OSError as exc:
|
8341
|
+
code = exc.args[0]
|
8342
|
+
if code not in (errno.ECHILD, errno.EINTR):
|
8343
|
+
log.critical('waitpid error %r; a process may not be cleaned up properly', code)
|
8344
|
+
if code == errno.EINTR:
|
8345
|
+
log.debug('EINTR during reap')
|
8346
|
+
return None
|
8347
|
+
else:
|
8348
|
+
return WaitedPid(pid, sts) # type: ignore
|
8349
|
+
|
8350
|
+
|
7534
8351
|
########################################
|
7535
8352
|
# ../inject.py
|
7536
8353
|
|
7537
8354
|
|
8355
|
+
@dc.dataclass(frozen=True)
|
8356
|
+
class _FdIoPollerDaemonizeListener(DaemonizeListener):
|
8357
|
+
_poller: FdIoPoller
|
8358
|
+
|
8359
|
+
def before_daemonize(self) -> None:
|
8360
|
+
self._poller.close()
|
8361
|
+
|
8362
|
+
def after_daemonize(self) -> None:
|
8363
|
+
self._poller.reopen()
|
8364
|
+
|
8365
|
+
|
7538
8366
|
def bind_server(
|
7539
8367
|
config: ServerConfig,
|
7540
8368
|
*,
|
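
The supervisor.py block above ends with a non-blocking waitpid() wrapper returning a WaitedPid(pid, sts) or None, and _reap calls it repeatedly (with a depth cap) until no exited children remain. A standalone sketch of that reaping loop (Waited/wait_once/reap_all are illustrative names):

    import errno
    import os
    import typing as ta


    class Waited(ta.NamedTuple):  # same shape as the WaitedPid added above
        pid: int
        sts: int


    def wait_once() -> ta.Optional[Waited]:
        try:
            pid, sts = os.waitpid(-1, os.WNOHANG)
        except OSError as e:
            if e.errno != errno.ECHILD:
                raise
            return None           # no children at all
        if pid == 0:
            return None           # children exist but none have exited yet
        return Waited(pid, sts)


    def reap_all() -> ta.List[Waited]:
        out: ta.List[Waited] = []
        while (w := wait_once()) is not None:
            out.append(w)
        return out
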
@@ -7544,24 +8372,30 @@ def bind_server(
     lst: ta.List[InjectorBindingOrBindings] = [
         inj.bind(config),
 
+        inj.bind_array(DaemonizeListener),
         inj.bind_array_type(DaemonizeListener, DaemonizeListeners),
 
         inj.bind(SupervisorSetupImpl, singleton=True),
         inj.bind(SupervisorSetup, to_key=SupervisorSetupImpl),
 
-        inj.bind(DaemonizeListener, array=True, to_key=Poller),
-
-        inj.bind(ServerContextImpl, singleton=True),
-        inj.bind(ServerContext, to_key=ServerContextImpl),
-
         inj.bind(EventCallbacks, singleton=True),
 
         inj.bind(SignalReceiver, singleton=True),
 
+        inj.bind(IoManager, singleton=True),
+        inj.bind_array(HasDispatchers),
+        inj.bind_array_type(HasDispatchers, HasDispatchersList),
+
         inj.bind(SignalHandler, singleton=True),
+
         inj.bind(ProcessGroupManager, singleton=True),
+        inj.bind(HasDispatchers, array=True, to_key=ProcessGroupManager),
+
         inj.bind(Supervisor, singleton=True),
 
+        inj.bind(SupervisorStateManagerImpl, singleton=True),
+        inj.bind(SupervisorStateManager, to_key=SupervisorStateManagerImpl),
+
         inj.bind(PidHistory()),
 
         inj.bind_factory(ProcessGroupImpl, ProcessGroupFactory),
|
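
The bindings above use the array pattern: every HasDispatchers provider (the process-group manager, the HTTP server) is registered into an array, the array is exposed as HasDispatchersList, and IoManager consumes that list, so new fd sources plug in without touching the IO loop. A hand-wired sketch of the equivalent composition, without the injector (all names here are illustrative):

    import typing as ta


    class HasDispatchers:
        def get_dispatchers(self) -> ta.Sequence[ta.Any]:
            return []


    class IoManager:
        def __init__(self, has_dispatchers_list: ta.Sequence[HasDispatchers]) -> None:
            self._list = has_dispatchers_list

        def all_dispatchers(self) -> ta.List[ta.Any]:
            return [d for hd in self._list for d in hd.get_dispatchers()]


    # hand-wired equivalent of the bind_array / bind_array_type calls above
    registry: ta.List[HasDispatchers] = []
    registry.append(HasDispatchers())   # e.g. the process-group manager
    registry.append(HasDispatchers())   # e.g. the http server
    io = IoManager(registry)
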
@@ -7569,8 +8403,8 @@ def bind_server(
|
|
7569
8403
|
|
7570
8404
|
inj.bind_factory(ProcessSpawningImpl, ProcessSpawningFactory),
|
7571
8405
|
|
7572
|
-
inj.bind_factory(
|
7573
|
-
inj.bind_factory(
|
8406
|
+
inj.bind_factory(ProcessOutputDispatcherImpl, ProcessOutputDispatcherFactory),
|
8407
|
+
inj.bind_factory(ProcessInputDispatcherImpl, ProcessInputDispatcherFactory),
|
7574
8408
|
]
|
7575
8409
|
|
7576
8410
|
#
|
@@ -7588,7 +8422,26 @@ def bind_server(
 
     #
 
-
+    poller_impl = next(filter(None, [
+        KqueueFdIoPoller,
+        PollFdIoPoller,
+        SelectFdIoPoller,
+    ]))
+    lst.append(inj.bind(poller_impl, key=FdIoPoller, singleton=True))
+    inj.bind(_FdIoPollerDaemonizeListener, array=True, singleton=True)
+
+    #
+
+    def _provide_http_handler(s: SupervisorHttpHandler) -> HttpServer.Handler:
+        return HttpServer.Handler(s.handle)
+
+    lst.extend([
+        inj.bind(HttpServer, singleton=True, eager=True),
+        inj.bind(HasDispatchers, array=True, to_key=HttpServer),
+
+        inj.bind(SupervisorHttpHandler, singleton=True),
+        inj.bind(_provide_http_handler),
+    ])
 
     #
 
8447
|
|
@@ -7627,7 +8480,7 @@ def main(
     if not no_logging:
         configure_standard_logging(
             'INFO',
-            handler_factory=journald_log_handler_factory if not args.no_journald else None,
+            handler_factory=journald_log_handler_factory if not (args.no_journald or is_debugger_attached()) else None,
         )
 
     #
|
#
|
@@ -7650,7 +8503,6 @@ def main(
             inherited_fds=inherited_fds,
         ))
 
-        context = injector[ServerContextImpl]
         supervisor = injector[Supervisor]
 
         try:
try:
|
@@ -7658,7 +8510,7 @@ def main(
         except ExitNow:
             pass
 
-        if
+        if supervisor.state < SupervisorState.RESTARTING:
            break
 
 