ominfra-0.0.0.dev126-py3-none-any.whl → ominfra-0.0.0.dev127-py3-none-any.whl

Files changed (34)
  1. ominfra/clouds/aws/auth.py +1 -1
  2. ominfra/deploy/_executor.py +1 -1
  3. ominfra/deploy/poly/_main.py +1 -1
  4. ominfra/pyremote/_runcommands.py +1 -1
  5. ominfra/scripts/journald2aws.py +2 -2
  6. ominfra/scripts/supervisor.py +1796 -1218
  7. ominfra/supervisor/collections.py +52 -0
  8. ominfra/supervisor/context.py +2 -336
  9. ominfra/supervisor/datatypes.py +1 -63
  10. ominfra/supervisor/dispatchers.py +20 -324
  11. ominfra/supervisor/dispatchersimpl.py +342 -0
  12. ominfra/supervisor/groups.py +33 -111
  13. ominfra/supervisor/groupsimpl.py +86 -0
  14. ominfra/supervisor/inject.py +44 -19
  15. ominfra/supervisor/main.py +1 -1
  16. ominfra/supervisor/pipes.py +83 -0
  17. ominfra/supervisor/poller.py +6 -3
  18. ominfra/supervisor/privileges.py +65 -0
  19. ominfra/supervisor/processes.py +18 -0
  20. ominfra/supervisor/{process.py → processesimpl.py} +96 -330
  21. ominfra/supervisor/setup.py +38 -0
  22. ominfra/supervisor/setupimpl.py +261 -0
  23. ominfra/supervisor/signals.py +24 -16
  24. ominfra/supervisor/spawning.py +31 -0
  25. ominfra/supervisor/spawningimpl.py +347 -0
  26. ominfra/supervisor/supervisor.py +52 -77
  27. ominfra/supervisor/types.py +101 -45
  28. ominfra/supervisor/users.py +64 -0
  29. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev127.dist-info}/METADATA +3 -3
  30. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev127.dist-info}/RECORD +34 -23
  31. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev127.dist-info}/LICENSE +0 -0
  32. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev127.dist-info}/WHEEL +0 -0
  33. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev127.dist-info}/entry_points.txt +0 -0
  34. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev127.dist-info}/top_level.txt +0 -0
ominfra/supervisor/supervisor.py

@@ -3,25 +3,27 @@ import signal
  import time
  import typing as ta

- from omlish.lite.cached import cached_nullary
  from omlish.lite.check import check_isinstance
  from omlish.lite.check import check_not_none
  from omlish.lite.logs import log
- from omlish.lite.typing import Func
+ from omlish.lite.typing import Func1

  from .configs import ProcessGroupConfig
  from .context import ServerContextImpl
+ from .dispatchers import Dispatchers
  from .events import TICK_EVENTS
  from .events import EventCallbacks
  from .events import SupervisorRunningEvent
  from .events import SupervisorStoppingEvent
  from .groups import ProcessGroup
- from .groups import ProcessGroups
+ from .groups import ProcessGroupManager
  from .poller import Poller
+ from .processes import PidHistory
+ from .setup import SupervisorSetup
  from .signals import SignalReceiver
  from .signals import sig_name
  from .states import SupervisorState
- from .types import Dispatcher
+ from .types import OutputDispatcher
  from .types import Process
  from .utils import ExitNow
  from .utils import as_string
@@ -38,7 +40,7 @@ class SignalHandler:
              *,
              context: ServerContextImpl,
              signal_receiver: SignalReceiver,
-             process_groups: ProcessGroups,
+             process_groups: ProcessGroupManager,
      ) -> None:
          super().__init__()

@@ -78,8 +80,10 @@ class SignalHandler:
          elif sig == signal.SIGUSR2:
              log.info('received %s indicating log reopen request', sig_name(sig))

-             for group in self._process_groups:
-                 group.reopen_logs()
+             for p in self._process_groups.all_processes():
+                 for d in p.get_dispatchers():
+                     if isinstance(d, OutputDispatcher):
+                         d.reopen_logs()

          else:
              log.debug('received %s indicating nothing', sig_name(sig))
@@ -88,7 +92,8 @@ class SignalHandler:
  ##


- ProcessGroupFactory = ta.NewType('ProcessGroupFactory', Func[ProcessGroup])  # (config: ProcessGroupConfig)
+ class ProcessGroupFactory(Func1[ProcessGroupConfig, ProcessGroup]):
+     pass


  class Supervisor:
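Note: ProcessGroupFactory changes from a ta.NewType over the single-parameter Func wrapper to a subclass of Func1, which names both the argument type (ProcessGroupConfig) and the return type (ProcessGroup); Supervisor.add_process_group calls it directly as self._process_group_factory(config). Below is a minimal sketch of the pattern, assuming Func1 is essentially a typed wrapper around a one-argument callable; the real class lives in omlish.lite.typing and is not shown in this diff, and IntNameFactory is a hypothetical stand-in.

import dataclasses as dc
import typing as ta

A = ta.TypeVar('A')
R = ta.TypeVar('R')


@dc.dataclass(frozen=True)
class Func1(ta.Generic[A, R]):  # assumed shape, not the actual omlish.lite.typing source
    fn: ta.Callable[[A], R]

    def __call__(self, a: A) -> R:
        return self.fn(a)


class IntNameFactory(Func1[int, str]):  # hypothetical stand-in for ProcessGroupFactory
    pass


factory = IntNameFactory(lambda n: f'group-{n}')
assert factory(3) == 'group-3'

Subclassing the wrapper rather than aliasing it with NewType presumably gives each factory a distinct nominal type to bind in the injector (inject.py also changes in this release) while keeping the argument type checked.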
@@ -97,10 +102,12 @@ class Supervisor:
              *,
              context: ServerContextImpl,
              poller: Poller,
-             process_groups: ProcessGroups,
+             process_groups: ProcessGroupManager,
              signal_handler: SignalHandler,
              event_callbacks: EventCallbacks,
              process_group_factory: ProcessGroupFactory,
+             pid_history: PidHistory,
+             setup: SupervisorSetup,
      ) -> None:
          super().__init__()

@@ -110,6 +117,8 @@ class Supervisor:
          self._signal_handler = signal_handler
          self._event_callbacks = event_callbacks
          self._process_group_factory = process_group_factory
+         self._pid_history = pid_history
+         self._setup = setup

          self._ticks: ta.Dict[int, float] = {}
          self._stop_groups: ta.Optional[ta.List[ProcessGroup]] = None  # list used for priority ordered shutdown
@@ -127,31 +136,13 @@ class Supervisor:

      #

-     class DiffToActive(ta.NamedTuple):
-         added: ta.List[ProcessGroupConfig]
-         changed: ta.List[ProcessGroupConfig]
-         removed: ta.List[ProcessGroupConfig]
-
-     def diff_to_active(self) -> DiffToActive:
-         new = self._context.config.groups or []
-         cur = [group.config for group in self._process_groups]
-
-         curdict = dict(zip([cfg.name for cfg in cur], cur))
-         newdict = dict(zip([cfg.name for cfg in new], new))
-
-         added = [cand for cand in new if cand.name not in curdict]
-         removed = [cand for cand in cur if cand.name not in newdict]
-
-         changed = [cand for cand in new if cand != curdict.get(cand.name, cand)]
-
-         return Supervisor.DiffToActive(added, changed, removed)
-
      def add_process_group(self, config: ProcessGroupConfig) -> bool:
          if self._process_groups.get(config.name) is not None:
              return False

          group = check_isinstance(self._process_group_factory(config), ProcessGroup)
-         group.after_setuid()
+         for process in group:
+             process.after_setuid()

          self._process_groups.add(group)

@@ -165,11 +156,7 @@ class Supervisor:

          return True

-     def get_process_map(self) -> ta.Dict[int, Dispatcher]:
-         process_map: ta.Dict[int, Dispatcher] = {}
-         for group in self._process_groups:
-             process_map.update(group.get_dispatchers())
-         return process_map
+     #

      def shutdown_report(self) -> ta.List[Process]:
          unstopped: ta.List[Process] = []
@@ -192,25 +179,12 @@ class Supervisor:

      #

-     def main(self) -> None:
-         self.setup()
-         self.run()
-
-     @cached_nullary
-     def setup(self) -> None:
-         if not self._context.first:
-             # prevent crash on libdispatch-based systems, at least for the first request
-             self._context.cleanup_fds()
-
-         self._context.set_uid_or_exit()
-
-         if self._context.first:
-             self._context.set_rlimits_or_exit()
-
-         # this sets the options.logger object delay logger instantiation until after setuid
-         if not self._context.config.nocleanup:
-             # clean up old automatic logs
-             self._context.clear_auto_child_logdir()
+     def main(self, **kwargs: ta.Any) -> None:
+         self._setup.setup()
+         try:
+             self.run(**kwargs)
+         finally:
+             self._setup.cleanup()

      def run(
              self,
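Note: the daemonization, uid/rlimit, pidfile, and log-cleanup work previously inlined in Supervisor.setup() now goes through an injected SupervisorSetup (new setup.py and setupimpl.py modules), and main() wraps run() in try/finally so cleanup always runs. A minimal sketch follows, assuming the interface consists of just the two methods the diff shows being called; the concrete behaviors listed in NoopLikeSetup are illustrative only.

import abc


class SupervisorSetup(abc.ABC):  # assumed to mirror ominfra/supervisor/setup.py, not shown in this diff
    @abc.abstractmethod
    def setup(self) -> None:
        raise NotImplementedError

    @abc.abstractmethod
    def cleanup(self) -> None:
        raise NotImplementedError


class NoopLikeSetup(SupervisorSetup):  # hypothetical stand-in
    def setup(self) -> None:
        print('daemonize, set uid and rlimits, write pidfile, clean old auto logs, ...')

    def cleanup(self) -> None:
        print('remove pidfile, restore state, ...')


def main_like(setup: SupervisorSetup) -> None:
    # mirrors the new Supervisor.main(): set up, run, always clean up
    setup.setup()
    try:
        pass  # the run loop would go here
    finally:
        setup.cleanup()


main_like(NoopLikeSetup())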
@@ -228,12 +202,6 @@ class Supervisor:

              self._signal_handler.set_signals()

-             if not self._context.config.nodaemon and self._context.first:
-                 self._context.daemonize()
-
-             # writing pid file needs to come *after* daemonizing or pid will be wrong
-             self._context.write_pidfile()
-
              self._event_callbacks.notify(SupervisorRunningEvent())

              while True:
@@ -243,7 +211,7 @@ class Supervisor:
                  self._run_once()

          finally:
-             self._context.cleanup()
+             self._poller.close()

      #

@@ -272,18 +240,24 @@ class Supervisor:
                  # down, so push it back on to the end of the stop group queue
                  self._stop_groups.append(group)

+     def get_dispatchers(self) -> Dispatchers:
+         return Dispatchers(
+             d
+             for p in self._process_groups.all_processes()
+             for d in p.get_dispatchers()
+         )
+
      def _poll(self) -> None:
-         combined_map = {}
-         combined_map.update(self.get_process_map())
+         dispatchers = self.get_dispatchers()

-         pgroups = list(self._process_groups)
-         pgroups.sort()
+         sorted_groups = list(self._process_groups)
+         sorted_groups.sort()

          if self._context.state < SupervisorState.RUNNING:
              if not self._stopping:
                  # first time, set the stopping flag, do a notification and set stop_groups
                  self._stopping = True
-                 self._stop_groups = pgroups[:]
+                 self._stop_groups = sorted_groups[:]
                  self._event_callbacks.notify(SupervisorStoppingEvent())

              self._ordered_stop_groups_phase_1()
@@ -292,7 +266,7 @@ class Supervisor:
                  # if there are no unstopped processes (we're done killing everything), it's OK to shutdown or reload
                  raise ExitNow

-         for fd, dispatcher in combined_map.items():
+         for fd, dispatcher in dispatchers.items():
              if dispatcher.readable():
                  self._poller.register_readable(fd)
              if dispatcher.writable():
@@ -302,9 +276,9 @@ class Supervisor:
          r, w = self._poller.poll(timeout)

          for fd in r:
-             if fd in combined_map:
+             if fd in dispatchers:
                  try:
-                     dispatcher = combined_map[fd]
+                     dispatcher = dispatchers[fd]
                      log.debug('read event caused by %r', dispatcher)
                      dispatcher.handle_read_event()
                      if not dispatcher.readable():
@@ -312,9 +286,9 @@ class Supervisor:
                  except ExitNow:
                      raise
                  except Exception:  # noqa
-                     combined_map[fd].handle_error()
+                     dispatchers[fd].handle_error()
              else:
-                 # if the fd is not in combined_map, we should unregister it. otherwise, it will be polled every
+                 # if the fd is not in combined map, we should unregister it. otherwise, it will be polled every
                  # time, which may cause 100% cpu usage
                  log.debug('unexpected read event from fd %r', fd)
                  try:
@@ -323,9 +297,9 @@ class Supervisor:
                      pass

          for fd in w:
-             if fd in combined_map:
+             if fd in dispatchers:
                  try:
-                     dispatcher = combined_map[fd]
+                     dispatcher = dispatchers[fd]
                      log.debug('write event caused by %r', dispatcher)
                      dispatcher.handle_write_event()
                      if not dispatcher.writable():
@@ -333,7 +307,7 @@ class Supervisor:
                  except ExitNow:
                      raise
                  except Exception:  # noqa
-                     combined_map[fd].handle_error()
+                     dispatchers[fd].handle_error()
              else:
                  log.debug('unexpected write event from fd %r', fd)
                  try:
@@ -341,8 +315,9 @@ class Supervisor:
                  except Exception:  # noqa
                      pass

-         for group in pgroups:
-             group.transition()
+         for group in sorted_groups:
+             for process in group:
+                 process.transition()

      def _reap(self, *, once: bool = False, depth: int = 0) -> None:
          if depth >= 100:
@@ -352,13 +327,13 @@ class Supervisor:
          if not pid:
              return

-         process = self._context.pid_history.get(pid, None)
+         process = self._pid_history.get(pid, None)
          if process is None:
              _, msg = decode_wait_status(check_not_none(sts))
              log.info('reaped unknown pid %s (%s)', pid, msg)
          else:
              process.finish(check_not_none(sts))
-             del self._pid_history[pid]
+             del self._pid_history[pid]

          if not once:
              # keep reaping until no more kids to reap, but don't recurse infinitely
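Note: the polling loop no longer builds a combined_map dict via get_process_map(); it gathers every process's dispatchers into a Dispatchers object used like a mapping keyed by file descriptor (the loop relies on membership tests, indexing, and .items()), and reaping now consults an injected PidHistory rather than context.pid_history. A toy sketch of the fd-keyed collection follows, under the assumption that Dispatchers (defined in the reworked dispatchers.py, not shown here) is roughly a dict from Dispatcher.fd to the dispatcher.

import typing as ta


class FakeDispatcher:  # hypothetical stand-in exposing only the fd this sketch needs
    def __init__(self, fd: int) -> None:
        self.fd = fd


class Dispatchers:  # assumed shape, not the actual ominfra implementation
    def __init__(self, dispatchers: ta.Iterable[FakeDispatcher]) -> None:
        self._by_fd = {d.fd: d for d in dispatchers}

    def __contains__(self, fd: int) -> bool:
        return fd in self._by_fd

    def __getitem__(self, fd: int) -> FakeDispatcher:
        return self._by_fd[fd]

    def items(self) -> ta.ItemsView[int, FakeDispatcher]:
        return self._by_fd.items()


ds = Dispatchers(FakeDispatcher(fd) for fd in (3, 4))
assert 3 in ds and ds[4].fd == 4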
ominfra/supervisor/types.py

@@ -3,6 +3,7 @@ import abc
  import functools
  import typing as ta

+ from .collections import KeyedCollectionAccessors
  from .configs import ProcessConfig
  from .configs import ProcessGroupConfig
  from .configs import ServerConfig
@@ -10,6 +11,36 @@ from .states import ProcessState
  from .states import SupervisorState


+ if ta.TYPE_CHECKING:
+     from .dispatchers import Dispatchers
+
+
+ ##
+
+
+ ServerEpoch = ta.NewType('ServerEpoch', int)
+
+
+ ##
+
+
+ @functools.total_ordering
+ class ConfigPriorityOrdered(abc.ABC):
+     @property
+     @abc.abstractmethod
+     def config(self) -> ta.Any:
+         raise NotImplementedError
+
+     def __lt__(self, other):
+         return self.config.priority < other.config.priority
+
+     def __eq__(self, other):
+         return self.config.priority == other.config.priority
+
+
+ ##
+
+
  class ServerContext(abc.ABC):
      @property
      @abc.abstractmethod
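Note: the __lt__/__eq__ pair previously duplicated on Process and ProcessGroup moves into the ConfigPriorityOrdered mixin above, so anything exposing a config with a priority attribute becomes sortable (the supervisor sorts groups for priority-ordered shutdown). A small self-contained example of the same pattern; DemoConfig and DemoGroup are illustrative, not part of the package.

import abc
import dataclasses as dc
import functools
import typing as ta


@functools.total_ordering
class ConfigPriorityOrdered(abc.ABC):
    @property
    @abc.abstractmethod
    def config(self) -> ta.Any:
        raise NotImplementedError

    def __lt__(self, other):
        return self.config.priority < other.config.priority

    def __eq__(self, other):
        return self.config.priority == other.config.priority


@dc.dataclass(frozen=True)
class DemoConfig:  # illustrative
    name: str
    priority: int


class DemoGroup(ConfigPriorityOrdered):  # illustrative
    def __init__(self, config: DemoConfig) -> None:
        self._config = config

    @property
    def config(self) -> DemoConfig:
        return self._config


groups = [DemoGroup(DemoConfig('b', 999)), DemoGroup(DemoConfig('a', 100))]
assert [g.config.name for g in sorted(groups)] == ['a', 'b']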
@@ -31,7 +62,42 @@ class ServerContext(abc.ABC):
          raise NotImplementedError


+ ##
+
+
  class Dispatcher(abc.ABC):
+     @property
+     @abc.abstractmethod
+     def process(self) -> 'Process':
+         raise NotImplementedError
+
+     @property
+     @abc.abstractmethod
+     def channel(self) -> str:
+         raise NotImplementedError
+
+     @property
+     @abc.abstractmethod
+     def fd(self) -> int:
+         raise NotImplementedError
+
+     @property
+     @abc.abstractmethod
+     def closed(self) -> bool:
+         raise NotImplementedError
+
+     #
+
+     @abc.abstractmethod
+     def close(self) -> None:
+         raise NotImplementedError
+
+     @abc.abstractmethod
+     def handle_error(self) -> None:
+         raise NotImplementedError
+
+     #
+
      @abc.abstractmethod
      def readable(self) -> bool:
          raise NotImplementedError
@@ -40,26 +106,25 @@ class Dispatcher(abc.ABC):
      def writable(self) -> bool:
          raise NotImplementedError

+     #
+
      def handle_read_event(self) -> None:
          raise TypeError

      def handle_write_event(self) -> None:
          raise TypeError

+
+ class OutputDispatcher(Dispatcher, abc.ABC):
      @abc.abstractmethod
-     def handle_error(self) -> None:
+     def remove_logs(self) -> None:
          raise NotImplementedError

-     @property
      @abc.abstractmethod
-     def closed(self) -> bool:
+     def reopen_logs(self) -> None:
          raise NotImplementedError


- class OutputDispatcher(Dispatcher, abc.ABC):
-     pass
-
-
  class InputDispatcher(Dispatcher, abc.ABC):
      @abc.abstractmethod
      def write(self, chars: ta.Union[bytes, str]) -> None:
@@ -70,11 +135,13 @@ class InputDispatcher(Dispatcher, abc.ABC):
          raise NotImplementedError


- @functools.total_ordering
- class Process(abc.ABC):
+ ##
+
+
+ class Process(ConfigPriorityOrdered, abc.ABC):
      @property
      @abc.abstractmethod
-     def pid(self) -> int:
+     def name(self) -> str:
          raise NotImplementedError

      @property
@@ -82,27 +149,25 @@ class Process(abc.ABC):
      def config(self) -> ProcessConfig:
          raise NotImplementedError

-     def __lt__(self, other):
-         return self.config.priority < other.config.priority
-
-     def __eq__(self, other):
-         return self.config.priority == other.config.priority
-
      @property
      @abc.abstractmethod
-     def context(self) -> ServerContext:
+     def group(self) -> 'ProcessGroup':
          raise NotImplementedError

+     @property
      @abc.abstractmethod
-     def finish(self, sts: int) -> None:
+     def pid(self) -> int:
          raise NotImplementedError

+     #
+
+     @property
      @abc.abstractmethod
-     def remove_logs(self) -> None:
+     def context(self) -> ServerContext:
          raise NotImplementedError

      @abc.abstractmethod
-     def reopen_logs(self) -> None:
+     def finish(self, sts: int) -> None:
          raise NotImplementedError

      @abc.abstractmethod
@@ -122,50 +187,41 @@ class Process(abc.ABC):
          raise NotImplementedError

      @abc.abstractmethod
-     def create_auto_child_logs(self) -> None:
-         raise NotImplementedError
-
-     @abc.abstractmethod
-     def get_dispatchers(self) -> ta.Mapping[int, Dispatcher]:
+     def after_setuid(self) -> None:
          raise NotImplementedError

-
- @functools.total_ordering
- class ProcessGroup(abc.ABC):
-     @property
      @abc.abstractmethod
-     def config(self) -> ProcessGroupConfig:
+     def get_dispatchers(self) -> 'Dispatchers':
          raise NotImplementedError

-     def __lt__(self, other):
-         return self.config.priority < other.config.priority

-     def __eq__(self, other):
-         return self.config.priority == other.config.priority
+ ##

-     @abc.abstractmethod
-     def transition(self) -> None:
-         raise NotImplementedError
-
-     @abc.abstractmethod
-     def stop_all(self) -> None:
-         raise NotImplementedError

+ class ProcessGroup(
+         ConfigPriorityOrdered,
+         KeyedCollectionAccessors[str, Process],
+         abc.ABC,
+ ):
      @property
      @abc.abstractmethod
      def name(self) -> str:
          raise NotImplementedError

+     @property
      @abc.abstractmethod
-     def before_remove(self) -> None:
+     def config(self) -> ProcessGroupConfig:
          raise NotImplementedError

+     @property
      @abc.abstractmethod
-     def get_dispatchers(self) -> ta.Mapping[int, Dispatcher]:
+     def by_name(self) -> ta.Mapping[str, Process]:
          raise NotImplementedError

+     #
+
      @abc.abstractmethod
-     def reopen_logs(self) -> None:
+     def stop_all(self) -> None:
          raise NotImplementedError

      @abc.abstractmethod
@@ -173,5 +229,5 @@ class ProcessGroup(abc.ABC):
          raise NotImplementedError

      @abc.abstractmethod
-     def after_setuid(self) -> None:
+     def before_remove(self) -> None:
          raise NotImplementedError
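Note: ProcessGroup now also mixes in KeyedCollectionAccessors[str, Process] from the new collections.py, which is what lets supervisor.py iterate a group's processes directly (for process in group: ...) and what the by_name property above pairs with. The real helper is not shown in this diff; the toy version below only mirrors the usage visible here and is an assumption about its shape.

import typing as ta

K = ta.TypeVar('K')
V = ta.TypeVar('V')


class KeyedCollectionAccessors(ta.Generic[K, V]):  # assumed shape, not the actual ominfra code
    @property
    def by_name(self) -> ta.Mapping[K, V]:
        raise NotImplementedError

    def __iter__(self) -> ta.Iterator[V]:
        return iter(self.by_name.values())

    def __len__(self) -> int:
        return len(self.by_name)

    def get(self, key: K) -> ta.Optional[V]:
        return self.by_name.get(key)


class DemoGroup(KeyedCollectionAccessors[str, str]):  # illustrative; real groups hold Process objects
    def __init__(self, children: ta.Mapping[str, str]) -> None:
        self._children = dict(children)

    @property
    def by_name(self) -> ta.Mapping[str, str]:
        return self._children


g = DemoGroup({'web': 'web-1', 'worker': 'worker-1'})
assert len(g) == 2 and g.get('web') == 'web-1' and sorted(g) == ['web-1', 'worker-1']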
ominfra/supervisor/users.py (new file)

@@ -0,0 +1,64 @@
+ # ruff: noqa: UP007
+ import dataclasses as dc
+ import grp
+ import pwd
+
+
+ ##
+
+
+ def name_to_uid(name: str) -> int:
+     try:
+         uid = int(name)
+     except ValueError:
+         try:
+             pwdrec = pwd.getpwnam(name)
+         except KeyError:
+             raise ValueError(f'Invalid user name {name}')  # noqa
+         uid = pwdrec[2]
+     else:
+         try:
+             pwd.getpwuid(uid)  # check if uid is valid
+         except KeyError:
+             raise ValueError(f'Invalid user id {name}')  # noqa
+     return uid
+
+
+ def name_to_gid(name: str) -> int:
+     try:
+         gid = int(name)
+     except ValueError:
+         try:
+             grprec = grp.getgrnam(name)
+         except KeyError:
+             raise ValueError(f'Invalid group name {name}')  # noqa
+         gid = grprec[2]
+     else:
+         try:
+             grp.getgrgid(gid)  # check if gid is valid
+         except KeyError:
+             raise ValueError(f'Invalid group id {name}')  # noqa
+     return gid
+
+
+ def gid_for_uid(uid: int) -> int:
+     pwrec = pwd.getpwuid(uid)
+     return pwrec[3]
+
+
+ ##
+
+
+ @dc.dataclass(frozen=True)
+ class User:
+     name: str
+     uid: int
+     gid: int
+
+
+ def get_user(name: str) -> User:
+     return User(
+         name=name,
+         uid=(uid := name_to_uid(name)),
+         gid=gid_for_uid(uid),
+     )
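Note: the new users.py resolves a user given by name, or as a numeric string, into a frozen User record carrying the name, uid, and the account's primary gid, raising ValueError for unknown users or groups. Example usage; the 'root' lookup is only an illustration and actual results depend on the local passwd database.

from ominfra.supervisor.users import get_user

u = get_user('root')  # looks up the name via pwd.getpwnam, then the primary gid via pwd.getpwuid
print(u)              # e.g. User(name='root', uid=0, gid=0) on most Unix systems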
{ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev127.dist-info}/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ominfra
- Version: 0.0.0.dev126
+ Version: 0.0.0.dev127
  Summary: ominfra
  Author: wrmsr
  License: BSD-3-Clause
@@ -12,8 +12,8 @@ Classifier: Operating System :: OS Independent
  Classifier: Operating System :: POSIX
  Requires-Python: >=3.12
  License-File: LICENSE
- Requires-Dist: omdev==0.0.0.dev126
- Requires-Dist: omlish==0.0.0.dev126
+ Requires-Dist: omdev==0.0.0.dev127
+ Requires-Dist: omlish==0.0.0.dev127
  Provides-Extra: all
  Requires-Dist: paramiko~=3.5; extra == "all"
  Requires-Dist: asyncssh~=2.18; extra == "all"