ominfra-0.0.0.dev126-py3-none-any.whl → ominfra-0.0.0.dev128-py3-none-any.whl

Files changed (44)
  1. ominfra/clouds/aws/auth.py +1 -1
  2. ominfra/deploy/_executor.py +1 -1
  3. ominfra/deploy/poly/_main.py +1 -1
  4. ominfra/pyremote/_runcommands.py +1 -1
  5. ominfra/scripts/journald2aws.py +2 -2
  6. ominfra/scripts/supervisor.py +4736 -4166
  7. ominfra/supervisor/configs.py +34 -11
  8. ominfra/supervisor/context.py +7 -345
  9. ominfra/supervisor/dispatchers.py +21 -324
  10. ominfra/supervisor/dispatchersimpl.py +343 -0
  11. ominfra/supervisor/groups.py +33 -111
  12. ominfra/supervisor/groupsimpl.py +86 -0
  13. ominfra/supervisor/inject.py +45 -20
  14. ominfra/supervisor/main.py +3 -3
  15. ominfra/supervisor/pipes.py +85 -0
  16. ominfra/supervisor/poller.py +42 -38
  17. ominfra/supervisor/privileges.py +65 -0
  18. ominfra/supervisor/process.py +6 -742
  19. ominfra/supervisor/processimpl.py +516 -0
  20. ominfra/supervisor/setup.py +38 -0
  21. ominfra/supervisor/setupimpl.py +262 -0
  22. ominfra/supervisor/spawning.py +32 -0
  23. ominfra/supervisor/spawningimpl.py +350 -0
  24. ominfra/supervisor/supervisor.py +67 -84
  25. ominfra/supervisor/types.py +101 -47
  26. ominfra/supervisor/utils/__init__.py +0 -0
  27. ominfra/supervisor/utils/collections.py +52 -0
  28. ominfra/supervisor/utils/diag.py +31 -0
  29. ominfra/supervisor/utils/fds.py +46 -0
  30. ominfra/supervisor/utils/fs.py +47 -0
  31. ominfra/supervisor/utils/os.py +45 -0
  32. ominfra/supervisor/utils/ostypes.py +9 -0
  33. ominfra/supervisor/utils/signals.py +60 -0
  34. ominfra/supervisor/utils/strings.py +105 -0
  35. ominfra/supervisor/utils/users.py +67 -0
  36. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev128.dist-info}/METADATA +3 -3
  37. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev128.dist-info}/RECORD +41 -25
  38. ominfra/supervisor/datatypes.py +0 -175
  39. ominfra/supervisor/signals.py +0 -52
  40. ominfra/supervisor/utils.py +0 -206
  41. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev128.dist-info}/LICENSE +0 -0
  42. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev128.dist-info}/WHEEL +0 -0
  43. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev128.dist-info}/entry_points.txt +0 -0
  44. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev128.dist-info}/top_level.txt +0 -0

ominfra/supervisor/supervisor.py

@@ -3,30 +3,40 @@ import signal
  import time
  import typing as ta

- from omlish.lite.cached import cached_nullary
  from omlish.lite.check import check_isinstance
  from omlish.lite.check import check_not_none
  from omlish.lite.logs import log
- from omlish.lite.typing import Func
+ from omlish.lite.typing import Func1

  from .configs import ProcessGroupConfig
  from .context import ServerContextImpl
+ from .dispatchers import Dispatchers
  from .events import TICK_EVENTS
  from .events import EventCallbacks
  from .events import SupervisorRunningEvent
  from .events import SupervisorStoppingEvent
  from .groups import ProcessGroup
- from .groups import ProcessGroups
+ from .groups import ProcessGroupManager
  from .poller import Poller
- from .signals import SignalReceiver
- from .signals import sig_name
+ from .process import PidHistory
+ from .setup import SupervisorSetup
  from .states import SupervisorState
- from .types import Dispatcher
+ from .types import OutputDispatcher
  from .types import Process
- from .utils import ExitNow
- from .utils import as_string
- from .utils import decode_wait_status
- from .utils import timeslice
+ from .utils.os import decode_wait_status
+ from .utils.signals import SignalReceiver
+ from .utils.signals import sig_name
+
+
+ ##
+
+
+ class ExitNow(Exception):  # noqa
+     pass
+
+
+ def timeslice(period: int, when: float) -> int:
+     return int(when - (when % period))


  ##
@@ -38,7 +48,7 @@ class SignalHandler:
              *,
              context: ServerContextImpl,
              signal_receiver: SignalReceiver,
-             process_groups: ProcessGroups,
+             process_groups: ProcessGroupManager,
      ) -> None:
          super().__init__()

@@ -78,8 +88,10 @@ class SignalHandler:
          elif sig == signal.SIGUSR2:
              log.info('received %s indicating log reopen request', sig_name(sig))

-             for group in self._process_groups:
-                 group.reopen_logs()
+             for p in self._process_groups.all_processes():
+                 for d in p.get_dispatchers():
+                     if isinstance(d, OutputDispatcher):
+                         d.reopen_logs()

          else:
              log.debug('received %s indicating nothing', sig_name(sig))
@@ -88,7 +100,8 @@ class SignalHandler:
  ##


- ProcessGroupFactory = ta.NewType('ProcessGroupFactory', Func[ProcessGroup])  # (config: ProcessGroupConfig)
+ class ProcessGroupFactory(Func1[ProcessGroupConfig, ProcessGroup]):
+     pass


  class Supervisor:
@@ -97,10 +110,12 @@ class Supervisor:
              *,
              context: ServerContextImpl,
              poller: Poller,
-             process_groups: ProcessGroups,
+             process_groups: ProcessGroupManager,
              signal_handler: SignalHandler,
              event_callbacks: EventCallbacks,
              process_group_factory: ProcessGroupFactory,
+             pid_history: PidHistory,
+             setup: SupervisorSetup,
      ) -> None:
          super().__init__()

@@ -110,6 +125,8 @@ class Supervisor:
          self._signal_handler = signal_handler
          self._event_callbacks = event_callbacks
          self._process_group_factory = process_group_factory
+         self._pid_history = pid_history
+         self._setup = setup

          self._ticks: ta.Dict[int, float] = {}
          self._stop_groups: ta.Optional[ta.List[ProcessGroup]] = None  # list used for priority ordered shutdown
@@ -127,31 +144,13 @@ class Supervisor:

      #

-     class DiffToActive(ta.NamedTuple):
-         added: ta.List[ProcessGroupConfig]
-         changed: ta.List[ProcessGroupConfig]
-         removed: ta.List[ProcessGroupConfig]
-
-     def diff_to_active(self) -> DiffToActive:
-         new = self._context.config.groups or []
-         cur = [group.config for group in self._process_groups]
-
-         curdict = dict(zip([cfg.name for cfg in cur], cur))
-         newdict = dict(zip([cfg.name for cfg in new], new))
-
-         added = [cand for cand in new if cand.name not in curdict]
-         removed = [cand for cand in cur if cand.name not in newdict]
-
-         changed = [cand for cand in new if cand != curdict.get(cand.name, cand)]
-
-         return Supervisor.DiffToActive(added, changed, removed)
-
      def add_process_group(self, config: ProcessGroupConfig) -> bool:
          if self._process_groups.get(config.name) is not None:
              return False

          group = check_isinstance(self._process_group_factory(config), ProcessGroup)
-         group.after_setuid()
+         for process in group:
+             process.after_setuid()

          self._process_groups.add(group)

@@ -165,11 +164,7 @@ class Supervisor:

          return True

-     def get_process_map(self) -> ta.Dict[int, Dispatcher]:
-         process_map: ta.Dict[int, Dispatcher] = {}
-         for group in self._process_groups:
-             process_map.update(group.get_dispatchers())
-         return process_map
+     #

      def shutdown_report(self) -> ta.List[Process]:
          unstopped: ta.List[Process] = []
@@ -181,7 +176,7 @@ class Supervisor:
              # throttle 'waiting for x to die' reports
              now = time.time()
              if now > (self._last_shutdown_report + 3):  # every 3 secs
-                 names = [as_string(p.config.name) for p in unstopped]
+                 names = [p.config.name for p in unstopped]
                  namestr = ', '.join(names)
                  log.info('waiting for %s to die', namestr)
                  self._last_shutdown_report = now
@@ -192,25 +187,12 @@ class Supervisor:

      #

-     def main(self) -> None:
-         self.setup()
-         self.run()
-
-     @cached_nullary
-     def setup(self) -> None:
-         if not self._context.first:
-             # prevent crash on libdispatch-based systems, at least for the first request
-             self._context.cleanup_fds()
-
-         self._context.set_uid_or_exit()
-
-         if self._context.first:
-             self._context.set_rlimits_or_exit()
-
-         # this sets the options.logger object delay logger instantiation until after setuid
-         if not self._context.config.nocleanup:
-             # clean up old automatic logs
-             self._context.clear_auto_child_logdir()
+     def main(self, **kwargs: ta.Any) -> None:
+         self._setup.setup()
+         try:
+             self.run(**kwargs)
+         finally:
+             self._setup.cleanup()

      def run(
              self,
@@ -228,12 +210,6 @@ class Supervisor:

          self._signal_handler.set_signals()

-         if not self._context.config.nodaemon and self._context.first:
-             self._context.daemonize()
-
-         # writing pid file needs to come *after* daemonizing or pid will be wrong
-         self._context.write_pidfile()
-
          self._event_callbacks.notify(SupervisorRunningEvent())

          while True:
@@ -243,7 +219,7 @@ class Supervisor:
                  self._run_once()

          finally:
-             self._context.cleanup()
+             self._poller.close()

      #

@@ -272,18 +248,24 @@ class Supervisor:
                  # down, so push it back on to the end of the stop group queue
                  self._stop_groups.append(group)

+     def get_dispatchers(self) -> Dispatchers:
+         return Dispatchers(
+             d
+             for p in self._process_groups.all_processes()
+             for d in p.get_dispatchers()
+         )
+
      def _poll(self) -> None:
-         combined_map = {}
-         combined_map.update(self.get_process_map())
+         dispatchers = self.get_dispatchers()

-         pgroups = list(self._process_groups)
-         pgroups.sort()
+         sorted_groups = list(self._process_groups)
+         sorted_groups.sort()

          if self._context.state < SupervisorState.RUNNING:
              if not self._stopping:
                  # first time, set the stopping flag, do a notification and set stop_groups
                  self._stopping = True
-                 self._stop_groups = pgroups[:]
+                 self._stop_groups = sorted_groups[:]
                  self._event_callbacks.notify(SupervisorStoppingEvent())

              self._ordered_stop_groups_phase_1()
@@ -292,7 +274,7 @@ class Supervisor:
                  # if there are no unstopped processes (we're done killing everything), it's OK to shutdown or reload
                  raise ExitNow

-         for fd, dispatcher in combined_map.items():
+         for fd, dispatcher in dispatchers.items():
              if dispatcher.readable():
                  self._poller.register_readable(fd)
              if dispatcher.writable():
@@ -302,9 +284,9 @@ class Supervisor:
          r, w = self._poller.poll(timeout)

          for fd in r:
-             if fd in combined_map:
+             if fd in dispatchers:
                  try:
-                     dispatcher = combined_map[fd]
+                     dispatcher = dispatchers[fd]
                      log.debug('read event caused by %r', dispatcher)
                      dispatcher.handle_read_event()
                      if not dispatcher.readable():
@@ -312,9 +294,9 @@ class Supervisor:
                  except ExitNow:
                      raise
                  except Exception:  # noqa
-                     combined_map[fd].handle_error()
+                     dispatchers[fd].handle_error()
              else:
-                 # if the fd is not in combined_map, we should unregister it. otherwise, it will be polled every
+                 # if the fd is not in combined map, we should unregister it. otherwise, it will be polled every
                  # time, which may cause 100% cpu usage
                  log.debug('unexpected read event from fd %r', fd)
                  try:
@@ -323,9 +305,9 @@ class Supervisor:
                      pass

          for fd in w:
-             if fd in combined_map:
+             if fd in dispatchers:
                  try:
-                     dispatcher = combined_map[fd]
+                     dispatcher = dispatchers[fd]
                      log.debug('write event caused by %r', dispatcher)
                      dispatcher.handle_write_event()
                      if not dispatcher.writable():
@@ -333,7 +315,7 @@ class Supervisor:
                  except ExitNow:
                      raise
                  except Exception:  # noqa
-                     combined_map[fd].handle_error()
+                     dispatchers[fd].handle_error()
              else:
                  log.debug('unexpected write event from fd %r', fd)
                  try:
@@ -341,8 +323,9 @@ class Supervisor:
                  except Exception:  # noqa
                      pass

-         for group in pgroups:
-             group.transition()
+         for group in sorted_groups:
+             for process in group:
+                 process.transition()

      def _reap(self, *, once: bool = False, depth: int = 0) -> None:
          if depth >= 100:
@@ -352,13 +335,13 @@ class Supervisor:
          if not pid:
              return

-         process = self._context.pid_history.get(pid, None)
+         process = self._pid_history.get(pid, None)
          if process is None:
              _, msg = decode_wait_status(check_not_none(sts))
              log.info('reaped unknown pid %s (%s)', pid, msg)
          else:
              process.finish(check_not_none(sts))
-             del self._context.pid_history[pid]
+             del self._pid_history[pid]

          if not once:
              # keep reaping until no more kids to reap, but don't recurse infinitely

ominfra/supervisor/types.py

@@ -8,6 +8,40 @@ from .configs import ProcessGroupConfig
  from .configs import ServerConfig
  from .states import ProcessState
  from .states import SupervisorState
+ from .utils.collections import KeyedCollectionAccessors
+ from .utils.ostypes import Fd
+ from .utils.ostypes import Pid
+ from .utils.ostypes import Rc
+
+
+ if ta.TYPE_CHECKING:
+     from .dispatchers import Dispatchers
+
+
+ ##
+
+
+ ServerEpoch = ta.NewType('ServerEpoch', int)
+
+
+ ##
+
+
+ @functools.total_ordering
+ class ConfigPriorityOrdered(abc.ABC):
+     @property
+     @abc.abstractmethod
+     def config(self) -> ta.Any:
+         raise NotImplementedError
+
+     def __lt__(self, other):
+         return self.config.priority < other.config.priority
+
+     def __eq__(self, other):
+         return self.config.priority == other.config.priority
+
+
+ ##


  class ServerContext(abc.ABC):
@@ -25,13 +59,43 @@ class ServerContext(abc.ABC):
      def set_state(self, state: SupervisorState) -> None:
          raise NotImplementedError

+
+ ##
+
+
+ class Dispatcher(abc.ABC):
+     @property
+     @abc.abstractmethod
+     def process(self) -> 'Process':
+         raise NotImplementedError
+
      @property
      @abc.abstractmethod
-     def pid_history(self) -> ta.Dict[int, 'Process']:
+     def channel(self) -> str:
          raise NotImplementedError

+     @property
+     @abc.abstractmethod
+     def fd(self) -> Fd:
+         raise NotImplementedError
+
+     @property
+     @abc.abstractmethod
+     def closed(self) -> bool:
+         raise NotImplementedError
+
+     #
+
+     @abc.abstractmethod
+     def close(self) -> None:
+         raise NotImplementedError
+
+     @abc.abstractmethod
+     def handle_error(self) -> None:
+         raise NotImplementedError
+
+     #

- class Dispatcher(abc.ABC):
      @abc.abstractmethod
      def readable(self) -> bool:
          raise NotImplementedError
@@ -40,26 +104,25 @@ class Dispatcher(abc.ABC):
      def writable(self) -> bool:
          raise NotImplementedError

+     #
+
      def handle_read_event(self) -> None:
          raise TypeError

      def handle_write_event(self) -> None:
          raise TypeError

+
+ class OutputDispatcher(Dispatcher, abc.ABC):
      @abc.abstractmethod
-     def handle_error(self) -> None:
+     def remove_logs(self) -> None:
          raise NotImplementedError

-     @property
      @abc.abstractmethod
-     def closed(self) -> bool:
+     def reopen_logs(self) -> None:
          raise NotImplementedError


- class OutputDispatcher(Dispatcher, abc.ABC):
-     pass
-
-
  class InputDispatcher(Dispatcher, abc.ABC):
      @abc.abstractmethod
      def write(self, chars: ta.Union[bytes, str]) -> None:
@@ -70,11 +133,13 @@ class InputDispatcher(Dispatcher, abc.ABC):
          raise NotImplementedError


- @functools.total_ordering
- class Process(abc.ABC):
+ ##
+
+
+ class Process(ConfigPriorityOrdered, abc.ABC):
      @property
      @abc.abstractmethod
-     def pid(self) -> int:
+     def name(self) -> str:
          raise NotImplementedError

      @property
@@ -82,27 +147,25 @@ class Process(abc.ABC):
      def config(self) -> ProcessConfig:
          raise NotImplementedError

-     def __lt__(self, other):
-         return self.config.priority < other.config.priority
-
-     def __eq__(self, other):
-         return self.config.priority == other.config.priority
-
      @property
      @abc.abstractmethod
-     def context(self) -> ServerContext:
+     def group(self) -> 'ProcessGroup':
          raise NotImplementedError

+     @property
      @abc.abstractmethod
-     def finish(self, sts: int) -> None:
+     def pid(self) -> Pid:
          raise NotImplementedError

+     #
+
+     @property
      @abc.abstractmethod
-     def remove_logs(self) -> None:
+     def context(self) -> ServerContext:
          raise NotImplementedError

      @abc.abstractmethod
-     def reopen_logs(self) -> None:
+     def finish(self, sts: Rc) -> None:
          raise NotImplementedError

      @abc.abstractmethod
@@ -122,50 +185,41 @@ class Process(abc.ABC):
          raise NotImplementedError

      @abc.abstractmethod
-     def create_auto_child_logs(self) -> None:
-         raise NotImplementedError
-
-     @abc.abstractmethod
-     def get_dispatchers(self) -> ta.Mapping[int, Dispatcher]:
+     def after_setuid(self) -> None:
          raise NotImplementedError

-
- @functools.total_ordering
- class ProcessGroup(abc.ABC):
-     @property
      @abc.abstractmethod
-     def config(self) -> ProcessGroupConfig:
+     def get_dispatchers(self) -> 'Dispatchers':
          raise NotImplementedError

-     def __lt__(self, other):
-         return self.config.priority < other.config.priority
-
-     def __eq__(self, other):
-         return self.config.priority == other.config.priority

-     @abc.abstractmethod
-     def transition(self) -> None:
-         raise NotImplementedError
+ ##

-     @abc.abstractmethod
-     def stop_all(self) -> None:
-         raise NotImplementedError

+ class ProcessGroup(
+         ConfigPriorityOrdered,
+         KeyedCollectionAccessors[str, Process],
+         abc.ABC,
+ ):
      @property
      @abc.abstractmethod
      def name(self) -> str:
          raise NotImplementedError

+     @property
      @abc.abstractmethod
-     def before_remove(self) -> None:
+     def config(self) -> ProcessGroupConfig:
          raise NotImplementedError

+     @property
      @abc.abstractmethod
-     def get_dispatchers(self) -> ta.Mapping[int, Dispatcher]:
+     def by_name(self) -> ta.Mapping[str, Process]:
          raise NotImplementedError

+     #
+
      @abc.abstractmethod
-     def reopen_logs(self) -> None:
+     def stop_all(self) -> None:
          raise NotImplementedError

      @abc.abstractmethod
@@ -173,5 +227,5 @@ class ProcessGroup(abc.ABC):
          raise NotImplementedError

      @abc.abstractmethod
-     def after_setuid(self) -> None:
+     def before_remove(self) -> None:
          raise NotImplementedError
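
To make the types.py refactor easier to follow, here is a small, hypothetical sketch (not shipped in the wheel) of the ordering contract that the new ConfigPriorityOrdered base gives Process and ProcessGroup; the sorted_groups.sort() call in supervisor.py relies on exactly this comparison. The Stub* names are invented for illustration.

import dataclasses as dc

from ominfra.supervisor.types import ConfigPriorityOrdered


@dc.dataclass(frozen=True)
class StubConfig:
    priority: int


class StubItem(ConfigPriorityOrdered):
    def __init__(self, priority: int) -> None:
        self._config = StubConfig(priority)

    @property
    def config(self) -> StubConfig:
        return self._config


items = [StubItem(3), StubItem(1), StubItem(2)]
items.sort()  # functools.total_ordering derives the remaining comparisons from __lt__/__eq__
assert [it.config.priority for it in items] == [1, 2, 3]
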
ominfra/supervisor/utils/__init__.py (file without changes)

ominfra/supervisor/utils/collections.py

@@ -0,0 +1,52 @@
+ # ruff: noqa: UP006 UP007
+ import abc
+ import typing as ta
+
+
+ K = ta.TypeVar('K')
+ V = ta.TypeVar('V')
+
+
+ class KeyedCollectionAccessors(abc.ABC, ta.Generic[K, V]):
+     @property
+     @abc.abstractmethod
+     def _by_key(self) -> ta.Mapping[K, V]:
+         raise NotImplementedError
+
+     def __iter__(self) -> ta.Iterator[V]:
+         return iter(self._by_key.values())
+
+     def __len__(self) -> int:
+         return len(self._by_key)
+
+     def __contains__(self, key: K) -> bool:
+         return key in self._by_key
+
+     def __getitem__(self, key: K) -> V:
+         return self._by_key[key]
+
+     def get(self, key: K, default: ta.Optional[V] = None) -> ta.Optional[V]:
+         return self._by_key.get(key, default)
+
+     def items(self) -> ta.Iterator[ta.Tuple[K, V]]:
+         return iter(self._by_key.items())
+
+
+ class KeyedCollection(KeyedCollectionAccessors[K, V]):
+     def __init__(self, items: ta.Iterable[V]) -> None:
+         super().__init__()
+
+         by_key: ta.Dict[K, V] = {}
+         for v in items:
+             if (k := self._key(v)) in by_key:
+                 raise KeyError(f'key {k} of {v} already registered by {by_key[k]}')
+             by_key[k] = v
+         self.__by_key = by_key
+
+     @property
+     def _by_key(self) -> ta.Mapping[K, V]:
+         return self.__by_key
+
+     @abc.abstractmethod
+     def _key(self, v: V) -> K:
+         raise NotImplementedError
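
For orientation, a minimal, hypothetical use of the new KeyedCollection base (not part of the package): a subclass only supplies _key, and KeyedCollectionAccessors then provides iteration, len, membership, and keyed lookup. The Widget names below are invented.

from ominfra.supervisor.utils.collections import KeyedCollection


class Widget:
    def __init__(self, name: str) -> None:
        self.name = name


class WidgetCollection(KeyedCollection[str, Widget]):
    def _key(self, v: Widget) -> str:
        return v.name  # items are keyed by name; duplicate keys raise KeyError in __init__


ws = WidgetCollection([Widget('a'), Widget('b')])
assert ws['a'].name == 'a'         # __getitem__ from KeyedCollectionAccessors
assert 'b' in ws and len(ws) == 2
assert ws.get('missing') is None
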

ominfra/supervisor/utils/diag.py

@@ -0,0 +1,31 @@
+ # ruff: noqa: UP006 UP007
+ import sys
+ import types
+ import typing as ta
+
+
+ def compact_traceback() -> ta.Tuple[
+     ta.Tuple[str, str, int],
+     ta.Type[BaseException],
+     BaseException,
+     types.TracebackType,
+ ]:
+     t, v, tb = sys.exc_info()
+     if not tb:
+         raise RuntimeError('No traceback')
+
+     tbinfo = []
+     while tb:
+         tbinfo.append((
+             tb.tb_frame.f_code.co_filename,
+             tb.tb_frame.f_code.co_name,
+             str(tb.tb_lineno),
+         ))
+         tb = tb.tb_next
+
+     # just to be safe
+     del tb
+
+     file, function, line = tbinfo[-1]
+     info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])  # noqa
+     return (file, function, line), t, v, info  # type: ignore
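
A minimal, hypothetical usage sketch of the new helper, assuming it keeps the signature above. compact_traceback() must be called while an exception is being handled, since it reads sys.exc_info().

from ominfra.supervisor.utils.diag import compact_traceback


def risky() -> None:
    raise ValueError('boom')


try:
    risky()
except Exception:  # noqa
    # Returns the innermost frame's (file, function, line), the exception type and
    # value, and a compact one-line rendering of the whole traceback.
    (file, function, line), exc_type, exc, info = compact_traceback()
    print(f'error at {file}:{function}:{line} -> {info}')
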

ominfra/supervisor/utils/fds.py

@@ -0,0 +1,46 @@
+ # ruff: noqa: UP006 UP007
+ import errno
+ import os
+ import typing as ta
+
+ from .ostypes import Fd
+
+
+ class PipeFds(ta.NamedTuple):
+     r: Fd
+     w: Fd
+
+
+ def make_pipe() -> PipeFds:
+     return PipeFds(*os.pipe())  # type: ignore
+
+
+ def read_fd(fd: Fd) -> bytes:
+     try:
+         data = os.read(fd, 2 << 16)  # 128K
+     except OSError as why:
+         if why.args[0] not in (errno.EWOULDBLOCK, errno.EBADF, errno.EINTR):
+             raise
+         data = b''
+     return data
+
+
+ def close_fd(fd: Fd) -> bool:
+     try:
+         os.close(fd)
+     except OSError:
+         return False
+     return True
+
+
+ def is_fd_open(fd: Fd) -> bool:
+     try:
+         n = os.dup(fd)
+     except OSError:
+         return False
+     os.close(n)
+     return True
+
+
+ def get_open_fds(limit: int) -> ta.FrozenSet[Fd]:
+     return frozenset(fd for i in range(limit) if is_fd_open(fd := Fd(i)))
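
A minimal, hypothetical sketch exercising the new pipe/fd helpers (not shipped in the package):

import os

from ominfra.supervisor.utils.fds import close_fd
from ominfra.supervisor.utils.fds import make_pipe
from ominfra.supervisor.utils.fds import read_fd

pipe = make_pipe()                 # PipeFds(r=..., w=...)
os.write(pipe.w, b'hello')
assert close_fd(pipe.w)            # True on success, False if the fd was already closed
assert read_fd(pipe.r) == b'hello'
assert read_fd(pipe.r) == b''      # EOF (and EWOULDBLOCK/EBADF/EINTR) read as empty bytes
close_fd(pipe.r)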