ominfra 0.0.0.dev126__py3-none-any.whl → 0.0.0.dev128__py3-none-any.whl

Files changed (44)
  1. ominfra/clouds/aws/auth.py +1 -1
  2. ominfra/deploy/_executor.py +1 -1
  3. ominfra/deploy/poly/_main.py +1 -1
  4. ominfra/pyremote/_runcommands.py +1 -1
  5. ominfra/scripts/journald2aws.py +2 -2
  6. ominfra/scripts/supervisor.py +4736 -4166
  7. ominfra/supervisor/configs.py +34 -11
  8. ominfra/supervisor/context.py +7 -345
  9. ominfra/supervisor/dispatchers.py +21 -324
  10. ominfra/supervisor/dispatchersimpl.py +343 -0
  11. ominfra/supervisor/groups.py +33 -111
  12. ominfra/supervisor/groupsimpl.py +86 -0
  13. ominfra/supervisor/inject.py +45 -20
  14. ominfra/supervisor/main.py +3 -3
  15. ominfra/supervisor/pipes.py +85 -0
  16. ominfra/supervisor/poller.py +42 -38
  17. ominfra/supervisor/privileges.py +65 -0
  18. ominfra/supervisor/process.py +6 -742
  19. ominfra/supervisor/processimpl.py +516 -0
  20. ominfra/supervisor/setup.py +38 -0
  21. ominfra/supervisor/setupimpl.py +262 -0
  22. ominfra/supervisor/spawning.py +32 -0
  23. ominfra/supervisor/spawningimpl.py +350 -0
  24. ominfra/supervisor/supervisor.py +67 -84
  25. ominfra/supervisor/types.py +101 -47
  26. ominfra/supervisor/utils/__init__.py +0 -0
  27. ominfra/supervisor/utils/collections.py +52 -0
  28. ominfra/supervisor/utils/diag.py +31 -0
  29. ominfra/supervisor/utils/fds.py +46 -0
  30. ominfra/supervisor/utils/fs.py +47 -0
  31. ominfra/supervisor/utils/os.py +45 -0
  32. ominfra/supervisor/utils/ostypes.py +9 -0
  33. ominfra/supervisor/utils/signals.py +60 -0
  34. ominfra/supervisor/utils/strings.py +105 -0
  35. ominfra/supervisor/utils/users.py +67 -0
  36. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev128.dist-info}/METADATA +3 -3
  37. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev128.dist-info}/RECORD +41 -25
  38. ominfra/supervisor/datatypes.py +0 -175
  39. ominfra/supervisor/signals.py +0 -52
  40. ominfra/supervisor/utils.py +0 -206
  41. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev128.dist-info}/LICENSE +0 -0
  42. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev128.dist-info}/WHEEL +0 -0
  43. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev128.dist-info}/entry_points.txt +0 -0
  44. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev128.dist-info}/top_level.txt +0 -0
ominfra/supervisor/dispatchers.py
@@ -1,336 +1,33 @@
  # ruff: noqa: UP006 UP007
- import abc
- import errno
- import logging
- import os
- import typing as ta
-
- from omlish.lite.logs import log
-
- from .configs import ProcessConfig
- from .events import EventCallbacks
- from .events import ProcessCommunicationEvent
- from .events import ProcessLogStderrEvent
- from .events import ProcessLogStdoutEvent
  from .types import Dispatcher
- from .types import InputDispatcher
  from .types import OutputDispatcher
- from .types import Process
- from .utils import as_bytes
- from .utils import compact_traceback
- from .utils import find_prefix_at_end
- from .utils import read_fd
- from .utils import strip_escapes
-
-
- class BaseDispatcherImpl(Dispatcher, abc.ABC):
-     def __init__(
-             self,
-             process: Process,
-             channel: str,
-             fd: int,
-             *,
-             event_callbacks: EventCallbacks,
-     ) -> None:
-         super().__init__()
-
-         self._process = process  # process which "owns" this dispatcher
-         self._channel = channel  # 'stderr' or 'stdout'
-         self._fd = fd
-         self._event_callbacks = event_callbacks
-
-         self._closed = False  # True if close() has been called
-
-     def __repr__(self) -> str:
-         return f'<{self.__class__.__name__} at {id(self)} for {self._process} ({self._channel})>'
-
-     @property
-     def process(self) -> Process:
-         return self._process
-
-     @property
-     def channel(self) -> str:
-         return self._channel
-
-     @property
-     def fd(self) -> int:
-         return self._fd
-
-     @property
-     def closed(self) -> bool:
-         return self._closed
-
-     def handle_error(self) -> None:
-         nil, t, v, tbinfo = compact_traceback()
-
-         log.critical('uncaptured python exception, closing channel %s (%s:%s %s)', repr(self), t, v, tbinfo)
-         self.close()
-
-     def close(self) -> None:
-         if not self._closed:
-             log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
-             self._closed = True
-
-
- class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
-     """
-     Dispatcher for one channel (stdout or stderr) of one process. Serves several purposes:
-
-     - capture output sent within <!--XSUPERVISOR:BEGIN--> and <!--XSUPERVISOR:END--> tags and signal a
-       ProcessCommunicationEvent by calling notify_event(event).
-     - route the output to the appropriate log handlers as specified in the config.
-     """
-
-     def __init__(
-             self,
-             process: Process,
-             event_type: ta.Type[ProcessCommunicationEvent],
-             fd: int,
-             *,
-             event_callbacks: EventCallbacks,
-     ) -> None:
-         super().__init__(
-             process,
-             event_type.channel,
-             fd,
-             event_callbacks=event_callbacks,
-         )
-
-         self._event_type = event_type
-
-         self._lc: ProcessConfig.Log = getattr(process.config, self._channel)
-
-         self._init_normal_log()
-         self._init_capture_log()
-
-         self._child_log = self._normal_log
-
-         self._capture_mode = False  # are we capturing process event data
-         self._output_buffer = b''  # data waiting to be logged
-
-         # all code below is purely for minor speedups
-
-         begin_token = self._event_type.BEGIN_TOKEN
-         end_token = self._event_type.END_TOKEN
-         self._begin_token_data = (begin_token, len(begin_token))
-         self._end_token_data = (end_token, len(end_token))
-
-         self._main_log_level = logging.DEBUG
-
-         self._log_to_main_log = process.context.config.loglevel <= self._main_log_level
-
-         config = self._process.config
-         self._stdout_events_enabled = config.stdout.events_enabled
-         self._stderr_events_enabled = config.stderr.events_enabled
-
-     _child_log: ta.Optional[logging.Logger] = None  # the current logger (normal_log or capture_log)
-     _normal_log: ta.Optional[logging.Logger] = None  # the "normal" (non-capture) logger
-     _capture_log: ta.Optional[logging.Logger] = None  # the logger used while we're in capture_mode
+ from .utils.collections import KeyedCollection
+ from .utils.ostypes import Fd
 
-     def _init_normal_log(self) -> None:
-         """
-         Configure the "normal" (non-capture) log for this channel of this process. Sets self.normal_log if logging is
-         enabled.
-         """
 
-         config = self._process.config  # noqa
-         channel = self._channel  # noqa
+ class Dispatchers(KeyedCollection[Fd, Dispatcher]):
+     def _key(self, v: Dispatcher) -> Fd:
+         return v.fd
 
-         logfile = self._lc.file
-         maxbytes = self._lc.maxbytes  # noqa
-         backups = self._lc.backups  # noqa
-         to_syslog = self._lc.syslog
+     #
 
-         if logfile or to_syslog:
-             self._normal_log = logging.getLogger(__name__)
+     def drain(self) -> None:
+         for d in self:
+             # note that we *must* call readable() for every dispatcher, as it may have side effects for a given
+             # dispatcher (eg. call handle_listener_state_change for event listener processes)
+             if d.readable():
+                 d.handle_read_event()
+             if d.writable():
+                 d.handle_write_event()
 
-         # if logfile:
-         #     loggers.handle_file(
-         #         self.normal_log,
-         #         filename=logfile,
-         #         fmt='%(message)s',
-         #         rotating=bool(maxbytes),  # optimization
-         #         maxbytes=maxbytes,
-         #         backups=backups,
-         #     )
-
-         # if to_syslog:
-         #     loggers.handle_syslog(
-         #         self.normal_log,
-         #         fmt=config.name + ' %(message)s',
-         #     )
-
-     def _init_capture_log(self) -> None:
-         """
-         Configure the capture log for this process. This log is used to temporarily capture output when special output
-         is detected. Sets self.capture_log if capturing is enabled.
-         """
-
-         capture_maxbytes = self._lc.capture_maxbytes
-         if capture_maxbytes:
-             self._capture_log = logging.getLogger(__name__)
-             # loggers.handle_boundIO(
-             #     self._capture_log,
-             #     fmt='%(message)s',
-             #     maxbytes=capture_maxbytes,
-             # )
+     #
 
      def remove_logs(self) -> None:
-         for l in (self._normal_log, self._capture_log):
-             if l is not None:
-                 for handler in l.handlers:
-                     handler.remove()  # type: ignore
-                     handler.reopen()  # type: ignore
+         for d in self:
+             if isinstance(d, OutputDispatcher):
+                 d.remove_logs()
 
      def reopen_logs(self) -> None:
-         for l in (self._normal_log, self._capture_log):
-             if l is not None:
-                 for handler in l.handlers:
-                     handler.reopen()  # type: ignore
-
-     def _log(self, data: ta.Union[str, bytes, None]) -> None:
-         if not data:
-             return
-
-         if self._process.context.config.strip_ansi:
-             data = strip_escapes(as_bytes(data))
-
-         if self._child_log:
-             self._child_log.info(data)
-
-         if self._log_to_main_log:
-             if not isinstance(data, bytes):
-                 text = data
-             else:
-                 try:
-                     text = data.decode('utf-8')
-                 except UnicodeDecodeError:
-                     text = f'Undecodable: {data!r}'
-             log.log(self._main_log_level, '%r %s output:\n%s', self._process.config.name, self._channel, text)  # noqa
-
-         if self._channel == 'stdout':
-             if self._stdout_events_enabled:
-                 self._event_callbacks.notify(ProcessLogStdoutEvent(self._process, self._process.pid, data))
-
-         elif self._stderr_events_enabled:
-             self._event_callbacks.notify(ProcessLogStderrEvent(self._process, self._process.pid, data))
-
-     def record_output(self) -> None:
-         if self._capture_log is None:
-             # shortcut trying to find capture data
-             data = self._output_buffer
-             self._output_buffer = b''
-             self._log(data)
-             return
-
-         if self._capture_mode:
-             token, token_len = self._end_token_data
-         else:
-             token, token_len = self._begin_token_data
-
-         if len(self._output_buffer) <= token_len:
-             return  # not enough data
-
-         data = self._output_buffer
-         self._output_buffer = b''
-
-         try:
-             before, after = data.split(token, 1)
-         except ValueError:
-             after = None
-             index = find_prefix_at_end(data, token)
-             if index:
-                 self._output_buffer = self._output_buffer + data[-index:]
-                 data = data[:-index]
-             self._log(data)
-         else:
-             self._log(before)
-             self.toggle_capture_mode()
-             self._output_buffer = after  # type: ignore
-
-         if after:
-             self.record_output()
-
-     def toggle_capture_mode(self) -> None:
-         self._capture_mode = not self._capture_mode
-
-         if self._capture_log is not None:
-             if self._capture_mode:
-                 self._child_log = self._capture_log
-             else:
-                 for handler in self._capture_log.handlers:
-                     handler.flush()
-                 data = self._capture_log.getvalue()  # type: ignore
-                 channel = self._channel
-                 procname = self._process.config.name
-                 event = self._event_type(self._process, self._process.pid, data)
-                 self._event_callbacks.notify(event)
-
-                 log.debug('%r %s emitted a comm event', procname, channel)
-                 for handler in self._capture_log.handlers:
-                     handler.remove()  # type: ignore
-                     handler.reopen()  # type: ignore
-                 self._child_log = self._normal_log
-
-     def writable(self) -> bool:
-         return False
-
-     def readable(self) -> bool:
-         if self._closed:
-             return False
-         return True
-
-     def handle_read_event(self) -> None:
-         data = read_fd(self._fd)
-         self._output_buffer += data
-         self.record_output()
-         if not data:
-             # if we get no data back from the pipe, it means that the child process has ended. See
-             # mail.python.org/pipermail/python-dev/2004-August/046850.html
-             self.close()
-
-
- class InputDispatcherImpl(BaseDispatcherImpl, InputDispatcher):
-     def __init__(
-             self,
-             process: Process,
-             channel: str,
-             fd: int,
-             *,
-             event_callbacks: EventCallbacks,
-     ) -> None:
-         super().__init__(
-             process,
-             channel,
-             fd,
-             event_callbacks=event_callbacks,
-         )
-
-         self._input_buffer = b''
-
-     def write(self, chars: ta.Union[bytes, str]) -> None:
-         self._input_buffer += as_bytes(chars)
-
-     def writable(self) -> bool:
-         if self._input_buffer and not self._closed:
-             return True
-         return False
-
-     def readable(self) -> bool:
-         return False
-
-     def flush(self) -> None:
-         # other code depends on this raising EPIPE if the pipe is closed
-         sent = os.write(self._fd, as_bytes(self._input_buffer))
-         self._input_buffer = self._input_buffer[sent:]
-
-     def handle_write_event(self) -> None:
-         if self._input_buffer:
-             try:
-                 self.flush()
-             except OSError as why:
-                 if why.args[0] == errno.EPIPE:
-                     self._input_buffer = b''
-                     self.close()
-                 else:
-                     raise
+         for d in self:
+             if isinstance(d, OutputDispatcher):
+                 d.reopen_logs()
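
The new `Dispatchers` container above is a `KeyedCollection` keyed by file descriptor whose `drain()` probes every dispatcher on each pass. Below is a minimal, self-contained sketch of that pattern; `KeyedCollectionSketch` and `StubDispatcher` are illustrative stand-ins, not the ominfra API.

```python
# Sketch of a keyed-by-fd dispatcher collection with a drain() pass.
import typing as ta

K = ta.TypeVar('K')
V = ta.TypeVar('V')


class KeyedCollectionSketch(ta.Generic[K, V]):
    def __init__(self, items: ta.Iterable[V]) -> None:
        # index each item by the key its subclass derives from it
        self._by_key: ta.Dict[K, V] = {self._key(v): v for v in items}

    def _key(self, v: V) -> K:
        raise NotImplementedError

    def __iter__(self) -> ta.Iterator[V]:
        return iter(self._by_key.values())


class StubDispatcher:
    def __init__(self, fd: int) -> None:
        self.fd = fd

    def readable(self) -> bool:
        return True

    def writable(self) -> bool:
        return False

    def handle_read_event(self) -> None:
        print(f'read event on fd {self.fd}')

    def handle_write_event(self) -> None:
        pass


class DispatchersSketch(KeyedCollectionSketch[int, StubDispatcher]):
    def _key(self, v: StubDispatcher) -> int:
        return v.fd

    def drain(self) -> None:
        # mirrors Dispatchers.drain(): always probe readable()/writable(),
        # then invoke the corresponding event handler
        for d in self:
            if d.readable():
                d.handle_read_event()
            if d.writable():
                d.handle_write_event()


dispatchers = DispatchersSketch([StubDispatcher(3), StubDispatcher(4)])
dispatchers.drain()
```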
ominfra/supervisor/dispatchersimpl.py
@@ -0,0 +1,343 @@
+ # ruff: noqa: UP006 UP007
+ import abc
+ import errno
+ import logging
+ import os
+ import typing as ta
+
+ from omlish.lite.logs import log
+
+ from .configs import ProcessConfig
+ from .events import EventCallbacks
+ from .events import ProcessCommunicationEvent
+ from .events import ProcessLogStderrEvent
+ from .events import ProcessLogStdoutEvent
+ from .types import Dispatcher
+ from .types import InputDispatcher
+ from .types import OutputDispatcher
+ from .types import Process
+ from .utils.diag import compact_traceback
+ from .utils.fds import read_fd
+ from .utils.ostypes import Fd
+ from .utils.strings import as_bytes
+ from .utils.strings import find_prefix_at_end
+ from .utils.strings import strip_escapes
+
+
+ class BaseDispatcherImpl(Dispatcher, abc.ABC):
+     def __init__(
+             self,
+             process: Process,
+             channel: str,
+             fd: Fd,
+             *,
+             event_callbacks: EventCallbacks,
+     ) -> None:
+         super().__init__()
+
+         self._process = process  # process which "owns" this dispatcher
+         self._channel = channel  # 'stderr' or 'stdout'
+         self._fd = fd
+         self._event_callbacks = event_callbacks
+
+         self._closed = False  # True if close() has been called
+
+     #
+
+     def __repr__(self) -> str:
+         return f'<{self.__class__.__name__} at {id(self)} for {self._process} ({self._channel})>'
+
+     #
+
+     @property
+     def process(self) -> Process:
+         return self._process
+
+     @property
+     def channel(self) -> str:
+         return self._channel
+
+     @property
+     def fd(self) -> Fd:
+         return self._fd
+
+     @property
+     def closed(self) -> bool:
+         return self._closed
+
+     #
+
+     def close(self) -> None:
+         if not self._closed:
+             log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
+             self._closed = True
+
+     def handle_error(self) -> None:
+         nil, t, v, tbinfo = compact_traceback()
+
+         log.critical('uncaptured python exception, closing channel %s (%s:%s %s)', repr(self), t, v, tbinfo)
+         self.close()
+
+
+ class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
+     """
+     Dispatcher for one channel (stdout or stderr) of one process. Serves several purposes:
+
+     - capture output sent within <!--XSUPERVISOR:BEGIN--> and <!--XSUPERVISOR:END--> tags and signal a
+       ProcessCommunicationEvent by calling notify_event(event).
+     - route the output to the appropriate log handlers as specified in the config.
+     """
+
+     def __init__(
+             self,
+             process: Process,
+             event_type: ta.Type[ProcessCommunicationEvent],
+             fd: Fd,
+             *,
+             event_callbacks: EventCallbacks,
+     ) -> None:
+         super().__init__(
+             process,
+             event_type.channel,
+             fd,
+             event_callbacks=event_callbacks,
+         )
+
+         self._event_type = event_type
+
+         self._lc: ProcessConfig.Log = getattr(process.config, self._channel)
+
+         self._init_normal_log()
+         self._init_capture_log()
+
+         self._child_log = self._normal_log
+
+         self._capture_mode = False  # are we capturing process event data
+         self._output_buffer = b''  # data waiting to be logged
+
+         # all code below is purely for minor speedups
+
+         begin_token = self._event_type.BEGIN_TOKEN
+         end_token = self._event_type.END_TOKEN
+         self._begin_token_data = (begin_token, len(begin_token))
+         self._end_token_data = (end_token, len(end_token))
+
+         self._main_log_level = logging.DEBUG
+
+         self._log_to_main_log = process.context.config.loglevel <= self._main_log_level
+
+         config = self._process.config
+         self._stdout_events_enabled = config.stdout.events_enabled
+         self._stderr_events_enabled = config.stderr.events_enabled
+
+     _child_log: ta.Optional[logging.Logger] = None  # the current logger (normal_log or capture_log)
+     _normal_log: ta.Optional[logging.Logger] = None  # the "normal" (non-capture) logger
+     _capture_log: ta.Optional[logging.Logger] = None  # the logger used while we're in capture_mode
+
+     def _init_normal_log(self) -> None:
+         """
+         Configure the "normal" (non-capture) log for this channel of this process. Sets self.normal_log if logging is
+         enabled.
+         """
+
+         config = self._process.config  # noqa
+         channel = self._channel  # noqa
+
+         logfile = self._lc.file
+         maxbytes = self._lc.maxbytes  # noqa
+         backups = self._lc.backups  # noqa
+         to_syslog = self._lc.syslog
+
+         if logfile or to_syslog:
+             self._normal_log = logging.getLogger(__name__)
+
+         # if logfile:
+         #     loggers.handle_file(
+         #         self.normal_log,
+         #         filename=logfile,
+         #         fmt='%(message)s',
+         #         rotating=bool(maxbytes),  # optimization
+         #         maxbytes=maxbytes,
+         #         backups=backups,
+         #     )
+
+         # if to_syslog:
+         #     loggers.handle_syslog(
+         #         self.normal_log,
+         #         fmt=config.name + ' %(message)s',
+         #     )
+
+     def _init_capture_log(self) -> None:
+         """
+         Configure the capture log for this process. This log is used to temporarily capture output when special output
+         is detected. Sets self.capture_log if capturing is enabled.
+         """
+
+         capture_maxbytes = self._lc.capture_maxbytes
+         if capture_maxbytes:
+             self._capture_log = logging.getLogger(__name__)
+             # loggers.handle_boundIO(
+             #     self._capture_log,
+             #     fmt='%(message)s',
+             #     maxbytes=capture_maxbytes,
+             # )
+
+     def remove_logs(self) -> None:
+         for l in (self._normal_log, self._capture_log):
+             if l is not None:
+                 for handler in l.handlers:
+                     handler.remove()  # type: ignore
+                     handler.reopen()  # type: ignore
+
+     def reopen_logs(self) -> None:
+         for l in (self._normal_log, self._capture_log):
+             if l is not None:
+                 for handler in l.handlers:
+                     handler.reopen()  # type: ignore
+
+     def _log(self, data: ta.Union[str, bytes, None]) -> None:
+         if not data:
+             return
+
+         if self._process.context.config.strip_ansi:
+             data = strip_escapes(as_bytes(data))
+
+         if self._child_log:
+             self._child_log.info(data)
+
+         if self._log_to_main_log:
+             if not isinstance(data, bytes):
+                 text = data
+             else:
+                 try:
+                     text = data.decode('utf-8')
+                 except UnicodeDecodeError:
+                     text = f'Undecodable: {data!r}'
+             log.log(self._main_log_level, '%r %s output:\n%s', self._process.config.name, self._channel, text)  # noqa
+
+         if self._channel == 'stdout':
+             if self._stdout_events_enabled:
+                 self._event_callbacks.notify(ProcessLogStdoutEvent(self._process, self._process.pid, data))
+
+         elif self._stderr_events_enabled:
+             self._event_callbacks.notify(ProcessLogStderrEvent(self._process, self._process.pid, data))
+
+     def record_output(self) -> None:
+         if self._capture_log is None:
+             # shortcut trying to find capture data
+             data = self._output_buffer
+             self._output_buffer = b''
+             self._log(data)
+             return
+
+         if self._capture_mode:
+             token, token_len = self._end_token_data
+         else:
+             token, token_len = self._begin_token_data
+
+         if len(self._output_buffer) <= token_len:
+             return  # not enough data
+
+         data = self._output_buffer
+         self._output_buffer = b''
+
+         try:
+             before, after = data.split(token, 1)
+         except ValueError:
+             after = None
+             index = find_prefix_at_end(data, token)
+             if index:
+                 self._output_buffer = self._output_buffer + data[-index:]
+                 data = data[:-index]
+             self._log(data)
+         else:
+             self._log(before)
+             self.toggle_capture_mode()
+             self._output_buffer = after  # type: ignore
+
+         if after:
+             self.record_output()
+
+     def toggle_capture_mode(self) -> None:
+         self._capture_mode = not self._capture_mode
+
+         if self._capture_log is not None:
+             if self._capture_mode:
+                 self._child_log = self._capture_log
+             else:
+                 for handler in self._capture_log.handlers:
+                     handler.flush()
+                 data = self._capture_log.getvalue()  # type: ignore
+                 channel = self._channel
+                 procname = self._process.config.name
+                 event = self._event_type(self._process, self._process.pid, data)
+                 self._event_callbacks.notify(event)
+
+                 log.debug('%r %s emitted a comm event', procname, channel)
+                 for handler in self._capture_log.handlers:
+                     handler.remove()  # type: ignore
+                     handler.reopen()  # type: ignore
+                 self._child_log = self._normal_log
+
+     def writable(self) -> bool:
+         return False
+
+     def readable(self) -> bool:
+         if self._closed:
+             return False
+         return True
+
+     def handle_read_event(self) -> None:
+         data = read_fd(self._fd)
+         self._output_buffer += data
+         self.record_output()
+         if not data:
+             # if we get no data back from the pipe, it means that the child process has ended. See
+             # mail.python.org/pipermail/python-dev/2004-August/046850.html
+             self.close()
+
+
+ class InputDispatcherImpl(BaseDispatcherImpl, InputDispatcher):
+     def __init__(
+             self,
+             process: Process,
+             channel: str,
+             fd: Fd,
+             *,
+             event_callbacks: EventCallbacks,
+     ) -> None:
+         super().__init__(
+             process,
+             channel,
+             fd,
+             event_callbacks=event_callbacks,
+         )
+
+         self._input_buffer = b''
+
+     def write(self, chars: ta.Union[bytes, str]) -> None:
+         self._input_buffer += as_bytes(chars)
+
+     def writable(self) -> bool:
+         if self._input_buffer and not self._closed:
+             return True
+         return False
+
+     def readable(self) -> bool:
+         return False
+
+     def flush(self) -> None:
+         # other code depends on this raising EPIPE if the pipe is closed
+         sent = os.write(self._fd, as_bytes(self._input_buffer))
+         self._input_buffer = self._input_buffer[sent:]
+
+     def handle_write_event(self) -> None:
+         if self._input_buffer:
+             try:
+                 self.flush()
+             except OSError as why:
+                 if why.args[0] == errno.EPIPE:
+                     self._input_buffer = b''
+                     self.close()
+                 else:
+                     raise
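
For reference, the BEGIN/END capture performed by `OutputDispatcherImpl.record_output()` above amounts to splitting the output stream on the `ProcessCommunicationEvent` tokens. The following is a simplified, self-contained sketch of that splitting, not the ominfra implementation: it keeps partial tokens buffered across chunks, but unlike the real code it only flushes trailing non-captured output once the stream ends.

```python
# Simplified sketch of BEGIN/END comm-event capture over a chunked byte stream.
BEGIN = b'<!--XSUPERVISOR:BEGIN-->'
END = b'<!--XSUPERVISOR:END-->'


def split_comm_events(chunks):
    """Yield ('log', data) and ('event', payload) pieces from a chunk stream."""
    buf = b''
    capturing = False
    for chunk in chunks:
        buf += chunk
        while True:
            token = END if capturing else BEGIN
            before, sep, after = buf.partition(token)
            if not sep:
                break  # token not (yet) complete in the buffer
            if capturing:
                yield ('event', before)  # captured payload between the tokens
            elif before:
                yield ('log', before)    # ordinary output routed to the logs
            capturing = not capturing
            buf = after
    if buf and not capturing:
        yield ('log', buf)


pieces = list(split_comm_events([b'hello <!--XSUPERVISOR:BEGIN-->payload', b'<!--XSUPERVISOR:END--> bye']))
# [('log', b'hello '), ('event', b'payload'), ('log', b' bye')]
```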