ominfra-0.0.0.dev126-py3-none-any.whl → ominfra-0.0.0.dev127-py3-none-any.whl

Files changed (34)
  1. ominfra/clouds/aws/auth.py +1 -1
  2. ominfra/deploy/_executor.py +1 -1
  3. ominfra/deploy/poly/_main.py +1 -1
  4. ominfra/pyremote/_runcommands.py +1 -1
  5. ominfra/scripts/journald2aws.py +2 -2
  6. ominfra/scripts/supervisor.py +1796 -1218
  7. ominfra/supervisor/collections.py +52 -0
  8. ominfra/supervisor/context.py +2 -336
  9. ominfra/supervisor/datatypes.py +1 -63
  10. ominfra/supervisor/dispatchers.py +20 -324
  11. ominfra/supervisor/dispatchersimpl.py +342 -0
  12. ominfra/supervisor/groups.py +33 -111
  13. ominfra/supervisor/groupsimpl.py +86 -0
  14. ominfra/supervisor/inject.py +44 -19
  15. ominfra/supervisor/main.py +1 -1
  16. ominfra/supervisor/pipes.py +83 -0
  17. ominfra/supervisor/poller.py +6 -3
  18. ominfra/supervisor/privileges.py +65 -0
  19. ominfra/supervisor/processes.py +18 -0
  20. ominfra/supervisor/{process.py → processesimpl.py} +96 -330
  21. ominfra/supervisor/setup.py +38 -0
  22. ominfra/supervisor/setupimpl.py +261 -0
  23. ominfra/supervisor/signals.py +24 -16
  24. ominfra/supervisor/spawning.py +31 -0
  25. ominfra/supervisor/spawningimpl.py +347 -0
  26. ominfra/supervisor/supervisor.py +52 -77
  27. ominfra/supervisor/types.py +101 -45
  28. ominfra/supervisor/users.py +64 -0
  29. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev127.dist-info}/METADATA +3 -3
  30. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev127.dist-info}/RECORD +34 -23
  31. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev127.dist-info}/LICENSE +0 -0
  32. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev127.dist-info}/WHEEL +0 -0
  33. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev127.dist-info}/entry_points.txt +0 -0
  34. {ominfra-0.0.0.dev126.dist-info → ominfra-0.0.0.dev127.dist-info}/top_level.txt +0 -0
ominfra/supervisor/dispatchers.py
@@ -1,336 +1,32 @@
  # ruff: noqa: UP006 UP007
- import abc
- import errno
- import logging
- import os
- import typing as ta
-
- from omlish.lite.logs import log
-
- from .configs import ProcessConfig
- from .events import EventCallbacks
- from .events import ProcessCommunicationEvent
- from .events import ProcessLogStderrEvent
- from .events import ProcessLogStdoutEvent
+ from .collections import KeyedCollection
  from .types import Dispatcher
- from .types import InputDispatcher
  from .types import OutputDispatcher
- from .types import Process
- from .utils import as_bytes
- from .utils import compact_traceback
- from .utils import find_prefix_at_end
- from .utils import read_fd
- from .utils import strip_escapes
-
-
- class BaseDispatcherImpl(Dispatcher, abc.ABC):
-     def __init__(
-             self,
-             process: Process,
-             channel: str,
-             fd: int,
-             *,
-             event_callbacks: EventCallbacks,
-     ) -> None:
-         super().__init__()
-
-         self._process = process  # process which "owns" this dispatcher
-         self._channel = channel  # 'stderr' or 'stdout'
-         self._fd = fd
-         self._event_callbacks = event_callbacks
-
-         self._closed = False  # True if close() has been called
-
-     def __repr__(self) -> str:
-         return f'<{self.__class__.__name__} at {id(self)} for {self._process} ({self._channel})>'
-
-     @property
-     def process(self) -> Process:
-         return self._process
-
-     @property
-     def channel(self) -> str:
-         return self._channel
-
-     @property
-     def fd(self) -> int:
-         return self._fd
-
-     @property
-     def closed(self) -> bool:
-         return self._closed
-
-     def handle_error(self) -> None:
-         nil, t, v, tbinfo = compact_traceback()
-
-         log.critical('uncaptured python exception, closing channel %s (%s:%s %s)', repr(self), t, v, tbinfo)
-         self.close()
-
-     def close(self) -> None:
-         if not self._closed:
-             log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
-             self._closed = True
-
-
- class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
-     """
-     Dispatcher for one channel (stdout or stderr) of one process. Serves several purposes:
-
-     - capture output sent within <!--XSUPERVISOR:BEGIN--> and <!--XSUPERVISOR:END--> tags and signal a
-       ProcessCommunicationEvent by calling notify_event(event).
-     - route the output to the appropriate log handlers as specified in the config.
-     """
-
-     def __init__(
-             self,
-             process: Process,
-             event_type: ta.Type[ProcessCommunicationEvent],
-             fd: int,
-             *,
-             event_callbacks: EventCallbacks,
-     ) -> None:
-         super().__init__(
-             process,
-             event_type.channel,
-             fd,
-             event_callbacks=event_callbacks,
-         )
-
-         self._event_type = event_type
-
-         self._lc: ProcessConfig.Log = getattr(process.config, self._channel)
-
-         self._init_normal_log()
-         self._init_capture_log()
-
-         self._child_log = self._normal_log
-
-         self._capture_mode = False  # are we capturing process event data
-         self._output_buffer = b''  # data waiting to be logged
-
-         # all code below is purely for minor speedups
-
-         begin_token = self._event_type.BEGIN_TOKEN
-         end_token = self._event_type.END_TOKEN
-         self._begin_token_data = (begin_token, len(begin_token))
-         self._end_token_data = (end_token, len(end_token))
-
-         self._main_log_level = logging.DEBUG
-
-         self._log_to_main_log = process.context.config.loglevel <= self._main_log_level
-
-         config = self._process.config
-         self._stdout_events_enabled = config.stdout.events_enabled
-         self._stderr_events_enabled = config.stderr.events_enabled
-
-     _child_log: ta.Optional[logging.Logger] = None  # the current logger (normal_log or capture_log)
-     _normal_log: ta.Optional[logging.Logger] = None  # the "normal" (non-capture) logger
-     _capture_log: ta.Optional[logging.Logger] = None  # the logger used while we're in capture_mode

-     def _init_normal_log(self) -> None:
-         """
-         Configure the "normal" (non-capture) log for this channel of this process. Sets self.normal_log if logging is
-         enabled.
-         """

-         config = self._process.config  # noqa
-         channel = self._channel  # noqa
+ class Dispatchers(KeyedCollection[int, Dispatcher]):
+     def _key(self, v: Dispatcher) -> int:
+         return v.fd

-         logfile = self._lc.file
-         maxbytes = self._lc.maxbytes  # noqa
-         backups = self._lc.backups  # noqa
-         to_syslog = self._lc.syslog
+     #

-         if logfile or to_syslog:
-             self._normal_log = logging.getLogger(__name__)
+     def drain(self) -> None:
+         for d in self:
+             # note that we *must* call readable() for every dispatcher, as it may have side effects for a given
+             # dispatcher (eg. call handle_listener_state_change for event listener processes)
+             if d.readable():
+                 d.handle_read_event()
+             if d.writable():
+                 d.handle_write_event()

-         # if logfile:
-         #     loggers.handle_file(
-         #         self.normal_log,
-         #         filename=logfile,
-         #         fmt='%(message)s',
-         #         rotating=bool(maxbytes),  # optimization
-         #         maxbytes=maxbytes,
-         #         backups=backups,
-         #     )
-
-         # if to_syslog:
-         #     loggers.handle_syslog(
-         #         self.normal_log,
-         #         fmt=config.name + ' %(message)s',
-         #     )
-
-     def _init_capture_log(self) -> None:
-         """
-         Configure the capture log for this process. This log is used to temporarily capture output when special output
-         is detected. Sets self.capture_log if capturing is enabled.
-         """
-
-         capture_maxbytes = self._lc.capture_maxbytes
-         if capture_maxbytes:
-             self._capture_log = logging.getLogger(__name__)
-             # loggers.handle_boundIO(
-             #     self._capture_log,
-             #     fmt='%(message)s',
-             #     maxbytes=capture_maxbytes,
-             # )
+     #

      def remove_logs(self) -> None:
-         for l in (self._normal_log, self._capture_log):
-             if l is not None:
-                 for handler in l.handlers:
-                     handler.remove()  # type: ignore
-                     handler.reopen()  # type: ignore
+         for d in self:
+             if isinstance(d, OutputDispatcher):
+                 d.remove_logs()

      def reopen_logs(self) -> None:
-         for l in (self._normal_log, self._capture_log):
-             if l is not None:
-                 for handler in l.handlers:
-                     handler.reopen()  # type: ignore
-
-     def _log(self, data: ta.Union[str, bytes, None]) -> None:
-         if not data:
-             return
-
-         if self._process.context.config.strip_ansi:
-             data = strip_escapes(as_bytes(data))
-
-         if self._child_log:
-             self._child_log.info(data)
-
-         if self._log_to_main_log:
-             if not isinstance(data, bytes):
-                 text = data
-             else:
-                 try:
-                     text = data.decode('utf-8')
-                 except UnicodeDecodeError:
-                     text = f'Undecodable: {data!r}'
-             log.log(self._main_log_level, '%r %s output:\n%s', self._process.config.name, self._channel, text)  # noqa
-
-         if self._channel == 'stdout':
-             if self._stdout_events_enabled:
-                 self._event_callbacks.notify(ProcessLogStdoutEvent(self._process, self._process.pid, data))
-
-         elif self._stderr_events_enabled:
-             self._event_callbacks.notify(ProcessLogStderrEvent(self._process, self._process.pid, data))
-
-     def record_output(self) -> None:
-         if self._capture_log is None:
-             # shortcut trying to find capture data
-             data = self._output_buffer
-             self._output_buffer = b''
-             self._log(data)
-             return
-
-         if self._capture_mode:
-             token, token_len = self._end_token_data
-         else:
-             token, token_len = self._begin_token_data
-
-         if len(self._output_buffer) <= token_len:
-             return  # not enough data
-
-         data = self._output_buffer
-         self._output_buffer = b''
-
-         try:
-             before, after = data.split(token, 1)
-         except ValueError:
-             after = None
-             index = find_prefix_at_end(data, token)
-             if index:
-                 self._output_buffer = self._output_buffer + data[-index:]
-                 data = data[:-index]
-             self._log(data)
-         else:
-             self._log(before)
-             self.toggle_capture_mode()
-             self._output_buffer = after  # type: ignore
-
-         if after:
-             self.record_output()
-
-     def toggle_capture_mode(self) -> None:
-         self._capture_mode = not self._capture_mode
-
-         if self._capture_log is not None:
-             if self._capture_mode:
-                 self._child_log = self._capture_log
-             else:
-                 for handler in self._capture_log.handlers:
-                     handler.flush()
-                 data = self._capture_log.getvalue()  # type: ignore
-                 channel = self._channel
-                 procname = self._process.config.name
-                 event = self._event_type(self._process, self._process.pid, data)
-                 self._event_callbacks.notify(event)
-
-                 log.debug('%r %s emitted a comm event', procname, channel)
-                 for handler in self._capture_log.handlers:
-                     handler.remove()  # type: ignore
-                     handler.reopen()  # type: ignore
-                 self._child_log = self._normal_log
-
-     def writable(self) -> bool:
-         return False
-
-     def readable(self) -> bool:
-         if self._closed:
-             return False
-         return True
-
-     def handle_read_event(self) -> None:
-         data = read_fd(self._fd)
-         self._output_buffer += data
-         self.record_output()
-         if not data:
-             # if we get no data back from the pipe, it means that the child process has ended. See
-             # mail.python.org/pipermail/python-dev/2004-August/046850.html
-             self.close()
-
-
- class InputDispatcherImpl(BaseDispatcherImpl, InputDispatcher):
-     def __init__(
-             self,
-             process: Process,
-             channel: str,
-             fd: int,
-             *,
-             event_callbacks: EventCallbacks,
-     ) -> None:
-         super().__init__(
-             process,
-             channel,
-             fd,
-             event_callbacks=event_callbacks,
-         )
-
-         self._input_buffer = b''
-
-     def write(self, chars: ta.Union[bytes, str]) -> None:
-         self._input_buffer += as_bytes(chars)
-
-     def writable(self) -> bool:
-         if self._input_buffer and not self._closed:
-             return True
-         return False
-
-     def readable(self) -> bool:
-         return False
-
-     def flush(self) -> None:
-         # other code depends on this raising EPIPE if the pipe is closed
-         sent = os.write(self._fd, as_bytes(self._input_buffer))
-         self._input_buffer = self._input_buffer[sent:]
-
-     def handle_write_event(self) -> None:
-         if self._input_buffer:
-             try:
-                 self.flush()
-             except OSError as why:
-                 if why.args[0] == errno.EPIPE:
-                     self._input_buffer = b''
-                     self.close()
-                 else:
-                     raise
+         for d in self:
+             if isinstance(d, OutputDispatcher):
+                 d.reopen_logs()
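Note on the hunk above: the slimmed-down dispatchers.py now only defines the Dispatchers container, which subclasses KeyedCollection[int, Dispatcher] from the new collections.py (+52 lines) and keys each dispatcher by its fd. As a rough, hypothetical sketch of what such a keyed collection could look like (the constructor shape and method names here are assumptions, not the actual ominfra implementation):

import abc
import typing as ta

K = ta.TypeVar('K')
V = ta.TypeVar('V')


class KeyedCollection(ta.Generic[K, V], abc.ABC):
    # Hypothetical sketch; the real ominfra/supervisor/collections.py may differ in detail.
    def __init__(self, items: ta.Optional[ta.Iterable[V]] = None) -> None:
        super().__init__()
        self._by_key: ta.Dict[K, V] = {}
        for v in (items or ()):
            self._by_key[self._key(v)] = v

    @abc.abstractmethod
    def _key(self, v: V) -> K:
        raise NotImplementedError

    def __iter__(self) -> ta.Iterator[V]:
        return iter(self._by_key.values())

    def __len__(self) -> int:
        return len(self._by_key)

    def __getitem__(self, k: K) -> V:
        return self._by_key[k]

Something of this shape is enough to make the Dispatchers methods above work: iterating over self yields each registered dispatcher, and keying by v.fd gives lookup by file descriptor.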
ominfra/supervisor/dispatchersimpl.py (new file)
@@ -0,0 +1,342 @@
+ # ruff: noqa: UP006 UP007
+ import abc
+ import errno
+ import logging
+ import os
+ import typing as ta
+
+ from omlish.lite.logs import log
+
+ from .configs import ProcessConfig
+ from .events import EventCallbacks
+ from .events import ProcessCommunicationEvent
+ from .events import ProcessLogStderrEvent
+ from .events import ProcessLogStdoutEvent
+ from .types import Dispatcher
+ from .types import InputDispatcher
+ from .types import OutputDispatcher
+ from .types import Process
+ from .utils import as_bytes
+ from .utils import compact_traceback
+ from .utils import find_prefix_at_end
+ from .utils import read_fd
+ from .utils import strip_escapes
+
+
+ class BaseDispatcherImpl(Dispatcher, abc.ABC):
+     def __init__(
+             self,
+             process: Process,
+             channel: str,
+             fd: int,
+             *,
+             event_callbacks: EventCallbacks,
+     ) -> None:
+         super().__init__()
+
+         self._process = process  # process which "owns" this dispatcher
+         self._channel = channel  # 'stderr' or 'stdout'
+         self._fd = fd
+         self._event_callbacks = event_callbacks
+
+         self._closed = False  # True if close() has been called
+
+     #
+
+     def __repr__(self) -> str:
+         return f'<{self.__class__.__name__} at {id(self)} for {self._process} ({self._channel})>'
+
+     #
+
+     @property
+     def process(self) -> Process:
+         return self._process
+
+     @property
+     def channel(self) -> str:
+         return self._channel
+
+     @property
+     def fd(self) -> int:
+         return self._fd
+
+     @property
+     def closed(self) -> bool:
+         return self._closed
+
+     #
+
+     def close(self) -> None:
+         if not self._closed:
+             log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
+             self._closed = True
+
+     def handle_error(self) -> None:
+         nil, t, v, tbinfo = compact_traceback()
+
+         log.critical('uncaptured python exception, closing channel %s (%s:%s %s)', repr(self), t, v, tbinfo)
+         self.close()
+
+
+ class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
+     """
+     Dispatcher for one channel (stdout or stderr) of one process. Serves several purposes:
+
+     - capture output sent within <!--XSUPERVISOR:BEGIN--> and <!--XSUPERVISOR:END--> tags and signal a
+       ProcessCommunicationEvent by calling notify_event(event).
+     - route the output to the appropriate log handlers as specified in the config.
+     """
+
+     def __init__(
+             self,
+             process: Process,
+             event_type: ta.Type[ProcessCommunicationEvent],
+             fd: int,
+             *,
+             event_callbacks: EventCallbacks,
+     ) -> None:
+         super().__init__(
+             process,
+             event_type.channel,
+             fd,
+             event_callbacks=event_callbacks,
+         )
+
+         self._event_type = event_type
+
+         self._lc: ProcessConfig.Log = getattr(process.config, self._channel)
+
+         self._init_normal_log()
+         self._init_capture_log()
+
+         self._child_log = self._normal_log
+
+         self._capture_mode = False  # are we capturing process event data
+         self._output_buffer = b''  # data waiting to be logged
+
+         # all code below is purely for minor speedups
+
+         begin_token = self._event_type.BEGIN_TOKEN
+         end_token = self._event_type.END_TOKEN
+         self._begin_token_data = (begin_token, len(begin_token))
+         self._end_token_data = (end_token, len(end_token))
+
+         self._main_log_level = logging.DEBUG
+
+         self._log_to_main_log = process.context.config.loglevel <= self._main_log_level
+
+         config = self._process.config
+         self._stdout_events_enabled = config.stdout.events_enabled
+         self._stderr_events_enabled = config.stderr.events_enabled
+
+     _child_log: ta.Optional[logging.Logger] = None  # the current logger (normal_log or capture_log)
+     _normal_log: ta.Optional[logging.Logger] = None  # the "normal" (non-capture) logger
+     _capture_log: ta.Optional[logging.Logger] = None  # the logger used while we're in capture_mode
+
+     def _init_normal_log(self) -> None:
+         """
+         Configure the "normal" (non-capture) log for this channel of this process. Sets self.normal_log if logging is
+         enabled.
+         """
+
+         config = self._process.config  # noqa
+         channel = self._channel  # noqa
+
+         logfile = self._lc.file
+         maxbytes = self._lc.maxbytes  # noqa
+         backups = self._lc.backups  # noqa
+         to_syslog = self._lc.syslog
+
+         if logfile or to_syslog:
+             self._normal_log = logging.getLogger(__name__)
+
+         # if logfile:
+         #     loggers.handle_file(
+         #         self.normal_log,
+         #         filename=logfile,
+         #         fmt='%(message)s',
+         #         rotating=bool(maxbytes),  # optimization
+         #         maxbytes=maxbytes,
+         #         backups=backups,
+         #     )
+
+         # if to_syslog:
+         #     loggers.handle_syslog(
+         #         self.normal_log,
+         #         fmt=config.name + ' %(message)s',
+         #     )
+
+     def _init_capture_log(self) -> None:
+         """
+         Configure the capture log for this process. This log is used to temporarily capture output when special output
+         is detected. Sets self.capture_log if capturing is enabled.
+         """
+
+         capture_maxbytes = self._lc.capture_maxbytes
+         if capture_maxbytes:
+             self._capture_log = logging.getLogger(__name__)
+             # loggers.handle_boundIO(
+             #     self._capture_log,
+             #     fmt='%(message)s',
+             #     maxbytes=capture_maxbytes,
+             # )
+
+     def remove_logs(self) -> None:
+         for l in (self._normal_log, self._capture_log):
+             if l is not None:
+                 for handler in l.handlers:
+                     handler.remove()  # type: ignore
+                     handler.reopen()  # type: ignore
+
+     def reopen_logs(self) -> None:
+         for l in (self._normal_log, self._capture_log):
+             if l is not None:
+                 for handler in l.handlers:
+                     handler.reopen()  # type: ignore
+
+     def _log(self, data: ta.Union[str, bytes, None]) -> None:
+         if not data:
+             return
+
+         if self._process.context.config.strip_ansi:
+             data = strip_escapes(as_bytes(data))
+
+         if self._child_log:
+             self._child_log.info(data)
+
+         if self._log_to_main_log:
+             if not isinstance(data, bytes):
+                 text = data
+             else:
+                 try:
+                     text = data.decode('utf-8')
+                 except UnicodeDecodeError:
+                     text = f'Undecodable: {data!r}'
+             log.log(self._main_log_level, '%r %s output:\n%s', self._process.config.name, self._channel, text)  # noqa
+
+         if self._channel == 'stdout':
+             if self._stdout_events_enabled:
+                 self._event_callbacks.notify(ProcessLogStdoutEvent(self._process, self._process.pid, data))
+
+         elif self._stderr_events_enabled:
+             self._event_callbacks.notify(ProcessLogStderrEvent(self._process, self._process.pid, data))
+
+     def record_output(self) -> None:
+         if self._capture_log is None:
+             # shortcut trying to find capture data
+             data = self._output_buffer
+             self._output_buffer = b''
+             self._log(data)
+             return
+
+         if self._capture_mode:
+             token, token_len = self._end_token_data
+         else:
+             token, token_len = self._begin_token_data
+
+         if len(self._output_buffer) <= token_len:
+             return  # not enough data
+
+         data = self._output_buffer
+         self._output_buffer = b''
+
+         try:
+             before, after = data.split(token, 1)
+         except ValueError:
+             after = None
+             index = find_prefix_at_end(data, token)
+             if index:
+                 self._output_buffer = self._output_buffer + data[-index:]
+                 data = data[:-index]
+             self._log(data)
+         else:
+             self._log(before)
+             self.toggle_capture_mode()
+             self._output_buffer = after  # type: ignore
+
+         if after:
+             self.record_output()
+
+     def toggle_capture_mode(self) -> None:
+         self._capture_mode = not self._capture_mode
+
+         if self._capture_log is not None:
+             if self._capture_mode:
+                 self._child_log = self._capture_log
+             else:
+                 for handler in self._capture_log.handlers:
+                     handler.flush()
+                 data = self._capture_log.getvalue()  # type: ignore
+                 channel = self._channel
+                 procname = self._process.config.name
+                 event = self._event_type(self._process, self._process.pid, data)
+                 self._event_callbacks.notify(event)
+
+                 log.debug('%r %s emitted a comm event', procname, channel)
+                 for handler in self._capture_log.handlers:
+                     handler.remove()  # type: ignore
+                     handler.reopen()  # type: ignore
+                 self._child_log = self._normal_log
+
+     def writable(self) -> bool:
+         return False
+
+     def readable(self) -> bool:
+         if self._closed:
+             return False
+         return True
+
+     def handle_read_event(self) -> None:
+         data = read_fd(self._fd)
+         self._output_buffer += data
+         self.record_output()
+         if not data:
+             # if we get no data back from the pipe, it means that the child process has ended. See
+             # mail.python.org/pipermail/python-dev/2004-August/046850.html
+             self.close()
+
+
+ class InputDispatcherImpl(BaseDispatcherImpl, InputDispatcher):
+     def __init__(
+             self,
+             process: Process,
+             channel: str,
+             fd: int,
+             *,
+             event_callbacks: EventCallbacks,
+     ) -> None:
+         super().__init__(
+             process,
+             channel,
+             fd,
+             event_callbacks=event_callbacks,
+         )
+
+         self._input_buffer = b''
+
+     def write(self, chars: ta.Union[bytes, str]) -> None:
+         self._input_buffer += as_bytes(chars)
+
+     def writable(self) -> bool:
+         if self._input_buffer and not self._closed:
+             return True
+         return False
+
+     def readable(self) -> bool:
+         return False
+
+     def flush(self) -> None:
+         # other code depends on this raising EPIPE if the pipe is closed
+         sent = os.write(self._fd, as_bytes(self._input_buffer))
+         self._input_buffer = self._input_buffer[sent:]
+
+     def handle_write_event(self) -> None:
+         if self._input_buffer:
+             try:
+                 self.flush()
+             except OSError as why:
+                 if why.args[0] == errno.EPIPE:
+                     self._input_buffer = b''
+                     self.close()
+                 else:
+                     raise
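Note on the dispatcher interface moved into dispatchersimpl.py above: it is poll-driven, with readable() / writable() reporting interest and handle_read_event() / handle_write_event() invoked when the fd is ready. As a simplified, hypothetical illustration of how a supervisor-style event loop could drive such dispatchers (the real loop lives in supervisor.py and goes through the poller.py abstraction, so details differ):

import select
import typing as ta


class FdDispatcher(ta.Protocol):
    # Structural stand-in for the Dispatcher interface shown in this diff.
    @property
    def fd(self) -> int: ...
    def readable(self) -> bool: ...
    def writable(self) -> bool: ...
    def handle_read_event(self) -> None: ...
    def handle_write_event(self) -> None: ...


def poll_once(dispatchers: ta.Sequence[FdDispatcher], timeout: float = 1.0) -> None:
    # Ask each dispatcher whether it wants read/write readiness, then block in select().
    by_fd = {d.fd: d for d in dispatchers}
    readable = [fd for fd, d in by_fd.items() if d.readable()]
    writable = [fd for fd, d in by_fd.items() if d.writable()]
    r, w, _ = select.select(readable, writable, [], timeout)
    for fd in r:
        by_fd[fd].handle_read_event()
    for fd in w:
        by_fd[fd].handle_write_event()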