ominfra 0.0.0.dev125__py3-none-any.whl → 0.0.0.dev127__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only and reflects the packages exactly as they appear in that registry.
- ominfra/clouds/aws/auth.py +1 -1
- ominfra/deploy/_executor.py +1 -1
- ominfra/deploy/poly/_main.py +1 -1
- ominfra/pyremote/_runcommands.py +1 -1
- ominfra/scripts/journald2aws.py +2 -2
- ominfra/scripts/supervisor.py +1825 -1217
- ominfra/supervisor/collections.py +52 -0
- ominfra/supervisor/context.py +2 -336
- ominfra/supervisor/datatypes.py +1 -63
- ominfra/supervisor/dispatchers.py +22 -338
- ominfra/supervisor/dispatchersimpl.py +342 -0
- ominfra/supervisor/groups.py +33 -110
- ominfra/supervisor/groupsimpl.py +86 -0
- ominfra/supervisor/inject.py +45 -13
- ominfra/supervisor/main.py +1 -1
- ominfra/supervisor/pipes.py +83 -0
- ominfra/supervisor/poller.py +6 -3
- ominfra/supervisor/privileges.py +65 -0
- ominfra/supervisor/processes.py +18 -0
- ominfra/supervisor/{process.py → processesimpl.py} +99 -317
- ominfra/supervisor/setup.py +38 -0
- ominfra/supervisor/setupimpl.py +261 -0
- ominfra/supervisor/signals.py +24 -16
- ominfra/supervisor/spawning.py +31 -0
- ominfra/supervisor/spawningimpl.py +347 -0
- ominfra/supervisor/supervisor.py +54 -78
- ominfra/supervisor/types.py +122 -39
- ominfra/supervisor/users.py +64 -0
- {ominfra-0.0.0.dev125.dist-info → ominfra-0.0.0.dev127.dist-info}/METADATA +3 -3
- {ominfra-0.0.0.dev125.dist-info → ominfra-0.0.0.dev127.dist-info}/RECORD +34 -23
- {ominfra-0.0.0.dev125.dist-info → ominfra-0.0.0.dev127.dist-info}/LICENSE +0 -0
- {ominfra-0.0.0.dev125.dist-info → ominfra-0.0.0.dev127.dist-info}/WHEEL +0 -0
- {ominfra-0.0.0.dev125.dist-info → ominfra-0.0.0.dev127.dist-info}/entry_points.txt +0 -0
- {ominfra-0.0.0.dev125.dist-info → ominfra-0.0.0.dev127.dist-info}/top_level.txt +0 -0
ominfra/supervisor/dispatchers.py (@@ -1,348 +1,32 @@)

This module shrinks from the full dispatcher implementations to a small keyed collection. Removed here are the old `Dispatcher` base class and the `OutputDispatcher` / `InputDispatcher` implementations (the repr/property/close/handle_error plumbing, the normal- and capture-log handling, and the EPIPE-aware input flushing); that code moves, largely unchanged, into the new `ominfra/supervisor/dispatchersimpl.py` shown below, where the constructors take `event_callbacks` as a keyword-only argument and the classes subclass the abstract dispatcher types from `.types`. What remains in `dispatchers.py` is:

```python
# ruff: noqa: UP006 UP007
from .collections import KeyedCollection
from .types import Dispatcher
from .types import OutputDispatcher


class Dispatchers(KeyedCollection[int, Dispatcher]):
    def _key(self, v: Dispatcher) -> int:
        return v.fd

    #

    def drain(self) -> None:
        for d in self:
            # note that we *must* call readable() for every dispatcher, as it may have side effects for a given
            # dispatcher (eg. call handle_listener_state_change for event listener processes)
            if d.readable():
                d.handle_read_event()
            if d.writable():
                d.handle_write_event()

    #

    def remove_logs(self) -> None:
        for d in self:
            if isinstance(d, OutputDispatcher):
                d.remove_logs()

    def reopen_logs(self) -> None:
        for d in self:
            if isinstance(d, OutputDispatcher):
                d.reopen_logs()
```
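`Dispatchers` leans on the `KeyedCollection` base added in `ominfra/supervisor/collections.py` (+52 lines in this release), whose contents are not part of this diff. The sketch below is only an assumed minimal shape — values indexed by `_key(v)` and iterable — enough to make `drain()`, `remove_logs()`, and `reopen_logs()` above read naturally; the real class may differ (duplicate-key checks, mapping helpers, etc.).

```python
# Hypothetical minimal KeyedCollection -- an assumption for illustration only; the
# actual ominfra/supervisor/collections.py is not shown in this diff.
import typing as ta

K = ta.TypeVar('K')
V = ta.TypeVar('V')


class KeyedCollection(ta.Generic[K, V]):
    def __init__(self, items: ta.Optional[ta.Iterable[V]] = None) -> None:
        super().__init__()
        by_key: ta.Dict[K, V] = {}
        for v in (items or ()):
            by_key[self._key(v)] = v  # last writer wins; the real class may forbid duplicates
        self._by_key = by_key

    def _key(self, v: V) -> K:  # subclasses pick the key, e.g. Dispatchers keys by fd
        raise NotImplementedError

    def __iter__(self) -> ta.Iterator[V]:
        return iter(self._by_key.values())

    def __len__(self) -> int:
        return len(self._by_key)

    def __getitem__(self, k: K) -> V:
        return self._by_key[k]
```

Under that assumption, a poll-loop tick would build `Dispatchers([stdout_d, stderr_d, stdin_d])` from a process's pipe dispatchers (each keyed by its fd) and call `.drain()` once the poller reports activity.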
ominfra/supervisor/dispatchersimpl.py (new file, @@ -0,0 +1,342 @@)

The dispatcher implementation classes extracted from `dispatchers.py`:

```python
# ruff: noqa: UP006 UP007
import abc
import errno
import logging
import os
import typing as ta

from omlish.lite.logs import log

from .configs import ProcessConfig
from .events import EventCallbacks
from .events import ProcessCommunicationEvent
from .events import ProcessLogStderrEvent
from .events import ProcessLogStdoutEvent
from .types import Dispatcher
from .types import InputDispatcher
from .types import OutputDispatcher
from .types import Process
from .utils import as_bytes
from .utils import compact_traceback
from .utils import find_prefix_at_end
from .utils import read_fd
from .utils import strip_escapes


class BaseDispatcherImpl(Dispatcher, abc.ABC):
    def __init__(
            self,
            process: Process,
            channel: str,
            fd: int,
            *,
            event_callbacks: EventCallbacks,
    ) -> None:
        super().__init__()

        self._process = process  # process which "owns" this dispatcher
        self._channel = channel  # 'stderr' or 'stdout'
        self._fd = fd
        self._event_callbacks = event_callbacks

        self._closed = False  # True if close() has been called

    #

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} at {id(self)} for {self._process} ({self._channel})>'

    #

    @property
    def process(self) -> Process:
        return self._process

    @property
    def channel(self) -> str:
        return self._channel

    @property
    def fd(self) -> int:
        return self._fd

    @property
    def closed(self) -> bool:
        return self._closed

    #

    def close(self) -> None:
        if not self._closed:
            log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
            self._closed = True

    def handle_error(self) -> None:
        nil, t, v, tbinfo = compact_traceback()

        log.critical('uncaptured python exception, closing channel %s (%s:%s %s)', repr(self), t, v, tbinfo)
        self.close()


class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
    """
    Dispatcher for one channel (stdout or stderr) of one process. Serves several purposes:

    - capture output sent within <!--XSUPERVISOR:BEGIN--> and <!--XSUPERVISOR:END--> tags and signal a
      ProcessCommunicationEvent by calling notify_event(event).
    - route the output to the appropriate log handlers as specified in the config.
    """

    def __init__(
            self,
            process: Process,
            event_type: ta.Type[ProcessCommunicationEvent],
            fd: int,
            *,
            event_callbacks: EventCallbacks,
    ) -> None:
        super().__init__(
            process,
            event_type.channel,
            fd,
            event_callbacks=event_callbacks,
        )

        self._event_type = event_type

        self._lc: ProcessConfig.Log = getattr(process.config, self._channel)

        self._init_normal_log()
        self._init_capture_log()

        self._child_log = self._normal_log

        self._capture_mode = False  # are we capturing process event data
        self._output_buffer = b''  # data waiting to be logged

        # all code below is purely for minor speedups

        begin_token = self._event_type.BEGIN_TOKEN
        end_token = self._event_type.END_TOKEN
        self._begin_token_data = (begin_token, len(begin_token))
        self._end_token_data = (end_token, len(end_token))

        self._main_log_level = logging.DEBUG

        self._log_to_main_log = process.context.config.loglevel <= self._main_log_level

        config = self._process.config
        self._stdout_events_enabled = config.stdout.events_enabled
        self._stderr_events_enabled = config.stderr.events_enabled

    _child_log: ta.Optional[logging.Logger] = None  # the current logger (normal_log or capture_log)
    _normal_log: ta.Optional[logging.Logger] = None  # the "normal" (non-capture) logger
    _capture_log: ta.Optional[logging.Logger] = None  # the logger used while we're in capture_mode

    def _init_normal_log(self) -> None:
        """
        Configure the "normal" (non-capture) log for this channel of this process. Sets self.normal_log if logging is
        enabled.
        """

        config = self._process.config  # noqa
        channel = self._channel  # noqa

        logfile = self._lc.file
        maxbytes = self._lc.maxbytes  # noqa
        backups = self._lc.backups  # noqa
        to_syslog = self._lc.syslog

        if logfile or to_syslog:
            self._normal_log = logging.getLogger(__name__)

        # if logfile:
        #     loggers.handle_file(
        #         self.normal_log,
        #         filename=logfile,
        #         fmt='%(message)s',
        #         rotating=bool(maxbytes),  # optimization
        #         maxbytes=maxbytes,
        #         backups=backups,
        #     )

        # if to_syslog:
        #     loggers.handle_syslog(
        #         self.normal_log,
        #         fmt=config.name + ' %(message)s',
        #     )

    def _init_capture_log(self) -> None:
        """
        Configure the capture log for this process. This log is used to temporarily capture output when special output
        is detected. Sets self.capture_log if capturing is enabled.
        """

        capture_maxbytes = self._lc.capture_maxbytes
        if capture_maxbytes:
            self._capture_log = logging.getLogger(__name__)
            # loggers.handle_boundIO(
            #     self._capture_log,
            #     fmt='%(message)s',
            #     maxbytes=capture_maxbytes,
            # )

    def remove_logs(self) -> None:
        for l in (self._normal_log, self._capture_log):
            if l is not None:
                for handler in l.handlers:
                    handler.remove()  # type: ignore
                    handler.reopen()  # type: ignore

    def reopen_logs(self) -> None:
        for l in (self._normal_log, self._capture_log):
            if l is not None:
                for handler in l.handlers:
                    handler.reopen()  # type: ignore

    def _log(self, data: ta.Union[str, bytes, None]) -> None:
        if not data:
            return

        if self._process.context.config.strip_ansi:
            data = strip_escapes(as_bytes(data))

        if self._child_log:
            self._child_log.info(data)

        if self._log_to_main_log:
            if not isinstance(data, bytes):
                text = data
            else:
                try:
                    text = data.decode('utf-8')
                except UnicodeDecodeError:
                    text = f'Undecodable: {data!r}'
            log.log(self._main_log_level, '%r %s output:\n%s', self._process.config.name, self._channel, text)  # noqa

        if self._channel == 'stdout':
            if self._stdout_events_enabled:
                self._event_callbacks.notify(ProcessLogStdoutEvent(self._process, self._process.pid, data))

        elif self._stderr_events_enabled:
            self._event_callbacks.notify(ProcessLogStderrEvent(self._process, self._process.pid, data))

    def record_output(self) -> None:
        if self._capture_log is None:
            # shortcut trying to find capture data
            data = self._output_buffer
            self._output_buffer = b''
            self._log(data)
            return

        if self._capture_mode:
            token, token_len = self._end_token_data
        else:
            token, token_len = self._begin_token_data

        if len(self._output_buffer) <= token_len:
            return  # not enough data

        data = self._output_buffer
        self._output_buffer = b''

        try:
            before, after = data.split(token, 1)
        except ValueError:
            after = None
            index = find_prefix_at_end(data, token)
            if index:
                self._output_buffer = self._output_buffer + data[-index:]
                data = data[:-index]
            self._log(data)
        else:
            self._log(before)
            self.toggle_capture_mode()
            self._output_buffer = after  # type: ignore

        if after:
            self.record_output()

    def toggle_capture_mode(self) -> None:
        self._capture_mode = not self._capture_mode

        if self._capture_log is not None:
            if self._capture_mode:
                self._child_log = self._capture_log
            else:
                for handler in self._capture_log.handlers:
                    handler.flush()
                data = self._capture_log.getvalue()  # type: ignore
                channel = self._channel
                procname = self._process.config.name
                event = self._event_type(self._process, self._process.pid, data)
                self._event_callbacks.notify(event)

                log.debug('%r %s emitted a comm event', procname, channel)
                for handler in self._capture_log.handlers:
                    handler.remove()  # type: ignore
                    handler.reopen()  # type: ignore
                self._child_log = self._normal_log

    def writable(self) -> bool:
        return False

    def readable(self) -> bool:
        if self._closed:
            return False
        return True

    def handle_read_event(self) -> None:
        data = read_fd(self._fd)
        self._output_buffer += data
        self.record_output()
        if not data:
            # if we get no data back from the pipe, it means that the child process has ended. See
            # mail.python.org/pipermail/python-dev/2004-August/046850.html
            self.close()


class InputDispatcherImpl(BaseDispatcherImpl, InputDispatcher):
    def __init__(
            self,
            process: Process,
            channel: str,
            fd: int,
            *,
            event_callbacks: EventCallbacks,
    ) -> None:
        super().__init__(
            process,
            channel,
            fd,
            event_callbacks=event_callbacks,
        )

        self._input_buffer = b''

    def write(self, chars: ta.Union[bytes, str]) -> None:
        self._input_buffer += as_bytes(chars)

    def writable(self) -> bool:
        if self._input_buffer and not self._closed:
            return True
        return False

    def readable(self) -> bool:
        return False

    def flush(self) -> None:
        # other code depends on this raising EPIPE if the pipe is closed
        sent = os.write(self._fd, as_bytes(self._input_buffer))
        self._input_buffer = self._input_buffer[sent:]

    def handle_write_event(self) -> None:
        if self._input_buffer:
            try:
                self.flush()
            except OSError as why:
                if why.args[0] == errno.EPIPE:
                    self._input_buffer = b''
                    self.close()
                else:
                    raise
```
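The subtlest part of `OutputDispatcherImpl.record_output()` is handling a capture token that arrives split across two pipe reads: the `ValueError` branch holds back any tail of the buffer that is a prefix of the token, so it can still be matched once more data arrives. A standalone illustration, assuming `find_prefix_at_end` in `.utils` behaves like the classic supervisord helper sketched here:

```python
# Sketch of record_output()'s partial-token handling. find_prefix_at_end is assumed to
# return the length of the longest proper prefix of `needle` that `haystack` ends with.
def find_prefix_at_end(haystack: bytes, needle: bytes) -> int:
    n = len(needle) - 1
    while n and not haystack.endswith(needle[:n]):
        n -= 1
    return n


token = b'<!--XSUPERVISOR:BEGIN-->'
buf = b'plain output <!--XSUPERV'       # BEGIN token split across two reads

held = find_prefix_at_end(buf, token)   # 11 -- length of the matching tail
print(buf[:-held])                      # b'plain output ' -> logged immediately
print(buf[-held:])                      # b'<!--XSUPERV'   -> stays in _output_buffer
```

Because the held tail is left in `_output_buffer` and the next read is appended to it, a complete `<!--XSUPERVISOR:BEGIN-->` can still be matched, at which point `toggle_capture_mode()` routes subsequent output to the capture log until the END token appears.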