ominfra 0.0.0.dev76__py3-none-any.whl → 0.0.0.dev77__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,304 @@
+ import abc
+ import errno
+ import logging
+ import os
+
+ from .compat import as_bytes
+ from .compat import compact_traceback
+ from .compat import find_prefix_at_end
+ from .compat import readfd
+ from .compat import strip_escapes
+ from .configs import ProcessConfig
+ from .events import ProcessLogStderrEvent
+ from .events import ProcessLogStdoutEvent
+ from .events import notify_event
+ from .types import AbstractSubprocess
+
+
+ log = logging.getLogger(__name__)
+
+
+ class Dispatcher(abc.ABC):
+
+     def __init__(self, process: AbstractSubprocess, channel: str, fd: int) -> None:
+         super().__init__()
+
+         self._process = process  # process which "owns" this dispatcher
+         self._channel = channel  # 'stderr' or 'stdout'
+         self._fd = fd
+         self._closed = False  # True if close() has been called
+
+     def __repr__(self) -> str:
+         return f'<{self.__class__.__name__} at {id(self)} for {self._process} ({self._channel})>'
+
+     @property
+     def process(self) -> AbstractSubprocess:
+         return self._process
+
+     @property
+     def channel(self) -> str:
+         return self._channel
+
+     @property
+     def fd(self) -> int:
+         return self._fd
+
+     @property
+     def closed(self) -> bool:
+         return self._closed
+
+     @abc.abstractmethod
+     def readable(self) -> bool:
+         raise NotImplementedError
+
+     @abc.abstractmethod
+     def writable(self) -> bool:
+         raise NotImplementedError
+
+     def handle_read_event(self) -> None:
+         raise TypeError
+
+     def handle_write_event(self) -> None:
+         raise TypeError
+
+     def handle_error(self) -> None:
+         nil, t, v, tbinfo = compact_traceback()
+
+         log.critical('uncaptured python exception, closing channel %s (%s:%s %s)', repr(self), t, v, tbinfo)
+         self.close()
+
+     def close(self) -> None:
+         if not self._closed:
+             log.debug('fd %s closed, stopped monitoring %s', self._fd, self)
+             self._closed = True
+
+     def flush(self) -> None:  # noqa
+         pass
+
+
+ class OutputDispatcher(Dispatcher):
+     """
+     Dispatcher for one channel (stdout or stderr) of one process. Serves several purposes:
+
+     - capture output sent within <!--XSUPERVISOR:BEGIN--> and <!--XSUPERVISOR:END--> tags and signal a
+       ProcessCommunicationEvent by calling notify_event(event).
+     - route the output to the appropriate log handlers as specified in the config.
+     """
+
+     child_log = None  # the current logger (normal_log or capture_log)
+     normal_log = None  # the "normal" (non-capture) logger
+     capture_log = None  # the logger used while we're in capture_mode
+     capture_mode = False  # are we capturing process event data
+     output_buffer = b''  # data waiting to be logged
+
+     def __init__(self, process: AbstractSubprocess, event_type, fd):
+         """
+         Initialize the dispatcher.
+
+         `event_type` should be one of ProcessLogStdoutEvent or ProcessLogStderrEvent
+         """
+         super().__init__(process, event_type.channel, fd)
+         self.event_type = event_type
+
+         self.lc: ProcessConfig.Log = getattr(process.config, self._channel)
+
+         self._init_normal_log()
+         self._init_capture_log()
+
+         self.child_log = self.normal_log
+
+         # all code below is purely for minor speedups
+         begin_token = self.event_type.BEGIN_TOKEN
+         end_token = self.event_type.END_TOKEN
+         self.begin_token_data = (begin_token, len(begin_token))
+         self.end_token_data = (end_token, len(end_token))
+         self.main_log_level = logging.DEBUG
+         config = self._process.config
+         self.log_to_main_log = process.context.config.loglevel <= self.main_log_level
+         self.stdout_events_enabled = config.stdout.events_enabled
+         self.stderr_events_enabled = config.stderr.events_enabled
+
+     def _init_normal_log(self) -> None:
+         """
+         Configure the "normal" (non-capture) log for this channel of this process. Sets self.normal_log if logging is
+         enabled.
+         """
+         config = self._process.config  # noqa
+         channel = self._channel  # noqa
+
+         logfile = self.lc.file
+         maxbytes = self.lc.maxbytes  # noqa
+         backups = self.lc.backups  # noqa
+         to_syslog = self.lc.syslog
+
+         if logfile or to_syslog:
+             self.normal_log = logging.getLogger(__name__)
+
+         # if logfile:
+         #     loggers.handle_file(
+         #         self.normal_log,
+         #         filename=logfile,
+         #         fmt='%(message)s',
+         #         rotating=bool(maxbytes),  # optimization
+         #         maxbytes=maxbytes,
+         #         backups=backups,
+         #     )
+         #
+         # if to_syslog:
+         #     loggers.handle_syslog(
+         #         self.normal_log,
+         #         fmt=config.name + ' %(message)s',
+         #     )
+
+     def _init_capture_log(self):
+         """
+         Configure the capture log for this process. This log is used to temporarily capture output when special output
+         is detected. Sets self.capture_log if capturing is enabled.
+         """
+         capture_maxbytes = self.lc.capture_maxbytes
+         if capture_maxbytes:
+             self.capture_log = logging.getLogger(__name__)
+             # loggers.handle_boundIO(
+             #     self.capture_log,
+             #     fmt='%(message)s',
+             #     maxbytes=capture_maxbytes,
+             # )
+
+     def remove_logs(self):
+         for log in (self.normal_log, self.capture_log):
+             if log is not None:
+                 for handler in log.handlers:
+                     handler.remove()  # type: ignore
+                     handler.reopen()  # type: ignore
+
+     def reopen_logs(self):
+         for log in (self.normal_log, self.capture_log):
+             if log is not None:
+                 for handler in log.handlers:
+                     handler.reopen()  # type: ignore
+
+     def _log(self, data):
+         if data:
+             if self._process.context.config.strip_ansi:
+                 data = strip_escapes(data)
+             if self.child_log:
+                 self.child_log.info(data)  # type: ignore
+             if self.log_to_main_log:
+                 if not isinstance(data, bytes):
+                     text = data
+                 else:
+                     try:
+                         text = data.decode('utf-8')
+                     except UnicodeDecodeError:
+                         text = f'Undecodable: {data!r}'
+                 log.log(self.main_log_level, '%r %s output:\n%s', self._process.config.name, self._channel, text)  # noqa
+             if self._channel == 'stdout':
+                 if self.stdout_events_enabled:
+                     notify_event(ProcessLogStdoutEvent(self._process, self._process.pid, data))
+             elif self.stderr_events_enabled:
+                 notify_event(ProcessLogStderrEvent(self._process, self._process.pid, data))
+
+     def record_output(self):
+         if self.capture_log is None:
+             # shortcut trying to find capture data
+             data = self.output_buffer
+             self.output_buffer = b''
+             self._log(data)
+             return
+
+         if self.capture_mode:
+             token, tokenlen = self.end_token_data
+         else:
+             token, tokenlen = self.begin_token_data
+
+         if len(self.output_buffer) <= tokenlen:
+             return  # not enough data
+
+         data = self.output_buffer
+         self.output_buffer = b''
+
+         try:
+             before, after = data.split(token, 1)
+         except ValueError:
+             after = None
+             index = find_prefix_at_end(data, token)
+             if index:
+                 self.output_buffer = self.output_buffer + data[-index:]
+                 data = data[:-index]
+             self._log(data)
+         else:
+             self._log(before)
+             self.toggle_capture_mode()
+             self.output_buffer = after  # type: ignore
+
+         if after:
+             self.record_output()
+
+     def toggle_capture_mode(self):
+         self.capture_mode = not self.capture_mode
+
+         if self.capture_log is not None:
+             if self.capture_mode:
+                 self.child_log = self.capture_log
+             else:
+                 for handler in self.capture_log.handlers:
+                     handler.flush()
+                 data = self.capture_log.getvalue()  # type: ignore
+                 channel = self._channel
+                 procname = self._process.config.name
+                 event = self.event_type(self._process, self._process.pid, data)
+                 notify_event(event)
+
+                 log.debug('%r %s emitted a comm event', procname, channel)
+                 for handler in self.capture_log.handlers:
+                     handler.remove()  # type: ignore
+                     handler.reopen()  # type: ignore
+                 self.child_log = self.normal_log
+
+     def writable(self) -> bool:
+         return False
+
+     def readable(self) -> bool:
+         if self._closed:
+             return False
+         return True
+
+     def handle_read_event(self) -> None:
+         data = readfd(self._fd)
+         self.output_buffer += data
+         self.record_output()
+         if not data:
+             # if we get no data back from the pipe, it means that the child process has ended. See
+             # mail.python.org/pipermail/python-dev/2004-August/046850.html
+             self.close()
+
+
+ class InputDispatcher(Dispatcher):
+
+     def __init__(self, process: AbstractSubprocess, channel: str, fd: int) -> None:
+         super().__init__(process, channel, fd)
+         self._input_buffer = b''
+
+     def writable(self) -> bool:
+         if self._input_buffer and not self._closed:
+             return True
+         return False
+
+     def readable(self) -> bool:
+         return False
+
+     def flush(self) -> None:
+         # other code depends on this raising EPIPE if the pipe is closed
+         sent = os.write(self._fd, as_bytes(self._input_buffer))
+         self._input_buffer = self._input_buffer[sent:]
+
+     def handle_write_event(self) -> None:
+         if self._input_buffer:
+             try:
+                 self.flush()
+             except OSError as why:
+                 if why.args[0] == errno.EPIPE:
+                     self._input_buffer = b''
+                     self.close()
+                 else:
+                     raise
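
The OutputDispatcher above routes child output to its loggers and, when a capture log is configured (capture_maxbytes), buffers everything between the BEGIN/END tokens and republishes it through notify_event(). A minimal sketch of the child-side half of that protocol follows; it is illustrative only and not part of the package, and emit_comm_event is a hypothetical helper:

```python
# Sketch of a supervised child emitting a capture-delimited payload on stdout.
# The tokens match ProcessCommunicationEvent.BEGIN_TOKEN / END_TOKEN defined in
# the events module below; the dispatcher only captures them when
# capture_maxbytes is configured for this channel.
import sys


def emit_comm_event(payload: str) -> None:  # hypothetical helper, not in the package
    sys.stdout.write('<!--XSUPERVISOR:BEGIN-->')
    sys.stdout.write(payload)
    sys.stdout.write('<!--XSUPERVISOR:END-->')
    sys.stdout.flush()  # flush so the dispatcher sees the END token promptly


if __name__ == '__main__':
    emit_comm_event('{"progress": 42}')
```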
@@ -0,0 +1,304 @@
+ # ruff: noqa: UP006 UP007
+ import typing as ta
+
+ from .compat import as_string
+ from .states import get_process_state_description
+
+
+ class EventCallbacks:
+     def __init__(self) -> None:
+         super().__init__()
+
+         self._callbacks: ta.List[ta.Tuple[type, ta.Callable]] = []
+
+     def subscribe(self, type, callback):  # noqa
+         self._callbacks.append((type, callback))
+
+     def unsubscribe(self, type, callback):  # noqa
+         self._callbacks.remove((type, callback))
+
+     def notify(self, event):
+         for type, callback in self._callbacks:  # noqa
+             if isinstance(event, type):
+                 callback(event)
+
+     def clear(self):
+         self._callbacks[:] = []
+
+
+ EVENT_CALLBACKS = EventCallbacks()
+
+ notify_event = EVENT_CALLBACKS.notify
+ clear_events = EVENT_CALLBACKS.clear
+
+
+ class Event:
+     """Abstract event type """
+
+
+ class ProcessLogEvent(Event):
+     """Abstract"""
+     channel: ta.Optional[str] = None
+
+     def __init__(self, process, pid, data):
+         super().__init__()
+         self.process = process
+         self.pid = pid
+         self.data = data
+
+     def payload(self):
+         groupname = ''
+         if self.process.group is not None:
+             groupname = self.process.group.config.name
+         try:
+             data = as_string(self.data)
+         except UnicodeDecodeError:
+             data = f'Undecodable: {self.data!r}'
+         fmt = as_string('processname:%s groupname:%s pid:%s channel:%s\n%s')
+         result = fmt % (
+             as_string(self.process.config.name),
+             as_string(groupname),
+             self.pid,
+             as_string(self.channel),  # type: ignore
+             data,
+         )
+         return result
+
+
+ class ProcessLogStdoutEvent(ProcessLogEvent):
+     channel = 'stdout'
+
+
+ class ProcessLogStderrEvent(ProcessLogEvent):
+     channel = 'stderr'
+
+
+ class ProcessCommunicationEvent(Event):
+     """ Abstract """
+     # event mode tokens
+     BEGIN_TOKEN = b'<!--XSUPERVISOR:BEGIN-->'
+     END_TOKEN = b'<!--XSUPERVISOR:END-->'
+
+     def __init__(self, process, pid, data):
+         super().__init__()
+         self.process = process
+         self.pid = pid
+         self.data = data
+
+     def payload(self):
+         groupname = ''
+         if self.process.group is not None:
+             groupname = self.process.group.config.name
+         try:
+             data = as_string(self.data)
+         except UnicodeDecodeError:
+             data = f'Undecodable: {self.data!r}'
+         return f'processname:{self.process.config.name} groupname:{groupname} pid:{self.pid}\n{data}'
+
+
+ class ProcessCommunicationStdoutEvent(ProcessCommunicationEvent):
+     channel = 'stdout'
+
+
+ class ProcessCommunicationStderrEvent(ProcessCommunicationEvent):
+     channel = 'stderr'
+
+
+ class RemoteCommunicationEvent(Event):
+     def __init__(self, type, data):  # noqa
+         super().__init__()
+         self.type = type
+         self.data = data
+
+     def payload(self):
+         return f'type:{self.type}\n{self.data}'
+
+
+ class SupervisorStateChangeEvent(Event):
+     """ Abstract class """
+
+     def payload(self):
+         return ''
+
+
+ class SupervisorRunningEvent(SupervisorStateChangeEvent):
+     pass
+
+
+ class SupervisorStoppingEvent(SupervisorStateChangeEvent):
+     pass
+
+
+ class EventRejectedEvent:  # purposely does not subclass Event
+     def __init__(self, process, event):
+         super().__init__()
+         self.process = process
+         self.event = event
+
+
+ class ProcessStateEvent(Event):
+     """ Abstract class, never raised directly """
+     frm = None
+     to = None
+
+     def __init__(self, process, from_state, expected=True):
+         super().__init__()
+         self.process = process
+         self.from_state = from_state
+         self.expected = expected
+         # we eagerly render these so if the process pid, etc changes beneath
+         # us, we stash the values at the time the event was sent
+         self.extra_values = self.get_extra_values()
+
+     def payload(self):
+         groupname = ''
+         if self.process.group is not None:
+             groupname = self.process.group.config.name
+         l = [
+             ('processname', self.process.config.name),
+             ('groupname', groupname),
+             ('from_state', get_process_state_description(self.from_state)),
+         ]
+         l.extend(self.extra_values)
+         s = ' '.join([f'{name}:{val}' for name, val in l])
+         return s
+
+     def get_extra_values(self):
+         return []
+
+
+ class ProcessStateFatalEvent(ProcessStateEvent):
+     pass
+
+
+ class ProcessStateUnknownEvent(ProcessStateEvent):
+     pass
+
+
+ class ProcessStateStartingOrBackoffEvent(ProcessStateEvent):
+     def get_extra_values(self):
+         return [('tries', int(self.process.backoff))]
+
+
+ class ProcessStateBackoffEvent(ProcessStateStartingOrBackoffEvent):
+     pass
+
+
+ class ProcessStateStartingEvent(ProcessStateStartingOrBackoffEvent):
+     pass
+
+
+ class ProcessStateExitedEvent(ProcessStateEvent):
+     def get_extra_values(self):
+         return [('expected', int(self.expected)), ('pid', self.process.pid)]
+
+
+ class ProcessStateRunningEvent(ProcessStateEvent):
+     def get_extra_values(self):
+         return [('pid', self.process.pid)]
+
+
+ class ProcessStateStoppingEvent(ProcessStateEvent):
+     def get_extra_values(self):
+         return [('pid', self.process.pid)]
+
+
+ class ProcessStateStoppedEvent(ProcessStateEvent):
+     def get_extra_values(self):
+         return [('pid', self.process.pid)]
+
+
+ class ProcessGroupEvent(Event):
+     def __init__(self, group):
+         super().__init__()
+         self.group = group
+
+     def payload(self):
+         return f'groupname:{self.group}\n'
+
+
+ class ProcessGroupAddedEvent(ProcessGroupEvent):
+     pass
+
+
+ class ProcessGroupRemovedEvent(ProcessGroupEvent):
+     pass
+
+
+ class TickEvent(Event):
+     """ Abstract """
+
+     def __init__(self, when, supervisord):
+         super().__init__()
+         self.when = when
+         self.supervisord = supervisord
+
+     def payload(self):
+         return f'when:{self.when}'
+
+
+ class Tick5Event(TickEvent):
+     period = 5
+
+
+ class Tick60Event(TickEvent):
+     period = 60
+
+
+ class Tick3600Event(TickEvent):
+     period = 3600
+
+
+ TICK_EVENTS = [  # imported elsewhere
+     Tick5Event,
+     Tick60Event,
+     Tick3600Event,
+ ]
+
+
+ class EventTypes:
+     EVENT = Event  # abstract
+
+     PROCESS_STATE = ProcessStateEvent  # abstract
+     PROCESS_STATE_STOPPED = ProcessStateStoppedEvent
+     PROCESS_STATE_EXITED = ProcessStateExitedEvent
+     PROCESS_STATE_STARTING = ProcessStateStartingEvent
+     PROCESS_STATE_STOPPING = ProcessStateStoppingEvent
+     PROCESS_STATE_BACKOFF = ProcessStateBackoffEvent
+     PROCESS_STATE_FATAL = ProcessStateFatalEvent
+     PROCESS_STATE_RUNNING = ProcessStateRunningEvent
+     PROCESS_STATE_UNKNOWN = ProcessStateUnknownEvent
+
+     PROCESS_COMMUNICATION = ProcessCommunicationEvent  # abstract
+     PROCESS_COMMUNICATION_STDOUT = ProcessCommunicationStdoutEvent
+     PROCESS_COMMUNICATION_STDERR = ProcessCommunicationStderrEvent
+
+     PROCESS_LOG = ProcessLogEvent
+     PROCESS_LOG_STDOUT = ProcessLogStdoutEvent
+     PROCESS_LOG_STDERR = ProcessLogStderrEvent
+
+     REMOTE_COMMUNICATION = RemoteCommunicationEvent
+
+     SUPERVISOR_STATE_CHANGE = SupervisorStateChangeEvent  # abstract
+     SUPERVISOR_STATE_CHANGE_RUNNING = SupervisorRunningEvent
+     SUPERVISOR_STATE_CHANGE_STOPPING = SupervisorStoppingEvent
+
+     TICK = TickEvent  # abstract
+     TICK_5 = Tick5Event
+     TICK_60 = Tick60Event
+     TICK_3600 = Tick3600Event
+
+     PROCESS_GROUP = ProcessGroupEvent  # abstract
+     PROCESS_GROUP_ADDED = ProcessGroupAddedEvent
+     PROCESS_GROUP_REMOVED = ProcessGroupRemovedEvent
+
+
+ def get_event_name_by_type(requested):
+     for name, typ in EventTypes.__dict__.items():
+         if typ is requested:
+             return name
+     return None
+
+
+ def register(name, event):
+     setattr(EventTypes, name, event)
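
EventCallbacks dispatches by isinstance(), so subscribing to an abstract type such as ProcessStateEvent also receives all of its concrete subclasses. A small usage sketch follows; the import path ominfra.supervisor.events is an assumption, since the diff does not show file names:

```python
# Usage sketch only; the module path is assumed, not confirmed by this diff.
from ominfra.supervisor.events import EVENT_CALLBACKS
from ominfra.supervisor.events import ProcessLogStdoutEvent
from ominfra.supervisor.events import notify_event


def on_stdout(event: ProcessLogStdoutEvent) -> None:
    # notify() checks isinstance(event, subscribed_type), so this also fires
    # for any subclass of ProcessLogStdoutEvent.
    print(f'pid {event.pid} stdout: {event.data!r}')


EVENT_CALLBACKS.subscribe(ProcessLogStdoutEvent, on_stdout)

# Anything in the supervisor that calls notify_event() with a matching event
# type now reaches the callback:
notify_event(ProcessLogStdoutEvent(process=None, pid=1234, data=b'hello\n'))
```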
@@ -0,0 +1,22 @@
+ class ProcessError(Exception):
+     """ Specialized exceptions used when attempting to start a process """
+
+
+ class BadCommandError(ProcessError):
+     """ Indicates the command could not be parsed properly. """
+
+
+ class NotExecutableError(ProcessError):
+     """ Indicates that the filespec cannot be executed because its path
+     resolves to a file which is not executable, or which is a directory. """
+
+
+ class NotFoundError(ProcessError):
+     """ Indicates that the filespec cannot be executed because it could not be found """
+
+
+ class NoPermissionError(ProcessError):
+     """
+     Indicates that the file cannot be executed because the supervisor process does not possess the appropriate UNIX
+     filesystem permission to execute the file.
+     """