ominfra 0.0.0.dev121__py3-none-any.whl → 0.0.0.dev123__py3-none-any.whl

@@ -6,57 +6,47 @@ import os
 import pwd
 import re
 import resource
-import signal
 import stat
 import typing as ta
 import warnings

 from omlish.lite.logs import log

-from .compat import SignalReceiver
-from .compat import close_fd
-from .compat import mktempfile
-from .compat import real_exit
-from .compat import try_unlink
 from .configs import ServerConfig
 from .datatypes import gid_for_uid
 from .datatypes import name_to_uid
 from .exceptions import NoPermissionError
 from .exceptions import NotExecutableError
 from .exceptions import NotFoundError
-from .poller import BasePoller
 from .poller import Poller
 from .states import SupervisorState
-from .states import SupervisorStates
 from .types import AbstractServerContext
 from .types import AbstractSubprocess
+from .utils import close_fd
+from .utils import mktempfile
+from .utils import real_exit
+from .utils import try_unlink


 ServerEpoch = ta.NewType('ServerEpoch', int)

-InheritedFds = ta.NewType('InheritedFds', ta.FrozenSet[int])
-

 class ServerContext(AbstractServerContext):
     def __init__(
             self,
             config: ServerConfig,
+            poller: Poller,
             *,
             epoch: ServerEpoch = ServerEpoch(0),
-            inherited_fds: ta.Optional[InheritedFds] = None,
     ) -> None:
         super().__init__()

         self._config = config
+        self._poller = poller
         self._epoch = epoch
-        self._inherited_fds = InheritedFds(frozenset(inherited_fds or []))

         self._pid_history: ta.Dict[int, AbstractSubprocess] = {}
-        self._state: SupervisorState = SupervisorStates.RUNNING
-
-        self._signal_receiver = SignalReceiver()
-
-        self._poller: BasePoller = Poller()
+        self._state: SupervisorState = SupervisorState.RUNNING

         if config.user is not None:
             uid = name_to_uid(config.user)
@@ -87,10 +77,6 @@ class ServerContext(AbstractServerContext):
     def set_state(self, state: SupervisorState) -> None:
         self._state = state

-    @property
-    def poller(self) -> BasePoller:
-        return self._poller
-
     @property
     def pid_history(self) -> ta.Dict[int, AbstractSubprocess]:
         return self._pid_history
@@ -103,22 +89,8 @@ class ServerContext(AbstractServerContext):
     def gid(self) -> ta.Optional[int]:
         return self._gid

-    @property
-    def inherited_fds(self) -> InheritedFds:
-        return self._inherited_fds
-
     ##

-    def set_signals(self) -> None:
-        self._signal_receiver.install(
-            signal.SIGTERM,
-            signal.SIGINT,
-            signal.SIGQUIT,
-            signal.SIGHUP,
-            signal.SIGCHLD,
-            signal.SIGUSR2,
-        )
-
     def waitpid(self) -> ta.Tuple[ta.Optional[int], ta.Optional[int]]:
         # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
         # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
@@ -223,7 +195,7 @@ class ServerContext(AbstractServerContext):
     def cleanup(self) -> None:
         if self._unlink_pidfile:
             try_unlink(self.config.pidfile)
-        self.poller.close()
+        self._poller.close()

     def cleanup_fds(self) -> None:
         # try to close any leaked file descriptors (for reload)
@@ -249,9 +221,9 @@ class ServerContext(AbstractServerContext):
                     log.warning('Failed to clean up %r', pathname)

     def daemonize(self) -> None:
-        self.poller.before_daemonize()
+        self._poller.before_daemonize()
         self._daemonize()
-        self.poller.after_daemonize()
+        self._poller.after_daemonize()

     def _daemonize(self) -> None:
         # To daemonize, we need to become the leader of our own session (process) group. If we do not, signals sent to
@@ -306,9 +278,6 @@ class ServerContext(AbstractServerContext):
         )
         return logfile

-    def get_signal(self) -> ta.Optional[int]:
-        return self._signal_receiver.get_signal()
-
     def write_pidfile(self) -> None:
         pid = os.getpid()
         try:
@@ -395,19 +364,25 @@ def make_pipes(stderr=True) -> ta.Mapping[str, int]:
         'stderr': None,
         'child_stderr': None,
     }
+
     try:
         stdin, child_stdin = os.pipe()
         pipes['child_stdin'], pipes['stdin'] = stdin, child_stdin
+
         stdout, child_stdout = os.pipe()
         pipes['stdout'], pipes['child_stdout'] = stdout, child_stdout
+
         if stderr:
             stderr, child_stderr = os.pipe()
             pipes['stderr'], pipes['child_stderr'] = stderr, child_stderr
+
         for fd in (pipes['stdout'], pipes['stderr'], pipes['stdin']):
             if fd is not None:
                 flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NDELAY
                 fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+
         return pipes  # type: ignore
+
     except OSError:
         for fd in pipes.values():
             if fd is not None:
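For reference, the non-blocking setup that make_pipes applies to each descriptor can be reproduced in isolation. Below is a minimal standalone sketch of the same F_GETFL/F_SETFL pattern; the variable names are illustrative and not taken from the package, and O_NDELAY as used above is an alias for O_NONBLOCK on modern platforms.

import fcntl
import os

# Create one pipe pair and mark the read end non-blocking, mirroring the
# fcntl pattern used by make_pipes above.
r, w = os.pipe()
flags = fcntl.fcntl(r, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(r, fcntl.F_SETFL, flags)

os.write(w, b'hello')
print(os.read(r, 1024))  # b'hello'

try:
    os.read(r, 1024)  # nothing buffered: raises instead of blocking
except BlockingIOError:
    print('read end is non-blocking')

os.close(r)
os.close(w)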
@@ -1,4 +1,4 @@
-# ruff: noqa: UP007
+# ruff: noqa: UP006 UP007
 import abc
 import errno
 import logging
@@ -7,26 +7,35 @@ import typing as ta

 from omlish.lite.logs import log

-from .compat import as_bytes
-from .compat import compact_traceback
-from .compat import find_prefix_at_end
-from .compat import readfd
-from .compat import strip_escapes
 from .configs import ProcessConfig
-from .events import EVENT_CALLBACKS
+from .events import EventCallbacks
+from .events import ProcessCommunicationEvent
 from .events import ProcessLogStderrEvent
 from .events import ProcessLogStdoutEvent
 from .types import AbstractSubprocess
+from .utils import as_bytes
+from .utils import compact_traceback
+from .utils import find_prefix_at_end
+from .utils import read_fd
+from .utils import strip_escapes


 class Dispatcher(abc.ABC):
-
-    def __init__(self, process: AbstractSubprocess, channel: str, fd: int) -> None:
+    def __init__(
+            self,
+            process: AbstractSubprocess,
+            channel: str,
+            fd: int,
+            *,
+            event_callbacks: EventCallbacks,
+    ) -> None:
         super().__init__()

         self._process = process  # process which "owns" this dispatcher
         self._channel = channel  # 'stderr' or 'stdout'
         self._fd = fd
+        self._event_callbacks = event_callbacks
+
         self._closed = False  # True if close() has been called

     def __repr__(self) -> str:
@@ -86,18 +95,23 @@ class OutputDispatcher(Dispatcher):
     - route the output to the appropriate log handlers as specified in the config.
     """

-    def __init__(self, process: AbstractSubprocess, event_type, fd):
-        """
-        Initialize the dispatcher.
-
-        `event_type` should be one of ProcessLogStdoutEvent or ProcessLogStderrEvent
-        """
-
-        super().__init__(process, event_type.channel, fd)
+    def __init__(
+            self,
+            process: AbstractSubprocess,
+            event_type: ta.Type[ProcessCommunicationEvent],
+            fd: int,
+            **kwargs: ta.Any,
+    ) -> None:
+        super().__init__(
+            process,
+            event_type.channel,
+            fd,
+            **kwargs,
+        )

-        self.event_type = event_type
+        self._event_type = event_type

-        self.lc: ProcessConfig.Log = getattr(process.config, self._channel)
+        self._lc: ProcessConfig.Log = getattr(process.config, self._channel)

         self._init_normal_log()
         self._init_capture_log()
@@ -109,8 +123,8 @@ class OutputDispatcher(Dispatcher):

         # all code below is purely for minor speedups

-        begin_token = self.event_type.BEGIN_TOKEN
-        end_token = self.event_type.END_TOKEN
+        begin_token = self._event_type.BEGIN_TOKEN
+        end_token = self._event_type.END_TOKEN
         self._begin_token_data = (begin_token, len(begin_token))
         self._end_token_data = (end_token, len(end_token))

@@ -135,10 +149,10 @@ class OutputDispatcher(Dispatcher):
         config = self._process.config  # noqa
         channel = self._channel  # noqa

-        logfile = self.lc.file
-        maxbytes = self.lc.maxbytes  # noqa
-        backups = self.lc.backups  # noqa
-        to_syslog = self.lc.syslog
+        logfile = self._lc.file
+        maxbytes = self._lc.maxbytes  # noqa
+        backups = self._lc.backups  # noqa
+        to_syslog = self._lc.syslog

         if logfile or to_syslog:
             self._normal_log = logging.getLogger(__name__)
@@ -165,7 +179,7 @@ class OutputDispatcher(Dispatcher):
         is detected. Sets self.capture_log if capturing is enabled.
         """

-        capture_maxbytes = self.lc.capture_maxbytes
+        capture_maxbytes = self._lc.capture_maxbytes
         if capture_maxbytes:
             self._capture_log = logging.getLogger(__name__)
             # loggers.handle_boundIO(
@@ -174,45 +188,47 @@ class OutputDispatcher(Dispatcher):
             #     maxbytes=capture_maxbytes,
             # )

-    def remove_logs(self):
+    def remove_logs(self) -> None:
         for l in (self._normal_log, self._capture_log):
             if l is not None:
                 for handler in l.handlers:
                     handler.remove()  # type: ignore
                     handler.reopen()  # type: ignore

-    def reopen_logs(self):
+    def reopen_logs(self) -> None:
         for l in (self._normal_log, self._capture_log):
             if l is not None:
                 for handler in l.handlers:
                     handler.reopen()  # type: ignore

-    def _log(self, data):
-        if data:
-            if self._process.context.config.strip_ansi:
-                data = strip_escapes(data)
+    def _log(self, data: ta.Union[str, bytes, None]) -> None:
+        if not data:
+            return

-            if self._child_log:
-                self._child_log.info(data)
+        if self._process.context.config.strip_ansi:
+            data = strip_escapes(as_bytes(data))

-            if self._log_to_main_log:
-                if not isinstance(data, bytes):
-                    text = data
-                else:
-                    try:
-                        text = data.decode('utf-8')
-                    except UnicodeDecodeError:
-                        text = f'Undecodable: {data!r}'
-                log.log(self._main_log_level, '%r %s output:\n%s', self._process.config.name, self._channel, text)  # noqa
+        if self._child_log:
+            self._child_log.info(data)

-            if self._channel == 'stdout':
-                if self._stdout_events_enabled:
-                    EVENT_CALLBACKS.notify(ProcessLogStdoutEvent(self._process, self._process.pid, data))
+        if self._log_to_main_log:
+            if not isinstance(data, bytes):
+                text = data
+            else:
+                try:
+                    text = data.decode('utf-8')
+                except UnicodeDecodeError:
+                    text = f'Undecodable: {data!r}'
+            log.log(self._main_log_level, '%r %s output:\n%s', self._process.config.name, self._channel, text)  # noqa
+
+        if self._channel == 'stdout':
+            if self._stdout_events_enabled:
+                self._event_callbacks.notify(ProcessLogStdoutEvent(self._process, self._process.pid, data))

-            elif self._stderr_events_enabled:
-                EVENT_CALLBACKS.notify(ProcessLogStderrEvent(self._process, self._process.pid, data))
+        elif self._stderr_events_enabled:
+            self._event_callbacks.notify(ProcessLogStderrEvent(self._process, self._process.pid, data))

-    def record_output(self):
+    def record_output(self) -> None:
         if self._capture_log is None:
             # shortcut trying to find capture data
             data = self._output_buffer
@@ -221,11 +237,11 @@ class OutputDispatcher(Dispatcher):
             return

         if self._capture_mode:
-            token, tokenlen = self._end_token_data
+            token, token_len = self._end_token_data
         else:
-            token, tokenlen = self._begin_token_data
+            token, token_len = self._begin_token_data

-        if len(self._output_buffer) <= tokenlen:
+        if len(self._output_buffer) <= token_len:
             return  # not enough data

         data = self._output_buffer
@@ -248,7 +264,7 @@ class OutputDispatcher(Dispatcher):
         if after:
             self.record_output()

-    def toggle_capture_mode(self):
+    def toggle_capture_mode(self) -> None:
         self._capture_mode = not self._capture_mode

         if self._capture_log is not None:
@@ -260,8 +276,8 @@ class OutputDispatcher(Dispatcher):
                 data = self._capture_log.getvalue()  # type: ignore
                 channel = self._channel
                 procname = self._process.config.name
-                event = self.event_type(self._process, self._process.pid, data)
-                EVENT_CALLBACKS.notify(event)
+                event = self._event_type(self._process, self._process.pid, data)
+                self._event_callbacks.notify(event)

                 log.debug('%r %s emitted a comm event', procname, channel)
                 for handler in self._capture_log.handlers:
@@ -278,7 +294,7 @@ class OutputDispatcher(Dispatcher):
         return True

     def handle_read_event(self) -> None:
-        data = readfd(self._fd)
+        data = read_fd(self._fd)
         self._output_buffer += data
         self.record_output()
         if not data:
@@ -288,11 +304,25 @@ class OutputDispatcher(Dispatcher):


 class InputDispatcher(Dispatcher):
+    def __init__(
+            self,
+            process: AbstractSubprocess,
+            channel: str,
+            fd: int,
+            **kwargs: ta.Any,
+    ) -> None:
+        super().__init__(
+            process,
+            channel,
+            fd,
+            **kwargs,
+        )

-    def __init__(self, process: AbstractSubprocess, channel: str, fd: int) -> None:
-        super().__init__(process, channel, fd)
         self._input_buffer = b''

+    def write(self, chars: ta.Union[bytes, str]) -> None:
+        self._input_buffer += as_bytes(chars)
+
     def writable(self) -> bool:
         if self._input_buffer and not self._closed:
             return True
@@ -2,36 +2,44 @@
 import abc
 import typing as ta

-from .compat import as_string
-from .states import get_process_state_description
+from .states import ProcessState
+
+
+##
+
+
+class Event(abc.ABC):  # noqa
+    """Abstract event type."""
+
+
+##
+
+
+EventCallback = ta.Callable[['Event'], None]


 class EventCallbacks:
     def __init__(self) -> None:
         super().__init__()

-        self._callbacks: ta.List[ta.Tuple[type, ta.Callable]] = []
+        self._callbacks: ta.List[ta.Tuple[ta.Type[Event], EventCallback]] = []

-    def subscribe(self, type, callback):  # noqa
+    def subscribe(self, type: ta.Type[Event], callback: EventCallback) -> None:  # noqa
         self._callbacks.append((type, callback))

-    def unsubscribe(self, type, callback):  # noqa
+    def unsubscribe(self, type: ta.Type[Event], callback: EventCallback) -> None:  # noqa
         self._callbacks.remove((type, callback))

-    def notify(self, event):
+    def notify(self, event: Event) -> None:
         for type, callback in self._callbacks:  # noqa
             if isinstance(event, type):
                 callback(event)

-    def clear(self):
+    def clear(self) -> None:
         self._callbacks[:] = []


-EVENT_CALLBACKS = EventCallbacks()
-
-
-class Event(abc.ABC):  # noqa
-    """Abstract event type."""
+##


 class ProcessLogEvent(Event, abc.ABC):
@@ -43,24 +51,6 @@ class ProcessLogEvent(Event, abc.ABC):
         self.pid = pid
         self.data = data

-    def payload(self):
-        groupname = ''
-        if self.process.group is not None:
-            groupname = self.process.group.config.name
-        try:
-            data = as_string(self.data)
-        except UnicodeDecodeError:
-            data = f'Undecodable: {self.data!r}'
-
-        result = 'processname:%s groupname:%s pid:%s channel:%s\n%s' % (  # noqa
-            as_string(self.process.config.name),
-            as_string(groupname),
-            self.pid,
-            as_string(self.channel),  # type: ignore
-            data,
-        )
-        return result
-

 class ProcessLogStdoutEvent(ProcessLogEvent):
     channel = 'stdout'
@@ -70,27 +60,22 @@ class ProcessLogStderrEvent(ProcessLogEvent):
     channel = 'stderr'


+#
+
+
 class ProcessCommunicationEvent(Event, abc.ABC):
     # event mode tokens
     BEGIN_TOKEN = b'<!--XSUPERVISOR:BEGIN-->'
     END_TOKEN = b'<!--XSUPERVISOR:END-->'

+    channel: ta.ClassVar[str]
+
     def __init__(self, process, pid, data):
         super().__init__()
         self.process = process
         self.pid = pid
         self.data = data

-    def payload(self):
-        groupname = ''
-        if self.process.group is not None:
-            groupname = self.process.group.config.name
-        try:
-            data = as_string(self.data)
-        except UnicodeDecodeError:
-            data = f'Undecodable: {self.data!r}'
-        return f'processname:{self.process.config.name} groupname:{groupname} pid:{self.pid}\n{data}'
-

 class ProcessCommunicationStdoutEvent(ProcessCommunicationEvent):
     channel = 'stdout'
@@ -100,22 +85,22 @@ class ProcessCommunicationStderrEvent(ProcessCommunicationEvent):
     channel = 'stderr'


+#
+
+
 class RemoteCommunicationEvent(Event):
     def __init__(self, type, data):  # noqa
         super().__init__()
         self.type = type
         self.data = data

-    def payload(self):
-        return f'type:{self.type}\n{self.data}'
+
+#


 class SupervisorStateChangeEvent(Event):
     """Abstract class."""

-    def payload(self):
-        return ''
-

 class SupervisorRunningEvent(SupervisorStateChangeEvent):
     pass
@@ -125,6 +110,9 @@ class SupervisorStoppingEvent(SupervisorStateChangeEvent):
     pass


+#
+
+
 class EventRejectedEvent:  # purposely does not subclass Event
     def __init__(self, process, event):
         super().__init__()
@@ -132,6 +120,9 @@ class EventRejectedEvent:  # purposely does not subclass Event
         self.event = event


+#
+
+
 class ProcessStateEvent(Event):
     """Abstract class, never raised directly."""
     frm = None
@@ -146,19 +137,6 @@ class ProcessStateEvent(Event):
         # us, we stash the values at the time the event was sent
         self.extra_values = self.get_extra_values()

-    def payload(self):
-        groupname = ''
-        if self.process.group is not None:
-            groupname = self.process.group.config.name
-        l = [
-            ('processname', self.process.config.name),
-            ('groupname', groupname),
-            ('from_state', get_process_state_description(self.from_state)),
-        ]
-        l.extend(self.extra_values)
-        s = ' '.join([f'{name}:{val}' for name, val in l])
-        return s
-
     def get_extra_values(self):
         return []

@@ -204,14 +182,26 @@ class ProcessStateStoppedEvent(ProcessStateEvent):
         return [('pid', self.process.pid)]


+PROCESS_STATE_EVENT_MAP: ta.Mapping[ProcessState, ta.Type[ProcessStateEvent]] = {
+    ProcessState.BACKOFF: ProcessStateBackoffEvent,
+    ProcessState.FATAL: ProcessStateFatalEvent,
+    ProcessState.UNKNOWN: ProcessStateUnknownEvent,
+    ProcessState.STOPPED: ProcessStateStoppedEvent,
+    ProcessState.EXITED: ProcessStateExitedEvent,
+    ProcessState.RUNNING: ProcessStateRunningEvent,
+    ProcessState.STARTING: ProcessStateStartingEvent,
+    ProcessState.STOPPING: ProcessStateStoppingEvent,
+}
+
+
+#
+
+
 class ProcessGroupEvent(Event):
     def __init__(self, group):
         super().__init__()
         self.group = group

-    def payload(self):
-        return f'groupname:{self.group}\n'
-

 class ProcessGroupAddedEvent(ProcessGroupEvent):
     pass
@@ -221,6 +211,9 @@ class ProcessGroupRemovedEvent(ProcessGroupEvent):
     pass


+#
+
+
 class TickEvent(Event):
     """Abstract."""

@@ -229,9 +222,6 @@ class TickEvent(Event):
         self.when = when
         self.supervisord = supervisord

-    def payload(self):
-        return f'when:{self.when}'
-

 class Tick5Event(TickEvent):
     period = 5
@@ -245,11 +235,14 @@ class Tick3600Event(TickEvent):
     period = 3600


-TICK_EVENTS = [  # imported elsewhere
+TICK_EVENTS = (  # imported elsewhere
     Tick5Event,
     Tick60Event,
     Tick3600Event,
-]
+)
+
+
+##


 class EventTypes:
@@ -294,7 +287,3 @@ def get_event_name_by_type(requested):
         if typ is requested:
             return name
     return None
-
-
-def register(name, event):
-    setattr(EventTypes, name, event)
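The events diff above replaces the removed module-level EVENT_CALLBACKS singleton with an EventCallbacks object that callers construct and pass around explicitly (the dispatchers now receive it as an event_callbacks keyword argument). Below is a minimal standalone sketch of the subscribe/notify pattern it implements, re-declared here for illustration with a stand-in concrete event class; only the names visible in the diff are taken from the package.

import abc
import typing as ta


class Event(abc.ABC):  # mirrors the Event ABC introduced in the diff
    """Abstract event type."""


class SupervisorRunningEvent(Event):  # stand-in concrete event for the demo
    pass


EventCallback = ta.Callable[[Event], None]


class EventCallbacks:  # same shape as the class shown in the diff
    def __init__(self) -> None:
        self._callbacks: ta.List[ta.Tuple[ta.Type[Event], EventCallback]] = []

    def subscribe(self, type: ta.Type[Event], callback: EventCallback) -> None:  # noqa
        self._callbacks.append((type, callback))

    def notify(self, event: Event) -> None:
        for type, callback in self._callbacks:  # noqa
            if isinstance(event, type):
                callback(event)


# Usage: construct one instance and hand it to consumers explicitly rather
# than importing a global.
callbacks = EventCallbacks()
callbacks.subscribe(SupervisorRunningEvent, lambda e: print('got', type(e).__name__))
callbacks.notify(SupervisorRunningEvent())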