uhttp-workers 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
uhttp/workers.py
ADDED
|
@@ -0,0 +1,1138 @@
|
|
|
1
|
+
"""uhttp-workers: Multi-process API server built on uhttp-server
|
|
2
|
+
|
|
3
|
+
Provides dispatcher/worker architecture for handling large volumes
|
|
4
|
+
of API requests using multiple processes.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import sys as _sys
|
|
8
|
+
import os as _os
|
|
9
|
+
import traceback as _traceback
|
|
10
|
+
import time as _time
|
|
11
|
+
import queue as _queue
|
|
12
|
+
import signal as _signal
|
|
13
|
+
import select as _select
|
|
14
|
+
import multiprocessing as _mp
|
|
15
|
+
|
|
16
|
+
import uhttp.server as _uhttp_server
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
# Message types sent on the shared worker -> dispatcher response queue.
MSG_RESPONSE = 'RESPONSE'
MSG_HEARTBEAT = 'HEARTBEAT'
MSG_LOG = 'LOG'

# Worker control messages sent on each worker's control queue
# (a bare None on the queue also means "stop" — see Worker._process_control).
CTL_STOP = 'STOP'
CTL_CONFIG = 'CONFIG'

# Log levels — numeric values match the stdlib `logging` module.
LOG_CRITICAL = 50
LOG_ERROR = 40
LOG_WARNING = 30
LOG_INFO = 20
LOG_DEBUG = 10

# Human-readable names for the levels above.
LOG_LEVEL_NAMES = {
    LOG_CRITICAL: 'CRITICAL',
    LOG_ERROR: 'ERROR',
    LOG_WARNING: 'WARNING',
    LOG_INFO: 'INFO',
    LOG_DEBUG: 'DEBUG',
}

# Syslog priority prefixes for systemd-journald
# (journald parses a leading "<N>" as the record's priority).
_LOG_SYSLOG_PREFIX = {
    LOG_CRITICAL: '<2>',
    LOG_ERROR: '<3>',
    LOG_WARNING: '<4>',
    LOG_INFO: '<6>',
    LOG_DEBUG: '<7>',
}

# ANSI color codes for terminal output
_LOG_ANSI_COLOR = {
    LOG_CRITICAL: '\033[1;31m',  # bold red
    LOG_ERROR: '\033[31m',  # red
    LOG_WARNING: '\033[33m',  # yellow
    LOG_INFO: '\033[0m',  # default
    LOG_DEBUG: '\033[2m',  # dim
}
_ANSI_RESET = '\033[0m'

# Default file name served for directory requests
# (presumably used by the static-file path in Dispatcher — confirm).
_DIR_INDEX = 'index.html'
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
# Exceptions
|
|
66
|
+
|
|
67
|
+
class ApiException(Exception):
    """Base exception for uhttp-workers; subclass for package-specific errors."""
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
class RejectRequest(ApiException):
    """Raised in do_check() to reject a request.

    The handler is expected to have already sent an error response to the
    client; raising this exception only skips dispatch to a worker pool.
    """
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
# Route decorator
|
|
76
|
+
|
|
77
|
+
def api(pattern, *methods):
    """Decorator that marks a method as an API endpoint handler on Worker.

    The decorated function is tagged with ``_api_pattern`` and
    ``_api_methods`` attributes, which Worker._build_routes later collects.

    Args:
        pattern: URL pattern with optional parameters (e.g., '/user/{id:int}')
        *methods: HTTP methods to accept (e.g., 'GET', 'POST'). None = all.
    """
    def mark(handler):
        handler._api_pattern = pattern
        # Empty methods tuple means "accept every method".
        handler._api_methods = list(methods) or None
        return handler
    return mark
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def sync(pattern, *methods):
    """Decorator that marks a method as a sync handler on Dispatcher.

    Sync handlers run directly in the dispatcher process, so they should
    only produce lightweight, fast responses. The decorated function is
    tagged with ``_sync_pattern`` / ``_sync_methods`` attributes collected
    by Dispatcher._build_sync_routes.

    Args:
        pattern: URL pattern with optional parameters (e.g., '/health')
        *methods: HTTP methods to accept (e.g., 'GET', 'POST'). None = all.
    """
    def mark(handler):
        handler._sync_pattern = pattern
        # Empty methods tuple means "accept every method".
        handler._sync_methods = list(methods) or None
        return handler
    return mark
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
# Type converters for path parameters
|
|
109
|
+
|
|
110
|
+
# Converters for typed path parameters ('{name:type}' in @api patterns);
# keys are the type names accepted by _parse_param.
_TYPE_CONVERTERS = {
    'str': str,
    'int': int,
    'float': float,
}
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def _parse_param(pattern_part):
|
|
118
|
+
"""Parse parameter pattern like {name} or {name:type}.
|
|
119
|
+
|
|
120
|
+
Returns:
|
|
121
|
+
Tuple (param_name, converter_func) or None if not a parameter.
|
|
122
|
+
"""
|
|
123
|
+
if not (pattern_part.startswith('{') and pattern_part.endswith('}')):
|
|
124
|
+
return None
|
|
125
|
+
inner = pattern_part[1:-1]
|
|
126
|
+
if ':' in inner:
|
|
127
|
+
name, type_name = inner.split(':', 1)
|
|
128
|
+
converter = _TYPE_CONVERTERS.get(type_name)
|
|
129
|
+
if converter is None:
|
|
130
|
+
raise ValueError(f"Unknown type converter: {type_name}")
|
|
131
|
+
return name, converter
|
|
132
|
+
return inner, str
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def _match_pattern(pattern, path):
|
|
136
|
+
"""Match URL path against pattern with parameters.
|
|
137
|
+
|
|
138
|
+
Args:
|
|
139
|
+
pattern: Pattern string (e.g., '/user/{id:int}')
|
|
140
|
+
path: URL path (e.g., '/user/42')
|
|
141
|
+
|
|
142
|
+
Returns:
|
|
143
|
+
Dict of path parameters if match, None otherwise.
|
|
144
|
+
"""
|
|
145
|
+
pattern_parts = [p for p in pattern.split('/') if p]
|
|
146
|
+
path_parts = [p for p in path.split('/') if p]
|
|
147
|
+
if len(pattern_parts) != len(path_parts):
|
|
148
|
+
return None
|
|
149
|
+
path_params = {}
|
|
150
|
+
for pattern_part, path_part in zip(pattern_parts, path_parts):
|
|
151
|
+
param = _parse_param(pattern_part)
|
|
152
|
+
if param:
|
|
153
|
+
name, converter = param
|
|
154
|
+
try:
|
|
155
|
+
path_params[name] = converter(path_part)
|
|
156
|
+
except (ValueError, TypeError):
|
|
157
|
+
return None
|
|
158
|
+
elif pattern_part != path_part:
|
|
159
|
+
return None
|
|
160
|
+
return path_params
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
def _match_prefix(prefix_pattern, path):
|
|
164
|
+
"""Match URL path against prefix pattern with glob support.
|
|
165
|
+
|
|
166
|
+
Supports '**' wildcard at the end of pattern.
|
|
167
|
+
|
|
168
|
+
Args:
|
|
169
|
+
prefix_pattern: Pattern like '/api/users/**'
|
|
170
|
+
path: URL path
|
|
171
|
+
|
|
172
|
+
Returns:
|
|
173
|
+
True if path matches prefix pattern.
|
|
174
|
+
"""
|
|
175
|
+
if prefix_pattern.endswith('/**'):
|
|
176
|
+
prefix = prefix_pattern[:-3]
|
|
177
|
+
prefix_parts = [p for p in prefix.split('/') if p]
|
|
178
|
+
path_parts = [p for p in path.split('/') if p]
|
|
179
|
+
return path_parts[:len(prefix_parts)] == prefix_parts
|
|
180
|
+
# exact match
|
|
181
|
+
return path == prefix_pattern
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
# Request/Response objects passed through queues
|
|
185
|
+
|
|
186
|
+
class Request:
    """HTTP request payload sent from dispatcher to worker over a queue.

    Attributes:
        request_id: Internal ID for dispatcher/worker pairing.
        method: HTTP method (e.g., 'GET', 'POST').
        path: URL path (e.g., '/api/user/42').
        query: Parsed query parameters dict, or None.
        data: Parsed body — dict (JSON), bytes (binary), or None.
        headers: Request headers dict (never None).
        content_type: Content-Type header value, or None.
        path_params: Path parameters filled in by the worker router.
    """

    # __slots__ keeps per-instance memory low; many of these cross queues.
    __slots__ = (
        'request_id', 'method', 'path', 'query',
        'data', 'headers', 'content_type', 'path_params')

    def __init__(
            self, request_id, method, path, query=None,
            data=None, headers=None, content_type=None):
        self.request_id = request_id
        self.method = method
        self.path = path
        self.query = query
        self.data = data
        # Normalize missing headers to an empty dict.
        self.headers = headers or {}
        self.content_type = content_type
        # Populated later by Worker._match_route on a successful match.
        self.path_params = {}
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
class Response:
    """HTTP response payload sent from worker back to dispatcher over a queue.

    Attributes:
        request_id: Matches the originating Request.
        status: HTTP status code (default 200).
        data: Response body — dict (JSON), bytes (binary), or None.
        headers: Response headers dict, or None.
    """

    # __slots__ keeps per-instance memory low; many of these cross queues.
    __slots__ = ('request_id', 'status', 'data', 'headers')

    def __init__(
            self, request_id, data=None, status=200, headers=None):
        self.request_id = request_id
        self.data = data
        self.status = status
        self.headers = headers
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
# API Handler
|
|
238
|
+
|
|
239
|
+
class ApiHandler:
    """Base class for grouping API endpoints under a common URL prefix.

    Subclass and set PATTERN as the URL prefix. Define handlers with the
    @api decorator. Handlers access the owning worker via ``self.worker``.

    Attributes:
        PATTERN: URL prefix prepended to all @api patterns in this class.
            A trailing '/' is stripped by Worker._build_routes before the
            prefix is joined with each handler's pattern.
        worker: Reference to the Worker instance that owns this handler.
    """

    PATTERN = ''

    def __init__(self, worker):
        # Instantiated by Worker._build_routes with the owning worker.
        self.worker = worker
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
# Logger
|
|
257
|
+
|
|
258
|
+
class Logger:
    """Logger that forwards records to the dispatcher via the response queue.

    Messages support both %-style (positional args) and {}-style (keyword
    args) formatting; if formatting fails, the raw message plus its args
    are emitted instead. Records below ``level`` are dropped locally and
    never reach the queue.

    Attributes:
        name: Logger name (included in log output).
        level: Minimum log level.
    """

    def __init__(self, name, queue, level=LOG_WARNING):
        self.name = name
        self.level = level
        self._queue = queue

    def _log(self, level, msg, *args, **kwargs):
        # Filter below-threshold records before paying any formatting cost.
        if level < self.level:
            return
        try:
            text = msg % args if args else msg
            if kwargs:
                text = text.format(**kwargs)
        except (TypeError, KeyError, IndexError, ValueError):
            # Bad format string/args must never crash the caller —
            # fall back to a raw dump of the inputs.
            text = f"{msg} {args} {kwargs}"
        self._queue.put((MSG_LOG, self.name, level, text))

    def critical(self, msg, *args, **kwargs):
        self._log(LOG_CRITICAL, msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        self._log(LOG_ERROR, msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        self._log(LOG_WARNING, msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        self._log(LOG_INFO, msg, *args, **kwargs)

    def debug(self, msg, *args, **kwargs):
        self._log(LOG_DEBUG, msg, *args, **kwargs)
|
|
297
|
+
|
|
298
|
+
|
|
299
|
+
# Worker
|
|
300
|
+
|
|
301
|
+
class Worker(_mp.Process):
    """Base worker process. Subclass and define handlers with @api decorator.

    Handlers can be defined directly on the worker or in separate
    ApiHandler classes listed in HANDLERS. Uses select()-based event loop
    for multiplexing request queue, control queue, and custom file descriptors.

    Attributes:
        HANDLERS: List of ApiHandler subclasses with grouped endpoints.
        worker_id: Unique index of this worker within its pool.
        heartbeat_interval: Seconds between heartbeats when idle.
        kwargs: Extra arguments from WorkerPool, accessible in setup().
        log: Logger instance for sending log messages to dispatcher.
    """

    HANDLERS = []

    def __init__(
            self, worker_id, request_queue, control_queue,
            response_queue, heartbeat_interval=1,
            log_level=LOG_WARNING, pool_name=None, **kwargs):
        """Initialize worker process.

        Args:
            worker_id: Unique index of this worker within its pool.
            request_queue: Queue for receiving Request objects from dispatcher.
            control_queue: Per-worker queue for stop signals and config updates.
            response_queue: Shared queue for sending responses and heartbeats
                back to dispatcher.
            heartbeat_interval: Seconds between heartbeats when idle.
            log_level: Minimum log level for worker logger.
            pool_name: Name of the pool this worker belongs to.
            **kwargs: Extra arguments accessible via self.kwargs in setup().
        """
        # daemon=True: the worker must never outlive the dispatcher process.
        super().__init__(daemon=True)
        self.worker_id = worker_id
        self.pool_name = pool_name
        self.heartbeat_interval = heartbeat_interval
        self.kwargs = kwargs
        self._request_queue = request_queue
        self._control_queue = control_queue
        self._response_queue = response_queue
        self.log = Logger(
            f'{type(self).__name__}[{worker_id}]',
            response_queue, level=log_level)
        self._routes = []    # (pattern, methods, bound_handler) tuples
        self._handlers = []  # ApiHandler instances (kept so bound methods stay valid)
        self._readers = {}   # file-like -> callback for custom read events
        self._writers = {}   # file-like -> callback for custom write events
        self._running = True

    def _build_routes(self):
        """Collect @api decorated methods from worker and HANDLERS."""
        # routes from worker itself
        # NOTE(review): walking the full MRO registers an @api method once
        # per class that defines it; an overridden decorated method is thus
        # appended twice with the same bound callable. First match wins in
        # _match_route, so duplicates are harmless but redundant.
        for klass in type(self).__mro__:
            for name, val in vars(klass).items():
                if callable(val) and hasattr(val, '_api_pattern'):
                    # getattr on self yields the bound (possibly overridden)
                    # method regardless of which class declared it.
                    bound = getattr(self, name)
                    self._routes.append((
                        val._api_pattern,
                        val._api_methods,
                        bound))
        # routes from handler classes
        for handler_cls in self.HANDLERS:
            handler = handler_cls(self)
            self._handlers.append(handler)
            # Strip trailing '/' so PATTERN + '/sub' never yields '//sub'.
            prefix = handler_cls.PATTERN.rstrip('/')
            for klass in handler_cls.__mro__:
                if klass is ApiHandler or klass is object:
                    continue
                for name, val in vars(klass).items():
                    if callable(val) and hasattr(val, '_api_pattern'):
                        full_pattern = prefix + val._api_pattern
                        bound = getattr(handler, name)
                        self._routes.append((
                            full_pattern,
                            val._api_methods,
                            bound))

    def _match_route(self, request):
        """Find matching handler for request, or None.

        On a match, request.path_params is filled in as a side effect.
        """
        for pattern, methods, handler in self._routes:
            # methods=None means the route accepts every HTTP method.
            if methods and request.method not in methods:
                continue
            path_params = _match_pattern(pattern, request.path)
            if path_params is not None:
                request.path_params = path_params
                return handler
        return None

    def register_reader(self, fd, callback):
        """Register file-like object for read events in worker select loop.

        Args:
            fd: Any object with fileno() (socket, serial port, pipe, ...).
            callback: Called with fd when readable: callback(fd).
        """
        self._readers[fd] = callback

    def unregister_reader(self, fd):
        """Remove file-like object from read events."""
        self._readers.pop(fd, None)

    def register_writer(self, fd, callback):
        """Register file-like object for write events in worker select loop.

        Only register when there is data to send, unregister when buffer
        is empty to avoid spinning in select.

        Args:
            fd: Any object with fileno() (socket, serial port, pipe, ...).
            callback: Called with fd when writable: callback(fd).
        """
        self._writers[fd] = callback

    def unregister_writer(self, fd):
        """Remove file-like object from write events."""
        self._writers.pop(fd, None)

    def setup(self):
        """Called once when worker process starts.

        Override to initialize resources (database connections, models, etc.).
        Extra kwargs from WorkerPool are available as self.kwargs.
        """

    def on_idle(self):
        """Called on each heartbeat interval when no request arrived.

        Override for periodic background processing.
        """

    def on_config(self, config):
        """Called when dispatcher sends configuration update via control queue.

        Args:
            config: Configuration dict sent by pool.send_config().
        """

    def _process_control(self):
        """Process all pending control messages.

        Drains the control queue; a None message or (CTL_STOP, ...) tuple
        stops the worker, (CTL_CONFIG, cfg) is forwarded to on_config().
        """
        while True:
            try:
                msg = self._control_queue.get_nowait()
            except _queue.Empty:
                return
            except (EOFError, OSError):
                # Queue endpoint is gone — the dispatcher side closed it;
                # treat as a stop signal.
                self._running = False
                return
            if msg is None or (isinstance(msg, tuple) and msg[0] == CTL_STOP):
                self._running = False
                return
            if isinstance(msg, tuple) and msg[0] == CTL_CONFIG:
                self.on_config(msg[1])

    def _handle_request(self, request):
        """Route and handle a single request, return Response.

        Produces 404 for unknown paths, 405 when the path matches but the
        method does not, and 500 (with the handler's traceback logged) when
        the handler raises.
        """
        handler = self._match_route(request)
        if handler is None:
            # check if path matches but method doesn't
            for pattern, methods, _ in self._routes:
                if _match_pattern(pattern, request.path) is not None:
                    # methods is non-None here: a methods=None route would
                    # already have matched in _match_route above.
                    return Response(
                        request.request_id,
                        data={'error': 'Method not allowed'},
                        status=405,
                        headers={'Allow': ', '.join(methods)})
            return Response(
                request.request_id,
                data={'error': 'Not found'},
                status=404)
        try:
            result = handler(request)
            # Handlers may return `data` or a `(data, status)` tuple.
            if isinstance(result, tuple):
                data, status = result
            else:
                data, status = result, 200
            return Response(request.request_id, data=data, status=status)
        except Exception as err:
            self.log.error(
                "%s %s: %s\n%s",
                request.method, request.path, err,
                _traceback.format_exc())
            return Response(
                request.request_id,
                data={'error': str(err)},
                status=500)

    def run(self):
        """Worker main loop using select for multiplexing."""
        # Ignore SIGTERM/SIGINT: shutdown is driven by the control queue
        # (or by the dispatcher's kill() for stuck workers).
        _signal.signal(_signal.SIGTERM, lambda *_: None)
        _signal.signal(_signal.SIGINT, lambda *_: None)
        try:
            self._build_routes()
            self.setup()
        except Exception:
            # Logger is not guaranteed usable if __init__-time state is
            # broken — report directly on the response queue and exit.
            self._response_queue.put(
                (MSG_LOG, f'{type(self).__name__}[{self.worker_id}]',
                 LOG_CRITICAL,
                 f"setup() failed:\n{_traceback.format_exc()}"))
            return
        # NOTE(review): ._reader is an undocumented attribute of
        # multiprocessing.Queue (the underlying read Connection); it is used
        # here so the queues can participate in select(). Verify it exists
        # on the target Python version/start method.
        req_reader = self._request_queue._reader
        ctl_reader = self._control_queue._reader
        while self._running:
            read_fds = [req_reader, ctl_reader] + list(self._readers)
            write_fds = list(self._writers)
            readable, writable, _ = _select.select(
                read_fds, write_fds, [], self.heartbeat_interval)
            if not self._running:
                break
            if not readable and not writable:
                # timeout — heartbeat, orphan check, idle hook
                # getppid()==1 means the parent died and we were reparented
                # to init (assumes Linux-style reparenting — confirm).
                if _os.getppid() == 1:
                    break
                self._response_queue.put(
                    (MSG_HEARTBEAT, self.pool_name, self.worker_id, None))
                self.on_idle()
                continue
            # control messages
            if ctl_reader in readable:
                self._process_control()
                if not self._running:
                    break
            # custom writers
            for fd in writable:
                if fd in self._writers:
                    self._writers[fd](fd)
            # custom readers
            for fd in readable:
                if fd in self._readers:
                    self._readers[fd](fd)
            # request from dispatcher
            if req_reader in readable:
                try:
                    request = self._request_queue.get_nowait()
                except _queue.Empty:
                    # select() readiness can race with another worker that
                    # drained the shared request queue first.
                    continue
                except (EOFError, OSError):
                    break
                # Heartbeat carrying the request id marks this worker busy
                # with that request (used for stuck detection/monitoring).
                self._response_queue.put(
                    (MSG_HEARTBEAT, self.pool_name, self.worker_id, request.request_id))
                response = self._handle_request(request)
                self._response_queue.put(
                    (MSG_RESPONSE, request.request_id, response))
|
|
545
|
+
|
|
546
|
+
|
|
547
|
+
# Worker Pool
|
|
548
|
+
|
|
549
|
+
class WorkerPool:
    """Manages a group of workers of the same type.

    Handles worker lifecycle: start, health monitoring, restart, shutdown.
    """

    def __init__(
            self, worker_class, num_workers=1, routes=None,
            timeout=30, stuck_timeout=60, heartbeat_interval=1,
            log_level=LOG_WARNING, max_restarts=10,
            restart_window=300, queue_warning=100, **kwargs):
        """Initialize worker pool.

        Args:
            worker_class: Worker subclass to instantiate.
            num_workers: Number of worker processes.
            routes: Prefix patterns for dispatcher routing
                (e.g., ['/api/users/**']). None = fallback pool.
            timeout: Request timeout in seconds (504 response).
            stuck_timeout: Max seconds without heartbeat before kill.
            heartbeat_interval: Seconds between worker heartbeats.
            log_level: Minimum log level for worker loggers.
            max_restarts: Max restarts per restart_window before degraded.
            restart_window: Time window for counting restarts (seconds).
            queue_warning: Log warning when queue size exceeds this value.
                Set to 0 to disable.
            **kwargs: Extra arguments passed to worker constructor.
        """
        self.worker_class = worker_class
        self.num_workers = num_workers
        self.routes = routes
        self.timeout = timeout
        self.heartbeat_interval = heartbeat_interval
        self.log_level = log_level
        self.stuck_timeout = stuck_timeout
        self.max_restarts = max_restarts
        self.restart_window = restart_window
        self.queue_warning = queue_warning
        self.kwargs = kwargs
        # Pool name defaults to the worker class name; used in heartbeats.
        self.name = worker_class.__name__
        # Single shared request queue: workers compete for requests.
        self.request_queue = _mp.Queue()
        self.workers = []
        self._control_queues = []      # one control queue per worker
        self._last_seen = {}           # worker_id -> last heartbeat time
        self._current_request = {}     # worker_id -> in-flight request_id or None
        self._restart_times = []       # timestamps of recent restarts
        self._degraded = False
        self._response_queue = None    # set in start()

    def start(self, response_queue):
        """Start all workers in this pool.

        Args:
            response_queue: Shared response queue for all pools.
        """
        self._response_queue = response_queue
        for i in range(self.num_workers):
            self._start_worker(i)

    def _start_worker(self, index):
        """Start or restart a single worker.

        A fresh control queue is created on each (re)start so stale
        messages for a dead worker are discarded.
        """
        control_queue = _mp.Queue()
        worker = self.worker_class(
            worker_id=index,
            request_queue=self.request_queue,
            control_queue=control_queue,
            response_queue=self._response_queue,
            heartbeat_interval=self.heartbeat_interval,
            log_level=self.log_level,
            pool_name=self.name,
            **self.kwargs)
        worker.start()
        # Replace in place on restart, append on first start.
        if index < len(self.workers):
            self.workers[index] = worker
            self._control_queues[index] = control_queue
        else:
            self.workers.append(worker)
            self._control_queues.append(control_queue)
        # Reset health tracking so the new worker isn't immediately "stuck".
        self._last_seen[index] = _time.time()
        self._current_request[index] = None

    def update_heartbeat(self, worker_id, request_id=None):
        """Update last seen time for a worker.

        Args:
            worker_id: Index of the worker that sent the heartbeat.
            request_id: Request the worker is currently handling, or None
                when idle.
        """
        self._last_seen[worker_id] = _time.time()
        self._current_request[worker_id] = request_id

    def check_workers(self):
        """Check worker health, restart dead or stuck workers.

        Returns:
            List of (worker_id, reason) tuples for restarted workers.
        """
        restarted = []
        now = _time.time()
        # clean old restart times
        self._restart_times = [
            t for t in self._restart_times
            if now - t < self.restart_window]
        for i, worker in enumerate(self.workers):
            reason = None
            if not worker.is_alive():
                reason = f"died exit={worker.exitcode}"
            elif now - self._last_seen.get(i, 0) > self.stuck_timeout:
                reason = "stuck"
                # Force-kill: a worker that stopped heartbeating is assumed
                # unable to respond to a graceful stop.
                worker.kill()
            if reason:
                try:
                    worker.join(timeout=1)
                    worker.close()
                except Exception:
                    # Best effort: join/close may fail if the process object
                    # is already closed or still running.
                    pass
                self._restart_times.append(now)
                # Too many restarts inside the window => mark pool degraded.
                if len(self._restart_times) >= self.max_restarts:
                    self._degraded = True
                self._start_worker(i)
                restarted.append((i, reason))
        return restarted

    def matches(self, path):
        """Check if path matches any of this pool's route patterns.

        Args:
            path: URL path to match.

        Returns:
            True if path matches, or pool is fallback (routes=None).
        """
        if self.routes is None:
            return True  # default/fallback pool
        for route in self.routes:
            if _match_prefix(route, path):
                return True
        return False

    def broadcast(self, msg):
        """Send message to all workers via their control queues.

        Args:
            msg: Message to send (None for stop, tuple for config).
        """
        for control_queue in self._control_queues:
            control_queue.put(msg)

    def send_config(self, config):
        """Send configuration update to all workers.

        Args:
            config: Dict received by worker's on_config() method.
        """
        self.broadcast((CTL_CONFIG, config))

    def shutdown(self, timeout=5):
        """Stop all workers gracefully, kill after timeout.

        Args:
            timeout: Max seconds to wait for workers to finish.
        """
        # None on the control queue is the stop signal.
        self.broadcast(None)
        # One shared deadline for the whole pool, not per worker.
        deadline = _time.time() + timeout
        for worker in self.workers:
            remaining = max(0, deadline - _time.time())
            worker.join(timeout=remaining)
            if worker.is_alive():
                worker.kill()
                worker.join(timeout=1)

    @property
    def is_degraded(self):
        # True once restarts exceeded max_restarts within restart_window.
        return self._degraded

    @property
    def pending_count(self):
        # qsize() raises NotImplementedError on platforms without
        # sem_getvalue (e.g. macOS) — report 0 there.
        try:
            return self.request_queue.qsize()
        except NotImplementedError:
            return 0

    def status(self):
        """Return pool status dict for monitoring.

        Returns:
            Dict with name, degraded, queue_size, and per-worker info.
        """
        now = _time.time()
        return {
            'name': self.name,
            'degraded': self._degraded,
            'queue_size': self.pending_count,
            'workers': [
                {
                    'id': i,
                    'alive': w.is_alive(),
                    # seconds since last heartbeat, rounded for display
                    'last_seen': round(now - self._last_seen.get(i, 0), 1),
                    'current_request': self._current_request.get(i),
                }
                for i, w in enumerate(self.workers)
            ],
        }
|
|
747
|
+
|
|
748
|
+
|
|
749
|
+
# Pending request tracking
|
|
750
|
+
|
|
751
|
+
class _PendingRequest:
|
|
752
|
+
__slots__ = ('client', 'timestamp', 'pool')
|
|
753
|
+
|
|
754
|
+
def __init__(self, client, pool):
|
|
755
|
+
self.client = client
|
|
756
|
+
self.timestamp = _time.time()
|
|
757
|
+
self.pool = pool
|
|
758
|
+
|
|
759
|
+
|
|
760
|
+
# Dispatcher
|
|
761
|
+
|
|
762
|
+
class Dispatcher:
|
|
763
|
+
"""Main dispatcher process — HTTP server, routing, worker management.
|
|
764
|
+
|
|
765
|
+
Handles static files and @sync routes directly in the main process.
|
|
766
|
+
Routes API requests to worker pools via queues. Uses select()-based
|
|
767
|
+
event loop for multiplexing HTTP sockets, response queue, and custom
|
|
768
|
+
file descriptors.
|
|
769
|
+
"""
|
|
770
|
+
|
|
771
|
+
SELECT_TIMEOUT = 1
|
|
772
|
+
|
|
773
|
+
def __init__(
|
|
774
|
+
self, port=8080, address='0.0.0.0', pools=None,
|
|
775
|
+
static_routes=None, shutdown_timeout=10,
|
|
776
|
+
max_pending=1000, **kwargs):
|
|
777
|
+
"""Initialize dispatcher.
|
|
778
|
+
|
|
779
|
+
Args:
|
|
780
|
+
port: Listen port.
|
|
781
|
+
address: Listen address.
|
|
782
|
+
pools: List of WorkerPool instances.
|
|
783
|
+
static_routes: Dict of URL prefix -> filesystem path.
|
|
784
|
+
shutdown_timeout: Seconds to wait for workers on shutdown.
|
|
785
|
+
max_pending: Max pending requests before rejecting (503).
|
|
786
|
+
ssl_context: Optional ssl.SSLContext for HTTPS.
|
|
787
|
+
**kwargs: Extra arguments passed to HttpServer.
|
|
788
|
+
"""
|
|
789
|
+
self._port = port
|
|
790
|
+
self._address = address
|
|
791
|
+
self._pools = pools or []
|
|
792
|
+
self._static_routes = {}
|
|
793
|
+
if static_routes:
|
|
794
|
+
for prefix, path in static_routes.items():
|
|
795
|
+
self._static_routes[prefix] = _os.path.abspath(
|
|
796
|
+
_os.path.expanduser(path))
|
|
797
|
+
self._shutdown_timeout = shutdown_timeout
|
|
798
|
+
self._max_pending = max_pending
|
|
799
|
+
self._server_kwargs = kwargs
|
|
800
|
+
self._http_server = None
|
|
801
|
+
self._response_queue = _mp.Queue()
|
|
802
|
+
self._pending = {}
|
|
803
|
+
self._next_request_id = 0
|
|
804
|
+
self._sync_routes = []
|
|
805
|
+
self._readers = {}
|
|
806
|
+
self._writers = {}
|
|
807
|
+
self._log_is_tty = _sys.stderr.isatty()
|
|
808
|
+
self._running = False
|
|
809
|
+
self._build_sync_routes()
|
|
810
|
+
|
|
811
|
+
def _build_sync_routes(self):
|
|
812
|
+
"""Collect @sync decorated methods and build sync route table."""
|
|
813
|
+
for klass in type(self).__mro__:
|
|
814
|
+
for name, val in vars(klass).items():
|
|
815
|
+
if callable(val) and hasattr(val, '_sync_pattern'):
|
|
816
|
+
bound = getattr(self, name)
|
|
817
|
+
self._sync_routes.append((
|
|
818
|
+
val._sync_pattern,
|
|
819
|
+
val._sync_methods,
|
|
820
|
+
bound))
|
|
821
|
+
|
|
822
|
+
def register_reader(self, fd, callback):
|
|
823
|
+
"""Register file-like object for read events in dispatcher select loop.
|
|
824
|
+
|
|
825
|
+
Args:
|
|
826
|
+
fd: Any object with fileno() (socket, serial port, pipe, ...).
|
|
827
|
+
callback: Called with fd when readable: callback(fd).
|
|
828
|
+
"""
|
|
829
|
+
self._readers[fd] = callback
|
|
830
|
+
|
|
831
|
+
def unregister_reader(self, fd):
|
|
832
|
+
"""Remove file-like object from read events."""
|
|
833
|
+
self._readers.pop(fd, None)
|
|
834
|
+
|
|
835
|
+
def register_writer(self, fd, callback):
    """Watch a file-like object for write readiness in the event loop.

    Once registered, the dispatcher's select loop invokes
    ``callback(fd)`` every time *fd* becomes writable.  Register only
    while there is data queued to send, and unregister as soon as the
    buffer drains — otherwise select() will spin on an always-writable
    descriptor.

    Args:
        fd: Any object with fileno() (socket, serial port, pipe, ...).
        callback: Called with fd when writable: callback(fd).
    """
    self._writers[fd] = callback
|
|
846
|
+
|
|
847
|
+
def unregister_writer(self, fd):
    """Stop watching *fd* for write events; silently ignores unknown fds."""
    self._writers.pop(fd, None)
|
|
850
|
+
|
|
851
|
+
def on_response(self, response, pending):
    """Post-send hook invoked after a worker response reaches the client.

    No-op by default; subclasses may override to post-process results,
    e.g. forward data to another pool.

    Args:
        response: Response object from worker.
        pending: _PendingRequest with client and pool reference.
    """
|
|
860
|
+
|
|
861
|
+
def on_idle(self):
    """Idle hook: runs on each select timeout when no events arrived.

    No-op by default; override to perform periodic background work in
    the dispatcher process.
    """
|
|
866
|
+
|
|
867
|
+
def do_check(self, client):
    """Pre-dispatch validation hook (auth, API keys, rate limiting).

    The default implementation accepts every request.  Overrides
    should send an error response themselves and raise RejectRequest
    to prevent the request from being dispatched to a worker pool.

    Args:
        client: HttpConnection from uhttp-server.
    """
|
|
876
|
+
|
|
877
|
+
def _serve_static(self, client):
|
|
878
|
+
"""Try to serve static file. Returns True if served."""
|
|
879
|
+
path = client.path
|
|
880
|
+
for prefix, base_path in self._static_routes.items():
|
|
881
|
+
if path.startswith(prefix):
|
|
882
|
+
rel_path = path[len(prefix):]
|
|
883
|
+
file_path = _os.path.normpath(
|
|
884
|
+
_os.path.join(base_path, rel_path))
|
|
885
|
+
# path traversal protection
|
|
886
|
+
if not (file_path.startswith(base_path + _os.sep)
|
|
887
|
+
or file_path == base_path):
|
|
888
|
+
continue
|
|
889
|
+
if _os.path.isdir(file_path):
|
|
890
|
+
file_path = _os.path.join(file_path, _DIR_INDEX)
|
|
891
|
+
if _os.path.isfile(file_path):
|
|
892
|
+
client.respond_file(file_path)
|
|
893
|
+
return True
|
|
894
|
+
return False
|
|
895
|
+
|
|
896
|
+
def _handle_sync(self, client):
|
|
897
|
+
"""Try sync route handlers. Returns True if handled."""
|
|
898
|
+
for pattern, methods, handler in self._sync_routes:
|
|
899
|
+
if methods and client.method not in methods:
|
|
900
|
+
continue
|
|
901
|
+
path_params = _match_pattern(pattern, client.path)
|
|
902
|
+
if path_params is not None:
|
|
903
|
+
handler(client, path_params)
|
|
904
|
+
return True
|
|
905
|
+
return False
|
|
906
|
+
|
|
907
|
+
def _find_pool(self, path):
|
|
908
|
+
"""Find matching worker pool for path, or fallback pool."""
|
|
909
|
+
default_pool = None
|
|
910
|
+
for pool in self._pools:
|
|
911
|
+
if pool.routes is None:
|
|
912
|
+
default_pool = pool
|
|
913
|
+
continue
|
|
914
|
+
if pool.matches(path):
|
|
915
|
+
return pool
|
|
916
|
+
return default_pool
|
|
917
|
+
|
|
918
|
+
def _dispatch_to_pool(self, client):
|
|
919
|
+
"""Send request to matching worker pool."""
|
|
920
|
+
pool = self._find_pool(client.path)
|
|
921
|
+
if pool is None:
|
|
922
|
+
client.respond({'error': 'Not found'}, status=404)
|
|
923
|
+
return
|
|
924
|
+
if pool.is_degraded:
|
|
925
|
+
client.respond(
|
|
926
|
+
{'error': 'Service unavailable'}, status=503)
|
|
927
|
+
return
|
|
928
|
+
if len(self._pending) >= self._max_pending:
|
|
929
|
+
client.respond(
|
|
930
|
+
{'error': 'Too many requests'}, status=503)
|
|
931
|
+
return
|
|
932
|
+
request_id = self._next_request_id
|
|
933
|
+
self._next_request_id += 1
|
|
934
|
+
self._pending[request_id] = _PendingRequest(client, pool)
|
|
935
|
+
pool.request_queue.put(Request(
|
|
936
|
+
request_id=request_id,
|
|
937
|
+
method=client.method,
|
|
938
|
+
path=client.path,
|
|
939
|
+
query=client.query,
|
|
940
|
+
data=client.data,
|
|
941
|
+
headers=dict(client.headers),
|
|
942
|
+
content_type=client.content_type))
|
|
943
|
+
|
|
944
|
+
def _http_request(self, client):
|
|
945
|
+
"""Process incoming HTTP request."""
|
|
946
|
+
# 1. static files
|
|
947
|
+
if self._serve_static(client):
|
|
948
|
+
return
|
|
949
|
+
# 2. sync handlers
|
|
950
|
+
if self._handle_sync(client):
|
|
951
|
+
return
|
|
952
|
+
# 3. auth/validation check
|
|
953
|
+
try:
|
|
954
|
+
self.do_check(client)
|
|
955
|
+
except RejectRequest:
|
|
956
|
+
return
|
|
957
|
+
except Exception:
|
|
958
|
+
client.respond({'error': 'Internal server error'}, status=500)
|
|
959
|
+
return
|
|
960
|
+
# 4. dispatch to worker pool
|
|
961
|
+
self._dispatch_to_pool(client)
|
|
962
|
+
|
|
963
|
+
def _process_response(self, msg):
    """Handle one message pulled off the worker response queue.

    Messages are tuples tagged by their first element: heartbeats
    update the owning pool's worker-health tracking, log records are
    forwarded to on_log(), and responses complete (and remove) the
    matching pending request.
    """
    tag = msg[0]
    if tag == MSG_HEARTBEAT:
        _, pool_name, worker_id, request_id = msg
        for pool in self._pools:
            if pool.name != pool_name:
                continue
            pool.update_heartbeat(worker_id, request_id)
            break
    elif tag == MSG_LOG:
        _, name, level, message = msg
        self.on_log(name, level, message)
    elif tag == MSG_RESPONSE:
        _, request_id, response = msg
        # Pending entry may already be gone (e.g. timed out) — then
        # the late response is dropped.
        pending = self._pending.pop(request_id, None)
        if pending is None:
            return
        pending.client.respond(
            response.data,
            status=response.status,
            headers=response.headers)
        self.on_response(response, pending)
|
|
984
|
+
|
|
985
|
+
def _process_responses(self):
|
|
986
|
+
"""Process all pending messages from response queue."""
|
|
987
|
+
while True:
|
|
988
|
+
try:
|
|
989
|
+
msg = self._response_queue.get_nowait()
|
|
990
|
+
except _queue.Empty:
|
|
991
|
+
return
|
|
992
|
+
|
|
993
|
+
self._process_response(msg)
|
|
994
|
+
|
|
995
|
+
def _expire_pending(self):
|
|
996
|
+
"""Timeout expired pending requests."""
|
|
997
|
+
now = _time.time()
|
|
998
|
+
expired = [
|
|
999
|
+
rid for rid, pending in self._pending.items()
|
|
1000
|
+
if now - pending.timestamp > pending.pool.timeout]
|
|
1001
|
+
for request_id in expired:
|
|
1002
|
+
pending = self._pending.pop(request_id)
|
|
1003
|
+
self.on_log(
|
|
1004
|
+
pending.pool.name, LOG_WARNING,
|
|
1005
|
+
f"request {request_id} timed out after "
|
|
1006
|
+
f"{pending.pool.timeout}s")
|
|
1007
|
+
pending.client.respond(
|
|
1008
|
+
{'error': 'Request timeout'}, status=504)
|
|
1009
|
+
|
|
1010
|
+
def _check_all_workers(self):
|
|
1011
|
+
"""Check health of all worker pools and queue sizes."""
|
|
1012
|
+
for pool in self._pools:
|
|
1013
|
+
restarted = pool.check_workers()
|
|
1014
|
+
for worker_id, reason in restarted:
|
|
1015
|
+
self._on_worker_restarted(pool, worker_id, reason)
|
|
1016
|
+
if pool.queue_warning:
|
|
1017
|
+
qsize = pool.pending_count
|
|
1018
|
+
if qsize >= pool.queue_warning:
|
|
1019
|
+
self.on_log(
|
|
1020
|
+
pool.name, LOG_WARNING,
|
|
1021
|
+
f"queue size {qsize} exceeds "
|
|
1022
|
+
f"threshold {pool.queue_warning}")
|
|
1023
|
+
|
|
1024
|
+
def on_log(self, name, level, message):
    """Write a log line to stderr for a worker or dispatcher event.

    Override to forward logs to a logging framework instead.  On a
    TTY the line is colorized by level; otherwise it is prefixed with
    a syslog priority tag understood by systemd-journald.

    Args:
        name: Logger name (e.g., 'MyWorker[0]').
        level: Log level (LOG_DEBUG..LOG_CRITICAL).
        message: Formatted log message string.
    """
    level_name = LOG_LEVEL_NAMES.get(level, str(level))
    if self._log_is_tty:
        color = _LOG_ANSI_COLOR.get(level, '')
        line = f"{color}{level_name:8s} {name:20s} {message}{_ANSI_RESET}"
    else:
        prefix = _LOG_SYSLOG_PREFIX.get(level, '')
        line = f"{prefix}{level_name:8s} {name:20s} {message}"
    print(line, file=_sys.stderr)
|
|
1044
|
+
|
|
1045
|
+
def _on_worker_restarted(self, pool, worker_id, reason):
    """Log a worker restart event.

    Default behavior is a single error-level log line; override to
    add alerting or metrics.
    """
    logger_name = f'{pool.name}[{worker_id}]'
    self.on_log(logger_name, LOG_ERROR, f"worker restarted: {reason}")
|
|
1053
|
+
|
|
1054
|
+
def _sigterm(self, _signo, _stack_frame):
|
|
1055
|
+
self._running = False
|
|
1056
|
+
|
|
1057
|
+
def _wait_events(self):
    """Single iteration of the main event loop.

    Multiplexes three event sources through one select() call: the
    HTTP server's sockets, the worker response queue (via its
    underlying pipe), and user-registered reader/writer fds.  On a
    select timeout with no events at all, the periodic maintenance
    hooks run; expired pending requests are checked every iteration.
    """
    # NOTE(review): Queue._reader is an undocumented internal of
    # multiprocessing.Queue; it exposes the queue's pipe so the queue
    # can participate in select() — confirm on target platforms.
    waiting_sockets = self._http_server.read_sockets + [
        self._response_queue._reader] + list(self._readers)
    write_sockets = (self._http_server.write_sockets
                     + list(self._writers))
    read_events, write_events, _ = _select.select(
        waiting_sockets, write_sockets, [], self.SELECT_TIMEOUT)
    # process responses from workers; the queue pipe is removed from
    # read_events so it is not passed on to the HTTP server below
    if self._response_queue._reader in read_events:
        read_events = [
            s for s in read_events
            if s is not self._response_queue._reader]
        self._process_responses()
    # custom writers
    for fd in write_events:
        if fd in self._writers:
            self._writers[fd](fd)
    # custom readers
    for fd in read_events:
        if fd in self._readers:
            self._readers[fd](fd)
    # filter custom fds before passing to http server
    http_read = [s for s in read_events if s not in self._readers]
    http_write = [s for s in write_events if s not in self._writers]
    # process HTTP events
    if http_read or http_write:
        client = self._http_server.process_events(
            http_read, http_write)
        if client:
            self._http_request(client)
    # periodic maintenance (on timeout — no events)
    if not read_events and not write_events:
        self._check_all_workers()
        self.on_idle()
    # always check expired requests
    self._expire_pending()
|
|
1094
|
+
|
|
1095
|
+
def run(self):
    """Start dispatcher and all worker pools.

    Creates the HTTP server, starts every configured pool (each pool
    receives the shared response queue), installs SIGTERM/SIGINT
    handlers that clear the running flag, then loops on
    _wait_events() until asked to stop.  Shutdown runs in a finally
    block so workers are reaped even if the event loop raises.

    Blocks until SIGTERM/SIGINT, then performs graceful shutdown.
    """
    self._http_server = _uhttp_server.HttpServer(
        address=self._address,
        port=self._port,
        **self._server_kwargs)
    self._running = True
    # start all pools
    for pool in self._pools:
        pool.start(self._response_queue)
    # graceful-stop signals; the handler only flips self._running
    _signal.signal(_signal.SIGTERM, self._sigterm)
    _signal.signal(_signal.SIGINT, self._sigterm)
    try:
        while self._running:
            self._wait_events()
    finally:
        self._shutdown()
|
|
1115
|
+
|
|
1116
|
+
def _shutdown(self):
    """Graceful shutdown.

    Closes the listener, drains worker responses for up to
    shutdown_timeout seconds so in-flight requests can still
    complete, answers whatever remains pending with 503, and finally
    stops every worker pool.
    """
    # stop accepting connections
    self._http_server.close()
    # drain remaining responses
    deadline = _time.time() + self._shutdown_timeout
    while self._pending and _time.time() < deadline:
        try:
            msg = self._response_queue.get(timeout=0.1)
            self._process_response(msg)
        except _queue.Empty:
            pass
    # respond 503 to remaining pending; the client may already have
    # disconnected, so send failures are deliberately ignored
    for pending in self._pending.values():
        try:
            pending.client.respond(
                {'error': 'Server shutting down'}, status=503)
        except Exception:
            pass
    self._pending.clear()
    # shutdown all pools
    for pool in self._pools:
        pool.shutdown(timeout=self._shutdown_timeout)
|