plain-0.75.0-py3-none-any.whl → plain-0.76.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- plain/CHANGELOG.md +19 -0
- plain/cli/core.py +35 -17
- plain/cli/runtime.py +28 -0
- plain/cli/server.py +143 -0
- plain/server/LICENSE +35 -0
- plain/server/README.md +75 -0
- plain/server/__init__.py +9 -0
- plain/server/app.py +52 -0
- plain/server/arbiter.py +555 -0
- plain/server/config.py +118 -0
- plain/server/errors.py +31 -0
- plain/server/glogging.py +292 -0
- plain/server/http/__init__.py +12 -0
- plain/server/http/body.py +283 -0
- plain/server/http/errors.py +150 -0
- plain/server/http/message.py +399 -0
- plain/server/http/parser.py +69 -0
- plain/server/http/unreader.py +88 -0
- plain/server/http/wsgi.py +421 -0
- plain/server/pidfile.py +91 -0
- plain/server/reloader.py +158 -0
- plain/server/sock.py +219 -0
- plain/server/util.py +380 -0
- plain/server/workers/__init__.py +12 -0
- plain/server/workers/base.py +305 -0
- plain/server/workers/gthread.py +393 -0
- plain/server/workers/sync.py +210 -0
- plain/server/workers/workertmp.py +50 -0
- {plain-0.75.0.dist-info → plain-0.76.0.dist-info}/METADATA +1 -1
- {plain-0.75.0.dist-info → plain-0.76.0.dist-info}/RECORD +33 -7
- {plain-0.75.0.dist-info → plain-0.76.0.dist-info}/WHEEL +0 -0
- {plain-0.75.0.dist-info → plain-0.76.0.dist-info}/entry_points.txt +0 -0
- {plain-0.75.0.dist-info → plain-0.76.0.dist-info}/licenses/LICENSE +0 -0
plain/server/workers/base.py
@@ -0,0 +1,305 @@
+from __future__ import annotations
+
+#
+#
+# This file is part of gunicorn released under the MIT license.
+# See the LICENSE for more information.
+#
+# Vendored and modified for Plain.
+import io
+import os
+import signal
+import sys
+import time
+import traceback
+from datetime import datetime
+from random import randint
+from ssl import SSLError
+from typing import TYPE_CHECKING, Any
+
+from .. import util
+from ..http.errors import (
+    ConfigurationProblem,
+    InvalidHeader,
+    InvalidHeaderName,
+    InvalidHTTPVersion,
+    InvalidRequestLine,
+    InvalidRequestMethod,
+    InvalidSchemeHeaders,
+    LimitRequestHeaders,
+    LimitRequestLine,
+    ObsoleteFolding,
+    UnsupportedTransferCoding,
+)
+from ..http.wsgi import Response, default_environ
+from ..reloader import reloader_engines
+from .workertmp import WorkerTmp
+
+if TYPE_CHECKING:
+    import socket
+
+    from ..app import ServerApplication
+    from ..config import Config
+    from ..glogging import Logger
+    from ..http.message import Request
+
+# Maximum jitter to add to max_requests to stagger worker restarts
+MAX_REQUESTS_JITTER = 50
+
+
+class Worker:
+    SIGNALS = [
+        getattr(signal, f"SIG{x}")
+        for x in ("ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD".split())
+    ]
+
+    PIPE = []
+
+    def __init__(
+        self,
+        age: int,
+        ppid: int,
+        sockets: list[socket.socket],
+        app: ServerApplication,
+        timeout: int,
+        cfg: Config,
+        log: Logger,
+    ):
+        """\
+        This is called pre-fork so it shouldn't do anything to the
+        current process. If there's a need to make process wide
+        changes you'll want to do that in ``self.init_process()``.
+        """
+        self.age = age
+        self.pid: str | int = "[booting]"
+        self.ppid = ppid
+        self.sockets = sockets
+        self.app = app
+        self.timeout = timeout
+        self.cfg = cfg
+        self.booted = False
+        self.aborted = False
+        self.reloader: Any = None
+
+        self.nr = 0
+
+        if cfg.max_requests > 0:
+            jitter = randint(0, MAX_REQUESTS_JITTER)
+            self.max_requests = cfg.max_requests + jitter
+        else:
+            self.max_requests = sys.maxsize
+
+        self.alive = True
+        self.log = log
+        self.tmp = WorkerTmp(cfg)
+
+    def __str__(self) -> str:
+        return f"<Worker {self.pid}>"
+
+    def notify(self) -> None:
+        """\
+        Your worker subclass must arrange to have this method called
+        once every ``self.timeout`` seconds. If you fail in accomplishing
+        this task, the master process will murder your workers.
+        """
+        self.tmp.notify()
+
+    def run(self) -> None:
+        """\
+        This is the mainloop of a worker process. You should override
+        this method in a subclass to provide the intended behaviour
+        for your particular evil schemes.
+        """
+        raise NotImplementedError()
+
+    def init_process(self) -> None:
+        """\
+        If you override this method in a subclass, the last statement
+        in the function should be to call this method with
+        super().init_process() so that the ``run()`` loop is initiated.
+        """
+
+        # Reseed the random number generator
+        util.seed()
+
+        # For waking ourselves up
+        self.PIPE = os.pipe()
+        for p in self.PIPE:
+            util.set_non_blocking(p)
+            util.close_on_exec(p)
+
+        # Prevent fd inheritance
+        for s in self.sockets:
+            util.close_on_exec(s)
+        util.close_on_exec(self.tmp.fileno())
+
+        self.wait_fds = self.sockets + [self.PIPE[0]]
+
+        self.log.close_on_exec()
+
+        self.init_signals()
+
+        # start the reloader
+        if self.cfg.reload:
+
+            def changed(fname: str) -> None:
+                self.log.info("Worker reloading: %s modified", fname)
+                self.alive = False
+                os.write(self.PIPE[1], b"1")
+                time.sleep(0.1)
+                sys.exit(0)
+
+            reloader_cls = reloader_engines["auto"]
+            self.reloader = reloader_cls(
+                extra_files=self.cfg.reload_extra_files, callback=changed
+            )
+
+        self.load_wsgi()
+        if self.reloader:
+            self.reloader.start()
+
+        # Enter main run loop
+        self.booted = True
+        self.run()
+
+    def load_wsgi(self) -> None:
+        try:
+            self.wsgi = self.app.wsgi()
+        except SyntaxError as e:
+            if not self.cfg.reload:
+                raise
+
+            self.log.exception(e)
+
+            # fix from PR #1228
+            # storing the traceback into exc_tb will create a circular reference.
+            # per https://docs.python.org/2/library/sys.html#sys.exc_info warning,
+            # delete the traceback after use.
+            try:
+                _, exc_val, exc_tb = sys.exc_info()
+                self.reloader.add_extra_file(exc_val.filename)
+
+                tb_string = io.StringIO()
+                traceback.print_tb(exc_tb, file=tb_string)
+                self.wsgi = util.make_fail_app(tb_string.getvalue())
+            finally:
+                del exc_tb
+
+    def init_signals(self) -> None:
+        # reset signaling
+        for s in self.SIGNALS:
+            signal.signal(s, signal.SIG_DFL)
+        # init new signaling
+        signal.signal(signal.SIGQUIT, self.handle_quit)
+        signal.signal(signal.SIGTERM, self.handle_exit)
+        signal.signal(signal.SIGINT, self.handle_quit)
+        signal.signal(signal.SIGWINCH, self.handle_winch)
+        signal.signal(signal.SIGUSR1, self.handle_usr1)
+        signal.signal(signal.SIGABRT, self.handle_abort)
+
+        # Don't let SIGTERM and SIGUSR1 disturb active requests
+        # by interrupting system calls
+        signal.siginterrupt(signal.SIGTERM, False)
+        signal.siginterrupt(signal.SIGUSR1, False)
+
+        if hasattr(signal, "set_wakeup_fd"):
+            signal.set_wakeup_fd(self.PIPE[1])
+
+    def handle_usr1(self, sig: int, frame: Any) -> None:
+        self.log.reopen_files()
+
+    def handle_exit(self, sig: int, frame: Any) -> None:
+        self.alive = False
+
+    def handle_quit(self, sig: int, frame: Any) -> None:
+        self.alive = False
+        time.sleep(0.1)
+        sys.exit(0)
+
+    def handle_abort(self, sig: int, frame: Any) -> None:
+        self.alive = False
+        sys.exit(1)
+
+    def handle_error(
+        self, req: Request | None, client: socket.socket, addr: Any, exc: Exception
+    ) -> None:
+        request_start = datetime.now()
+        addr = addr or ("", -1)  # unix socket case
+        if isinstance(
+            exc,
+            InvalidRequestLine
+            | InvalidRequestMethod
+            | InvalidHTTPVersion
+            | InvalidHeader
+            | InvalidHeaderName
+            | LimitRequestLine
+            | LimitRequestHeaders
+            | InvalidSchemeHeaders
+            | UnsupportedTransferCoding
+            | ConfigurationProblem
+            | ObsoleteFolding
+            | SSLError,
+        ):
+            status_int = 400
+            reason = "Bad Request"
+
+            if isinstance(exc, InvalidRequestLine):
+                mesg = f"Invalid Request Line '{str(exc)}'"
+            elif isinstance(exc, InvalidRequestMethod):
+                mesg = f"Invalid Method '{str(exc)}'"
+            elif isinstance(exc, InvalidHTTPVersion):
+                mesg = f"Invalid HTTP Version '{str(exc)}'"
+            elif isinstance(exc, UnsupportedTransferCoding):
+                mesg = f"{str(exc)}"
+                status_int = 501
+            elif isinstance(exc, ConfigurationProblem):
+                mesg = f"{str(exc)}"
+                status_int = 500
+            elif isinstance(exc, ObsoleteFolding):
+                mesg = f"{str(exc)}"
+            elif isinstance(exc, InvalidHeaderName | InvalidHeader):
+                mesg = f"{str(exc)}"
+                if not req and hasattr(exc, "req"):
+                    req = exc.req  # type: ignore[attr-defined] # for access log
+            elif isinstance(exc, LimitRequestLine):
+                mesg = f"{str(exc)}"
+            elif isinstance(exc, LimitRequestHeaders):
+                reason = "Request Header Fields Too Large"
+                mesg = f"Error parsing headers: '{str(exc)}'"
+                status_int = 431
+            elif isinstance(exc, InvalidSchemeHeaders):
+                mesg = f"{str(exc)}"
+            elif isinstance(exc, SSLError):
+                reason = "Forbidden"
+                mesg = f"'{str(exc)}'"
+                status_int = 403
+
+            msg = "Invalid request from ip={ip}: {error}"
+            self.log.warning(msg.format(ip=addr[0], error=str(exc)))
+        else:
+            if hasattr(req, "uri"):
+                self.log.exception("Error handling request %s", req.uri)
+            else:
+                self.log.exception("Error handling request (no URI read)")
+            status_int = 500
+            reason = "Internal Server Error"
+            mesg = ""
+
+        if req is not None:
+            request_time = datetime.now() - request_start
+            environ = default_environ(req, client, self.cfg)
+            environ["REMOTE_ADDR"] = addr[0]
+            environ["REMOTE_PORT"] = str(addr[1])
+            resp = Response(req, client, self.cfg)
+            resp.status = f"{status_int} {reason}"
+            resp.response_length = len(mesg)
+            self.log.access(resp, req, environ, request_time)
+
+        try:
+            util.write_error(client, status_int, reason, mesg)
+        except Exception:
+            self.log.debug("Failed to send error message.")
+
+    def handle_winch(self, sig: int, fname: Any) -> None:
+        # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.
+        self.log.debug("worker: SIGWINCH ignored.")
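The docstrings in base.py above spell out the subclass contract: override run() as the main loop, call notify() at least once every self.timeout seconds so the arbiter's heartbeat check passes, and, if init_process() is overridden, call super().init_process() last so that run() is entered. A minimal sketch of that contract, assuming plain 0.76.0 is installed (the SleepyWorker class itself is hypothetical and not part of the package):

import time

from plain.server.workers.base import Worker  # vendored base class added in this release


class SleepyWorker(Worker):
    """Hypothetical worker that does no real work but honors the heartbeat contract."""

    def init_process(self) -> None:
        # Process-wide setup would go here (this runs after fork).
        # super().init_process() must come last: it wires up pipes and signal
        # handlers and then enters self.run().
        super().init_process()

    def run(self) -> None:
        while self.alive:
            # Touch the WorkerTmp heartbeat; skipping this for longer than
            # self.timeout lets the arbiter kill the worker.
            self.notify()
            time.sleep(min(1.0, self.timeout / 2))

The shipped workers follow the same pattern: ThreadWorker.run() in the gthread module below calls self.notify() on every loop iteration before polling for events.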
plain/server/workers/gthread.py
@@ -0,0 +1,393 @@
+from __future__ import annotations
+
+#
+#
+# This file is part of gunicorn released under the MIT license.
+# See the LICENSE for more information.
+#
+# Vendored and modified for Plain.
+# design:
+# A threaded worker accepts connections in the main loop, accepted
+# connections are added to the thread pool as a connection job.
+# Keepalive connections are put back in the loop waiting for an event.
+# If no event happen after the keep alive timeout, the connection is
+# closed.
+# pylint: disable=no-else-break
+import errno
+import os
+import selectors
+import socket
+import ssl
+import sys
+import time
+from collections import deque
+from concurrent import futures
+from datetime import datetime
+from functools import partial
+from threading import RLock
+from types import FrameType
+from typing import TYPE_CHECKING, Any
+
+from .. import http, sock, util
+from ..http import wsgi
+from . import base
+
+if TYPE_CHECKING:
+    from ..config import Config
+    from ..glogging import Logger
+
+# Keep-alive connection timeout in seconds
+KEEPALIVE = 2
+
+# Maximum number of simultaneous client connections
+WORKER_CONNECTIONS = 1000
+
+
+class TConn:
+    def __init__(
+        self,
+        cfg: Config,
+        sock: socket.socket,
+        client: tuple[str, int],
+        server: tuple[str, int],
+    ) -> None:
+        self.cfg = cfg
+        self.sock = sock
+        self.client = client
+        self.server = server
+
+        self.timeout: float | None = None
+        self.parser: http.RequestParser | None = None
+        self.initialized: bool = False
+
+        # set the socket to non blocking
+        self.sock.setblocking(False)
+
+    def init(self) -> None:
+        self.initialized = True
+        self.sock.setblocking(True)
+
+        if self.parser is None:
+            # wrap the socket if needed
+            if self.cfg.is_ssl:
+                self.sock = sock.ssl_wrap_socket(self.sock, self.cfg)
+
+            # initialize the parser
+            self.parser = http.RequestParser(self.cfg, self.sock, self.client)
+
+    def set_timeout(self) -> None:
+        # set the timeout
+        self.timeout = time.time() + KEEPALIVE
+
+    def close(self) -> None:
+        util.close(self.sock)
+
+
+class ThreadWorker(base.Worker):
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.worker_connections: int = WORKER_CONNECTIONS
+        self.max_keepalived: int = WORKER_CONNECTIONS - self.cfg.threads
+        # initialise the pool
+        self.tpool: futures.ThreadPoolExecutor | None = None
+        self.poller: selectors.DefaultSelector | None = None
+        self._lock: RLock | None = None
+        self.futures: deque[futures.Future[tuple[bool, TConn]]] = deque()
+        self._keep: deque[TConn] = deque()
+        self.nr_conns: int = 0
+
+    @classmethod
+    def check_config(cls, cfg: Config, log: Logger) -> None:
+        max_keepalived = WORKER_CONNECTIONS - cfg.threads
+
+        if max_keepalived <= 0:
+            log.warning(
+                "No keepalived connections can be handled. "
+                "Check the number of worker connections and threads."
+            )
+
+    def init_process(self) -> None:
+        self.tpool = self.get_thread_pool()
+        self.poller = selectors.DefaultSelector()
+        self._lock = RLock()
+        super().init_process()
+
+    def get_thread_pool(self) -> futures.ThreadPoolExecutor:
+        """Override this method to customize how the thread pool is created"""
+        return futures.ThreadPoolExecutor(max_workers=self.cfg.threads)
+
+    def handle_quit(self, sig: int, frame: FrameType | None) -> None:
+        self.alive = False
+        self.tpool.shutdown(False)
+        time.sleep(0.1)
+        sys.exit(0)
+
+    def _wrap_future(self, fs: futures.Future[tuple[bool, TConn]], conn: TConn) -> None:
+        fs.conn = conn  # type: ignore[attr-defined]
+        self.futures.append(fs)
+        fs.add_done_callback(self.finish_request)
+
+    def enqueue_req(self, conn: TConn) -> None:
+        conn.init()
+        # submit the connection to a worker
+        fs = self.tpool.submit(self.handle, conn)
+        self._wrap_future(fs, conn)
+
+    def accept(self, server: tuple[str, int], listener: socket.socket) -> None:
+        try:
+            sock, client = listener.accept()
+            # initialize the connection object
+            conn = TConn(self.cfg, sock, client, server)
+
+            self.nr_conns += 1
+            # wait until socket is readable
+            with self._lock:
+                self.poller.register(
+                    conn.sock,
+                    selectors.EVENT_READ,
+                    partial(self.on_client_socket_readable, conn),
+                )
+        except OSError as e:
+            if e.errno not in (errno.EAGAIN, errno.ECONNABORTED, errno.EWOULDBLOCK):
+                raise
+
+    def on_client_socket_readable(self, conn: TConn, client: socket.socket) -> None:
+        with self._lock:
+            # unregister the client from the poller
+            self.poller.unregister(client)
+
+            if conn.initialized:
+                # remove the connection from keepalive
+                try:
+                    self._keep.remove(conn)
+                except ValueError:
+                    # race condition
+                    return
+
+        # submit the connection to a worker
+        self.enqueue_req(conn)
+
+    def murder_keepalived(self) -> None:
+        now = time.time()
+        while True:
+            with self._lock:
+                try:
+                    # remove the connection from the queue
+                    conn = self._keep.popleft()
+                except IndexError:
+                    break
+
+            delta = conn.timeout - now
+            if delta > 0:
+                # add the connection back to the queue
+                with self._lock:
+                    self._keep.appendleft(conn)
+                break
+            else:
+                self.nr_conns -= 1
+                # remove the socket from the poller
+                with self._lock:
+                    try:
+                        self.poller.unregister(conn.sock)
+                    except OSError as e:
+                        if e.errno != errno.EBADF:
+                            raise
+                    except KeyError:
+                        # already removed by the system, continue
+                        pass
+                    except ValueError:
+                        # already removed by the system continue
+                        pass
+
+                # close the socket
+                conn.close()
+
+    def is_parent_alive(self) -> bool:
+        # If our parent changed then we shut down.
+        if self.ppid != os.getppid():
+            self.log.info("Parent changed, shutting down: %s", self)
+            return False
+        return True
+
+    def run(self) -> None:
+        # init listeners, add them to the event loop
+        for listener in self.sockets:
+            listener.setblocking(False)
+            # a race condition during graceful shutdown may make the listener
+            # name unavailable in the request handler so capture it once here
+            server = listener.getsockname()
+            acceptor = partial(self.accept, server)
+            self.poller.register(listener, selectors.EVENT_READ, acceptor)
+
+        while self.alive:
+            # notify the arbiter we are alive
+            self.notify()
+
+            # can we accept more connections?
+            if self.nr_conns < self.worker_connections:
+                # wait for an event
+                events = self.poller.select(1.0)
+                for key, _ in events:
+                    callback = key.data
+                    callback(key.fileobj)
+
+                # check (but do not wait) for finished requests
+                result = futures.wait(
+                    self.futures, timeout=0, return_when=futures.FIRST_COMPLETED
+                )
+            else:
+                # wait for a request to finish
+                result = futures.wait(
+                    self.futures, timeout=1.0, return_when=futures.FIRST_COMPLETED
+                )
+
+            # clean up finished requests
+            for fut in result.done:
+                self.futures.remove(fut)
+
+            if not self.is_parent_alive():
+                break
+
+            # handle keepalive timeouts
+            self.murder_keepalived()
+
+        self.tpool.shutdown(False)
+        self.poller.close()
+
+        for s in self.sockets:
+            s.close()
+
+        futures.wait(self.futures, timeout=self.cfg.graceful_timeout)
+
+    def finish_request(self, fs: futures.Future[tuple[bool, TConn]]) -> None:
+        if fs.cancelled():
+            self.nr_conns -= 1
+            fs.conn.close()  # type: ignore[attr-defined]
+            return
+
+        try:
+            (keepalive, conn) = fs.result()
+            # if the connection should be kept alived add it
+            # to the eventloop and record it
+            if keepalive and self.alive:
+                # flag the socket as non blocked
+                conn.sock.setblocking(False)
+
+                # register the connection
+                conn.set_timeout()
+                with self._lock:
+                    self._keep.append(conn)
+
+                    # add the socket to the event loop
+                    self.poller.register(
+                        conn.sock,
+                        selectors.EVENT_READ,
+                        partial(self.on_client_socket_readable, conn),
+                    )
+            else:
+                self.nr_conns -= 1
+                conn.close()
+        except Exception:
+            # an exception happened, make sure to close the
+            # socket.
+            self.nr_conns -= 1
+            fs.conn.close()  # type: ignore[attr-defined]
+
+    def handle(self, conn: TConn) -> tuple[bool, TConn]:
+        keepalive = False
+        req = None
+        try:
+            # conn.parser is guaranteed to be initialized by enqueue_req -> conn.init()
+            assert conn.parser is not None
+            req = next(conn.parser)
+            if not req:
+                return (False, conn)
+
+            # handle the request
+            keepalive = self.handle_request(req, conn)
+            if keepalive:
+                return (keepalive, conn)
+        except http.errors.NoMoreData as e:
+            self.log.debug("Ignored premature client disconnection. %s", e)
+
+        except StopIteration as e:
+            self.log.debug("Closing connection. %s", e)
+        except ssl.SSLError as e:
+            if e.args[0] == ssl.SSL_ERROR_EOF:
+                self.log.debug("ssl connection closed")
+                conn.sock.close()
+            else:
+                self.log.debug("Error processing SSL request.")
+                self.handle_error(req, conn.sock, conn.client, e)
+
+        except OSError as e:
+            if e.errno not in (errno.EPIPE, errno.ECONNRESET, errno.ENOTCONN):
+                self.log.exception("Socket error processing request.")
+            else:
+                if e.errno == errno.ECONNRESET:
+                    self.log.debug("Ignoring connection reset")
+                elif e.errno == errno.ENOTCONN:
+                    self.log.debug("Ignoring socket not connected")
+                else:
+                    self.log.debug("Ignoring connection epipe")
+        except Exception as e:
+            self.handle_error(req, conn.sock, conn.client, e)
+
+        return (False, conn)
+
+    def handle_request(self, req: Any, conn: TConn) -> bool:
+        environ: dict[str, Any] = {}
+        resp: wsgi.Response | None = None
+        try:
+            request_start = datetime.now()
+            resp, environ = wsgi.create(
+                req, conn.sock, conn.client, conn.server, self.cfg
+            )
+            environ["wsgi.multithread"] = True
+            self.nr += 1
+            if self.nr >= self.max_requests:
+                if self.alive:
+                    self.log.info("Autorestarting worker after current request.")
+                    self.alive = False
+                resp.force_close()
+
+            if not self.alive:
+                resp.force_close()
+            elif len(self._keep) >= self.max_keepalived:
+                resp.force_close()
+
+            respiter = self.wsgi(environ, resp.start_response)
+            try:
+                if isinstance(respiter, environ["wsgi.file_wrapper"]):
+                    resp.write_file(respiter)
+                else:
+                    for item in respiter:
+                        resp.write(item)
+
+                resp.close()
+            finally:
+                request_time = datetime.now() - request_start
+                self.log.access(resp, req, environ, request_time)
+                if hasattr(respiter, "close"):
+                    respiter.close()
+
+            if resp.should_close():
+                self.log.debug("Closing connection.")
+                return False
+        except OSError:
+            # pass to next try-except level
+            util.reraise(*sys.exc_info())
+        except Exception:
+            if resp and resp.headers_sent:
+                # If the requests have already been sent, we should close the
+                # connection to indicate the error.
+                self.log.exception("Error handling request")
+                try:
+                    conn.sock.shutdown(socket.SHUT_RDWR)
+                    conn.sock.close()
+                except OSError:
+                    pass
+                raise StopIteration()
+            raise
+
+        return True
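The design comment at the top of gthread.py explains the keepalive handling: a finished keepalive connection is stamped with an absolute expiry of time.time() + KEEPALIVE and appended to the _keep deque, so the deque stays ordered by expiry and murder_keepalived() can stop scanning at the first entry that has not yet timed out. A small self-contained sketch of that bookkeeping (FakeConn and the names here are illustrative, not part of the package):

import time
from collections import deque

KEEPALIVE = 2  # seconds, mirroring the constant in gthread.py


class FakeConn:
    """Stand-in for TConn; only tracks the keepalive expiry timestamp."""

    def __init__(self, name: str) -> None:
        self.name = name
        self.timeout: float | None = None

    def set_timeout(self) -> None:
        self.timeout = time.time() + KEEPALIVE


keep: deque[FakeConn] = deque()
for name in ("a", "b", "c"):
    conn = FakeConn(name)
    conn.set_timeout()
    keep.append(conn)  # append order == expiry order

now = time.time() + 10  # pretend 10 seconds have passed
while keep:
    conn = keep.popleft()
    if conn.timeout - now > 0:
        # First non-expired entry; everything behind it expires later, so stop.
        keep.appendleft(conn)
        break
    print(f"closing idle connection {conn.name}")

The real worker does the same popleft/appendleft pass under self._lock, and also unregisters the socket from the selector and decrements nr_conns before closing it.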