swift-2.23.3-py3-none-any.whl → swift-2.35.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swift/__init__.py +29 -50
- swift/account/auditor.py +21 -118
- swift/account/backend.py +33 -28
- swift/account/reaper.py +37 -28
- swift/account/replicator.py +22 -0
- swift/account/server.py +60 -26
- swift/account/utils.py +28 -11
- swift-2.23.3.data/scripts/swift-account-audit → swift/cli/account_audit.py +23 -13
- swift-2.23.3.data/scripts/swift-config → swift/cli/config.py +2 -2
- swift/cli/container_deleter.py +5 -11
- swift-2.23.3.data/scripts/swift-dispersion-populate → swift/cli/dispersion_populate.py +8 -7
- swift/cli/dispersion_report.py +10 -9
- swift-2.23.3.data/scripts/swift-drive-audit → swift/cli/drive_audit.py +63 -21
- swift/cli/form_signature.py +3 -7
- swift-2.23.3.data/scripts/swift-get-nodes → swift/cli/get_nodes.py +8 -2
- swift/cli/info.py +154 -14
- swift/cli/manage_shard_ranges.py +705 -37
- swift-2.23.3.data/scripts/swift-oldies → swift/cli/oldies.py +25 -14
- swift-2.23.3.data/scripts/swift-orphans → swift/cli/orphans.py +7 -3
- swift/cli/recon.py +196 -67
- swift-2.23.3.data/scripts/swift-recon-cron → swift/cli/recon_cron.py +17 -20
- swift-2.23.3.data/scripts/swift-reconciler-enqueue → swift/cli/reconciler_enqueue.py +2 -3
- swift/cli/relinker.py +807 -126
- swift/cli/reload.py +135 -0
- swift/cli/ringbuilder.py +217 -20
- swift/cli/ringcomposer.py +0 -1
- swift/cli/shard-info.py +4 -3
- swift/common/base_storage_server.py +9 -20
- swift/common/bufferedhttp.py +48 -74
- swift/common/constraints.py +20 -15
- swift/common/container_sync_realms.py +9 -11
- swift/common/daemon.py +25 -8
- swift/common/db.py +195 -128
- swift/common/db_auditor.py +168 -0
- swift/common/db_replicator.py +95 -55
- swift/common/digest.py +141 -0
- swift/common/direct_client.py +144 -33
- swift/common/error_limiter.py +93 -0
- swift/common/exceptions.py +25 -1
- swift/common/header_key_dict.py +2 -9
- swift/common/http_protocol.py +373 -0
- swift/common/internal_client.py +129 -59
- swift/common/linkat.py +3 -4
- swift/common/manager.py +284 -67
- swift/common/memcached.py +390 -145
- swift/common/middleware/__init__.py +4 -0
- swift/common/middleware/account_quotas.py +211 -46
- swift/common/middleware/acl.py +3 -8
- swift/common/middleware/backend_ratelimit.py +230 -0
- swift/common/middleware/bulk.py +22 -34
- swift/common/middleware/catch_errors.py +1 -3
- swift/common/middleware/cname_lookup.py +6 -11
- swift/common/middleware/container_quotas.py +1 -1
- swift/common/middleware/container_sync.py +39 -17
- swift/common/middleware/copy.py +12 -0
- swift/common/middleware/crossdomain.py +22 -9
- swift/common/middleware/crypto/__init__.py +2 -1
- swift/common/middleware/crypto/crypto_utils.py +11 -15
- swift/common/middleware/crypto/decrypter.py +28 -11
- swift/common/middleware/crypto/encrypter.py +12 -17
- swift/common/middleware/crypto/keymaster.py +8 -15
- swift/common/middleware/crypto/kms_keymaster.py +2 -1
- swift/common/middleware/dlo.py +15 -11
- swift/common/middleware/domain_remap.py +5 -4
- swift/common/middleware/etag_quoter.py +128 -0
- swift/common/middleware/formpost.py +73 -70
- swift/common/middleware/gatekeeper.py +8 -1
- swift/common/middleware/keystoneauth.py +33 -3
- swift/common/middleware/list_endpoints.py +4 -4
- swift/common/middleware/listing_formats.py +85 -49
- swift/common/middleware/memcache.py +4 -95
- swift/common/middleware/name_check.py +3 -2
- swift/common/middleware/proxy_logging.py +160 -92
- swift/common/middleware/ratelimit.py +17 -10
- swift/common/middleware/read_only.py +6 -4
- swift/common/middleware/recon.py +59 -22
- swift/common/middleware/s3api/acl_handlers.py +25 -3
- swift/common/middleware/s3api/acl_utils.py +6 -1
- swift/common/middleware/s3api/controllers/__init__.py +6 -0
- swift/common/middleware/s3api/controllers/acl.py +3 -2
- swift/common/middleware/s3api/controllers/bucket.py +242 -137
- swift/common/middleware/s3api/controllers/logging.py +2 -2
- swift/common/middleware/s3api/controllers/multi_delete.py +43 -20
- swift/common/middleware/s3api/controllers/multi_upload.py +219 -133
- swift/common/middleware/s3api/controllers/obj.py +112 -8
- swift/common/middleware/s3api/controllers/object_lock.py +44 -0
- swift/common/middleware/s3api/controllers/s3_acl.py +2 -2
- swift/common/middleware/s3api/controllers/tagging.py +57 -0
- swift/common/middleware/s3api/controllers/versioning.py +36 -7
- swift/common/middleware/s3api/etree.py +22 -9
- swift/common/middleware/s3api/exception.py +0 -4
- swift/common/middleware/s3api/s3api.py +113 -41
- swift/common/middleware/s3api/s3request.py +384 -218
- swift/common/middleware/s3api/s3response.py +126 -23
- swift/common/middleware/s3api/s3token.py +16 -17
- swift/common/middleware/s3api/schema/delete.rng +1 -1
- swift/common/middleware/s3api/subresource.py +7 -10
- swift/common/middleware/s3api/utils.py +27 -10
- swift/common/middleware/slo.py +665 -358
- swift/common/middleware/staticweb.py +64 -37
- swift/common/middleware/symlink.py +51 -18
- swift/common/middleware/tempauth.py +76 -58
- swift/common/middleware/tempurl.py +191 -173
- swift/common/middleware/versioned_writes/__init__.py +51 -0
- swift/common/middleware/{versioned_writes.py → versioned_writes/legacy.py} +27 -26
- swift/common/middleware/versioned_writes/object_versioning.py +1482 -0
- swift/common/middleware/x_profile/exceptions.py +1 -4
- swift/common/middleware/x_profile/html_viewer.py +18 -19
- swift/common/middleware/x_profile/profile_model.py +1 -2
- swift/common/middleware/xprofile.py +10 -10
- swift-2.23.3.data/scripts/swift-container-server → swift/common/recon.py +13 -8
- swift/common/registry.py +147 -0
- swift/common/request_helpers.py +324 -57
- swift/common/ring/builder.py +67 -25
- swift/common/ring/composite_builder.py +1 -1
- swift/common/ring/ring.py +177 -51
- swift/common/ring/utils.py +1 -1
- swift/common/splice.py +10 -6
- swift/common/statsd_client.py +205 -0
- swift/common/storage_policy.py +49 -44
- swift/common/swob.py +86 -102
- swift/common/{utils.py → utils/__init__.py} +2163 -2772
- swift/common/utils/base.py +131 -0
- swift/common/utils/config.py +433 -0
- swift/common/utils/ipaddrs.py +256 -0
- swift/common/utils/libc.py +345 -0
- swift/common/utils/logs.py +859 -0
- swift/common/utils/timestamp.py +412 -0
- swift/common/wsgi.py +553 -535
- swift/container/auditor.py +14 -100
- swift/container/backend.py +490 -231
- swift/container/reconciler.py +126 -37
- swift/container/replicator.py +96 -22
- swift/container/server.py +358 -165
- swift/container/sharder.py +1540 -684
- swift/container/sync.py +94 -88
- swift/container/updater.py +53 -32
- swift/obj/auditor.py +153 -35
- swift/obj/diskfile.py +466 -217
- swift/obj/expirer.py +406 -124
- swift/obj/mem_diskfile.py +7 -4
- swift/obj/mem_server.py +1 -0
- swift/obj/reconstructor.py +523 -262
- swift/obj/replicator.py +249 -188
- swift/obj/server.py +207 -122
- swift/obj/ssync_receiver.py +145 -85
- swift/obj/ssync_sender.py +113 -54
- swift/obj/updater.py +652 -139
- swift/obj/watchers/__init__.py +0 -0
- swift/obj/watchers/dark_data.py +213 -0
- swift/proxy/controllers/account.py +11 -11
- swift/proxy/controllers/base.py +848 -604
- swift/proxy/controllers/container.py +433 -92
- swift/proxy/controllers/info.py +3 -2
- swift/proxy/controllers/obj.py +1000 -489
- swift/proxy/server.py +185 -112
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/AUTHORS +58 -11
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/METADATA +51 -56
- swift-2.35.0.dist-info/RECORD +201 -0
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/WHEEL +1 -1
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/entry_points.txt +43 -0
- swift-2.35.0.dist-info/pbr.json +1 -0
- swift/locale/de/LC_MESSAGES/swift.po +0 -1216
- swift/locale/en_GB/LC_MESSAGES/swift.po +0 -1207
- swift/locale/es/LC_MESSAGES/swift.po +0 -1085
- swift/locale/fr/LC_MESSAGES/swift.po +0 -909
- swift/locale/it/LC_MESSAGES/swift.po +0 -894
- swift/locale/ja/LC_MESSAGES/swift.po +0 -965
- swift/locale/ko_KR/LC_MESSAGES/swift.po +0 -964
- swift/locale/pt_BR/LC_MESSAGES/swift.po +0 -881
- swift/locale/ru/LC_MESSAGES/swift.po +0 -891
- swift/locale/tr_TR/LC_MESSAGES/swift.po +0 -832
- swift/locale/zh_CN/LC_MESSAGES/swift.po +0 -833
- swift/locale/zh_TW/LC_MESSAGES/swift.po +0 -838
- swift-2.23.3.data/scripts/swift-account-auditor +0 -23
- swift-2.23.3.data/scripts/swift-account-info +0 -51
- swift-2.23.3.data/scripts/swift-account-reaper +0 -23
- swift-2.23.3.data/scripts/swift-account-replicator +0 -34
- swift-2.23.3.data/scripts/swift-account-server +0 -23
- swift-2.23.3.data/scripts/swift-container-auditor +0 -23
- swift-2.23.3.data/scripts/swift-container-info +0 -55
- swift-2.23.3.data/scripts/swift-container-reconciler +0 -21
- swift-2.23.3.data/scripts/swift-container-replicator +0 -34
- swift-2.23.3.data/scripts/swift-container-sharder +0 -37
- swift-2.23.3.data/scripts/swift-container-sync +0 -23
- swift-2.23.3.data/scripts/swift-container-updater +0 -23
- swift-2.23.3.data/scripts/swift-dispersion-report +0 -24
- swift-2.23.3.data/scripts/swift-form-signature +0 -20
- swift-2.23.3.data/scripts/swift-init +0 -119
- swift-2.23.3.data/scripts/swift-object-auditor +0 -29
- swift-2.23.3.data/scripts/swift-object-expirer +0 -33
- swift-2.23.3.data/scripts/swift-object-info +0 -60
- swift-2.23.3.data/scripts/swift-object-reconstructor +0 -33
- swift-2.23.3.data/scripts/swift-object-relinker +0 -41
- swift-2.23.3.data/scripts/swift-object-replicator +0 -37
- swift-2.23.3.data/scripts/swift-object-server +0 -27
- swift-2.23.3.data/scripts/swift-object-updater +0 -23
- swift-2.23.3.data/scripts/swift-proxy-server +0 -23
- swift-2.23.3.data/scripts/swift-recon +0 -24
- swift-2.23.3.data/scripts/swift-ring-builder +0 -24
- swift-2.23.3.data/scripts/swift-ring-builder-analyzer +0 -22
- swift-2.23.3.data/scripts/swift-ring-composer +0 -22
- swift-2.23.3.dist-info/RECORD +0 -220
- swift-2.23.3.dist-info/pbr.json +0 -1
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/LICENSE +0 -0
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/top_level.txt +0 -0
swift/common/wsgi.py
CHANGED
@@ -15,35 +15,38 @@
 
 """WSGI tools for use with swift."""
 
-from __future__ import print_function
 
 import errno
+import json
 import os
 import signal
-import
-
+import struct
+import sys
 from textwrap import dedent
+import time
+import warnings
 
 import eventlet
 import eventlet.debug
 from eventlet import greenio, GreenPool, sleep, wsgi, listen, Timeout
 from paste.deploy import loadwsgi
 from eventlet.green import socket, ssl, os as green_os
-import
-from six import BytesIO
-from six import StringIO
+from io import BytesIO, StringIO
 
 from swift.common import utils, constraints
+from swift.common.http_protocol import SwiftHttpProtocol, \
+    SwiftHttpProxiedProtocol
 from swift.common.storage_policy import BindPortsCache
-from swift.common.swob import Request,
-    wsgi_quote_plus, wsgi_unquote_plus, wsgi_to_bytes, bytes_to_wsgi
+from swift.common.swob import Request, wsgi_unquote
 from swift.common.utils import capture_stdio, disable_fallocate, \
     drop_privileges, get_logger, NullLogger, config_true_value, \
     validate_configuration, get_hub, config_auto_int_value, \
-    reiterate, NicerInterpolation
+    reiterate, clean_up_daemon_hygiene, systemd_notify, NicerInterpolation
 
 SIGNUM_TO_NAME = {getattr(signal, n): n for n in dir(signal)
                   if n.startswith('SIG') and '_' not in n}
+NOTIFY_FD_ENV_KEY = '__SWIFT_SERVER_NOTIFY_FD'
+CHILD_STATE_FD_ENV_KEY = '__SWIFT_SERVER_CHILD_STATE_FD'
 
 # Set maximum line size of message headers to be accepted.
 wsgi.MAX_HEADER_LINE = constraints.MAX_HEADER_SIZE
@@ -62,8 +65,7 @@ class NamedConfigLoader(loadwsgi.ConfigLoader):
     """
 
     def get_context(self, object_type, name=None, global_conf=None):
-
-            self.parser._interpolation = NicerInterpolation()
+        self.parser._interpolation = NicerInterpolation()
         context = super(NamedConfigLoader, self).get_context(
             object_type, name=name, global_conf=global_conf)
         context.name = name
@@ -117,13 +119,22 @@ class ConfigString(NamedConfigLoader):
         self.filename = "string"
         defaults = {
             'here': "string",
-            '__file__': self
+            '__file__': self,
         }
         self.parser = loadwsgi.NicerConfigParser("string", defaults=defaults)
         self.parser.optionxform = str  # Don't lower-case keys
         # Defaults don't need interpolation (crazy PasteDeploy...)
         self.parser.defaults = lambda: dict(self.parser._defaults, **defaults)
-        self.parser.
+        self.parser.read_file(self.contents)
+
+    def readline(self, *args, **kwargs):
+        return self.contents.readline(*args, **kwargs)
+
+    def seek(self, *args, **kwargs):
+        return self.contents.seek(*args, **kwargs)
+
+    def __iter__(self):
+        return iter(self.contents)
 
 
 def wrap_conf_type(f):
@@ -181,27 +192,29 @@ def get_socket(conf):
             sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)),
                           family=address_family)
             if 'cert_file' in conf:
+                context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
+                context.verify_mode = ssl.CERT_NONE
+                context.load_cert_chain(conf['cert_file'], conf['key_file'])
                 warn_ssl = True
-                sock =
-                    keyfile=conf['key_file'])
+                sock = context.wrap_socket(sock, server_side=True)
         except socket.error as err:
             if err.args[0] != errno.EADDRINUSE:
                 raise
             sleep(0.1)
     if not sock:
-        raise Exception(
-
-
-
+        raise Exception('Could not bind to %(addr)s:%(port)s '
+                        'after trying for %(timeout)s seconds' % {
+                            'addr': bind_addr[0], 'port': bind_addr[1],
+                            'timeout': bind_timeout})
     # in my experience, sockets can hang around forever without keepalive
     sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
     sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
     if hasattr(socket, 'TCP_KEEPIDLE'):
         sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, keepidle)
     if warn_ssl:
-        ssl_warning_message =
-
-
+        ssl_warning_message = ('WARNING: SSL should only be enabled for '
+                               'testing purposes. Use external SSL '
+                               'termination for a production deployment.')
         get_logger(conf).warning(ssl_warning_message)
         print(ssl_warning_message)
     return sock
@@ -223,47 +236,6 @@ class RestrictedGreenPool(GreenPool):
         self.waitall()
 
 
-def pipeline_property(name, **kwargs):
-    """
-    Create a property accessor for the given name. The property will
-    dig through the bound instance on which it was accessed for an
-    attribute "app" and check that object for an attribute of the given
-    name. If the "app" object does not have such an attribute, it will
-    look for an attribute "app" on THAT object and continue it's search
-    from there. If the named attribute cannot be found accessing the
-    property will raise AttributeError.
-
-    If a default kwarg is provided you get that instead of the
-    AttributeError. When found the attribute will be cached on instance
-    with the property accessor using the same name as the attribute
-    prefixed with a leading underscore.
-    """
-
-    cache_attr_name = '_%s' % name
-
-    def getter(self):
-        cached_value = getattr(self, cache_attr_name, None)
-        if cached_value:
-            return cached_value
-        app = self  # first app is on self
-        while True:
-            app = getattr(app, 'app', None)
-            if not app:
-                break
-            try:
-                value = getattr(app, name)
-            except AttributeError:
-                continue
-            setattr(self, cache_attr_name, value)
-            return value
-        if 'default' in kwargs:
-            return kwargs['default']
-        raise AttributeError('No apps in pipeline have a '
-                             '%s attribute' % name)
-
-    return property(getter)
-
-
 class PipelineWrapper(object):
     """
     This class provides a number of utility methods for
@@ -361,26 +333,53 @@ def loadcontext(object_type, uri, name=None, relative_to=None,
                                 global_conf=global_conf)
 
 
-def _add_pipeline_properties(app, *names):
-    for property_name in names:
-        if not hasattr(app, property_name):
-            setattr(app.__class__, property_name,
-                    pipeline_property(property_name))
-
-
 def loadapp(conf_file, global_conf=None, allow_modify_pipeline=True):
     """
     Loads a context from a config file, and if the context is a pipeline
     then presents the app with the opportunity to modify the pipeline.
+
+    :param conf_file: path to a config file
+    :param global_conf: a dict of options to update the loaded config. Options
+        in ``global_conf`` will override those in ``conf_file`` except where
+        the ``conf_file`` option is preceded by ``set``.
+    :param allow_modify_pipeline: if True, and the context is a pipeline, and
+        the loaded app has a ``modify_wsgi_pipeline`` property, then that
+        property will be called before the pipeline is loaded.
+    :return: the loaded app
     """
     global_conf = global_conf or {}
     ctx = loadcontext(loadwsgi.APP, conf_file, global_conf=global_conf)
     if ctx.object_type.name == 'pipeline':
         # give app the opportunity to modify the pipeline context
-
-        func = getattr(
+        ultimate_app = ctx.app_context.create()
+        func = getattr(ultimate_app, 'modify_wsgi_pipeline', None)
         if func and allow_modify_pipeline:
             func(PipelineWrapper(ctx))
+        filters = [c.create() for c in reversed(ctx.filter_contexts)]
+        pipeline = [ultimate_app]
+        request_logging_app = app = ultimate_app
+        for filter_app in filters:
+            app = filter_app(pipeline[0])
+            pipeline.insert(0, app)
+            if request_logging_app is ultimate_app and \
+                    app.__class__.__name__ == 'ProxyLoggingMiddleware':
+                request_logging_app = filter_app(ultimate_app)
+                # Set some separate-pipeline attrs
+                request_logging_app._pipeline = [
+                    request_logging_app, ultimate_app]
+                request_logging_app._pipeline_request_logging_app = \
+                    request_logging_app
+                request_logging_app._pipeline_final_app = ultimate_app
+
+        for app in pipeline:
+            app._pipeline = pipeline
+            # For things like making (logged) backend requests for
+            # get_account_info and get_container_info
+            app._pipeline_request_logging_app = request_logging_app
+            # For getting proxy-server options like *_existence_skip_cache_pct
+            app._pipeline_final_app = ultimate_app
+
+        return pipeline[0]
     return ctx.create()
 
 
@@ -402,216 +401,13 @@ def load_app_config(conf_file):
     return app_conf
 
 
-
-
-
-    def log_request(self, *a):
-        """
-        Turn off logging requests by the underlying WSGI software.
-        """
-        pass
-
-    def log_message(self, f, *a):
-        """
-        Redirect logging other messages by the underlying WSGI software.
-        """
-        logger = getattr(self.server.app, 'logger', None)
-        if logger:
-            logger.error('ERROR WSGI: ' + f, *a)
-        else:
-            # eventlet<=0.17.4 doesn't have an error method, and in newer
-            # versions the output from error is same as info anyway
-            self.server.log.info('ERROR WSGI: ' + f, *a)
-
-    class MessageClass(wsgi.HttpProtocol.MessageClass):
-        '''Subclass to see when the client didn't provide a Content-Type'''
-        # for py2:
-        def parsetype(self):
-            if self.typeheader is None:
-                self.typeheader = ''
-            wsgi.HttpProtocol.MessageClass.parsetype(self)
-
-        # for py3:
-        def get_default_type(self):
-            '''If the client didn't provide a content type, leave it blank.'''
-            return ''
-
-    def parse_request(self):
-        # Need to track the bytes-on-the-wire for S3 signatures -- eventlet
-        # would do it for us, but since we rewrite the path on py3, we need to
-        # fix it ourselves later.
-        self.__raw_path_info = None
-
-        if not six.PY2:
-            # request lines *should* be ascii per the RFC, but historically
-            # we've allowed (and even have func tests that use) arbitrary
-            # bytes. This breaks on py3 (see https://bugs.python.org/issue33973
-            # ) but the work-around is simple: munge the request line to be
-            # properly quoted.
-            if self.raw_requestline.count(b' ') >= 2:
-                parts = self.raw_requestline.split(b' ', 2)
-                path, q, query = parts[1].partition(b'?')
-                self.__raw_path_info = path
-                # unquote first, so we don't over-quote something
-                # that was *correctly* quoted
-                path = wsgi_to_bytes(wsgi_quote(wsgi_unquote(
-                    bytes_to_wsgi(path))))
-                query = b'&'.join(
-                    sep.join([
-                        wsgi_to_bytes(wsgi_quote_plus(wsgi_unquote_plus(
-                            bytes_to_wsgi(key)))),
-                        wsgi_to_bytes(wsgi_quote_plus(wsgi_unquote_plus(
-                            bytes_to_wsgi(val))))
-                    ])
-                    for part in query.split(b'&')
-                    for key, sep, val in (part.partition(b'='), ))
-                parts[1] = path + q + query
-                self.raw_requestline = b' '.join(parts)
-            # else, mangled protocol, most likely; let base class deal with it
-        return wsgi.HttpProtocol.parse_request(self)
-
-    if not six.PY2:
-        def get_environ(self, *args, **kwargs):
-            environ = wsgi.HttpProtocol.get_environ(self, *args, **kwargs)
-            environ['RAW_PATH_INFO'] = bytes_to_wsgi(
-                self.__raw_path_info)
-            header_payload = self.headers.get_payload()
-            if isinstance(header_payload, list) and len(header_payload) == 1:
-                header_payload = header_payload[0].get_payload()
-            if header_payload:
-                # This shouldn't be here. We must've bumped up against
-                # https://bugs.python.org/issue37093
-                headers_raw = list(environ['headers_raw'])
-                for line in header_payload.rstrip('\r\n').split('\n'):
-                    if ':' not in line or line[:1] in ' \t':
-                        # Well, we're no more broken than we were before...
-                        # Should we support line folding?
-                        # Should we 400 a bad header line?
-                        break
-                    header, value = line.split(':', 1)
-                    value = value.strip(' \t\n\r')
-                    # NB: Eventlet looks at the headers obj to figure out
-                    # whether the client said the connection should close;
-                    # see https://github.com/eventlet/eventlet/blob/v0.25.0/
-                    # eventlet/wsgi.py#L504
-                    self.headers.add_header(header, value)
-                    headers_raw.append((header, value))
-                    wsgi_key = 'HTTP_' + header.replace('-', '_').encode(
-                        'latin1').upper().decode('latin1')
-                    if wsgi_key in ('HTTP_CONTENT_LENGTH',
-                                    'HTTP_CONTENT_TYPE'):
-                        wsgi_key = wsgi_key[5:]
-                    environ[wsgi_key] = value
-                environ['headers_raw'] = tuple(headers_raw)
-                # Since we parsed some more headers, check to see if they
-                # change how our wsgi.input should behave
-                te = environ.get('HTTP_TRANSFER_ENCODING', '').lower()
-                if te.rsplit(',', 1)[-1].strip() == 'chunked':
-                    environ['wsgi.input'].chunked_input = True
-                else:
-                    length = environ.get('CONTENT_LENGTH')
-                    if length:
-                        length = int(length)
-                    environ['wsgi.input'].content_length = length
-                if environ.get('HTTP_EXPECT', '').lower() == '100-continue':
-                    environ['wsgi.input'].wfile = self.wfile
-                    environ['wsgi.input'].wfile_line = \
-                        b'HTTP/1.1 100 Continue\r\n'
-            return environ
-
-
-class SwiftHttpProxiedProtocol(SwiftHttpProtocol):
-    """
-    Protocol object that speaks HTTP, including multiple requests, but with
-    a single PROXY line as the very first thing coming in over the socket.
-    This is so we can learn what the client's IP address is when Swift is
-    behind a TLS terminator, like hitch, that does not understand HTTP and
-    so cannot add X-Forwarded-For or other similar headers.
-
-    See http://www.haproxy.org/download/1.7/doc/proxy-protocol.txt for
-    protocol details.
-    """
-    def __init__(self, *a, **kw):
-        self.proxy_address = None
-        SwiftHttpProtocol.__init__(self, *a, **kw)
-
-    def handle_error(self, connection_line):
-        if not six.PY2:
-            connection_line = connection_line.decode('latin-1')
-
-        # No further processing will proceed on this connection under any
-        # circumstances. We always send the request into the superclass to
-        # handle any cleanup - this ensures that the request will not be
-        # processed.
-        self.rfile.close()
-        # We don't really have any confidence that an HTTP Error will be
-        # processable by the client as our transmission broken down between
-        # ourselves and our gateway proxy before processing the client
-        # protocol request. Hopefully the operator will know what to do!
-        msg = 'Invalid PROXY line %r' % connection_line
-        self.log_message(msg)
-        # Even assuming HTTP we don't even known what version of HTTP the
-        # client is sending? This entire endeavor seems questionable.
-        self.request_version = self.default_request_version
-        # appease http.server
-        self.command = 'PROXY'
-        self.send_error(400, msg)
-
-    def handle(self):
-        """Handle multiple requests if necessary."""
-        # ensure the opening line for the connection is a valid PROXY protcol
-        # line; this is the only IO we do on this connection before any
-        # additional wrapping further pollutes the raw socket.
-        connection_line = self.rfile.readline(self.server.url_length_limit)
-
-        if not connection_line.startswith(b'PROXY '):
-            return self.handle_error(connection_line)
-
-        proxy_parts = connection_line.strip(b'\r\n').split(b' ')
-        if proxy_parts[1].startswith(b'UNKNOWN'):
-            # "UNKNOWN", in PROXY protocol version 1, means "not
-            # TCP4 or TCP6". This includes completely legitimate
-            # things like QUIC or Unix domain sockets. The PROXY
-            # protocol (section 2.1) states that the receiver
-            # (that's us) MUST ignore anything after "UNKNOWN" and
-            # before the CRLF, essentially discarding the first
-            # line.
-            pass
-        elif proxy_parts[1] in (b'TCP4', b'TCP6') and len(proxy_parts) == 6:
-            if six.PY2:
-                self.client_address = (proxy_parts[2], proxy_parts[4])
-                self.proxy_address = (proxy_parts[3], proxy_parts[5])
-            else:
-                self.client_address = (
-                    proxy_parts[2].decode('latin-1'),
-                    proxy_parts[4].decode('latin-1'))
-                self.proxy_address = (
-                    proxy_parts[3].decode('latin-1'),
-                    proxy_parts[5].decode('latin-1'))
-        else:
-            self.handle_error(connection_line)
-
-        return SwiftHttpProtocol.handle(self)
-
-    def get_environ(self, *args, **kwargs):
-        environ = SwiftHttpProtocol.get_environ(self, *args, **kwargs)
-        if self.proxy_address:
-            environ['SERVER_ADDR'] = self.proxy_address[0]
-            environ['SERVER_PORT'] = self.proxy_address[1]
-            if self.proxy_address[1] == '443':
-                environ['wsgi.url_scheme'] = 'https'
-                environ['HTTPS'] = 'on'
-        return environ
-
-
-def run_server(conf, logger, sock, global_conf=None):
+def run_server(conf, logger, sock, global_conf=None, ready_callback=None,
+               allow_modify_pipeline=True):
     # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
     # some platforms. This locks in reported times to UTC.
     os.environ['TZ'] = 'UTC+0'
     time.tzset()
 
-    wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)
-
     eventlet.hubs.use_hub(get_hub())
     eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
     eventlet.debug.hub_exceptions(eventlet_debug)
@@ -626,7 +422,8 @@ def run_server(conf, logger, sock, global_conf=None):
     else:
         log_name = logger.name
         global_conf = {'log_name': log_name}
-    app = loadapp(conf['__file__'], global_conf=global_conf
+    app = loadapp(conf['__file__'], global_conf=global_conf,
+                  allow_modify_pipeline=allow_modify_pipeline)
     max_clients = int(conf.get('max_clients', '1024'))
     pool = RestrictedGreenPool(size=max_clients)
 
@@ -640,24 +437,235 @@ def run_server(conf, logger, sock, global_conf=None):
     server_kwargs = {
         'custom_pool': pool,
         'protocol': protocol_class,
+        'socket_timeout': float(conf.get('client_timeout') or 60),
         # Disable capitalizing headers in Eventlet. This is necessary for
         # the AWS SDK to work with s3api middleware (it needs an "ETag"
         # header; "Etag" just won't do).
         'capitalize_response_headers': False,
     }
+    if conf.get('keepalive_timeout'):
+        server_kwargs['keepalive'] = float(conf['keepalive_timeout']) or False
+
+    if ready_callback:
+        ready_callback()
+    # Yes, eventlet, we know -- we have to support bad clients, though
+    warnings.filterwarnings(
+        'ignore', message='capitalize_response_headers is disabled')
     try:
         wsgi.server(sock, app, wsgi_logger, **server_kwargs)
     except socket.error as err:
         if err.errno != errno.EINVAL:
             raise
-
+    finally:
+        pool.waitall()
+        if hasattr(app._pipeline_final_app, 'watchdog'):
+            app._pipeline_final_app.watchdog.kill()
 
 
-class
+class StrategyBase(object):
+    """
+    Some operations common to all strategy classes.
+    """
+    def __init__(self, conf, logger):
+        self.conf = conf
+        self.logger = logger
+        self.signaled_ready = False
+
+        # Each strategy is welcome to track data however it likes, but all
+        # socket refs should be somewhere in this dict. This allows forked-off
+        # children to easily drop refs to sibling sockets in post_fork_hook().
+        self.tracking_data = {}
+
+        # When doing a seamless reload, we inherit a bunch of child processes
+        # that should all clean themselves up fairly quickly; track them here
+        self.reload_pids = dict()
+        # If they don't cleanup quickly, we'll start killing them after this
+        self.stale_worker_timeout = utils.non_negative_float(
+            conf.get('stale_worker_timeout', 86400))
+
+    def post_fork_hook(self):
+        """
+        Called in each forked-off child process, prior to starting the actual
+        wsgi server, to perform any initialization such as drop privileges.
+        """
+
+        if not self.signaled_ready:
+            capture_stdio(self.logger)
+        drop_privileges(self.conf.get('user', 'swift'))
+        del self.tracking_data  # children don't need to track siblings
+        # only MAINPID should be sending systemd notifications
+        os.environ.pop('NOTIFY_SOCKET', None)
+
+    def shutdown_sockets(self):
+        """
+        Shutdown any listen sockets.
+        """
+
+        for sock in self.iter_sockets():
+            greenio.shutdown_safe(sock)
+            sock.close()
+
+    def set_close_on_exec_on_listen_sockets(self):
+        """
+        Set the close-on-exec flag on any listen sockets.
+        """
+
+        for sock in self.iter_sockets():
+            # Python 3.4 and later default to sockets having close-on-exec
+            # set (what PEP 0446 calls "non-inheritable"). This new method
+            # on socket objects is provided to toggle it.
+            sock.set_inheritable(False)
+
+    def signal_ready(self):
+        """
+        Signal that the server is up and accepting connections.
+        """
+        if self.signaled_ready:
+            return  # Already did it
+
+        # Redirect errors to logger and close stdio. swift-init (for example)
+        # uses this to know that the service is ready to accept connections.
+        capture_stdio(self.logger)
+
+        # If necessary, signal an old copy of us that it's okay to shutdown
+        # its listen sockets now because ours are up and ready to receive
+        # connections. This is used for seamless reloading using SIGUSR1.
+        reexec_signal_fd = os.getenv(NOTIFY_FD_ENV_KEY)
+        if reexec_signal_fd:
+            if ',' in reexec_signal_fd:
+                reexec_signal_fd, worker_state_fd = reexec_signal_fd.split(',')
+            reexec_signal_fd = int(reexec_signal_fd)
+            os.write(reexec_signal_fd, str(os.getpid()).encode('utf8'))
+            os.close(reexec_signal_fd)
+        worker_state_fd = os.getenv(CHILD_STATE_FD_ENV_KEY)
+        try:
+            self.read_state_from_old_manager(worker_state_fd)
+        except Exception as e:
+            # This was all opportunistic anyway; old swift wouldn't even
+            # *try* to send us any state -- we don't want *new* code to
+            # fail just because *old* code didn't live up to its promise
+            self.logger.warning(
+                'Failed to read state from the old manager: %r', e,
+                exc_info=True)
+
+        # Finally, signal systemd (if appropriate) that process started
+        # properly.
+        systemd_notify(logger=self.logger)
+
+        self.signaled_ready = True
+
+    def read_state_from_old_manager(self, worker_state_fd):
+        """
+        Read worker state from the old manager's socket-closer.
+
+        The socket-closing process is the last thing to still have the worker
+        PIDs in its head, so it sends us a JSON dict (prefixed by its length)
+        of the form::
+
+            {
+                "old_pids": {
+                    "<old worker>": "<first reload time>",
+                    ...
+                }
+            }
+
+        More data may be added in the future.
+
+        :param worker_state_fd: The file descriptor that should have the
+                                old worker state. Should be passed to us
+                                via the ``__SWIFT_SERVER_CHILD_STATE_FD``
+                                environment variable.
+        """
+        if not worker_state_fd:
+            return
+        worker_state_fd = int(worker_state_fd)
+        try:
+            # The temporary manager may have up and died while trying to send
+            # state; hopefully its logs will have more about what went wrong
+            # -- let's just log at warning here
+            data_len = os.read(worker_state_fd, 4)
+            if len(data_len) != 4:
+                self.logger.warning(
+                    'Invalid worker state received; expected 4 bytes '
+                    'followed by a payload but only received %d bytes',
+                    len(data_len))
+                return
+
+            data_len = struct.unpack('!I', data_len)[0]
+            data = b''
+            while len(data) < data_len:
+                chunk = os.read(worker_state_fd, data_len - len(data))
+                if not chunk:
+                    break
+                data += chunk
+            if len(data) != data_len:
+                self.logger.warning(
+                    'Incomplete worker state received; expected %d '
+                    'bytes but only received %d', data_len, len(data))
+                return
+
+            # OK, the temporary manager was able to tell us how much it wanted
+            # to send and send it; from here on, error seems appropriate.
+            try:
+                old_state = json.loads(data)
+            except ValueError:
+                self.logger.error(
+                    'Invalid worker state received; '
+                    'invalid JSON: %r', data)
+                return
+
+            try:
+                old_pids = {
+                    int(pid): float(reloaded)
+                    for pid, reloaded in old_state["old_pids"].items()}
+            except (KeyError, TypeError) as err:
+                self.logger.error(
+                    'Invalid worker state received; '
+                    'error reading old pids: %s', err)
+            self.logger.debug('Received old worker pids: %s', old_pids)
+            self.reload_pids.update(old_pids)
+
+            def smother(old_pids=old_pids, timeout=self.stale_worker_timeout):
+                own_pid = os.getpid()
+                kill_times = sorted(((reloaded + timeout, pid)
+                                     for pid, reloaded in old_pids.items()),
+                                    reverse=True)
+                while kill_times:
+                    kill_time, pid = kill_times.pop()
+                    now = time.time()
+                    if kill_time > now:
+                        sleep(kill_time - now)
+                    try:
+                        ppid = utils.get_ppid(pid)
+                    except OSError as e:
+                        if e.errno != errno.ESRCH:
+                            self.logger.error("Could not determine parent "
+                                              "for stale pid %d: %s", pid, e)
+                        continue
+                    if ppid == own_pid:
+                        self.logger.notice("Killing long-running stale worker "
+                                           "%d after %ds", pid, int(timeout))
+                        try:
+                            os.kill(pid, signal.SIGKILL)
+                        except OSError as e:
+                            if e.errno != errno.ESRCH:
+                                self.logger.error(
+                                    "Could not kill stale pid %d: %s", pid, e)
+                        # else, pid got re-used?
+
+            eventlet.spawn_n(smother)
+
+        finally:
+            os.close(worker_state_fd)
+
+
+class WorkersStrategy(StrategyBase):
     """
     WSGI server management strategy object for a single bind port and listen
     socket shared by a configured number of forked-off workers.
 
+    Tracking data is a map of ``pid -> socket``.
+
     Used in :py:func:`run_wsgi`.
 
     :param dict conf: Server configuration dictionary.
@@ -666,10 +674,7 @@ class WorkersStrategy(object):
     """
 
    def __init__(self, conf, logger):
-        self.conf
-        self.logger = logger
-        self.sock = None
-        self.children = []
+        super(WorkersStrategy, self).__init__(conf, logger)
        self.worker_count = config_auto_int_value(conf.get('workers'),
                                                  CPU_COUNT)
 
@@ -684,20 +689,6 @@ class WorkersStrategy(object):
 
         return 0.5
 
-    def do_bind_ports(self):
-        """
-        Bind the one listen socket for this strategy and drop privileges
-        (since the parent process will never need to bind again).
-        """
-
-        try:
-            self.sock = get_socket(self.conf)
-        except ConfigFilePortError:
-            msg = 'bind_port wasn\'t properly set in the config file. ' \
-                'It must be explicitly set to a valid port number.'
-            return msg
-        drop_privileges(self.conf.get('user', 'swift'))
-
     def no_fork_sock(self):
         """
         Return a server listen socket if the server should run in the
@@ -706,7 +697,7 @@ class WorkersStrategy(object):
 
         # Useful for profiling [no forks].
         if self.worker_count == 0:
-            return self.
+            return get_socket(self.conf)
 
     def new_worker_socks(self):
         """
@@ -718,16 +709,8 @@ class WorkersStrategy(object):
         where it will be ignored.
         """
 
-        while len(self.
-            yield self.
-
-    def post_fork_hook(self):
-        """
-        Perform any initialization in a forked-off child process prior to
-        starting the wsgi server.
-        """
-
-        pass
+        while len(self.tracking_data) < self.worker_count:
+            yield get_socket(self.conf), None
 
     def log_sock_exit(self, sock, _unused):
         """
@@ -751,154 +734,55 @@ class WorkersStrategy(object):
 
         self.logger.notice('Started child %s from parent %s',
                            pid, os.getpid())
-        self.
+        self.tracking_data[pid] = sock
 
     def register_worker_exit(self, pid):
         """
         Called when a worker has exited.
 
-        :
-
-
-        self.logger.error('Removing dead child %s from parent %s',
-                          pid, os.getpid())
-        self.children.remove(pid)
-
-    def shutdown_sockets(self):
-        """
-        Shutdown any listen sockets.
-        """
-
-        greenio.shutdown_safe(self.sock)
-        self.sock.close()
-
-
-class PortPidState(object):
-    """
-    A helper class for :py:class:`ServersPerPortStrategy` to track listen
-    sockets and PIDs for each port.
-
-    :param int servers_per_port: The configured number of servers per port.
-    :param logger: The server's :py:class:`~swift.common.utils.LogAdaptor`
-    """
-
-    def __init__(self, servers_per_port, logger):
-        self.servers_per_port = servers_per_port
-        self.logger = logger
-        self.sock_data_by_port = {}
-
-    def sock_for_port(self, port):
-        """
-        :param int port: The port whose socket is desired.
-        :returns: The bound listen socket for the given port.
-        """
-
-        return self.sock_data_by_port[port]['sock']
-
-    def port_for_sock(self, sock):
-        """
-        :param socket sock: A tracked bound listen socket
-        :returns: The port the socket is bound to.
-        """
-
-        for port, sock_data in self.sock_data_by_port.items():
-            if sock_data['sock'] == sock:
-                return port
-
-    def _pid_to_port_and_index(self, pid):
-        for port, sock_data in self.sock_data_by_port.items():
-            for server_idx, a_pid in enumerate(sock_data['pids']):
-                if pid == a_pid:
-                    return port, server_idx
-
-    def port_index_pairs(self):
-        """
-        Returns current (port, server index) pairs.
-
-        :returns: A set of (port, server_idx) tuples for currently-tracked
-            ports, sockets, and PIDs.
-        """
-
-        current_port_index_pairs = set()
-        for port, pid_state in self.sock_data_by_port.items():
-            current_port_index_pairs |= set(
-                (port, i)
-                for i, pid in enumerate(pid_state['pids'])
-                if pid is not None)
-        return current_port_index_pairs
-
-    def track_port(self, port, sock):
-        """
-        Start tracking servers for the given port and listen socket.
-
-        :param int port: The port to start tracking
-        :param socket sock: The bound listen socket for the port.
-        """
-
-        self.sock_data_by_port[port] = {
-            'sock': sock,
-            'pids': [None] * self.servers_per_port,
-        }
-
-    def not_tracking(self, port):
-        """
-        Return True if the specified port is not being tracked.
+        NOTE: a re-exec'ed server can reap the dead worker PIDs from the old
+        server process that is being replaced as part of a service reload
+        (SIGUSR1). So we need to be robust to getting some unknown PID here.
 
-        :param int
+        :param int pid: The PID of the worker that exited.
         """
 
-
-
-
-
-        Yield all current listen sockets.
-        """
+        if self.reload_pids.pop(pid, None):
+            self.logger.notice('Removing stale child %d from parent %d',
+                               pid, os.getpid())
+            return
 
-
-
+        sock = self.tracking_data.pop(pid, None)
+        if sock is None:
+            self.logger.warning('Ignoring wait() result from unknown PID %d',
+                                pid)
+        else:
+            self.logger.error('Removing dead child %d from parent %d',
+                              pid, os.getpid())
+            greenio.shutdown_safe(sock)
+            sock.close()
 
-    def
+    def iter_sockets(self):
         """
-
+        Yields all known listen sockets.
         """
 
-
-
-        greenio.shutdown_safe(orphan_data['sock'])
-        orphan_data['sock'].close()
-        self.logger.notice('Closing unnecessary sock for port %d', port)
-
-    def add_pid(self, port, index, pid):
-        self.sock_data_by_port[port]['pids'][index] = pid
-
-    def forget_pid(self, pid):
-        """
-        Idempotently forget a PID. It's okay if the PID is no longer in our
-        data structure (it could have been removed by the "orphan port" removal
-        in :py:meth:`new_worker_socks`).
+        for sock in self.tracking_data.values():
+            yield sock
 
-
-
-
-        port_server_idx = self._pid_to_port_and_index(pid)
-        if port_server_idx is None:
-            # This method can lose a race with the "orphan port" removal, when
-            # a ring reload no longer contains a port. So it's okay if we were
-            # unable to find a (port, server_idx) pair.
-            return
-        dead_port, server_idx = port_server_idx
-        self.logger.error('Removing dead child %d (PID: %s) for port %s',
-                          server_idx, pid, dead_port)
-        self.sock_data_by_port[dead_port]['pids'][server_idx] = None
+    def get_worker_pids(self):
+        return list(self.tracking_data.keys())
 
 
-class ServersPerPortStrategy(
+class ServersPerPortStrategy(StrategyBase):
     """
     WSGI server management strategy object for an object-server with one listen
     port per unique local port in the storage policy rings. The
     `servers_per_port` integer config setting determines how many workers are
     run per port.
 
+    Tracking data is a map like ``port -> [(pid, socket), ...]``.
+
     Used in :py:func:`run_wsgi`.
 
     :param dict conf: Server configuration dictionary.
@@ -908,15 +792,16 @@ class ServersPerPortStrategy(object):
     """
 
     def __init__(self, conf, logger, servers_per_port):
-        self.conf
-        self.logger = logger
+        super(ServersPerPortStrategy, self).__init__(conf, logger)
         self.servers_per_port = servers_per_port
         self.swift_dir = conf.get('swift_dir', '/etc/swift')
-        self.ring_check_interval =
-        self.port_pid_state = PortPidState(servers_per_port, logger)
+        self.ring_check_interval = float(conf.get('ring_check_interval', 15))
 
-
-
+        # typically ring_ip will be the same as bind_ip, but in a container the
+        # bind_ip might be differnt than the host ip address used to lookup
+        # devices/ports in the ring
+        ring_ip = conf.get('ring_ip', conf.get('bind_ip', '0.0.0.0'))
+        self.cache = BindPortsCache(self.swift_dir, ring_ip)
 
     def _reload_bind_ports(self):
         self.bind_ports = self.cache.all_bind_ports_for_node()
@@ -924,8 +809,7 @@ class ServersPerPortStrategy(object):
     def _bind_port(self, port):
         new_conf = self.conf.copy()
         new_conf['bind_port'] = port
-
-        self.port_pid_state.track_port(port, sock)
+        return get_socket(new_conf)
 
     def loop_timeout(self):
         """
@@ -937,30 +821,6 @@ class ServersPerPortStrategy(object):
 
         return self.ring_check_interval
 
-    def do_bind_ports(self):
-        """
-        Bind one listen socket per unique local storage policy ring port. Then
-        do all the work of drop_privileges except the actual dropping of
-        privileges (each forked-off worker will do that post-fork in
-        :py:meth:`post_fork_hook`).
-        """
-
-        self._reload_bind_ports()
-        for port in self.bind_ports:
-            self._bind_port(port)
-
-        # The workers strategy drops privileges here, which we obviously cannot
-        # do if we want to support binding to low ports. But we do want some
-        # of the actions that drop_privileges did.
-        try:
-            os.setsid()
-        except OSError:
-            pass
-        # In case you need to rmdir where you started the daemon:
-        os.chdir('/')
-        # Ensure files are created with the correct privileges:
-        os.umask(0o22)
-
     def no_fork_sock(self):
         """
         This strategy does not support running in the foreground.
@@ -970,8 +830,8 @@ class ServersPerPortStrategy(object):
 
     def new_worker_socks(self):
         """
-        Yield a sequence of (socket, server_idx) tuples for each server
-        should be forked-off and started.
+        Yield a sequence of (socket, (port, server_idx)) tuples for each server
+        which should be forked-off and started.
 
         Any sockets for "orphaned" ports no longer in any ring will be closed
         (causing their associated workers to gracefully exit) after all new
@@ -982,11 +842,15 @@ class ServersPerPortStrategy(object):
         """
 
         self._reload_bind_ports()
-        desired_port_index_pairs =
+        desired_port_index_pairs = {
             (p, i) for p in self.bind_ports
-            for i in range(self.servers_per_port)
+            for i in range(self.servers_per_port)}
 
-        current_port_index_pairs =
+        current_port_index_pairs = {
+            (p, i)
+            for p, port_data in self.tracking_data.items()
+            for i, (pid, sock) in enumerate(port_data)
+            if pid is not None}
 
         if desired_port_index_pairs != current_port_index_pairs:
             # Orphan ports are ports which had object-server processes running,
@@ -995,56 +859,59 @@ class ServersPerPortStrategy(object):
             orphan_port_index_pairs = current_port_index_pairs - \
                 desired_port_index_pairs
 
-            # Fork off worker(s) for every port
+            # Fork off worker(s) for every port that's supposed to have
             # worker(s) but doesn't
             missing_port_index_pairs = desired_port_index_pairs - \
                 current_port_index_pairs
             for port, server_idx in sorted(missing_port_index_pairs):
-
-
-
-
-
-
-
-
-
-            for orphan_pair in orphan_port_index_pairs:
+                try:
+                    sock = self._bind_port(port)
+                except Exception as e:
+                    self.logger.critical('Unable to bind to port %d: %s',
+                                         port, e)
+                    continue
+                yield sock, (port, server_idx)
+
+            for port, idx in orphan_port_index_pairs:
                 # For any port in orphan_port_index_pairs, it is guaranteed
                 # that there should be no listen socket for that port, so we
                 # can close and forget them.
-                self.
-
-
-
-
-
-
-
-
-
+                pid, sock = self.tracking_data[port][idx]
+                greenio.shutdown_safe(sock)
+                sock.close()
+                self.logger.notice(
+                    'Closing unnecessary sock for port %d (child pid %d)',
+                    port, pid)
+                self.tracking_data[port][idx] = (None, None)
+                if all(sock is None
+                       for _pid, sock in self.tracking_data[port]):
+                    del self.tracking_data[port]
+
+    def log_sock_exit(self, sock, data):
         """
         Log a server's exit.
         """
 
-        port =
+        port, server_idx = data
         self.logger.notice('Child %d (PID %d, port %d) exiting normally',
                            server_idx, os.getpid(), port)
 
-    def register_worker_start(self, sock,
+    def register_worker_start(self, sock, data, pid):
         """
         Called when a new worker is started.
 
         :param socket sock: The listen socket for the worker just started.
-        :param
+        :param tuple data: The socket's (port, server_idx) as yielded by
             :py:meth:`new_worker_socks`.
         :param int pid: The new worker process' PID
         """
-
+
+        port, server_idx = data
         self.logger.notice('Started child %d (PID %d) for port %d',
                            server_idx, pid, port)
-
+        if port not in self.tracking_data:
+            self.tracking_data[port] = [(None, None)] * self.servers_per_port
+        self.tracking_data[port][server_idx] = (pid, sock)
 
     def register_worker_exit(self, pid):
         """
@@ -1053,36 +920,43 @@ class ServersPerPortStrategy(object):
         :param int pid: The PID of the worker that exited.
         """

-        self.
+        if self.reload_pids.pop(pid, None):
+            self.logger.notice('Removing stale child %d from parent %d',
+                               pid, os.getpid())
+            return
+
+        for port_data in self.tracking_data.values():
+            for idx, (child_pid, sock) in enumerate(port_data):
+                if child_pid == pid:
+                    self.logger.error('Removing dead child %d from parent %d',
+                                      pid, os.getpid())
+                    port_data[idx] = (None, None)
+                    greenio.shutdown_safe(sock)
+                    sock.close()
+                    return

+        self.logger.warning('Ignoring wait() result from unknown PID %d', pid)
+
+    def iter_sockets(self):
         """
+        Yields all known listen sockets.
         """

-        for
+        for port_data in self.tracking_data.values():
+            for _pid, sock in port_data:
+                yield sock

+    def get_worker_pids(self):
+        return [
+            pid
+            for port_data in self.tracking_data.values()
+            for pid, _sock in port_data]

-def run_wsgi(conf_path, app_section, *args, **kwargs):
-    """
-    Runs the server according to some strategy. The default strategy runs a
-    specified number of workers in pre-fork model. The object-server (only)
-    may use a servers-per-port strategy if its config has a servers_per_port
-    setting with a value greater than zero.

-    :param app_section: App name from conf file to load config from
-    :returns: 0 if successful, nonzero otherwise
-    """
+def check_config(conf_path, app_section, *args, **kwargs):
     # Load configuration, Set logger and Load request processor
-        (
-            _initrp(conf_path, app_section, *args, **kwargs)
-    except ConfigFileError as e:
-        print(e)
-        return 1
+    (conf, logger, log_name) = \
+        _initrp(conf_path, app_section, *args, **kwargs)

     # optional nice/ionice priority scheduling
     utils.modify_priority(conf, logger)
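
To make the new bookkeeping above easier to follow, here is a minimal standalone sketch of the per-port worker table that ServersPerPortStrategy keeps in self.tracking_data. The PortTracker class and its string "sockets" are illustrative stand-ins, not swift code; the real strategy also shuts down sockets and logs.

class PortTracker(object):
    """Toy model of self.tracking_data: port -> [(pid, sock), ...]."""

    def __init__(self, servers_per_port):
        self.servers_per_port = servers_per_port
        self.tracking_data = {}

    def register_worker_start(self, sock, data, pid):
        # data is (port, server_idx), as yielded by new_worker_socks()
        port, server_idx = data
        if port not in self.tracking_data:
            self.tracking_data[port] = [(None, None)] * self.servers_per_port
        self.tracking_data[port][server_idx] = (pid, sock)

    def register_worker_exit(self, pid):
        # Clear the slot that belonged to the exited worker, if any.
        for port_data in self.tracking_data.values():
            for idx, (child_pid, sock) in enumerate(port_data):
                if child_pid == pid:
                    port_data[idx] = (None, None)
                    return True
        return False


tracker = PortTracker(servers_per_port=2)
tracker.register_worker_start('sock-a', (6200, 0), pid=101)
tracker.register_worker_start('sock-b', (6200, 1), pid=102)
assert tracker.register_worker_exit(101)
assert tracker.tracking_data[6200] == [(None, None), (102, 'sock-b')]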
@@ -1097,9 +971,18 @@ def run_wsgi(conf_path, app_section, *args, **kwargs):
             conf, logger, servers_per_port=servers_per_port)
     else:
         strategy = WorkersStrategy(conf, logger)
+    try:
+        # Quick sanity check
+        if not (1 <= int(conf['bind_port']) <= 2 ** 16 - 1):
+            raise ValueError
+    except (ValueError, KeyError, TypeError):
+        error_msg = 'bind_port wasn\'t properly set in the config file. ' \
+            'It must be explicitly set to a valid port number.'
+        logger.error(error_msg)
+        raise ConfigFileError(error_msg)

     # patch event before loadapp
-    utils.
+    utils.monkey_patch()

     # Ensure the configuration and application can be loaded before proceeding.
     global_conf = {'log_name': log_name}
@@ -1110,21 +993,47 @@ def run_wsgi(conf_path, app_section, *args, **kwargs):
     # set utils.FALLOCATE_RESERVE if desired
     utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
         utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))
+    return conf, logger, global_conf, strategy

+
+def run_wsgi(conf_path, app_section, *args, **kwargs):
+    """
+    Runs the server according to some strategy. The default strategy runs a
+    specified number of workers in pre-fork model. The object-server (only)
+    may use a servers-per-port strategy if its config has a servers_per_port
+    setting with a value greater than zero.
+
+    :param conf_path: Path to paste.deploy style configuration file/directory
+    :param app_section: App name from conf file to load config from
+    :param allow_modify_pipeline: Boolean for whether the server should have
+        an opportunity to change its own pipeline.
+        Defaults to True
+    :param test_config: if False (the default) then load and validate the
+        config and if successful then continue to run the server; if True then
+        load and validate the config but do not run the server.
+    :returns: 0 if successful, nonzero otherwise
+    """
+    try:
+        conf, logger, global_conf, strategy = check_config(
+            conf_path, app_section, *args, **kwargs)
+    except ConfigFileError as err:
+        print(err)
         return 1

+    if kwargs.get('test_config'):
+        return 0
+
+    # Do some daemonization process hygene before we fork any children or run a
+    # server without forking.
+    clean_up_daemon_hygiene()

+    allow_modify_pipeline = kwargs.get('allow_modify_pipeline', True)
     no_fork_sock = strategy.no_fork_sock()
     if no_fork_sock:
-        run_server(conf, logger, no_fork_sock, global_conf=global_conf
+        run_server(conf, logger, no_fork_sock, global_conf=global_conf,
+                   ready_callback=strategy.signal_ready,
+                   allow_modify_pipeline=allow_modify_pipeline)
+        systemd_notify(logger, "STOPPING=1")
         return 0

     def stop_with_signal(signum, *args):
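
The refactor above splits "load and validate" out of "run", so a pure configuration check can reuse exactly the code path the real server uses. A minimal sketch of that pattern follows; the function names mirror the diff but the signatures are simplified and hypothetical (a plain dict instead of a paste.deploy config path):

def check_config(conf):
    # Stand-in for the real check_config(): validate the config and hand
    # back what run_wsgi() needs, raising if the config is unusable.
    port = int(conf.get('bind_port', 0))
    if not (1 <= port <= 2 ** 16 - 1):
        raise ValueError('bind_port must be a valid port number')
    return conf


def run_wsgi(conf, test_config=False):
    try:
        conf = check_config(conf)
    except ValueError as err:
        print(err)
        return 1
    if test_config:
        return 0  # config loads and validates; don't start the server
    # ... fork workers and serve ...
    return 0


assert run_wsgi({'bind_port': '6200'}, test_config=True) == 0
assert run_wsgi({}, test_config=True) == 1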
@@ -1136,20 +1045,49 @@ def run_wsgi(conf_path, app_section, *args, **kwargs):
     running_context = [True, None]
     signal.signal(signal.SIGTERM, stop_with_signal)
     signal.signal(signal.SIGHUP, stop_with_signal)
+    signal.signal(signal.SIGUSR1, stop_with_signal)

     while running_context[0]:
+        new_workers = {}  # pid -> status pipe
         for sock, sock_info in strategy.new_worker_socks():
+            read_fd, write_fd = os.pipe()
             pid = os.fork()
             if pid == 0:
+                os.close(read_fd)
                 signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+                def shutdown_my_listen_sock(signum, *args):
+                    greenio.shutdown_safe(sock)
+
+                signal.signal(signal.SIGHUP, shutdown_my_listen_sock)
+                signal.signal(signal.SIGUSR1, shutdown_my_listen_sock)
                 strategy.post_fork_hook()
+
+                def notify():
+                    os.write(write_fd, b'ready')
+                    os.close(write_fd)
+
+                run_server(conf, logger, sock, ready_callback=notify,
+                           allow_modify_pipeline=allow_modify_pipeline)
                 strategy.log_sock_exit(sock, sock_info)
                 return 0
             else:
+                os.close(write_fd)
+                new_workers[pid] = read_fd
                 strategy.register_worker_start(sock, sock_info, pid)

+        for pid, read_fd in new_workers.items():
+            worker_status = os.read(read_fd, 30)
+            os.close(read_fd)
+            if worker_status != b'ready':
+                raise Exception(
+                    'worker %d did not start normally: %r' %
+                    (pid, worker_status))
+
+        # TODO: signal_ready() as soon as we have at least one new worker for
+        # each port, instead of waiting for all of them
+        strategy.signal_ready()
+
         # The strategy may need to pay attention to something in addition to
         # child process exits (like new ports showing up in a ring).
         #
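
The worker-readiness handshake added in the hunk above can be exercised on its own: each forked worker inherits the write end of a pipe and writes b'ready' once it is serving, and the parent refuses to continue until every new worker has reported in. The following standalone, POSIX-only sketch is illustrative and not swift code (start_worker and fake_serve are hypothetical names):

import os


def start_worker(serve):
    # Fork a worker and return (pid, read_fd); the worker writes b'ready'
    # to the pipe once its serve() callback says it is accepting work.
    read_fd, write_fd = os.pipe()
    pid = os.fork()
    if pid == 0:
        os.close(read_fd)

        def notify():
            os.write(write_fd, b'ready')
            os.close(write_fd)

        serve(ready_callback=notify)
        os._exit(0)
    os.close(write_fd)
    return pid, read_fd


def fake_serve(ready_callback):
    # A real worker would bind its listen socket first, then report in.
    ready_callback()


pid, read_fd = start_worker(fake_serve)
worker_status = os.read(read_fd, 30)
os.close(read_fd)
os.waitpid(pid, 0)
assert worker_status == b'ready', \
    'worker %d did not start normally: %r' % (pid, worker_status)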
@@ -1187,9 +1125,88 @@ def run_wsgi(conf_path, app_section, *args, **kwargs):
             logger.error('Stopping with unexpected signal %r' %
                          running_context[1])
         else:
-            logger.
+            logger.notice('%s received (%s)', signame, os.getpid())
     if running_context[1] == signal.SIGTERM:
+        systemd_notify(logger, "STOPPING=1")
         os.killpg(0, signal.SIGTERM)
+    elif running_context[1] == signal.SIGUSR1:
+        systemd_notify(logger, "RELOADING=1")
+        # set up a pipe, fork off a child to handle cleanup later,
+        # and rexec ourselves with an environment variable set which will
+        # indicate which fd (one of the pipe ends) to write a byte to
+        # to indicate listen socket setup is complete. That will signal
+        # the forked-off child to complete its listen socket shutdown.
+        #
+        # NOTE: all strategies will now require the parent process to retain
+        # superuser privileges so that the re'execd process can bind a new
+        # socket to the configured IP & port(s). We can't just reuse existing
+        # listen sockets because then the bind IP couldn't be changed.
+        #
+        # NOTE: we need to set all our listen sockets close-on-exec so the only
+        # open reference to those file descriptors will be in the forked-off
+        # child here who waits to shutdown the old server's listen sockets. If
+        # the re-exec'ed server's old listen sockets aren't closed-on-exec,
+        # then the old server can't actually ever exit.
+        strategy.set_close_on_exec_on_listen_sockets()
+        read_fd, write_fd = os.pipe()
+        state_rfd, state_wfd = os.pipe()
+        orig_server_pid = os.getpid()
+        child_pid = os.fork()
+        if child_pid:
+            # parent; set env var for fds and reexec ourselves
+            os.close(read_fd)
+            os.close(state_wfd)
+            os.putenv(NOTIFY_FD_ENV_KEY, str(write_fd))
+            os.putenv(CHILD_STATE_FD_ENV_KEY, str(state_rfd))
+            myself = os.path.realpath(sys.argv[0])
+            logger.info("Old server PID=%d re'execing as: %r",
+                        orig_server_pid, [myself] + list(sys.argv))
+            if hasattr(os, 'set_inheritable'):
+                # See https://www.python.org/dev/peps/pep-0446/
+                os.set_inheritable(write_fd, True)
+                os.set_inheritable(state_rfd, True)
+            os.execv(myself, sys.argv)  # nosec B606
+            logger.error('Somehow lived past os.execv()?!')
+            exit('Somehow lived past os.execv()?!')
+        elif child_pid == 0:
+            # child
+            os.close(write_fd)
+            os.close(state_rfd)
+            logger.info('Old server temporary child PID=%d waiting for '
+                        "re-exec'ed PID=%d to signal readiness...",
+                        os.getpid(), orig_server_pid)
+            try:
+                got_pid = os.read(read_fd, 30)
+            except Exception:
+                logger.warning('Unexpected exception while reading from '
+                               'pipe:', exc_info=True)
+            else:
+                got_pid = got_pid.decode('ascii')
+                if got_pid:
+                    logger.info('Old server temporary child PID=%d notified '
+                                'to shutdown old listen sockets by PID=%s',
+                                os.getpid(), got_pid)
+                    # Ensure new process knows about old children
+                    stale_pids = dict(strategy.reload_pids)
+                    stale_pids[os.getpid()] = now = time.time()
+                    stale_pids.update({
+                        pid: now for pid in strategy.get_worker_pids()})
+                    data = json.dumps({
+                        "old_pids": stale_pids,
+                    }).encode('ascii')
+                    os.write(state_wfd, struct.pack('!I', len(data)) + data)
+                    os.close(state_wfd)
+                else:
+                    logger.warning('Old server temporary child PID=%d *NOT* '
+                                   'notified to shutdown old listen sockets; '
+                                   'the pipe just *died*.', os.getpid())
+            try:
+                os.close(read_fd)
+            except Exception:
+                pass
+    else:
+        # SIGHUP or, less likely, run in "once" mode
+        systemd_notify(logger, "STOPPING=1")

     strategy.shutdown_sockets()
     signal.signal(signal.SIGTERM, signal.SIG_IGN)
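
The SIGUSR1 branch above implements a seamless reload: the old server marks its listen sockets close-on-exec, leaves behind a temporary child holding them open, and re-execs itself; once the replacement process has bound its new sockets it writes its PID to an inherited pipe, which tells the temporary child to shut the old sockets down. The standalone, POSIX-only sketch below illustrates just the pipe handshake; it is not swift code, and the os.execv() step is deliberately replaced by the parent carrying on so the script is safe to run:

import os
import socket

old_sock = socket.socket()
old_sock.bind(('127.0.0.1', 0))
old_sock.listen(5)
os.set_inheritable(old_sock.fileno(), False)  # close-on-exec, as in the diff

read_fd, write_fd = os.pipe()
child_pid = os.fork()
if child_pid == 0:
    # Temporary child: keep the old listen socket open until notified.
    os.close(write_fd)
    got = os.read(read_fd, 30)
    print('child: new server PID %s is ready; closing old socket'
          % got.decode('ascii'))
    old_sock.close()
    os._exit(0)
else:
    # Parent: where swift would os.execv() itself, this sketch just pretends
    # the re-exec'ed server has finished binding its new listen socket.
    os.close(read_fd)
    new_sock = socket.socket()
    new_sock.bind(('127.0.0.1', 0))
    new_sock.listen(5)
    os.write(write_fd, str(os.getpid()).encode('ascii'))
    os.close(write_fd)
    os.waitpid(child_pid, 0)
    new_sock.close()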
@@ -1261,7 +1278,8 @@ class WSGIContext(object):
         Uses the same semantics as the usual WSGI start_response.
         """
         self._response_status = status
-        self._response_headers =
+        self._response_headers = \
+            headers if isinstance(headers, list) else list(headers)
         self._response_exc_info = exc_info

     def _app_call(self, env):
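
The header change above normalizes whatever iterable of (name, value) pairs the wrapped app passes to start_response into a real list, so WSGIContext can re-read and modify it later. A tiny illustrative sketch of the same normalization (the helper name is hypothetical):

def normalize_headers(headers):
    # Keep lists as-is; materialize any other iterable into a list.
    return headers if isinstance(headers, list) else list(headers)


assert normalize_headers([('Content-Type', 'text/plain')]) == \
    [('Content-Type', 'text/plain')]
assert normalize_headers((('X-Foo', 'bar'),)) == [('X-Foo', 'bar')]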