swift 2.32.0-py2.py3-none-any.whl → 2.34.0-py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swift/account/auditor.py +11 -0
- swift/account/reaper.py +11 -1
- swift/account/replicator.py +22 -0
- swift/account/server.py +13 -12
- swift-2.32.0.data/scripts/swift-account-audit → swift/cli/account_audit.py +6 -2
- swift-2.32.0.data/scripts/swift-config → swift/cli/config.py +1 -1
- swift-2.32.0.data/scripts/swift-dispersion-populate → swift/cli/dispersion_populate.py +6 -2
- swift-2.32.0.data/scripts/swift-drive-audit → swift/cli/drive_audit.py +12 -3
- swift-2.32.0.data/scripts/swift-get-nodes → swift/cli/get_nodes.py +6 -2
- swift/cli/info.py +131 -3
- swift-2.32.0.data/scripts/swift-oldies → swift/cli/oldies.py +6 -3
- swift-2.32.0.data/scripts/swift-orphans → swift/cli/orphans.py +7 -2
- swift-2.32.0.data/scripts/swift-recon-cron → swift/cli/recon_cron.py +9 -18
- swift-2.32.0.data/scripts/swift-reconciler-enqueue → swift/cli/reconciler_enqueue.py +2 -3
- swift/cli/relinker.py +1 -1
- swift/cli/reload.py +141 -0
- swift/cli/ringbuilder.py +24 -0
- swift/common/daemon.py +12 -2
- swift/common/db.py +14 -9
- swift/common/db_auditor.py +2 -2
- swift/common/db_replicator.py +6 -0
- swift/common/exceptions.py +12 -0
- swift/common/http_protocol.py +76 -3
- swift/common/manager.py +120 -5
- swift/common/memcached.py +24 -25
- swift/common/middleware/account_quotas.py +144 -43
- swift/common/middleware/backend_ratelimit.py +166 -24
- swift/common/middleware/catch_errors.py +1 -3
- swift/common/middleware/cname_lookup.py +3 -5
- swift/common/middleware/container_sync.py +6 -10
- swift/common/middleware/crypto/crypto_utils.py +4 -5
- swift/common/middleware/crypto/decrypter.py +4 -5
- swift/common/middleware/crypto/kms_keymaster.py +2 -1
- swift/common/middleware/proxy_logging.py +57 -43
- swift/common/middleware/ratelimit.py +6 -7
- swift/common/middleware/recon.py +6 -7
- swift/common/middleware/s3api/acl_handlers.py +10 -1
- swift/common/middleware/s3api/controllers/__init__.py +3 -0
- swift/common/middleware/s3api/controllers/acl.py +3 -2
- swift/common/middleware/s3api/controllers/logging.py +2 -2
- swift/common/middleware/s3api/controllers/multi_upload.py +31 -15
- swift/common/middleware/s3api/controllers/obj.py +20 -1
- swift/common/middleware/s3api/controllers/object_lock.py +44 -0
- swift/common/middleware/s3api/s3api.py +6 -0
- swift/common/middleware/s3api/s3request.py +190 -74
- swift/common/middleware/s3api/s3response.py +48 -8
- swift/common/middleware/s3api/s3token.py +2 -2
- swift/common/middleware/s3api/utils.py +2 -1
- swift/common/middleware/slo.py +508 -310
- swift/common/middleware/staticweb.py +45 -14
- swift/common/middleware/tempauth.py +6 -4
- swift/common/middleware/tempurl.py +134 -93
- swift/common/middleware/x_profile/exceptions.py +1 -4
- swift/common/middleware/x_profile/html_viewer.py +9 -10
- swift/common/middleware/x_profile/profile_model.py +1 -2
- swift/common/middleware/xprofile.py +1 -2
- swift/common/request_helpers.py +101 -8
- swift/common/statsd_client.py +207 -0
- swift/common/storage_policy.py +1 -1
- swift/common/swob.py +5 -2
- swift/common/utils/__init__.py +331 -1774
- swift/common/utils/base.py +138 -0
- swift/common/utils/config.py +443 -0
- swift/common/utils/logs.py +999 -0
- swift/common/utils/timestamp.py +23 -2
- swift/common/wsgi.py +19 -3
- swift/container/auditor.py +11 -0
- swift/container/backend.py +136 -31
- swift/container/reconciler.py +11 -2
- swift/container/replicator.py +64 -7
- swift/container/server.py +276 -146
- swift/container/sharder.py +86 -42
- swift/container/sync.py +11 -1
- swift/container/updater.py +12 -2
- swift/obj/auditor.py +20 -3
- swift/obj/diskfile.py +63 -25
- swift/obj/expirer.py +154 -47
- swift/obj/mem_diskfile.py +2 -1
- swift/obj/mem_server.py +1 -0
- swift/obj/reconstructor.py +28 -4
- swift/obj/replicator.py +63 -24
- swift/obj/server.py +76 -59
- swift/obj/updater.py +12 -2
- swift/obj/watchers/dark_data.py +72 -34
- swift/proxy/controllers/account.py +3 -2
- swift/proxy/controllers/base.py +254 -148
- swift/proxy/controllers/container.py +274 -289
- swift/proxy/controllers/obj.py +120 -166
- swift/proxy/server.py +17 -13
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/AUTHORS +14 -4
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/METADATA +9 -7
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/RECORD +97 -120
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/entry_points.txt +39 -0
- swift-2.34.0.dist-info/pbr.json +1 -0
- swift-2.32.0.data/scripts/swift-account-auditor +0 -23
- swift-2.32.0.data/scripts/swift-account-info +0 -52
- swift-2.32.0.data/scripts/swift-account-reaper +0 -23
- swift-2.32.0.data/scripts/swift-account-replicator +0 -34
- swift-2.32.0.data/scripts/swift-account-server +0 -23
- swift-2.32.0.data/scripts/swift-container-auditor +0 -23
- swift-2.32.0.data/scripts/swift-container-info +0 -56
- swift-2.32.0.data/scripts/swift-container-reconciler +0 -21
- swift-2.32.0.data/scripts/swift-container-replicator +0 -34
- swift-2.32.0.data/scripts/swift-container-server +0 -23
- swift-2.32.0.data/scripts/swift-container-sharder +0 -37
- swift-2.32.0.data/scripts/swift-container-sync +0 -23
- swift-2.32.0.data/scripts/swift-container-updater +0 -23
- swift-2.32.0.data/scripts/swift-dispersion-report +0 -24
- swift-2.32.0.data/scripts/swift-form-signature +0 -20
- swift-2.32.0.data/scripts/swift-init +0 -119
- swift-2.32.0.data/scripts/swift-object-auditor +0 -29
- swift-2.32.0.data/scripts/swift-object-expirer +0 -33
- swift-2.32.0.data/scripts/swift-object-info +0 -60
- swift-2.32.0.data/scripts/swift-object-reconstructor +0 -33
- swift-2.32.0.data/scripts/swift-object-relinker +0 -23
- swift-2.32.0.data/scripts/swift-object-replicator +0 -37
- swift-2.32.0.data/scripts/swift-object-server +0 -27
- swift-2.32.0.data/scripts/swift-object-updater +0 -23
- swift-2.32.0.data/scripts/swift-proxy-server +0 -23
- swift-2.32.0.data/scripts/swift-recon +0 -24
- swift-2.32.0.data/scripts/swift-ring-builder +0 -37
- swift-2.32.0.data/scripts/swift-ring-builder-analyzer +0 -22
- swift-2.32.0.data/scripts/swift-ring-composer +0 -22
- swift-2.32.0.dist-info/pbr.json +0 -1
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/LICENSE +0 -0
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/WHEEL +0 -0
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/top_level.txt +0 -0
swift/proxy/controllers/base.py
CHANGED
@@ -39,12 +39,13 @@ from sys import exc_info
 from eventlet.timeout import Timeout
 import six
 
+from swift.common.memcached import MemcacheConnectionError
 from swift.common.wsgi import make_pre_authed_env, make_pre_authed_request
 from swift.common.utils import Timestamp, WatchdogTimeout, config_true_value, \
     public, split_path, list_from_csv, GreenthreadSafeIterator, \
     GreenAsyncPile, quorum_size, parse_content_type, drain_and_close, \
-    document_iters_to_http_response_body,
-
+    document_iters_to_http_response_body, cache_from_env, \
+    CooperativeIterator, NamespaceBoundList, Namespace, ClosingMapper
 from swift.common.bufferedhttp import http_connect
 from swift.common import constraints
 from swift.common.exceptions import ChunkReadTimeout, ChunkWriteTimeout, \
@@ -423,9 +424,9 @@ def _record_ac_info_cache_metrics(
         logger = None
     else:
         logger = proxy_app.logger
-
+    server_type = 'container' if container else 'account'
     if logger:
-        record_cache_op_metrics(logger,
+        record_cache_op_metrics(logger, server_type, 'info', cache_state, resp)
 
 
 def get_container_info(env, app, swift_source=None, cache_only=False):
@@ -479,8 +480,7 @@ def get_container_info(env, app, swift_source=None, cache_only=False):
     # account is successful whether the account actually has .db files
     # on disk or not.
     is_autocreate_account = account.startswith(
-
-        constraints.AUTO_CREATE_ACCOUNT_PREFIX))
+        constraints.AUTO_CREATE_ACCOUNT_PREFIX)
     if not is_autocreate_account:
         account_info = get_account_info(env, logged_app, swift_source)
         if not account_info or not is_success(account_info['status']):
@@ -774,11 +774,12 @@ def _get_info_from_infocache(env, account, container=None):
 
 
 def record_cache_op_metrics(
-        logger, op_type, cache_state, resp=None):
+        logger, server_type, op_type, cache_state, resp=None):
     """
     Record a single cache operation into its corresponding metrics.
 
     :param logger: the metrics logger
+    :param server_type: 'account' or 'container'
     :param op_type: the name of the operation type, includes 'shard_listing',
         'shard_updating', and etc.
     :param cache_state: the state of this cache operation. When it's
@@ -787,21 +788,23 @@ def record_cache_op_metrics(
         which will make to backend, expect a valid 'resp'.
     :param resp: the response from backend for all cases except cache hits.
     """
+    server_type = server_type.lower()
     if cache_state == 'infocache_hit':
-        logger.increment('%s.infocache.hit' % op_type)
+        logger.increment('%s.%s.infocache.hit' % (server_type, op_type))
     elif cache_state == 'hit':
         # memcache hits.
-        logger.increment('%s.cache.hit' % op_type)
+        logger.increment('%s.%s.cache.hit' % (server_type, op_type))
     else:
         # the cases of cache_state is memcache miss, error, skip, force_skip
         # or disabled.
         if resp:
-            logger.increment(
-
+            logger.increment('%s.%s.cache.%s.%d' % (
+                server_type, op_type, cache_state, resp.status_int))
         else:
             # In some situation, we choose not to lookup backend after cache
             # miss.
-            logger.increment('%s.cache.%s' % (
+            logger.increment('%s.%s.cache.%s' % (
+                server_type, op_type, cache_state))
 
 
 def _get_info_from_memcache(app, env, account, container=None):
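
With the new `server_type` argument, cache metrics are now emitted per server type. A minimal sketch (illustrative only, not part of the diff) of the metric names the format strings above produce; the inputs are example values:

```python
# Illustrative sketch: reproduces the metric names built by the new format
# strings in record_cache_op_metrics(). Inputs below are example values.
def cache_metric_name(server_type, op_type, cache_state, status_int=None):
    server_type = server_type.lower()
    if cache_state == 'infocache_hit':
        return '%s.%s.infocache.hit' % (server_type, op_type)
    elif cache_state == 'hit':
        return '%s.%s.cache.hit' % (server_type, op_type)
    elif status_int is not None:
        # a backend response was made after the cache miss/skip/error
        return '%s.%s.cache.%s.%d' % (server_type, op_type, cache_state, status_int)
    else:
        # no backend lookup was made after the cache miss
        return '%s.%s.cache.%s' % (server_type, op_type, cache_state)


print(cache_metric_name('container', 'info', 'hit'))         # container.info.cache.hit
print(cache_metric_name('account', 'info', 'miss', 200))     # account.info.cache.miss.200
print(cache_metric_name('container', 'shard_listing', 'skip'))  # container.shard_listing.cache.skip
```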
@@ -886,6 +889,77 @@ def _get_info_from_caches(app, env, account, container=None):
     return info, cache_state
 
 
+def get_namespaces_from_cache(req, cache_key, skip_chance):
+    """
+    Get cached namespaces from infocache or memcache.
+
+    :param req: a :class:`swift.common.swob.Request` object.
+    :param cache_key: the cache key for both infocache and memcache.
+    :param skip_chance: the probability of skipping the memcache look-up.
+    :return: a tuple of (value, cache state). Value is an instance of
+        :class:`swift.common.utils.NamespaceBoundList` if a non-empty list is
+        found in memcache. Otherwise value is ``None``, for example if memcache
+        look-up was skipped, or no value was found, or an empty list was found.
+    """
+    # try get namespaces from infocache first
+    infocache = req.environ.setdefault('swift.infocache', {})
+    ns_bound_list = infocache.get(cache_key)
+    if ns_bound_list:
+        return ns_bound_list, 'infocache_hit'
+
+    # then try get them from memcache
+    memcache = cache_from_env(req.environ, True)
+    if not memcache:
+        return None, 'disabled'
+    if skip_chance and random.random() < skip_chance:
+        return None, 'skip'
+    try:
+        bounds = memcache.get(cache_key, raise_on_error=True)
+        cache_state = 'hit' if bounds else 'miss'
+    except MemcacheConnectionError:
+        bounds = None
+        cache_state = 'error'
+
+    if bounds:
+        if six.PY2:
+            # json.loads() in memcache.get will convert json 'string' to
+            # 'unicode' with python2, here we cast 'unicode' back to 'str'
+            bounds = [
+                [lower.encode('utf-8'), name.encode('utf-8')]
+                for lower, name in bounds]
+        ns_bound_list = NamespaceBoundList(bounds)
+        infocache[cache_key] = ns_bound_list
+    else:
+        ns_bound_list = None
+    return ns_bound_list, cache_state
+
+
+def set_namespaces_in_cache(req, cache_key, ns_bound_list, time):
+    """
+    Set a list of namespace bounds in infocache and memcache.
+
+    :param req: a :class:`swift.common.swob.Request` object.
+    :param cache_key: the cache key for both infocache and memcache.
+    :param ns_bound_list: a :class:`swift.common.utils.NamespaceBoundList`.
+    :param time: how long the namespaces should remain in memcache.
+    :return: the cache_state.
+    """
+    infocache = req.environ.setdefault('swift.infocache', {})
+    infocache[cache_key] = ns_bound_list
+    memcache = cache_from_env(req.environ, True)
+    if memcache and ns_bound_list:
+        try:
+            memcache.set(cache_key, ns_bound_list.bounds, time=time,
+                         raise_on_error=True)
+        except MemcacheConnectionError:
+            cache_state = 'set_error'
+        else:
+            cache_state = 'set'
+    else:
+        cache_state = 'disabled'
+    return cache_state
+
+
 def _prepare_pre_auth_info_request(env, path, swift_source):
     """
     Prepares a pre authed request to obtain info using a HEAD.
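
A minimal usage sketch for the two helpers added above (illustrative; the `backend_fetch` callable, cache key and TTL are assumptions for the example, not taken from the diff):

```python
from swift.common.utils import NamespaceBoundList
from swift.proxy.controllers.base import (
    get_namespaces_from_cache, set_namespaces_in_cache)


def lookup_namespaces(req, backend_fetch, cache_key, skip_chance=0.1, ttl=600):
    # first consult swift.infocache, then memcache
    ns_bound_list, cache_state = get_namespaces_from_cache(
        req, cache_key, skip_chance)
    if ns_bound_list is None:
        # miss/skip/error/disabled: fetch [lower, name] bounds from the
        # backend (backend_fetch stands in for the real listing call),
        # then populate both caches for later requests
        ns_bound_list = NamespaceBoundList(backend_fetch())
        cache_state = set_namespaces_in_cache(req, cache_key, ns_bound_list, ttl)
    return ns_bound_list, cache_state
```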
@@ -1033,6 +1107,18 @@ def is_good_source(status, server_type):
     return is_success(status) or is_redirection(status)
 
 
+def is_useful_response(resp, node):
+    if not resp:
+        return False
+    if ('handoff_index' in node
+            and resp.status == 404
+            and resp.headers.get('x-backend-timestamp') is None):
+        # a 404 from a handoff are not considered authoritative unless they
+        # have an x-backend-timestamp that indicates that there is a tombstone
+        return False
+    return True
+
+
 class ByteCountEnforcer(object):
     """
     Enforces that successive calls to file_like.read() give at least
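
This helper is what later lets `make_requests()` ignore non-authoritative 404s from handoff nodes. A small illustration (the node dicts and response class below are made up for the example):

```python
from swift.proxy.controllers.base import is_useful_response


class FakeResponse(object):
    def __init__(self, status, headers=None):
        self.status = status
        self.headers = headers or {}


primary = {'ip': '10.0.0.1', 'port': 6201, 'device': 'sda'}
handoff = {'ip': '10.0.0.9', 'port': 6201, 'device': 'sdb', 'handoff_index': 0}

print(is_useful_response(FakeResponse(404), primary))   # True: a primary 404 counts
print(is_useful_response(FakeResponse(404), handoff))   # False: handoff 404, no tombstone
print(is_useful_response(
    FakeResponse(404, {'x-backend-timestamp': '0000001234.00000'}),
    handoff))                                            # True: tombstone timestamp present
```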
@@ -1065,6 +1151,14 @@ class ByteCountEnforcer(object):
 
 
 class GetterSource(object):
+    """
+    Encapsulates properties of a source from which a GET response is read.
+
+    :param app: a proxy app.
+    :param resp: an instance of ``HTTPResponse``.
+    :param node: a dict describing the node from which the response was
+        returned.
+    """
     __slots__ = ('app', 'resp', 'node', '_parts_iter')
 
     def __init__(self, app, resp, node):
@@ -1101,8 +1195,26 @@ class GetterSource(object):
 
 
 class GetterBase(object):
+    """
+    This base class provides helper methods for handling GET requests to
+    backend servers.
+
+    :param app: a proxy app.
+    :param req: an instance of ``swob.Request``.
+    :param node_iter: an iterator yielding nodes.
+    :param partition: partition.
+    :param policy: the policy instance, or None if Account or Container.
+    :param path: path for the request.
+    :param backend_headers: a dict of headers to be sent with backend requests.
+    :param node_timeout: the timeout value for backend requests.
+    :param resource_type: a string description of the type of resource being
+        accessed; ``resource type`` is used in logs and isn't necessarily the
+        server type.
+    :param logger: a logger instance.
+    """
     def __init__(self, app, req, node_iter, partition, policy,
-                 path, backend_headers,
+                 path, backend_headers, node_timeout, resource_type,
+                 logger=None):
         self.app = app
         self.req = req
         self.node_iter = node_iter
@@ -1110,6 +1222,9 @@ class GetterBase(object):
         self.policy = policy
         self.path = path
         self.backend_headers = backend_headers
+        # resource type is used in logs and isn't necessarily the server type
+        self.resource_type = resource_type
+        self.node_timeout = node_timeout
         self.logger = logger or app.logger
         self.bytes_used_from_backend = 0
         self.source = None
@@ -1122,18 +1237,44 @@ class GetterBase(object):
         :return: ``True`` if ``self.source`` has been updated, ``False``
             otherwise.
         """
-        # Subclasses must implement this method
+        # Subclasses must implement this method, but _replace_source should be
+        # called to get a source installed
         raise NotImplementedError()
 
-    def _replace_source(self, err_msg):
-
-
-
-
+    def _replace_source(self, err_msg=''):
+        if self.source:
+            self.app.error_occurred(self.source.node, err_msg)
+            self.source.close()
+        return self._find_source()
 
-
-
-
+    def _get_next_response_part(self):
+        # return the next part of the response body; there may only be one part
+        # unless it's a multipart/byteranges response
+        while True:
+            # the loop here is to resume if trying to parse
+            # multipart/byteranges response raises a ChunkReadTimeout
+            # and resets the source_parts_iter
+            try:
+                with WatchdogTimeout(self.app.watchdog, self.node_timeout,
+                                     ChunkReadTimeout):
+                    # If we don't have a multipart/byteranges response,
+                    # but just a 200 or a single-range 206, then this
+                    # performs no IO, and either just returns source or
+                    # raises StopIteration.
+                    # Otherwise, this call to next() performs IO when
+                    # we have a multipart/byteranges response, as it
+                    # will read the MIME boundary and part headers. In this
+                    # case, ChunkReadTimeout may also be raised.
+                    # If StopIteration is raised, it escapes and is
+                    # handled elsewhere.
+                    start_byte, end_byte, length, headers, part = next(
+                        self.source.parts_iter)
+                    return (start_byte, end_byte, length, headers, part)
+            except ChunkReadTimeout:
+                if not self._replace_source(
+                        'Trying to read next part of %s multi-part GET '
+                        '(retrying)' % self.resource_type):
+                    raise
 
     def fast_forward(self, num_bytes):
         """
@@ -1240,32 +1381,43 @@ class GetterBase(object):
 
 
 class GetOrHeadHandler(GetterBase):
+    """
+    Handles GET requests to backend servers.
+
+    :param app: a proxy app.
+    :param req: an instance of ``swob.Request``.
+    :param server_type: server type used in logging
+    :param node_iter: an iterator yielding nodes.
+    :param partition: partition.
+    :param path: path for the request.
+    :param backend_headers: a dict of headers to be sent with backend requests.
+    :param concurrency: number of requests to run concurrently.
+    :param policy: the policy instance, or None if Account or Container.
+    :param logger: a logger instance.
+    """
     def __init__(self, app, req, server_type, node_iter, partition, path,
-                 backend_headers, concurrency=1, policy=None,
-
+                 backend_headers, concurrency=1, policy=None, logger=None):
+        newest = config_true_value(req.headers.get('x-newest', 'f'))
+        if server_type == 'Object' and not newest:
+            node_timeout = app.recoverable_node_timeout
+        else:
+            node_timeout = app.node_timeout
         super(GetOrHeadHandler, self).__init__(
-            app=app, req=req, node_iter=node_iter,
-
-
+            app=app, req=req, node_iter=node_iter, partition=partition,
+            policy=policy, path=path, backend_headers=backend_headers,
+            node_timeout=node_timeout, resource_type=server_type.lower(),
+            logger=logger)
+        self.newest = newest
         self.server_type = server_type
         self.used_nodes = []
         self.used_source_etag = None
         self.concurrency = concurrency
         self.latest_404_timestamp = Timestamp(0)
-        if self.server_type == 'Object':
-            self.node_timeout = self.app.recoverable_node_timeout
-        else:
-            self.node_timeout = self.app.node_timeout
         policy_options = self.app.get_policy_options(self.policy)
         self.rebalance_missing_suppression_count = min(
             policy_options.rebalance_missing_suppression_count,
             node_iter.num_primary_nodes - 1)
 
-        if newest is None:
-            self.newest = config_true_value(req.headers.get('x-newest', 'f'))
-        else:
-            self.newest = newest
-
         # populated when finding source
         self.statuses = []
         self.reasons = []
@@ -1276,31 +1428,6 @@ class GetOrHeadHandler(GetterBase):
         # populated from response headers
         self.start_byte = self.end_byte = self.length = None
 
-    def _get_next_response_part(self):
-        # return the next part of the response body; there may only be one part
-        # unless it's a multipart/byteranges response
-        while True:
-            try:
-                # This call to next() performs IO when we have a
-                # multipart/byteranges response; it reads the MIME
-                # boundary and part headers.
-                #
-                # If we don't have a multipart/byteranges response,
-                # but just a 200 or a single-range 206, then this
-                # performs no IO, and either just returns source or
-                # raises StopIteration.
-                with WatchdogTimeout(self.app.watchdog, self.node_timeout,
-                                     ChunkReadTimeout):
-                    # if StopIteration is raised, it escapes and is
-                    # handled elsewhere
-                    start_byte, end_byte, length, headers, part = next(
-                        self.source.parts_iter)
-                    return (start_byte, end_byte, length, headers, part)
-            except ChunkReadTimeout:
-                if not self._replace_source(
-                        'Trying to read object during GET (retrying)'):
-                    raise StopIteration()
-
     def _iter_bytes_from_response_part(self, part_file, nbytes):
         # yield chunks of bytes from a single response part; if an error
         # occurs, try to resume yielding bytes from a different source
@@ -1345,7 +1472,7 @@ class GetOrHeadHandler(GetterBase):
                 self.bytes_used_from_backend += len(chunk)
                 yield chunk
 
-    def _iter_parts_from_response(self
+    def _iter_parts_from_response(self):
         # iterate over potentially multiple response body parts; for each
         # part, yield an iterator over the part's bytes
         try:
@@ -1370,20 +1497,17 @@ class GetOrHeadHandler(GetterBase):
                        'part_iter': part_iter}
                 self.pop_range()
             except StopIteration:
-                req.environ['swift.non_client_disconnect'] = True
+                self.req.environ['swift.non_client_disconnect'] = True
             finally:
                 if part_iter:
                     part_iter.close()
 
-        except ChunkReadTimeout:
-            self.app.exception_occurred(self.source.node, 'Object',
-                                        'Trying to read during GET')
-            raise
         except ChunkWriteTimeout:
             self.logger.info(
                 'Client did not read from proxy within %ss',
                 self.app.client_timeout)
-            self.logger.increment('client_timeouts'
+            self.logger.increment('%s.client_timeouts' %
+                                  self.server_type.lower())
         except GeneratorExit:
             warn = True
             req_range = self.backend_headers['Range']
@@ -1394,7 +1518,8 @@ class GetOrHeadHandler(GetterBase):
             if end is not None and begin is not None:
                 if end - begin + 1 == self.bytes_used_from_backend:
                     warn = False
-            if
+            if (warn and
+                    not self.req.environ.get('swift.non_client_disconnect')):
                 self.logger.info('Client disconnected on read of %r',
                                  self.path)
             raise
@@ -1418,7 +1543,7 @@ class GetOrHeadHandler(GetterBase):
         else:
             return None
 
-    def _make_node_request(self, node,
+    def _make_node_request(self, node, logger_thread_locals):
         # make a backend request; return True if the response is deemed good
         # (has an acceptable status code), useful (matches any previously
         # discovered etag) and sufficient (a single good response is
@@ -1426,6 +1551,7 @@ class GetOrHeadHandler(GetterBase):
         self.logger.thread_locals = logger_thread_locals
         if node in self.used_nodes:
             return False
+
         req_headers = dict(self.backend_headers)
         ip, port = get_ip_port(node, req_headers)
         start_node_timing = time.time()
@@ -1438,7 +1564,7 @@ class GetOrHeadHandler(GetterBase):
                     query_string=self.req.query_string)
             self.app.set_node_timing(node, time.time() - start_node_timing)
 
-            with Timeout(node_timeout):
+            with Timeout(self.node_timeout):
                 possible_source = conn.getresponse()
                 # See NOTE: swift_conn at top of file about this.
                 possible_source.swift_conn = conn
@@ -1530,14 +1656,10 @@ class GetOrHeadHandler(GetterBase):
 
         nodes = GreenthreadSafeIterator(self.node_iter)
 
-        node_timeout = self.app.node_timeout
-        if self.server_type == 'Object' and not self.newest:
-            node_timeout = self.app.recoverable_node_timeout
-
         pile = GreenAsyncPile(self.concurrency)
 
         for node in nodes:
-            pile.spawn(self._make_node_request, node,
+            pile.spawn(self._make_node_request, node,
                        self.logger.thread_locals)
         _timeout = self.app.get_policy_options(
             self.policy).concurrency_timeout \
@@ -1573,13 +1695,12 @@ class GetOrHeadHandler(GetterBase):
                 return True
         return False
 
-    def _make_app_iter(self
+    def _make_app_iter(self):
        """
        Returns an iterator over the contents of the source (via its read
        func). There is also quite a bit of cleanup to ensure garbage
        collection works and the underlying socket of the source is closed.
 
-        :param req: incoming request object
        :return: an iterator that yields chunks of response body bytes
        """
 
@@ -1596,7 +1717,7 @@ class GetOrHeadHandler(GetterBase):
             # furnished one for us, so we'll just re-use it
             boundary = dict(content_type_attrs)["boundary"]
 
-        parts_iter = self._iter_parts_from_response(
+        parts_iter = self._iter_parts_from_response()
 
         def add_content_type(response_part):
             response_part["content_type"] = \
@@ -1604,18 +1725,18 @@ class GetOrHeadHandler(GetterBase):
             return response_part
 
         return document_iters_to_http_response_body(
-            (add_content_type
+            ClosingMapper(add_content_type, parts_iter),
             boundary, is_multipart, self.logger)
 
-    def get_working_response(self
+    def get_working_response(self):
         res = None
-        if self.
-            res = Response(request=req)
+        if self._replace_source():
+            res = Response(request=self.req)
             res.status = self.source.resp.status
             update_headers(res, self.source.resp.getheaders())
-            if req.method == 'GET' and \
+            if self.req.method == 'GET' and \
                     self.source.resp.status in (HTTP_OK, HTTP_PARTIAL_CONTENT):
-                res.app_iter = self._make_app_iter(
+                res.app_iter = self._make_app_iter()
             # See NOTE: swift_conn at top of file about this.
             res.swift_conn = self.source.resp.swift_conn
         if not res.environ:
@@ -1644,6 +1765,7 @@ class NodeIter(object):
     may not, depending on how logging is configured, the vagaries of
     socket IO and eventlet, and the phase of the moon.)
 
+    :param server_type: one of 'account', 'container', or 'object'
     :param app: a proxy app
     :param ring: ring to get yield nodes from
     :param partition: ring partition to yield nodes for
@@ -1656,8 +1778,9 @@ class NodeIter(object):
         None for an account or container ring.
     """
 
-    def __init__(self, app, ring, partition, logger, request,
-                 policy=None):
+    def __init__(self, server_type, app, ring, partition, logger, request,
+                 node_iter=None, policy=None):
+        self.server_type = server_type
         self.app = app
         self.ring = ring
         self.partition = partition
@@ -1704,12 +1827,14 @@ class NodeIter(object):
             return
         extra_handoffs = handoffs - self.expected_handoffs
         if extra_handoffs > 0:
-            self.logger.increment('handoff_count'
+            self.logger.increment('%s.handoff_count' %
+                                  self.server_type.lower())
             self.logger.warning(
                 'Handoff requested (%d)' % handoffs)
             if (extra_handoffs == self.num_primary_nodes):
                 # all the primaries were skipped, and handoffs didn't help
-                self.logger.increment('handoff_all_count'
+                self.logger.increment('%s.handoff_all_count' %
+                                      self.server_type.lower())
 
     def set_node_provider(self, callback):
         """
@@ -1786,9 +1911,10 @@ class Controller(object):
         self.trans_id = '-'
         self._allowed_methods = None
         self._private_methods = None
-
-
-
+
+    @property
+    def logger(self):
+        return self.app.logger
 
     @property
     def allowed_methods(self):
@@ -1842,7 +1968,7 @@ class Controller(object):
     def generate_request_headers(self, orig_req=None, additional=None,
                                  transfer=False):
         """
-        Create a
+        Create a dict of headers to be used in backend requests
 
         :param orig_req: the original request sent by the client to the proxy
         :param additional: additional headers to send to the backend
@@ -1974,14 +2100,14 @@ class Controller(object):
                 if (self.app.check_response(node, self.server_type, resp,
                                             method, path)
                         and not is_informational(resp.status)):
-                    return resp
-                resp.read()
+                    return resp, resp.read(), node
 
             except (Exception, Timeout):
                 self.app.exception_occurred(
                     node, self.server_type,
                     'Trying to %(method)s %(path)s' %
                     {'method': method, 'path': path})
+        return None, None, None
 
     def make_requests(self, req, ring, part, method, path, headers,
                       query_string='', overrides=None, node_count=None,
@@ -2004,36 +2130,37 @@ class Controller(object):
             the returned status of a request.
         :param node_count: optional number of nodes to send request to.
         :param node_iterator: optional node iterator.
+        :param body: byte string to use as the request body.
+            Try to keep it small.
         :returns: a swob.Response object
         """
-        nodes = GreenthreadSafeIterator(
-
-        )
+        nodes = GreenthreadSafeIterator(node_iterator or NodeIter(
+            self.server_type.lower(), self.app, ring, part, self.logger, req))
         node_number = node_count or len(ring.get_part_nodes(part))
         pile = GreenAsyncPile(node_number)
 
         for head in headers:
             pile.spawn(self._make_request, nodes, part, method, path,
                        head, query_string, body, self.logger.thread_locals)
-
+        results = []
         statuses = []
-        for resp in pile:
-            if not resp:
+        for resp, body, node in pile:
+            if not is_useful_response(resp, node):
                 continue
-
-            statuses.append(resp
+            results.append((resp.status, resp.reason, resp.getheaders(), body))
+            statuses.append(resp.status)
             if self.have_quorum(statuses, node_number):
                 break
         # give any pending requests *some* chance to finish
         finished_quickly = pile.waitall(self.app.post_quorum_timeout)
-        for resp in finished_quickly:
-            if not resp:
+        for resp, body, node in finished_quickly:
+            if not is_useful_response(resp, node):
                 continue
-
-            statuses.append(resp
-        while len(
-
-        statuses, reasons, resp_headers, bodies = zip(*
+            results.append((resp.status, resp.reason, resp.getheaders(), body))
+            statuses.append(resp.status)
+        while len(results) < node_number:
+            results.append((HTTP_SERVICE_UNAVAILABLE, '', '', b''))
+        statuses, reasons, resp_headers, bodies = zip(*results)
         return self.best_response(req, statuses, reasons, bodies,
                                   '%s %s' % (self.server_type, req.method),
                                   overrides=overrides, headers=resp_headers)
@@ -2209,7 +2336,7 @@ class Controller(object):
                                    partition, path, backend_headers,
                                    concurrency, policy=policy,
                                    logger=self.logger)
-        res = handler.get_working_response(
+        res = handler.get_working_response()
 
         if not res:
             res = self.best_response(
@@ -2339,9 +2466,10 @@ class Controller(object):
 
     def _parse_listing_response(self, req, response):
         if not is_success(response.status_int):
+            record_type = req.headers.get('X-Backend-Record-Type')
             self.logger.warning(
-                'Failed to get container listing from %s: %s',
-                req.path_qs, response.status_int)
+                'Failed to get container %s listing from %s: %s',
+                record_type, req.path_qs, response.status_int)
             return None
 
         try:
@@ -2350,9 +2478,10 @@ class Controller(object):
                 raise ValueError('not a list')
             return data
         except ValueError as err:
+            record_type = response.headers.get('X-Backend-Record-Type')
             self.logger.error(
-                'Problem with listing response from %s: %r',
-                req.path_qs, err)
+                'Problem with container %s listing response from %s: %r',
+                record_type, req.path_qs, err)
             return None
 
     def _get_container_listing(self, req, account, container, headers=None,
@@ -2382,10 +2511,10 @@ class Controller(object):
         self.logger.debug(
             'Get listing from %s %s' % (subreq.path_qs, headers))
         response = self.app.handle_request(subreq)
-        data = self._parse_listing_response(
+        data = self._parse_listing_response(subreq, response)
         return data, response
 
-    def
+    def _parse_namespaces(self, req, listing, response):
         if listing is None:
             return None
 
@@ -2397,38 +2526,15 @@ class Controller(object):
             return None
 
         try:
-            return
-
+            # Note: a legacy container-server could return a list of
+            # ShardRanges, but that's ok: namespaces just need 'name', 'lower'
+            # and 'upper' keys. If we ever need to know we can look for a
+            # 'x-backend-record-shard-format' header from newer container
+            # servers.
+            return [Namespace(data['name'], data['lower'], data['upper'])
+                    for data in listing]
         except (ValueError, TypeError, KeyError) as err:
             self.logger.error(
-                "Failed to get
+                "Failed to get namespaces from %s: invalid data: %r",
                 req.path_qs, err)
             return None
-
-    def _get_shard_ranges(
-            self, req, account, container, includes=None, states=None):
-        """
-        Fetch shard ranges from given `account/container`. If `includes` is
-        given then the shard range for that object name is requested, otherwise
-        all shard ranges are requested.
-
-        :param req: original Request instance.
-        :param account: account from which shard ranges should be fetched.
-        :param container: container from which shard ranges should be fetched.
-        :param includes: (optional) restricts the list of fetched shard ranges
-            to those which include the given name.
-        :param states: (optional) the states of shard ranges to be fetched.
-        :return: a list of instances of :class:`swift.common.utils.ShardRange`,
-            or None if there was a problem fetching the shard ranges
-        """
-        params = req.params.copy()
-        params.pop('limit', None)
-        params['format'] = 'json'
-        if includes:
-            params['includes'] = str_to_wsgi(includes)
-        if states:
-            params['states'] = states
-        headers = {'X-Backend-Record-Type': 'shard'}
-        listing, response = self._get_container_listing(
-            req, account, container, headers=headers, params=params)
-        return self._parse_shard_ranges(req, listing, response), response