swift-2.32.0-py2.py3-none-any.whl → swift-2.34.0-py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swift/account/auditor.py +11 -0
- swift/account/reaper.py +11 -1
- swift/account/replicator.py +22 -0
- swift/account/server.py +13 -12
- swift-2.32.0.data/scripts/swift-account-audit → swift/cli/account_audit.py +6 -2
- swift-2.32.0.data/scripts/swift-config → swift/cli/config.py +1 -1
- swift-2.32.0.data/scripts/swift-dispersion-populate → swift/cli/dispersion_populate.py +6 -2
- swift-2.32.0.data/scripts/swift-drive-audit → swift/cli/drive_audit.py +12 -3
- swift-2.32.0.data/scripts/swift-get-nodes → swift/cli/get_nodes.py +6 -2
- swift/cli/info.py +131 -3
- swift-2.32.0.data/scripts/swift-oldies → swift/cli/oldies.py +6 -3
- swift-2.32.0.data/scripts/swift-orphans → swift/cli/orphans.py +7 -2
- swift-2.32.0.data/scripts/swift-recon-cron → swift/cli/recon_cron.py +9 -18
- swift-2.32.0.data/scripts/swift-reconciler-enqueue → swift/cli/reconciler_enqueue.py +2 -3
- swift/cli/relinker.py +1 -1
- swift/cli/reload.py +141 -0
- swift/cli/ringbuilder.py +24 -0
- swift/common/daemon.py +12 -2
- swift/common/db.py +14 -9
- swift/common/db_auditor.py +2 -2
- swift/common/db_replicator.py +6 -0
- swift/common/exceptions.py +12 -0
- swift/common/http_protocol.py +76 -3
- swift/common/manager.py +120 -5
- swift/common/memcached.py +24 -25
- swift/common/middleware/account_quotas.py +144 -43
- swift/common/middleware/backend_ratelimit.py +166 -24
- swift/common/middleware/catch_errors.py +1 -3
- swift/common/middleware/cname_lookup.py +3 -5
- swift/common/middleware/container_sync.py +6 -10
- swift/common/middleware/crypto/crypto_utils.py +4 -5
- swift/common/middleware/crypto/decrypter.py +4 -5
- swift/common/middleware/crypto/kms_keymaster.py +2 -1
- swift/common/middleware/proxy_logging.py +57 -43
- swift/common/middleware/ratelimit.py +6 -7
- swift/common/middleware/recon.py +6 -7
- swift/common/middleware/s3api/acl_handlers.py +10 -1
- swift/common/middleware/s3api/controllers/__init__.py +3 -0
- swift/common/middleware/s3api/controllers/acl.py +3 -2
- swift/common/middleware/s3api/controllers/logging.py +2 -2
- swift/common/middleware/s3api/controllers/multi_upload.py +31 -15
- swift/common/middleware/s3api/controllers/obj.py +20 -1
- swift/common/middleware/s3api/controllers/object_lock.py +44 -0
- swift/common/middleware/s3api/s3api.py +6 -0
- swift/common/middleware/s3api/s3request.py +190 -74
- swift/common/middleware/s3api/s3response.py +48 -8
- swift/common/middleware/s3api/s3token.py +2 -2
- swift/common/middleware/s3api/utils.py +2 -1
- swift/common/middleware/slo.py +508 -310
- swift/common/middleware/staticweb.py +45 -14
- swift/common/middleware/tempauth.py +6 -4
- swift/common/middleware/tempurl.py +134 -93
- swift/common/middleware/x_profile/exceptions.py +1 -4
- swift/common/middleware/x_profile/html_viewer.py +9 -10
- swift/common/middleware/x_profile/profile_model.py +1 -2
- swift/common/middleware/xprofile.py +1 -2
- swift/common/request_helpers.py +101 -8
- swift/common/statsd_client.py +207 -0
- swift/common/storage_policy.py +1 -1
- swift/common/swob.py +5 -2
- swift/common/utils/__init__.py +331 -1774
- swift/common/utils/base.py +138 -0
- swift/common/utils/config.py +443 -0
- swift/common/utils/logs.py +999 -0
- swift/common/utils/timestamp.py +23 -2
- swift/common/wsgi.py +19 -3
- swift/container/auditor.py +11 -0
- swift/container/backend.py +136 -31
- swift/container/reconciler.py +11 -2
- swift/container/replicator.py +64 -7
- swift/container/server.py +276 -146
- swift/container/sharder.py +86 -42
- swift/container/sync.py +11 -1
- swift/container/updater.py +12 -2
- swift/obj/auditor.py +20 -3
- swift/obj/diskfile.py +63 -25
- swift/obj/expirer.py +154 -47
- swift/obj/mem_diskfile.py +2 -1
- swift/obj/mem_server.py +1 -0
- swift/obj/reconstructor.py +28 -4
- swift/obj/replicator.py +63 -24
- swift/obj/server.py +76 -59
- swift/obj/updater.py +12 -2
- swift/obj/watchers/dark_data.py +72 -34
- swift/proxy/controllers/account.py +3 -2
- swift/proxy/controllers/base.py +254 -148
- swift/proxy/controllers/container.py +274 -289
- swift/proxy/controllers/obj.py +120 -166
- swift/proxy/server.py +17 -13
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/AUTHORS +14 -4
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/METADATA +9 -7
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/RECORD +97 -120
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/entry_points.txt +39 -0
- swift-2.34.0.dist-info/pbr.json +1 -0
- swift-2.32.0.data/scripts/swift-account-auditor +0 -23
- swift-2.32.0.data/scripts/swift-account-info +0 -52
- swift-2.32.0.data/scripts/swift-account-reaper +0 -23
- swift-2.32.0.data/scripts/swift-account-replicator +0 -34
- swift-2.32.0.data/scripts/swift-account-server +0 -23
- swift-2.32.0.data/scripts/swift-container-auditor +0 -23
- swift-2.32.0.data/scripts/swift-container-info +0 -56
- swift-2.32.0.data/scripts/swift-container-reconciler +0 -21
- swift-2.32.0.data/scripts/swift-container-replicator +0 -34
- swift-2.32.0.data/scripts/swift-container-server +0 -23
- swift-2.32.0.data/scripts/swift-container-sharder +0 -37
- swift-2.32.0.data/scripts/swift-container-sync +0 -23
- swift-2.32.0.data/scripts/swift-container-updater +0 -23
- swift-2.32.0.data/scripts/swift-dispersion-report +0 -24
- swift-2.32.0.data/scripts/swift-form-signature +0 -20
- swift-2.32.0.data/scripts/swift-init +0 -119
- swift-2.32.0.data/scripts/swift-object-auditor +0 -29
- swift-2.32.0.data/scripts/swift-object-expirer +0 -33
- swift-2.32.0.data/scripts/swift-object-info +0 -60
- swift-2.32.0.data/scripts/swift-object-reconstructor +0 -33
- swift-2.32.0.data/scripts/swift-object-relinker +0 -23
- swift-2.32.0.data/scripts/swift-object-replicator +0 -37
- swift-2.32.0.data/scripts/swift-object-server +0 -27
- swift-2.32.0.data/scripts/swift-object-updater +0 -23
- swift-2.32.0.data/scripts/swift-proxy-server +0 -23
- swift-2.32.0.data/scripts/swift-recon +0 -24
- swift-2.32.0.data/scripts/swift-ring-builder +0 -37
- swift-2.32.0.data/scripts/swift-ring-builder-analyzer +0 -22
- swift-2.32.0.data/scripts/swift-ring-composer +0 -22
- swift-2.32.0.dist-info/pbr.json +0 -1
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/LICENSE +0 -0
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/WHEEL +0 -0
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/top_level.txt +0 -0
swift/proxy/controllers/obj.py
CHANGED
@@ -48,8 +48,7 @@ from swift.common.utils import (
normalize_delete_at_timestamp, public, get_expirer_container,
document_iters_to_http_response_body, parse_content_range,
quorum_size, reiterate, close_if_possible, safe_json_loads, md5,
-
- CooperativeIterator)
+ NamespaceBoundList, CooperativeIterator)
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_metadata, check_object_creation
from swift.common import constraints
@@ -64,21 +63,22 @@ from swift.common.http import (
HTTP_SERVICE_UNAVAILABLE, HTTP_INSUFFICIENT_STORAGE,
HTTP_PRECONDITION_FAILED, HTTP_CONFLICT, HTTP_UNPROCESSABLE_ENTITY,
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE, HTTP_NOT_FOUND)
- from swift.common.memcached import MemcacheConnectionError
from swift.common.storage_policy import (POLICIES, REPL_POLICY, EC_POLICY,
ECDriverError, PolicyError)
from swift.proxy.controllers.base import Controller, delay_denial, \
cors_validation, update_headers, bytes_to_skip, ByteCountEnforcer, \
record_cache_op_metrics, get_cache_key, GetterBase, GetterSource, \
- is_good_source, NodeIter
+ is_good_source, NodeIter, get_namespaces_from_cache, \
+ set_namespaces_in_cache
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \
HTTPServerError, HTTPServiceUnavailable, HTTPClientDisconnect, \
HTTPUnprocessableEntity, Response, HTTPException, \
HTTPRequestedRangeNotSatisfiable, Range, HTTPInternalServerError, \
- normalize_etag
+ normalize_etag, str_to_wsgi
from swift.common.request_helpers import update_etag_is_at_header, \
- resolve_etag_is_at_header, validate_internal_obj, get_ip_port
+ resolve_etag_is_at_header, validate_internal_obj, get_ip_port, \
+ is_open_expired, append_log_info


def check_content_type(req):
@@ -201,8 +201,9 @@ class BaseObjectController(Controller):
policy_options = self.app.get_policy_options(policy)
is_local = policy_options.write_affinity_is_local_fn
if is_local is None:
- return NodeIter(
-
+ return NodeIter(
+ 'object', self.app, ring, partition, self.logger, request,
+ policy=policy)

primary_nodes = ring.get_part_nodes(partition)
handoff_nodes = ring.get_more_nodes(partition)
@@ -235,8 +236,9 @@ class BaseObjectController(Controller):
(node for node in all_nodes if node not in preferred_nodes)
)

- return NodeIter(
-
+ return NodeIter(
+ 'object', self.app, ring, partition, self.logger, request,
+ node_iter=node_iter, policy=policy)

def GETorHEAD(self, req):
"""Handle HTTP GET or HEAD requests."""
@@ -249,14 +251,17 @@ class BaseObjectController(Controller):
policy = POLICIES.get_by_index(policy_index)
obj_ring = self.app.get_object_ring(policy_index)
req.headers['X-Backend-Storage-Policy-Index'] = policy_index
+ if is_open_expired(self.app, req):
+ req.headers['X-Backend-Open-Expired'] = 'true'
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
partition = obj_ring.get_part(
self.account_name, self.container_name, self.object_name)
- node_iter = NodeIter(
-
+ node_iter = NodeIter(
+ 'object', self.app, obj_ring, partition, self.logger, req,
+ policy=policy)

resp = self._get_or_head_response(req, node_iter, partition, policy)

@@ -279,47 +284,32 @@ class BaseObjectController(Controller):
"""Handler for HTTP HEAD requests."""
return self.GETorHEAD(req)

- def
- self,
+ def _get_updating_namespaces(
+ self, req, account, container, includes=None):
"""
- Fetch
-
+ Fetch namespaces in 'updating' states from given `account/container`.
+ If `includes` is given then the shard range for that object name is
+ requested, otherwise all namespaces are requested.

- :param
- :param
-
- :param
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- cache_state = 'hit' if namespaces else 'miss'
- except MemcacheConnectionError:
- namespaces = None
- cache_state = 'error'
-
- if namespaces:
- if six.PY2:
- # json.loads() in memcache.get will convert json 'string' to
- # 'unicode' with python2, here we cast 'unicode' back to 'str'
- namespaces = [
- [lower.encode('utf-8'), name.encode('utf-8')]
- for lower, name in namespaces]
- namespace_list = NamespaceBoundList(namespaces)
- else:
- namespace_list = None
- return namespace_list, cache_state
+ :param req: original Request instance.
+ :param account: account from which namespaces should be fetched.
+ :param container: container from which namespaces should be fetched.
+ :param includes: (optional) restricts the list of fetched namespaces
+ to those which include the given name.
+ :return: a list of instances of :class:`swift.common.utils.Namespace`,
+ or None if there was a problem fetching the namespaces.
+ """
+ params = req.params.copy()
+ params.pop('limit', None)
+ params['format'] = 'json'
+ params['states'] = 'updating'
+ headers = {'X-Backend-Record-Type': 'shard',
+ 'X-Backend-Record-Shard-Format': 'namespace'}
+ if includes:
+ params['includes'] = str_to_wsgi(includes)
+ listing, response = self._get_container_listing(
+ req, account, container, headers=headers, params=params)
+ return self._parse_namespaces(req, listing, response), response

def _get_update_shard_caching_disabled(self, req, account, container, obj):
"""
@@ -330,16 +320,17 @@ class BaseObjectController(Controller):
:param account: account from which shard ranges should be fetched.
:param container: container from which shard ranges should be fetched.
:param obj: object getting updated.
- :return: an instance of :class:`swift.common.utils.
+ :return: an instance of :class:`swift.common.utils.Namespace`,
or None if the update should go back to the root
"""
# legacy behavior requests container server for includes=obj
-
- req, account, container,
+ namespaces, response = self._get_updating_namespaces(
+ req, account, container, includes=obj)
record_cache_op_metrics(
- self.logger,
-
-
+ self.logger, self.server_type.lower(), 'shard_updating',
+ 'disabled', response)
+ # there will be only one Namespace in the list if any
+ return namespaces[0] if namespaces else None

def _get_update_shard(self, req, account, container, obj):
"""
@@ -353,7 +344,7 @@ class BaseObjectController(Controller):
:param account: account from which shard ranges should be fetched.
:param container: container from which shard ranges should be fetched.
:param obj: object getting updated.
- :return: an instance of :class:`swift.common.utils.
+ :return: an instance of :class:`swift.common.utils.Namespace`,
or None if the update should go back to the root
"""
if not self.app.recheck_updating_shard_ranges:
@@ -364,51 +355,46 @@ class BaseObjectController(Controller):
# caching is enabled, try to get from caches
response = None
cache_key = get_cache_key(account, container, shard='updating')
-
-
-
-
-
- #
-
-
-
- name=namespace.name, timestamp=0, lower=namespace.lower,
- upper=namespace.upper)
- else:
- # pull full set of updating shard ranges from backend
- shard_ranges, response = self._get_shard_ranges(
- req, account, container, states='updating')
- if shard_ranges:
+ skip_chance = self.app.container_updating_shard_ranges_skip_cache
+ ns_bound_list, get_cache_state = get_namespaces_from_cache(
+ req, cache_key, skip_chance)
+ if not ns_bound_list:
+ # namespaces not found in either infocache or memcache so pull full
+ # set of updating shard ranges from backend
+ namespaces, response = self._get_updating_namespaces(
+ req, account, container)
+ if namespaces:
# only store the list of namespace lower bounds and names into
# infocache and memcache.
-
-
-
-
+ ns_bound_list = NamespaceBoundList.parse(namespaces)
+ set_cache_state = set_namespaces_in_cache(
+ req, cache_key, ns_bound_list,
+ self.app.recheck_updating_shard_ranges)
+ record_cache_op_metrics(
+ self.logger, self.server_type.lower(), 'shard_updating',
+ set_cache_state, None)
+ if set_cache_state == 'set':
self.logger.info(
'Caching updating shards for %s (%d shards)',
- cache_key, len(
- memcache.set(
- cache_key, cached_namespaces.bounds,
- time=self.app.recheck_updating_shard_ranges)
- update_shard = find_namespace(obj, shard_ranges or [])
+ cache_key, len(namespaces))
record_cache_op_metrics(
- self.logger, 'shard_updating',
-
+ self.logger, self.server_type.lower(), 'shard_updating',
+ get_cache_state, response)
+ return ns_bound_list.get_namespace(obj) if ns_bound_list else None

def _get_update_target(self, req, container_info):
# find the sharded container to which we'll send the update
db_state = container_info.get('sharding_state', 'unsharded')
if db_state in ('sharded', 'sharding'):
-
+ update_shard_ns = self._get_update_shard(
req, self.account_name, self.container_name, self.object_name)
- if
+ if update_shard_ns:
partition, nodes = self.app.container_ring.get_nodes(
-
- return partition, nodes,
+ update_shard_ns.account, update_shard_ns.container)
+ return partition, nodes, update_shard_ns.name, db_state

- return container_info['partition'], container_info['nodes'], None
+ return (container_info['partition'], container_info['nodes'], None,
+ db_state)

@public
@cors_validation
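
The hunks above replace the old inline memcache handling with the shared get_namespaces_from_cache / set_namespaces_in_cache helpers and a NamespaceBoundList built from the backend listing; the update shard is then resolved with ns_bound_list.get_namespace(obj). As the removed code shows, the cached value is essentially a list of [lower_bound, shard_name] pairs sorted by lower bound, and the receiving shard is the last entry whose lower bound sorts below the object name. The sketch below only illustrates that lookup idea; the shard names are invented and this is not Swift's implementation.

    import bisect

    # Cached bounds in the shape the removed code built them: [lower, name]
    # pairs sorted by lower bound.  Names here are invented for the example.
    bounds = [
        ['', '.shards_AUTH_test/c-a'],
        ['cat', '.shards_AUTH_test/c-b'],
        ['giraffe', '.shards_AUTH_test/c-c'],
    ]

    def find_update_shard(obj_name):
        # last entry whose lower bound is strictly below the object name
        # (shard range lower bounds are exclusive)
        lowers = [lower for lower, _name in bounds]
        return bounds[bisect.bisect_left(lowers, obj_name) - 1][1]

    print(find_update_shard('dog'))    # .shards_AUTH_test/c-b
    print(find_update_shard('zebra'))  # .shards_AUTH_test/c-c
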
@@ -417,14 +403,14 @@ class BaseObjectController(Controller):
"""HTTP POST request handler."""
container_info = self.container_info(
self.account_name, self.container_name, req)
- container_partition, container_nodes, container_path = \
- self._get_update_target(req, container_info)
req.acl = container_info['write_acl']
+ if is_open_expired(self.app, req):
+ req.headers['X-Backend-Open-Expired'] = 'true'
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
- if not
+ if not is_success(container_info.get('status')):
return HTTPNotFound(request=req)
error_response = check_metadata(req, 'object')
if error_response:
@@ -447,28 +433,30 @@ class BaseObjectController(Controller):
self.account_name, self.container_name, self.object_name)

headers = self._backend_requests(
- req, len(nodes),
-
- container_path=container_path)
+ req, len(nodes), container_info, delete_at_container,
+ delete_at_part, delete_at_nodes)
return self._post_object(req, obj_ring, partition, headers)

def _backend_requests(self, req, n_outgoing,
-
-
- delete_at_nodes=None, container_path=None):
+ container_info, delete_at_container=None,
+ delete_at_partition=None, delete_at_nodes=None):
policy_index = req.headers['X-Backend-Storage-Policy-Index']
policy = POLICIES.get_by_index(policy_index)
+ container_partition, containers, container_path, db_state = \
+ self._get_update_target(req, container_info)
headers = [self.generate_request_headers(req, additional=req.headers)
for _junk in range(n_outgoing)]

- def set_container_update(index,
+ def set_container_update(index, container_node):
+ ip, port = get_ip_port(container_node, headers[index])
headers[index]['X-Container-Partition'] = container_partition
headers[index]['X-Container-Host'] = csv_append(
headers[index].get('X-Container-Host'),
- '%(ip)s:%(port)s' %
+ '%(ip)s:%(port)s' % {'ip': ip, 'port': port})
headers[index]['X-Container-Device'] = csv_append(
headers[index].get('X-Container-Device'),
-
+ container_node['device'])
+ headers[index]['X-Container-Root-Db-State'] = db_state
if container_path:
headers[index]['X-Backend-Quoted-Container-Path'] = quote(
container_path)
@@ -481,11 +469,12 @@ class BaseObjectController(Controller):
# will eat the update and move it as a misplaced object.

def set_delete_at_headers(index, delete_at_node):
+ ip, port = get_ip_port(delete_at_node, headers[index])
headers[index]['X-Delete-At-Container'] = delete_at_container
headers[index]['X-Delete-At-Partition'] = delete_at_partition
headers[index]['X-Delete-At-Host'] = csv_append(
headers[index].get('X-Delete-At-Host'),
- '%(ip)s:%(port)s' %
+ '%(ip)s:%(port)s' % {'ip': ip, 'port': port})
headers[index]['X-Delete-At-Device'] = csv_append(
headers[index].get('X-Delete-At-Device'),
delete_at_node['device'])
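
In these hunks _backend_requests() now receives container_info and calls _get_update_target() itself instead of each verb handler doing so, and it stamps the root database's sharding state onto every container update. Purely as an illustration of what set_container_update() composes for one object-server request, the headers end up looking roughly like the dict below; the partition, hosts and devices are invented values, the header names and the db_state values ('unsharded', 'sharding', 'sharded') come from the diff itself, and X-Backend-Quoted-Container-Path is only present when the update is redirected to a shard container.

    # Invented example values; not taken from any real cluster.
    update_headers = {
        'X-Container-Partition': '17',
        'X-Container-Host': '10.0.0.11:6201,10.0.0.12:6201',  # csv_append()ed per replica
        'X-Container-Device': 'sdb1,sdc1',
        'X-Container-Root-Db-State': 'sharded',
        'X-Backend-Quoted-Container-Path': '.shards_AUTH_test/c-b',
    }
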
@@ -642,8 +631,7 @@ class BaseObjectController(Controller):
int(req.headers['x-delete-at']))
x_delete_at = int(req.headers['x-delete-at'])

- req.environ
- 'x-delete-at:%s' % x_delete_at)
+ append_log_info(req.environ, 'x-delete-at:%s' % x_delete_at)

delete_at_container = get_expirer_container(
x_delete_at, self.app.expiring_objects_container_divisor,
@@ -860,8 +848,6 @@ class BaseObjectController(Controller):
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
obj_ring = self.app.get_object_ring(policy_index)
- container_partition, container_nodes, container_path = \
- self._get_update_target(req, container_info)
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)

@@ -879,7 +865,7 @@ class BaseObjectController(Controller):
if aresp:
return aresp

- if not
+ if not is_success(container_info.get('status')):
return HTTPNotFound(request=req)

# update content type in case it is missing
@@ -907,9 +893,8 @@ class BaseObjectController(Controller):

# add special headers to be handled by storage nodes
outgoing_headers = self._backend_requests(
- req, len(nodes),
- delete_at_container, delete_at_part, delete_at_nodes
- container_path=container_path)
+ req, len(nodes), container_info,
+ delete_at_container, delete_at_part, delete_at_nodes)

# send object to storage nodes
resp = self._store_object(
@@ -932,15 +917,13 @@ class BaseObjectController(Controller):
next_part_power = getattr(obj_ring, 'next_part_power', None)
if next_part_power:
req.headers['X-Backend-Next-Part-Power'] = next_part_power
- container_partition, container_nodes, container_path = \
- self._get_update_target(req, container_info)
req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
- if not
+ if not is_success(container_info.get('status')):
return HTTPNotFound(request=req)
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
@@ -963,9 +946,7 @@ class BaseObjectController(Controller):
obj_ring, partition, req, policy=policy,
local_handoffs_first=True)

- headers = self._backend_requests(
- req, node_count, container_partition, container_nodes,
- container_path=container_path)
+ headers = self._backend_requests(req, node_count, container_info)
return self._delete_object(req, obj_ring, partition, headers,
node_count=node_count,
node_iterator=node_iterator)
@@ -1046,7 +1027,7 @@ class ReplicatedObjectController(BaseObjectController):
if ml and bytes_transferred < ml:
self.logger.warning(
'Client disconnected without sending enough data')
- self.logger.increment('client_disconnects')
+ self.logger.increment('object.client_disconnects')
raise HTTPClientDisconnect(request=req)

trail_md = self._get_footers(req)
@@ -1061,14 +1042,14 @@ class ReplicatedObjectController(BaseObjectController):
except ChunkReadTimeout as err:
self.logger.warning(
'ERROR Client read timeout (%ss)', err.seconds)
- self.logger.increment('client_timeouts')
+ self.logger.increment('object.client_timeouts')
raise HTTPRequestTimeout(request=req)
except HTTPException:
raise
except ChunkReadError:
self.logger.warning(
'Client disconnected without sending last chunk')
- self.logger.increment('client_disconnects')
+ self.logger.increment('object.client_disconnects')
raise HTTPClientDisconnect(request=req)
except Timeout:
self.logger.exception(
@@ -2484,9 +2465,10 @@ class ECFragGetter(GetterBase):
backend_headers, header_provider, logger_thread_locals,
logger):
super(ECFragGetter, self).__init__(
- app=app, req=req, node_iter=node_iter,
-
-
+ app=app, req=req, node_iter=node_iter, partition=partition,
+ policy=policy, path=path, backend_headers=backend_headers,
+ node_timeout=app.recoverable_node_timeout,
+ resource_type='EC fragment', logger=logger)
self.header_provider = header_provider
self.fragment_size = policy.fragment_size
self.skip_bytes = 0
@@ -2494,39 +2476,13 @@ class ECFragGetter(GetterBase):
self.status = self.reason = self.body = self.source_headers = None
self._source_iter = None

- def _get_next_response_part(self):
- node_timeout = self.app.recoverable_node_timeout
-
- while True:
- # the loop here is to resume if trying to parse
- # multipart/byteranges response raises a ChunkReadTimeout
- # and resets the source_parts_iter
- try:
- with WatchdogTimeout(self.app.watchdog, node_timeout,
- ChunkReadTimeout):
- # If we don't have a multipart/byteranges response,
- # but just a 200 or a single-range 206, then this
- # performs no IO, and just returns source (or
- # raises StopIteration).
- # Otherwise, this call to next() performs IO when
- # we have a multipart/byteranges response; as it
- # will read the MIME boundary and part headers.
- start_byte, end_byte, length, headers, part = next(
- self.source.parts_iter)
- return (start_byte, end_byte, length, headers, part)
- except ChunkReadTimeout:
- if not self._replace_source(
- 'Trying to read next part of EC multi-part GET '
- '(retrying)'):
- raise
-
def _iter_bytes_from_response_part(self, part_file, nbytes):
buf = b''
part_file = ByteCountEnforcer(part_file, nbytes)
while True:
try:
with WatchdogTimeout(self.app.watchdog,
- self.
+ self.node_timeout,
ChunkReadTimeout):
chunk = part_file.read(self.app.object_chunk_size)
# NB: this append must be *inside* the context
@@ -2580,7 +2536,7 @@ class ECFragGetter(GetterBase):
if not chunk:
break

- def _iter_parts_from_response(self
+ def _iter_parts_from_response(self):
try:
part_iter = None
try:
@@ -2591,7 +2547,7 @@ class ECFragGetter(GetterBase):
except StopIteration:
# it seems this is the only way out of the loop; not
# sure why the req.environ update is always needed
- req.environ['swift.non_client_disconnect'] = True
+ self.req.environ['swift.non_client_disconnect'] = True
break
# skip_bytes compensates for the backend request range
# expansion done in _convert_range
@@ -2624,7 +2580,7 @@ class ECFragGetter(GetterBase):
self.logger.warning(
'Client did not read from proxy within %ss' %
self.app.client_timeout)
- self.logger.increment('client_timeouts')
+ self.logger.increment('object.client_timeouts')
except GeneratorExit:
warn = True
req_range = self.backend_headers['Range']
@@ -2635,7 +2591,8 @@ class ECFragGetter(GetterBase):
if end is not None and begin is not None:
if end - begin + 1 == self.bytes_used_from_backend:
warn = False
- if
+ if (warn and
+ not self.req.environ.get('swift.non_client_disconnect')):
self.logger.warning(
'Client disconnected on read of EC frag %r', self.path)
raise
@@ -2656,7 +2613,7 @@ class ECFragGetter(GetterBase):
else:
return HeaderKeyDict()

- def _make_node_request(self, node
+ def _make_node_request(self, node):
# make a backend request; return a response if it has an acceptable
# status code, otherwise None
self.logger.thread_locals = self.logger_thread_locals
@@ -2673,7 +2630,7 @@ class ECFragGetter(GetterBase):
query_string=self.req.query_string)
self.app.set_node_timing(node, time.time() - start_node_timing)

- with Timeout(node_timeout):
+ with Timeout(self.node_timeout):
possible_source = conn.getresponse()
# See NOTE: swift_conn at top of file about this.
possible_source.swift_conn = conn
@@ -2729,9 +2686,7 @@ class ECFragGetter(GetterBase):
def _source_gen(self):
self.status = self.reason = self.body = self.source_headers = None
for node in self.node_iter:
- source = self._make_node_request(
- node, self.app.recoverable_node_timeout)
-
+ source = self._make_node_request(node)
if source:
yield GetterSource(self.app, source, node)
else:
@@ -2755,11 +2710,10 @@ class ECFragGetter(GetterBase):
return True
return False

- def response_parts_iter(self
+ def response_parts_iter(self):
"""
Create an iterator over a single fragment response body.

- :param req: a ``swob.Request``.
:return: an interator that yields chunks of bytes from a fragment
response body.
"""
@@ -2771,7 +2725,7 @@ class ECFragGetter(GetterBase):
else:
if source:
self.source = source
- it = self._iter_parts_from_response(
+ it = self._iter_parts_from_response()
return it

@@ -2791,7 +2745,7 @@ class ECObjectController(BaseObjectController):
policy, req.swift_entity_path, backend_headers,
header_provider, logger_thread_locals,
self.logger)
- return
+ return getter, getter.response_parts_iter()

def _convert_range(self, req, policy):
"""
@@ -3226,7 +3180,7 @@ class ECObjectController(BaseObjectController):
if ml and bytes_transferred < ml:
self.logger.warning(
'Client disconnected without sending enough data')
- self.logger.increment('client_disconnects')
+ self.logger.increment('object.client_disconnects')
raise HTTPClientDisconnect(request=req)

send_chunk(b'')  # flush out any buffered data
@@ -3296,12 +3250,12 @@ class ECObjectController(BaseObjectController):
except ChunkReadTimeout as err:
self.logger.warning(
'ERROR Client read timeout (%ss)', err.seconds)
- self.logger.increment('client_timeouts')
+ self.logger.increment('object.client_timeouts')
raise HTTPRequestTimeout(request=req)
except ChunkReadError:
self.logger.warning(
'Client disconnected without sending last chunk')
- self.logger.increment('client_disconnects')
+ self.logger.increment('object.client_disconnects')
raise HTTPClientDisconnect(request=req)
except HTTPException:
raise
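
Several hunks in this file rename the proxy's client disconnect/timeout counters from bare names to object-scoped ones. Assuming the usual Swift statsd convention of prefixing counters with the configured log name (for example proxy-server), the emitted metric names change roughly as shown below; the prefix is an assumption about a typical deployment, not something this diff specifies.

    # Before / after the rename; the 'proxy-server.' prefix is assumed.
    old_metric = 'proxy-server.client_disconnects'
    new_metric = 'proxy-server.object.client_disconnects'
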