swift 2.31.1-py2.py3-none-any.whl → 2.32.1-py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swift/cli/info.py +9 -2
- swift/cli/ringbuilder.py +5 -1
- swift/common/container_sync_realms.py +6 -7
- swift/common/daemon.py +7 -3
- swift/common/db.py +22 -7
- swift/common/db_replicator.py +19 -20
- swift/common/direct_client.py +63 -14
- swift/common/internal_client.py +24 -3
- swift/common/manager.py +43 -44
- swift/common/memcached.py +168 -74
- swift/common/middleware/__init__.py +4 -0
- swift/common/middleware/account_quotas.py +98 -40
- swift/common/middleware/backend_ratelimit.py +6 -4
- swift/common/middleware/crossdomain.py +21 -8
- swift/common/middleware/listing_formats.py +26 -38
- swift/common/middleware/proxy_logging.py +12 -9
- swift/common/middleware/s3api/controllers/bucket.py +8 -2
- swift/common/middleware/s3api/s3api.py +9 -4
- swift/common/middleware/s3api/s3request.py +32 -24
- swift/common/middleware/s3api/s3response.py +10 -1
- swift/common/middleware/tempauth.py +9 -10
- swift/common/middleware/versioned_writes/__init__.py +0 -3
- swift/common/middleware/versioned_writes/object_versioning.py +22 -5
- swift/common/middleware/x_profile/html_viewer.py +1 -1
- swift/common/middleware/xprofile.py +5 -0
- swift/common/request_helpers.py +1 -2
- swift/common/ring/ring.py +22 -19
- swift/common/swob.py +2 -1
- swift/common/{utils.py → utils/__init__.py} +610 -1146
- swift/common/utils/ipaddrs.py +256 -0
- swift/common/utils/libc.py +345 -0
- swift/common/utils/timestamp.py +399 -0
- swift/common/wsgi.py +70 -39
- swift/container/backend.py +106 -38
- swift/container/server.py +11 -2
- swift/container/sharder.py +34 -15
- swift/locale/de/LC_MESSAGES/swift.po +1 -320
- swift/locale/en_GB/LC_MESSAGES/swift.po +1 -347
- swift/locale/es/LC_MESSAGES/swift.po +1 -279
- swift/locale/fr/LC_MESSAGES/swift.po +1 -209
- swift/locale/it/LC_MESSAGES/swift.po +1 -207
- swift/locale/ja/LC_MESSAGES/swift.po +2 -278
- swift/locale/ko_KR/LC_MESSAGES/swift.po +3 -303
- swift/locale/pt_BR/LC_MESSAGES/swift.po +1 -204
- swift/locale/ru/LC_MESSAGES/swift.po +1 -203
- swift/locale/tr_TR/LC_MESSAGES/swift.po +1 -192
- swift/locale/zh_CN/LC_MESSAGES/swift.po +1 -192
- swift/locale/zh_TW/LC_MESSAGES/swift.po +1 -193
- swift/obj/diskfile.py +19 -6
- swift/obj/server.py +20 -6
- swift/obj/ssync_receiver.py +19 -9
- swift/obj/ssync_sender.py +10 -10
- swift/proxy/controllers/account.py +7 -7
- swift/proxy/controllers/base.py +374 -366
- swift/proxy/controllers/container.py +112 -53
- swift/proxy/controllers/obj.py +254 -390
- swift/proxy/server.py +3 -8
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-account-server +1 -1
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-server +1 -1
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-drive-audit +45 -14
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-server +1 -1
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-proxy-server +1 -1
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/AUTHORS +4 -0
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/METADATA +32 -35
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/RECORD +103 -100
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/WHEEL +1 -1
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/entry_points.txt +0 -1
- swift-2.32.1.dist-info/pbr.json +1 -0
- swift-2.31.1.dist-info/pbr.json +0 -1
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-account-audit +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-account-auditor +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-account-info +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-account-reaper +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-account-replicator +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-config +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-auditor +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-info +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-reconciler +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-replicator +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-sharder +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-sync +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-container-updater +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-dispersion-populate +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-dispersion-report +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-form-signature +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-get-nodes +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-init +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-auditor +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-expirer +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-info +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-reconstructor +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-relinker +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-replicator +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-object-updater +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-oldies +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-orphans +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-recon +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-recon-cron +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-reconciler-enqueue +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-ring-builder +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-ring-builder-analyzer +0 -0
- {swift-2.31.1.data → swift-2.32.1.data}/scripts/swift-ring-composer +0 -0
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/LICENSE +0 -0
- {swift-2.31.1.dist-info → swift-2.32.1.dist-info}/top_level.txt +0 -0
swift/proxy/controllers/obj.py
CHANGED
```diff
@@ -38,7 +38,7 @@ import random
 import sys
 
 from greenlet import GreenletExit
-from eventlet import GreenPile, sleep
+from eventlet import GreenPile
 from eventlet.queue import Queue, Empty
 from eventlet.timeout import Timeout
 
@@ -48,7 +48,8 @@ from swift.common.utils import (
     normalize_delete_at_timestamp, public, get_expirer_container,
     document_iters_to_http_response_body, parse_content_range,
     quorum_size, reiterate, close_if_possible, safe_json_loads, md5,
-    ShardRange, find_shard_range, cache_from_env)
+    ShardRange, find_namespace, cache_from_env, NamespaceBoundList,
+    CooperativeIterator)
 from swift.common.bufferedhttp import http_connect
 from swift.common.constraints import check_metadata, check_object_creation
 from swift.common import constraints
@@ -67,8 +68,9 @@ from swift.common.memcached import MemcacheConnectionError
 from swift.common.storage_policy import (POLICIES, REPL_POLICY, EC_POLICY,
                                          ECDriverError, PolicyError)
 from swift.proxy.controllers.base import Controller, delay_denial, \
-    cors_validation, update_headers, bytes_to_skip, close_swift_conn, \
-    ByteCountEnforcer, record_cache_op_metrics, get_cache_key
+    cors_validation, update_headers, bytes_to_skip, ByteCountEnforcer, \
+    record_cache_op_metrics, get_cache_key, GetterBase, GetterSource, \
+    is_good_source, NodeIter
 from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
     HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \
     HTTPServerError, HTTPServiceUnavailable, HTTPClientDisconnect, \
@@ -76,8 +78,7 @@ from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
     HTTPRequestedRangeNotSatisfiable, Range, HTTPInternalServerError, \
     normalize_etag
 from swift.common.request_helpers import update_etag_is_at_header, \
-    resolve_etag_is_at_header, validate_internal_obj, get_ip_port, \
-    http_response_to_document_iters
+    resolve_etag_is_at_header, validate_internal_obj, get_ip_port
 
 
 def check_content_type(req):
```
```diff
@@ -200,8 +201,8 @@ class BaseObjectController(Controller):
         policy_options = self.app.get_policy_options(policy)
         is_local = policy_options.write_affinity_is_local_fn
         if is_local is None:
-            return self.app.iter_nodes(ring, partition, self.logger, request,
-                                       policy=policy)
+            return NodeIter(self.app, ring, partition, self.logger, request,
+                            policy=policy)
 
         primary_nodes = ring.get_part_nodes(partition)
         handoff_nodes = ring.get_more_nodes(partition)
@@ -234,8 +235,8 @@ class BaseObjectController(Controller):
             (node for node in all_nodes if node not in preferred_nodes)
         )
 
-        return self.app.iter_nodes(ring, partition, self.logger, request,
-                                   node_iter=node_iter, policy=policy)
+        return NodeIter(self.app, ring, partition, self.logger, request,
+                        node_iter=node_iter, policy=policy)
 
     def GETorHEAD(self, req):
         """Handle HTTP GET or HEAD requests."""
@@ -254,8 +255,8 @@ class BaseObjectController(Controller):
             return aresp
         partition = obj_ring.get_part(
             self.account_name, self.container_name, self.object_name)
-        node_iter = self.app.iter_nodes(obj_ring, partition, self.logger,
-                                        req, policy=policy)
+        node_iter = NodeIter(self.app, obj_ring, partition, self.logger, req,
+                             policy=policy)
 
         resp = self._get_or_head_response(req, node_iter, partition, policy)
 
```
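The three hunks above replace 2.31-style `self.app.iter_nodes(...)` calls with direct construction of `NodeIter`, which 2.32 exposes from `swift.proxy.controllers.base`. A minimal sketch of what such a node iterator does — primaries first, then handoffs, skipping error-limited nodes — assuming a `ring` with `get_part_nodes`/`get_more_nodes` and an `app` with an `error_limited` check (the class below is illustrative, not the real `NodeIter`):

```python
from itertools import chain

class SketchNodeIter:
    """Illustrative node iterator: yield primary nodes for a partition,
    then handoffs, skipping nodes the app has error-limited."""

    def __init__(self, app, ring, partition):
        self.app = app
        # assumed search budget; the real iterator derives this from config
        self.nodes_left = 2 * ring.replica_count
        self._nodes = chain(ring.get_part_nodes(partition),
                            ring.get_more_nodes(partition))

    def __iter__(self):
        return self

    def __next__(self):
        for node in self._nodes:
            if self.app.error_limited(node):
                continue  # skip recently-failing nodes
            if self.nodes_left <= 0:
                break
            self.nodes_left -= 1
            return node
        raise StopIteration
```

The `nodes_left` attribute, which a later hunk consults to decide whether extra EC fragment requests are still possible, is modeled here as a simple countdown.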
```diff
@@ -278,37 +279,67 @@ class BaseObjectController(Controller):
         """Handler for HTTP HEAD requests."""
         return self.GETorHEAD(req)
 
-    def _get_cached_updating_shard_ranges(
+    def _get_cached_updating_namespaces(
             self, infocache, memcache, cache_key):
         """
-        Fetch cached shard ranges from infocache and memcache.
+        Fetch cached updating namespaces of updating shard ranges from
+        infocache and memcache.
 
         :param infocache: the infocache instance.
         :param memcache: an instance of a memcache client,
             :class:`swift.common.memcached.MemcacheRing`.
         :param cache_key: the cache key for both infocache and memcache.
-        :return: a tuple of (list of cached shard ranges, cache state)
-        """
-        cached_ranges = infocache.get(cache_key)
-        if cached_ranges:
-            cache_state = 'infocache_hit'
+        :return: a tuple of (an instance of NamespaceBoundList, cache state)
+        """
+        # try get namespaces from infocache first
+        namespace_list = infocache.get(cache_key)
+        if namespace_list:
+            return namespace_list, 'infocache_hit'
+
+        # then try get them from memcache
+        if not memcache:
+            return None, 'disabled'
+        skip_chance = self.app.container_updating_shard_ranges_skip_cache
+        if skip_chance and random.random() < skip_chance:
+            return None, 'skip'
+        try:
+            namespaces = memcache.get(cache_key, raise_on_error=True)
+            cache_state = 'hit' if namespaces else 'miss'
+        except MemcacheConnectionError:
+            namespaces = None
+            cache_state = 'error'
+
+        if namespaces:
+            if six.PY2:
+                # json.loads() in memcache.get will convert json 'string' to
+                # 'unicode' with python2, here we cast 'unicode' back to 'str'
+                namespaces = [
+                    [lower.encode('utf-8'), name.encode('utf-8')]
+                    for lower, name in namespaces]
+            namespace_list = NamespaceBoundList(namespaces)
         else:
-            # try memcache
-            if memcache:
-                skip_chance = \
-                    self.app.container_updating_shard_ranges_skip_cache
-                if skip_chance and random.random() < skip_chance:
-                    cache_state = 'skip'
-                else:
-                    try:
-                        cached_ranges = memcache.get(
-                            cache_key, raise_on_error=True)
-                        cache_state = 'hit' if cached_ranges else 'miss'
-                    except MemcacheConnectionError:
-                        cache_state = 'error'
-            else:
-                cache_state = 'disabled'
-        return cached_ranges, cache_state
+            namespace_list = None
+        return namespace_list, cache_state
+
+    def _get_update_shard_caching_disabled(self, req, account, container, obj):
+        """
+        Fetch all updating shard ranges for the given root container when
+        all caching is disabled.
+
+        :param req: original Request instance.
+        :param account: account from which shard ranges should be fetched.
+        :param container: container from which shard ranges should be fetched.
+        :param obj: object getting updated.
+        :return: an instance of :class:`swift.common.utils.ShardRange`,
+            or None if the update should go back to the root
+        """
+        # legacy behavior requests container server for includes=obj
+        shard_ranges, response = self._get_shard_ranges(
+            req, account, container, states='updating', includes=obj)
+        record_cache_op_metrics(
+            self.logger, 'shard_updating', 'disabled', response)
+        # there will be only one shard range in the list if any
+        return shard_ranges[0] if shard_ranges else None
 
     def _get_update_shard(self, req, account, container, obj):
         """
@@ -327,39 +358,44 @@ class BaseObjectController(Controller):
         """
         if not self.app.recheck_updating_shard_ranges:
             # caching is disabled
-            # legacy behavior requests container server for includes=obj
-            shard_ranges, response = self._get_shard_ranges(
-                req, account, container, states='updating', includes=obj)
-            cache_state = 'disabled'
+            return self._get_update_shard_caching_disabled(
+                req, account, container, obj)
+
+        # caching is enabled, try to get from caches
+        response = None
+        cache_key = get_cache_key(account, container, shard='updating')
+        infocache = req.environ.setdefault('swift.infocache', {})
+        memcache = cache_from_env(req.environ, True)
+        cached_namespaces, cache_state = self._get_cached_updating_namespaces(
+            infocache, memcache, cache_key)
+        if cached_namespaces:
+            # found cached namespaces in either infocache or memcache
+            infocache[cache_key] = cached_namespaces
+            namespace = cached_namespaces.get_namespace(obj)
+            update_shard = ShardRange(
+                name=namespace.name, timestamp=0, lower=namespace.lower,
+                upper=namespace.upper)
         else:
-            # try to get from cache
-            response = None
-            cache_key = get_cache_key(account, container, shard='updating')
-            infocache = req.environ.setdefault('swift.infocache', {})
-            memcache = cache_from_env(req.environ, True)
-            (cached_ranges, cache_state
-             ) = self._get_cached_updating_shard_ranges(
-                infocache, memcache, cache_key)
-            if cached_ranges:
-                # found cached shard ranges in either infocache or memcache
-                infocache[cache_key] = tuple(cached_ranges)
-                shard_ranges = [ShardRange.from_dict(shard_range)
-                                for shard_range in cached_ranges]
-            else:
-                # pull full set of updating shard ranges from backend
-                shard_ranges, response = self._get_shard_ranges(
-                    req, account, container, states='updating')
-                if shard_ranges:
-                    cached_ranges = [dict(sr) for sr in shard_ranges]
-                    infocache[cache_key] = tuple(cached_ranges)
-                    if memcache:
-                        memcache.set(
-                            cache_key, cached_ranges,
-                            time=self.app.recheck_updating_shard_ranges)
-
+            # pull full set of updating shard ranges from backend
+            shard_ranges, response = self._get_shard_ranges(
+                req, account, container, states='updating')
+            if shard_ranges:
+                # only store the list of namespace lower bounds and names into
+                # infocache and memcache.
+                cached_namespaces = NamespaceBoundList.parse(
+                    shard_ranges)
+                infocache[cache_key] = cached_namespaces
+                if memcache:
+                    self.logger.info(
+                        'Caching updating shards for %s (%d shards)',
+                        cache_key, len(cached_namespaces.bounds))
+                    memcache.set(
+                        cache_key, cached_namespaces.bounds,
+                        time=self.app.recheck_updating_shard_ranges)
+            update_shard = find_namespace(obj, shard_ranges or [])
         record_cache_op_metrics(
             self.logger, 'shard_updating', cache_state, response)
-        return find_shard_range(obj, shard_ranges or [])
+        return update_shard
 
     def _get_update_target(self, req, container_info):
         # find the sharded container to which we'll send the update
```
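The two hunks above change what the proxy caches for object updates: instead of a list of full shard-range dicts, 2.32 stores only sorted (lower bound, shard name) pairs and resolves an object's shard with a binary search. A minimal sketch of that bound-list lookup using `bisect` (the class and method names below are illustrative, not the real `NamespaceBoundList` API):

```python
from bisect import bisect

class SketchBoundList:
    """Store sorted (lower_bound, shard_name) pairs and find the shard
    whose namespace contains a given object name. Shard names here are
    made up for the example."""

    def __init__(self, bounds):
        self.bounds = bounds  # e.g. [['', '.shards_a/c-0'], ['m', '.shards_a/c-1']]

    def get_name(self, obj_name):
        lowers = [lower for lower, _name in self.bounds]
        # rightmost bound <= obj_name marks the containing namespace
        pos = bisect(lowers, obj_name) - 1
        return self.bounds[pos][1] if pos >= 0 else None

bl = SketchBoundList([['', '.shards_a/c-0'], ['m', '.shards_a/c-1']])
assert bl.get_name('kiwi') == '.shards_a/c-0'
assert bl.get_name('pear') == '.shards_a/c-1'
```

Because only bounds and names are serialized, the memcache value stays compact even for containers with many shards, and each lookup is O(log n).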
```diff
@@ -975,6 +1011,7 @@ class ReplicatedObjectController(BaseObjectController):
             This method was added in the PUT method extraction change
         """
         bytes_transferred = 0
+        data_source = CooperativeIterator(data_source)
 
         def send_chunk(chunk):
             timeout_at = time.time() + self.app.node_timeout
```
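`CooperativeIterator` wraps the PUT data source so that reading from a fast client cannot monopolize the eventlet hub; it periodically yields control, replacing the hand-rolled `if nchunks % 5 == 0: sleep()` logic that a later hunk removes. A rough stand-in (the real class lives in `swift.common.utils`; this sketch only mirrors the idea):

```python
import eventlet

class SketchCooperativeIterator:
    """Yield items from a wrapped iterator, calling eventlet.sleep()
    every few items so a fast producer cannot starve other
    greenthreads. Period of 5 echoes the removed hand-rolled logic."""

    def __init__(self, iterable, period=5):
        self.wrapped = iter(iterable)
        self.period = period
        self.count = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self.count % self.period == 0:
            eventlet.sleep()  # yield control to other greenthreads
        self.count += 1
        return next(self.wrapped)
```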
```diff
@@ -1132,14 +1169,15 @@ class ECAppIter(object):
         self.mime_boundary = None
         self.learned_content_type = None
         self.stashed_iter = None
+        self.pool = ContextPool(len(internal_parts_iters))
 
     def close(self):
-        # close down the stashed iter first so the ContextPool can
-        # cleanup the frag queue feeding coroutines that may be currently
+        # close down the stashed iter and shutdown the context pool to
+        # clean up the frag queue feeding coroutines that may be currently
         # executing the internal_parts_iters.
         if self.stashed_iter:
             close_if_possible(self.stashed_iter)
-
+        self.pool.close()
         for it in self.internal_parts_iters:
             close_if_possible(it)
 
```
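Creating the `ContextPool` in `__init__` and closing it explicitly in `close()` ensures the queue-feeding greenthreads are killed even when a client disconnects mid-response, instead of relying on the `with` block in the body iterator to exit normally. Swift's `ContextPool` is a `GreenPool` that kills its still-running coroutines on exit; a sketch of that idea (illustrative, not the real implementation):

```python
from eventlet import greenpool

class SketchContextPool(greenpool.GreenPool):
    """A GreenPool usable as a context manager that kills any
    still-running coroutines when closed, so queue-feeding
    greenthreads don't leak."""

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def close(self):
        for coro in list(self.coroutines_running):
            coro.kill()
```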
```diff
@@ -1486,7 +1524,7 @@ class ECAppIter(object):
         except ChunkWriteTimeout:
             # slow client disconnect
             self.logger.exception(
-                "ChunkWriteTimeout fetching fragments for %r",
+                "ChunkWriteTimeout feeding fragments for %r",
                 quote(self.path))
         except:  # noqa
             self.logger.exception("Exception fetching fragments for %r",
@@ -1497,7 +1535,7 @@ class ECAppIter(object):
             frag_iter.close()
 
         segments_decoded = 0
-        with ContextPool(len(fragment_iters)) as pool:
+        with self.pool as pool:
             for frag_iter, queue in zip(fragment_iters, queues):
                 pool.spawn(put_fragments_in_queue, frag_iter, queue,
                            self.logger.thread_locals)
@@ -2195,8 +2233,8 @@ class ECGetResponseBucket(object):
         Close bucket's responses; they won't be used for a client response.
         """
         for getter, frag_iter in self.get_responses():
-            if getattr(getter.source, 'swift_conn', None):
-                close_swift_conn(getter.source)
+            if getter.source:
+                getter.source.close()
 
     def __str__(self):
         # return a string summarising bucket state, useful for debugging.
@@ -2286,7 +2324,8 @@ class ECGetResponseCollection(object):
         frag_sets = safe_json_loads(headers.get('X-Backend-Fragments')) or {}
         for t_frag, frag_set in frag_sets.items():
             t_frag = Timestamp(t_frag)
-            self._get_bucket(t_frag).add_alternate_nodes(get.node, frag_set)
+            self._get_bucket(t_frag).add_alternate_nodes(
+                get.source.node, frag_set)
         # If the response includes a durable timestamp then mark that bucket as
         # durable. Note that this may be a different bucket than the one this
         # response got added to, and that we may never go and get a durable
```
```diff
@@ -2439,326 +2478,125 @@ class ECGetResponseCollection(object):
         return nodes.pop(0).copy()
 
 
-def is_good_source(status):
-    """
-    Indicates whether or not the request made to the backend found
-    what it was looking for.
-
-    :param status: the response from the backend
-    :returns: True if found, False if not
-    """
-    if status == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE:
-        return True
-    return is_success(status) or is_redirection(status)
-
-
-class ECFragGetter(object):
+class ECFragGetter(GetterBase):
 
     def __init__(self, app, req, node_iter, partition, policy, path,
                  backend_headers, header_provider, logger_thread_locals,
                  logger):
-        self.app = app
-        self.req = req
-        self.node_iter = node_iter
-        self.partition = partition
-        self.path = path
-        self.backend_headers = backend_headers
+        super(ECFragGetter, self).__init__(
+            app=app, req=req, node_iter=node_iter,
+            partition=partition, policy=policy, path=path,
+            backend_headers=backend_headers, logger=logger)
         self.header_provider = header_provider
-        self.req_query_string = req.query_string
-        self.client_chunk_size = policy.fragment_size
+        self.fragment_size = policy.fragment_size
         self.skip_bytes = 0
-        self.bytes_used_from_backend = 0
-        self.source = None
         self.logger_thread_locals = logger_thread_locals
-        self.logger = logger
-
-    def fast_forward(self, num_bytes):
-        """
-        Will skip num_bytes into the current ranges.
-
-        :params num_bytes: the number of bytes that have already been read on
-                           this request. This will change the Range header
-                           so that the next req will start where it left off.
-
-        :raises HTTPRequestedRangeNotSatisfiable: if begin + num_bytes
-                                                  > end of range + 1
-        :raises RangeAlreadyComplete: if begin + num_bytes == end of range + 1
-        """
-        try:
-            req_range = Range(self.backend_headers.get('Range'))
-        except ValueError:
-            req_range = None
-
-        if req_range:
-            begin, end = req_range.ranges[0]
-            if begin is None:
-                # this is a -50 range req (last 50 bytes of file)
-                end -= num_bytes
-                if end == 0:
-                    # we sent out exactly the first range's worth of bytes, so
-                    # we're done with it
-                    raise RangeAlreadyComplete()
-
-                if end < 0:
-                    raise HTTPRequestedRangeNotSatisfiable()
-
-            else:
-                begin += num_bytes
-                if end is not None and begin == end + 1:
-                    # we sent out exactly the first range's worth of bytes, so
-                    # we're done with it
-                    raise RangeAlreadyComplete()
-
-                if end is not None and begin > end:
-                    raise HTTPRequestedRangeNotSatisfiable()
-
-            req_range.ranges = [(begin, end)] + req_range.ranges[1:]
-            self.backend_headers['Range'] = str(req_range)
-        else:
-            self.backend_headers['Range'] = 'bytes=%d-' % num_bytes
-
-        # Reset so if we need to do this more than once, we don't double-up
-        self.bytes_used_from_backend = 0
-
-    def pop_range(self):
-        """
-        Remove the first byterange from our Range header.
+        self.status = self.reason = self.body = self.source_headers = None
+        self._source_iter = None
 
-        This is used after a byterange has been satisfied; should we need to
-        resume the GET from a different
-        object server, we do not re-fetch byteranges that the client already
-        has.
+    def _get_next_response_part(self):
+        node_timeout = self.app.recoverable_node_timeout
 
-        If we have no Range header, this is a no-op.
-        """
-        if 'Range' in self.backend_headers:
+        while True:
+            # the loop here is to resume if trying to parse
+            # multipart/byteranges response raises a ChunkReadTimeout
+            # and resets the source_parts_iter
             try:
-                req_range = Range(self.backend_headers['Range'])
-            except ValueError:
-                # there's a Range header, but it's garbage, so get rid of it
-                self.backend_headers.pop('Range')
-                return
-            begin, end = req_range.ranges.pop(0)
-            if len(req_range.ranges) > 0:
-                self.backend_headers['Range'] = str(req_range)
-            else:
-                self.backend_headers.pop('Range')
-
-    def learn_size_from_content_range(self, start, end, length):
-        """
-        If client_chunk_size is set, makes sure we yield things starting on
-        chunk boundaries based on the Content-Range header in the response.
-
-        Sets our first Range header to the value learned from
-        the Content-Range header in the response; if we were given a
-        fully-specified range (e.g. "bytes=123-456"), this is a no-op.
-
-        If we were given a half-specified range (e.g. "bytes=123-" or
-        "bytes=-456"), then this changes the Range header to a
-        semantically-equivalent one *and* it lets us resume on a proper
-        boundary instead of just in the middle of a piece somewhere.
-        """
-        if length == 0:
-            return
-
-        if self.client_chunk_size:
-            self.skip_bytes = bytes_to_skip(self.client_chunk_size, start)
+                with WatchdogTimeout(self.app.watchdog, node_timeout,
+                                     ChunkReadTimeout):
+                    # If we don't have a multipart/byteranges response,
+                    # but just a 200 or a single-range 206, then this
+                    # performs no IO, and just returns source (or
+                    # raises StopIteration).
+                    # Otherwise, this call to next() performs IO when
+                    # we have a multipart/byteranges response; as it
+                    # will read the MIME boundary and part headers.
+                    start_byte, end_byte, length, headers, part = next(
+                        self.source.parts_iter)
+                return (start_byte, end_byte, length, headers, part)
+            except ChunkReadTimeout:
+                if not self._replace_source(
+                        'Trying to read next part of EC multi-part GET '
+                        '(retrying)'):
+                    raise
 
-        if 'Range' in self.backend_headers:
+    def _iter_bytes_from_response_part(self, part_file, nbytes):
+        buf = b''
+        part_file = ByteCountEnforcer(part_file, nbytes)
+        while True:
             try:
-                req_range = Range(self.backend_headers['Range'])
-                new_ranges = [(start, end)] + req_range.ranges[1:]
-            except ValueError:
-                new_ranges = [(start, end)]
-        else:
-            new_ranges = [(start, end)]
-
-        self.backend_headers['Range'] = str(
-            Range(ranges=new_ranges))
-
-    def response_parts_iter(self, req):
-        """
-        Create an iterator over a single fragment response body.
-
-        :param req: a ``swob.Request``.
-        """
-        self.source, self.node = self._dig_for_source_and_node()
-        it = None
-        if self.source:
-            it = self._get_response_parts_iter(req)
-        return it
-
-    def _get_response_parts_iter(self, req):
-        try:
-            client_chunk_size = self.client_chunk_size
-            node_timeout = self.app.recoverable_node_timeout
-
-            # This is safe; it sets up a generator but does not call next()
-            # on it, so no IO is performed.
-            parts_iter = [
-                http_response_to_document_iters(
-                    self.source, read_chunk_size=self.app.object_chunk_size)]
-
-            def get_next_doc_part():
-                while True:
-                    # the loop here is to resume if trying to parse
-                    # multipart/byteranges response raises a ChunkReadTimeout
-                    # and resets the parts_iter
-                    try:
-                        with WatchdogTimeout(self.app.watchdog, node_timeout,
-                                             ChunkReadTimeout):
-                            # If we don't have a multipart/byteranges response,
-                            # but just a 200 or a single-range 206, then this
-                            # performs no IO, and just returns source (or
-                            # raises StopIteration).
-                            # Otherwise, this call to next() performs IO when
-                            # we have a multipart/byteranges response; as it
-                            # will read the MIME boundary and part headers.
-                            start_byte, end_byte, length, headers, part = next(
-                                parts_iter[0])
-                        return (start_byte, end_byte, length, headers, part)
-                    except ChunkReadTimeout:
-                        new_source, new_node = self._dig_for_source_and_node()
-                        if not new_source:
-                            raise
-                        self.app.error_occurred(
-                            self.node, 'Trying to read next part of '
-                            'EC multi-part GET (retrying)')
-                        # Close-out the connection as best as possible.
-                        if getattr(self.source, 'swift_conn', None):
-                            close_swift_conn(self.source)
-                        self.source = new_source
-                        self.node = new_node
-                        # This is safe; it sets up a generator but does
-                        # not call next() on it, so no IO is performed.
-                        parts_iter[0] = http_response_to_document_iters(
-                            new_source,
-                            read_chunk_size=self.app.object_chunk_size)
-
-            def iter_bytes_from_response_part(part_file, nbytes):
-                nchunks = 0
+                with WatchdogTimeout(self.app.watchdog,
+                                     self.app.recoverable_node_timeout,
+                                     ChunkReadTimeout):
+                    chunk = part_file.read(self.app.object_chunk_size)
+                    # NB: this append must be *inside* the context
+                    # manager for test.unit.SlowBody to do its thing
+                    buf += chunk
+                    if nbytes is not None:
+                        nbytes -= len(chunk)
+            except (ChunkReadTimeout, ShortReadError):
+                exc_type, exc_value, exc_traceback = sys.exc_info()
+                try:
+                    self.fast_forward(self.bytes_used_from_backend)
+                except (HTTPException, ValueError):
+                    self.logger.exception('Unable to fast forward')
+                    six.reraise(exc_type, exc_value, exc_traceback)
+                except RangeAlreadyComplete:
+                    break
                 buf = b''
-                part_file = ByteCountEnforcer(part_file, nbytes)
-                while True:
+                if self._replace_source(
+                        'Trying to read EC fragment during GET (retrying)'):
                     try:
-                        with WatchdogTimeout(self.app.watchdog, node_timeout,
-                                             ChunkReadTimeout):
-                            chunk = part_file.read(self.app.object_chunk_size)
-                            nchunks += 1
-                            # NB: this append must be *inside* the context
-                            # manager for test.unit.SlowBody to do its thing
-                            buf += chunk
-                            if nbytes is not None:
-                                nbytes -= len(chunk)
-                    except (ChunkReadTimeout, ShortReadError):
-                        exc_type, exc_value, exc_traceback = sys.exc_info()
-                        try:
-                            self.fast_forward(self.bytes_used_from_backend)
-                        except (HTTPException, ValueError):
-                            self.logger.exception('Unable to fast forward')
-                            six.reraise(exc_type, exc_value, exc_traceback)
-                        except RangeAlreadyComplete:
-                            break
-                        buf = b''
-                        old_node = self.node
-                        new_source, new_node = self._dig_for_source_and_node()
-                        if new_source:
-                            self.app.error_occurred(
-                                old_node, 'Trying to read EC fragment '
-                                'during GET (retrying)')
-                            # Close-out the connection as best as possible.
-                            if getattr(self.source, 'swift_conn', None):
-                                close_swift_conn(self.source)
-                            self.source = new_source
-                            self.node = new_node
-                            # This is safe; it just sets up a generator but
-                            # does not call next() on it, so no IO is
-                            # performed.
-                            parts_iter[0] = http_response_to_document_iters(
-                                new_source,
-                                read_chunk_size=self.app.object_chunk_size)
-                            try:
-                                _junk, _junk, _junk, _junk, part_file = \
-                                    get_next_doc_part()
-                            except StopIteration:
-                                # it's not clear to me how to make
-                                # get_next_doc_part raise StopIteration for the
-                                # first doc part of a new request
-                                six.reraise(exc_type, exc_value, exc_traceback)
-                            part_file = ByteCountEnforcer(part_file, nbytes)
-                        else:
-                            six.reraise(exc_type, exc_value, exc_traceback)
+                        _junk, _junk, _junk, _junk, part_file = \
+                            self._get_next_response_part()
+                    except StopIteration:
+                        # it's not clear to me how to make
+                        # _get_next_response_part raise StopIteration for the
+                        # first doc part of a new request
+                        six.reraise(exc_type, exc_value, exc_traceback)
+                    part_file = ByteCountEnforcer(part_file, nbytes)
+                else:
+                    six.reraise(exc_type, exc_value, exc_traceback)
+            else:
+                if buf and self.skip_bytes:
+                    if self.skip_bytes < len(buf):
+                        buf = buf[self.skip_bytes:]
+                        self.bytes_used_from_backend += self.skip_bytes
+                        self.skip_bytes = 0
                     else:
-                        if buf and self.skip_bytes:
-                            if self.skip_bytes < len(buf):
-                                buf = buf[self.skip_bytes:]
-                                self.bytes_used_from_backend += self.skip_bytes
-                                self.skip_bytes = 0
-                            else:
-                                self.skip_bytes -= len(buf)
-                                self.bytes_used_from_backend += len(buf)
-                                buf = b''
-
-                        if not chunk:
-                            if buf:
-                                with WatchdogTimeout(self.app.watchdog,
-                                                     self.app.client_timeout,
-                                                     ChunkWriteTimeout):
-                                    self.bytes_used_from_backend += len(buf)
-                                    yield buf
-                                buf = b''
-                            break
-
-                        if client_chunk_size is not None:
-                            while len(buf) >= client_chunk_size:
-                                client_chunk = buf[:client_chunk_size]
-                                buf = buf[client_chunk_size:]
-                                with WatchdogTimeout(self.app.watchdog,
-                                                     self.app.client_timeout,
-                                                     ChunkWriteTimeout):
-                                    self.bytes_used_from_backend += \
-                                        len(client_chunk)
-                                    yield client_chunk
-                        else:
-                            with WatchdogTimeout(self.app.watchdog,
-                                                 self.app.client_timeout,
-                                                 ChunkWriteTimeout):
-                                self.bytes_used_from_backend += len(buf)
-                                yield buf
-                            buf = b''
-
-                        # This is for fairness; if the network is outpacing
-                        # the CPU, we'll always be able to read and write
-                        # data without encountering an EWOULDBLOCK, and so
-                        # eventlet will not switch greenthreads on its own.
-                        # We do it manually so that clients don't starve.
-                        #
-                        # The number 5 here was chosen by making stuff up.
-                        # It's not every single chunk, but it's not too big
-                        # either, so it seemed like it would probably be an
-                        # okay choice.
-                        #
-                        # Note that we may trampoline to other greenthreads
-                        # more often than once every 5 chunks, depending on
-                        # how blocking our network IO is; the explicit sleep
-                        # here simply provides a lower bound on the rate of
-                        # trampolining.
-                        if nchunks % 5 == 0:
-                            sleep()
+                        self.skip_bytes -= len(buf)
+                        self.bytes_used_from_backend += len(buf)
+                        buf = b''
 
+                while buf and (len(buf) >= self.fragment_size or not chunk):
+                    client_chunk = buf[:self.fragment_size]
+                    buf = buf[self.fragment_size:]
+                    with WatchdogTimeout(self.app.watchdog,
+                                         self.app.client_timeout,
+                                         ChunkWriteTimeout):
+                        self.bytes_used_from_backend += len(client_chunk)
+                        yield client_chunk
+
+                if not chunk:
+                    break
+
+    def _iter_parts_from_response(self, req):
+        try:
             part_iter = None
             try:
                 while True:
                     try:
                         start_byte, end_byte, length, headers, part = \
-                            get_next_doc_part()
+                            self._get_next_response_part()
                     except StopIteration:
                         # it seems this is the only way out of the loop; not
                         # sure why the req.environ update is always needed
                         req.environ['swift.non_client_disconnect'] = True
                         break
-                    # learn_size_from_content_range() sets
-                    # self.skip_bytes
+                    # skip_bytes compensates for the backend request range
+                    # expansion done in _convert_range
+                    self.skip_bytes = bytes_to_skip(
+                        self.fragment_size, start_byte)
                     self.learn_size_from_content_range(
                         start_byte, end_byte, length)
                     self.bytes_used_from_backend = 0
```
```diff
@@ -2768,7 +2606,8 @@ class ECFragGetter(object):
                                   if (end_byte is not None
                                       and start_byte is not None)
                                   else None)
-                part_iter = iter_bytes_from_response_part(part, byte_count)
+                part_iter = CooperativeIterator(
+                    self._iter_bytes_from_response_part(part, byte_count))
                 yield {'start_byte': start_byte, 'end_byte': end_byte,
                        'entity_length': length, 'headers': headers,
                        'part_iter': part_iter}
```
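The `bytes_to_skip` call introduced above compensates for backend range expansion: a resumed range may start mid-fragment, but EC decoding must consume whole fragments, so the getter discards bytes up to the next fragment boundary. A worked sketch of that arithmetic (the helper below assumes the same semantics as the imported `bytes_to_skip`):

```python
def bytes_to_skip_sketch(record_size, range_start):
    # bytes to discard so that consumption resumes on a record
    # (fragment) boundary; 0 if range_start is already aligned
    return (record_size - (range_start % record_size)) % record_size

# e.g. 4096-byte fragments, a backend range resuming at offset 10000:
assert bytes_to_skip_sketch(4096, 10000) == 2288  # 10000 + 2288 == 3 * 4096
assert bytes_to_skip_sketch(4096, 8192) == 0      # already on a boundary
```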
```diff
@@ -2778,7 +2617,7 @@ class ECFragGetter(object):
                     part_iter.close()
 
         except ChunkReadTimeout:
-            self.app.exception_occurred(self.node, 'Object',
+            self.app.exception_occurred(self.source.node, 'Object',
                                         'Trying to read during GET')
             raise
         except ChunkWriteTimeout:
@@ -2804,9 +2643,7 @@ class ECFragGetter(object):
             self.logger.exception('Trying to send to client')
             raise
         finally:
-            # Close-out the connection as best as possible.
-            if getattr(self.source, 'swift_conn', None):
-                close_swift_conn(self.source)
+            self.source.close()
 
     @property
     def last_status(self):
@@ -2820,6 +2657,8 @@ class ECFragGetter(object):
         return HeaderKeyDict()
 
     def _make_node_request(self, node, node_timeout):
+        # make a backend request; return a response if it has an acceptable
+        # status code, otherwise None
        self.logger.thread_locals = self.logger_thread_locals
         req_headers = dict(self.backend_headers)
         ip, port = get_ip_port(node, req_headers)
@@ -2831,7 +2670,7 @@ class ECFragGetter(object):
                 ip, port, node['device'],
                 self.partition, 'GET', self.path,
                 headers=req_headers,
-                query_string=self.req_query_string)
+                query_string=self.req.query_string)
         self.app.set_node_timing(node, time.time() - start_node_timing)
 
         with Timeout(node_timeout):
```
```diff
@@ -2863,7 +2702,7 @@ class ECFragGetter(object):
             self.status = possible_source.status
             self.reason = possible_source.reason
             self.source_headers = possible_source.getheaders()
-            if is_good_source(possible_source.status):
+            if is_good_source(possible_source.status, server_type='Object'):
                 self.body = None
                 return possible_source
             else:
@@ -2878,38 +2717,62 @@ class ECFragGetter(object):
             return None
 
     @property
-    def source_and_node_iter(self):
-        if self._source_and_node_iter is None:
-            self._source_and_node_iter = self._source_and_node_gen()
-        return self._source_and_node_iter
+    def source_iter(self):
+        """
+        An iterator over responses to backend fragment GETs. Yields an
+        instance of ``GetterSource`` if a response is good, otherwise ``None``.
+        """
+        if self._source_iter is None:
+            self._source_iter = self._source_gen()
+        return self._source_iter
 
-    def _source_and_node_gen(self):
+    def _source_gen(self):
         self.status = self.reason = self.body = self.source_headers = None
         for node in self.node_iter:
             source = self._make_node_request(
                 node, self.app.recoverable_node_timeout)
 
             if source:
-                self.node = node
-                yield source, node
+                yield GetterSource(self.app, source, node)
             else:
-                yield None, None
+                yield None
         self.status = self.reason = self.body = self.source_headers = None
 
-    def _dig_for_source_and_node(self):
+    def _find_source(self):
         # capture last used etag before continuation
         used_etag = self.last_headers.get('X-Object-Sysmeta-EC-ETag')
-        for source, node in self.source_and_node_iter:
+        for source in self.source_iter:
             if not source:
                 # _make_node_request only returns good sources
                 continue
-            if source.getheader('X-Object-Sysmeta-EC-ETag') != used_etag:
+            if source.resp.getheader('X-Object-Sysmeta-EC-ETag') != used_etag:
                 self.logger.warning(
                     'Skipping source (etag mismatch: got %s, expected %s)',
-                    source.getheader('X-Object-Sysmeta-EC-ETag'), used_etag)
+                    source.resp.getheader('X-Object-Sysmeta-EC-ETag'),
+                    used_etag)
             else:
-                return source, node
-        return None, None
+                self.source = source
+                return True
+        return False
+
+    def response_parts_iter(self, req):
+        """
+        Create an iterator over a single fragment response body.
+
+        :param req: a ``swob.Request``.
+        :return: an interator that yields chunks of bytes from a fragment
+            response body.
+        """
+        it = None
+        try:
+            source = next(self.source_iter)
+        except StopIteration:
+            pass
+        else:
+            if source:
+                self.source = source
+                it = self._iter_parts_from_response(req)
+        return it
 
 
 @ObjectControllerRouter.register(EC_POLICY)
```
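The refactor above replaces the `(source, node)` tuples threaded through `_dig_for_source_and_node` with `GetterSource` objects that carry the backend response and the node together and own connection cleanup, so `self.source.close()` can replace the repeated `close_swift_conn` dance. A minimal sketch of such a wrapper, assuming the response may expose a `swift_conn` as in the removed code (the real class lives in `swift.proxy.controllers.base`):

```python
class SketchGetterSource:
    """Illustrative pairing of a backend response with the node that
    served it, centralizing connection cleanup."""

    def __init__(self, app, resp, node):
        self.app = app
        self.resp = resp   # backend HTTP response from the object server
        self.node = node   # ring device dict that served the response

    def close(self):
        # close the underlying connection as best as possible, mirroring
        # what the removed close_swift_conn(...) calls used to do
        swift_conn = getattr(self.resp, 'swift_conn', None)
        if swift_conn is not None:
            try:
                swift_conn.close()
            except Exception:
                pass
```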
```diff
@@ -3070,9 +2933,9 @@ class ECObjectController(BaseObjectController):
                     break
                 requests_available = extra_requests < max_extra_requests and (
                     node_iter.nodes_left > 0 or buckets.has_alternate_node())
-                bad_resp = not is_good_source(get.last_status)
                 if requests_available and (
-                        buckets.shortfall > pile._pending or bad_resp):
+                        buckets.shortfall > pile._pending or
+                        not is_good_source(get.last_status, self.server_type)):
                     extra_requests += 1
                     pile.spawn(self._fragment_GET_request, req, safe_iter,
                                partition, policy, buckets.get_extra_headers,
@@ -3343,6 +3206,7 @@ class ECObjectController(BaseObjectController):
             # same part nodes index as the primaries they are covering
             putter_to_frag_index = self._determine_chunk_destinations(
                 putters, policy)
+            data_source = CooperativeIterator(data_source)
 
             while True:
                 with WatchdogTimeout(self.app.watchdog,
```