swift-2.32.1-py2.py3-none-any.whl → swift-2.33.1-py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. swift/account/server.py +1 -11
  2. swift/cli/info.py +28 -1
  3. swift-2.32.1.data/scripts/swift-recon-cron → swift/cli/recon_cron.py +4 -13
  4. swift/cli/reload.py +141 -0
  5. swift/common/daemon.py +12 -2
  6. swift/common/db.py +12 -8
  7. swift/common/http_protocol.py +76 -3
  8. swift/common/manager.py +18 -5
  9. swift/common/memcached.py +18 -12
  10. swift/common/middleware/proxy_logging.py +35 -27
  11. swift/common/middleware/s3api/acl_handlers.py +1 -1
  12. swift/common/middleware/s3api/controllers/__init__.py +3 -0
  13. swift/common/middleware/s3api/controllers/acl.py +3 -2
  14. swift/common/middleware/s3api/controllers/logging.py +2 -2
  15. swift/common/middleware/s3api/controllers/multi_upload.py +30 -6
  16. swift/common/middleware/s3api/controllers/object_lock.py +44 -0
  17. swift/common/middleware/s3api/s3api.py +4 -0
  18. swift/common/middleware/s3api/s3request.py +19 -12
  19. swift/common/middleware/s3api/s3response.py +13 -2
  20. swift/common/middleware/s3api/utils.py +1 -1
  21. swift/common/middleware/slo.py +395 -298
  22. swift/common/middleware/staticweb.py +45 -14
  23. swift/common/middleware/tempurl.py +132 -91
  24. swift/common/request_helpers.py +32 -8
  25. swift/common/storage_policy.py +1 -1
  26. swift/common/swob.py +5 -2
  27. swift/common/utils/__init__.py +230 -135
  28. swift/common/utils/timestamp.py +23 -2
  29. swift/common/wsgi.py +8 -0
  30. swift/container/backend.py +126 -21
  31. swift/container/replicator.py +42 -6
  32. swift/container/server.py +264 -145
  33. swift/container/sharder.py +50 -30
  34. swift/container/updater.py +1 -0
  35. swift/obj/auditor.py +2 -1
  36. swift/obj/diskfile.py +55 -19
  37. swift/obj/expirer.py +1 -13
  38. swift/obj/mem_diskfile.py +2 -1
  39. swift/obj/mem_server.py +1 -0
  40. swift/obj/replicator.py +2 -2
  41. swift/obj/server.py +12 -23
  42. swift/obj/updater.py +1 -0
  43. swift/obj/watchers/dark_data.py +72 -34
  44. swift/proxy/controllers/account.py +3 -2
  45. swift/proxy/controllers/base.py +217 -127
  46. swift/proxy/controllers/container.py +274 -289
  47. swift/proxy/controllers/obj.py +98 -141
  48. swift/proxy/server.py +2 -12
  49. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-info +3 -0
  50. swift-2.33.1.data/scripts/swift-recon-cron +24 -0
  51. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/AUTHORS +3 -1
  52. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/METADATA +4 -3
  53. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/RECORD +94 -91
  54. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/WHEEL +1 -1
  55. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/entry_points.txt +1 -0
  56. swift-2.33.1.dist-info/pbr.json +1 -0
  57. swift-2.32.1.dist-info/pbr.json +0 -1
  58. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-audit +0 -0
  59. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-auditor +0 -0
  60. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-info +0 -0
  61. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-reaper +0 -0
  62. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-replicator +0 -0
  63. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-server +0 -0
  64. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-config +0 -0
  65. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-auditor +0 -0
  66. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-reconciler +0 -0
  67. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-replicator +0 -0
  68. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-server +0 -0
  69. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-sharder +0 -0
  70. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-sync +0 -0
  71. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-updater +0 -0
  72. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-dispersion-populate +0 -0
  73. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-dispersion-report +0 -0
  74. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-drive-audit +0 -0
  75. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-form-signature +0 -0
  76. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-get-nodes +0 -0
  77. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-init +0 -0
  78. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-auditor +0 -0
  79. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-expirer +0 -0
  80. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-info +0 -0
  81. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-reconstructor +0 -0
  82. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-relinker +0 -0
  83. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-replicator +0 -0
  84. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-server +0 -0
  85. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-updater +0 -0
  86. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-oldies +0 -0
  87. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-orphans +0 -0
  88. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-proxy-server +0 -0
  89. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-recon +0 -0
  90. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-reconciler-enqueue +0 -0
  91. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-ring-builder +0 -0
  92. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-ring-builder-analyzer +0 -0
  93. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-ring-composer +0 -0
  94. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/LICENSE +0 -0
  95. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/top_level.txt +0 -0
swift/proxy/controllers/base.py
@@ -39,12 +39,13 @@ from sys import exc_info
 from eventlet.timeout import Timeout
 import six
 
+from swift.common.memcached import MemcacheConnectionError
 from swift.common.wsgi import make_pre_authed_env, make_pre_authed_request
 from swift.common.utils import Timestamp, WatchdogTimeout, config_true_value, \
     public, split_path, list_from_csv, GreenthreadSafeIterator, \
     GreenAsyncPile, quorum_size, parse_content_type, drain_and_close, \
-    document_iters_to_http_response_body, ShardRange, cache_from_env, \
-    MetricsPrefixLoggerAdapter, CooperativeIterator
+    document_iters_to_http_response_body, cache_from_env, \
+    CooperativeIterator, NamespaceBoundList, Namespace
 from swift.common.bufferedhttp import http_connect
 from swift.common import constraints
 from swift.common.exceptions import ChunkReadTimeout, ChunkWriteTimeout, \
@@ -423,9 +424,9 @@ def _record_ac_info_cache_metrics(
         logger = None
     else:
         logger = proxy_app.logger
-    op_type = 'container.info' if container else 'account.info'
+    server_type = 'container' if container else 'account'
     if logger:
-        record_cache_op_metrics(logger, op_type, cache_state, resp)
+        record_cache_op_metrics(logger, server_type, 'info', cache_state, resp)
 
 
 def get_container_info(env, app, swift_source=None, cache_only=False):
@@ -479,8 +480,7 @@ def get_container_info(env, app, swift_source=None, cache_only=False):
         # account is successful whether the account actually has .db files
         # on disk or not.
         is_autocreate_account = account.startswith(
-            getattr(proxy_app, 'auto_create_account_prefix',
-                    constraints.AUTO_CREATE_ACCOUNT_PREFIX))
+            constraints.AUTO_CREATE_ACCOUNT_PREFIX)
         if not is_autocreate_account:
             account_info = get_account_info(env, logged_app, swift_source)
             if not account_info or not is_success(account_info['status']):
@@ -774,11 +774,12 @@ def _get_info_from_infocache(env, account, container=None):
 
 
 def record_cache_op_metrics(
-        logger, op_type, cache_state, resp=None):
+        logger, server_type, op_type, cache_state, resp=None):
     """
     Record a single cache operation into its corresponding metrics.
 
     :param logger: the metrics logger
+    :param server_type: 'account' or 'container'
     :param op_type: the name of the operation type, includes 'shard_listing',
         'shard_updating', and etc.
     :param cache_state: the state of this cache operation. When it's
@@ -787,21 +788,23 @@ def record_cache_op_metrics(
         which will make to backend, expect a valid 'resp'.
     :param resp: the response from backend for all cases except cache hits.
     """
+    server_type = server_type.lower()
     if cache_state == 'infocache_hit':
-        logger.increment('%s.infocache.hit' % op_type)
+        logger.increment('%s.%s.infocache.hit' % (server_type, op_type))
     elif cache_state == 'hit':
         # memcache hits.
-        logger.increment('%s.cache.hit' % op_type)
+        logger.increment('%s.%s.cache.hit' % (server_type, op_type))
     else:
         # the cases of cache_state is memcache miss, error, skip, force_skip
         # or disabled.
         if resp:
-            logger.increment(
-                '%s.cache.%s.%d' % (op_type, cache_state, resp.status_int))
+            logger.increment('%s.%s.cache.%s.%d' % (
+                server_type, op_type, cache_state, resp.status_int))
         else:
             # In some situation, we choose not to lookup backend after cache
             # miss.
-            logger.increment('%s.cache.%s' % (op_type, cache_state))
+            logger.increment('%s.%s.cache.%s' % (
+                server_type, op_type, cache_state))
 
 
 def _get_info_from_memcache(app, env, account, container=None):
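
Note: with this change the cache-lookup metrics carry the server type in the metric name itself; the MetricsPrefixLoggerAdapter that 2.32.1 used for this purpose is removed elsewhere in this diff. A minimal sketch of the counter names now emitted, using a stand-in logger (DemoLogger is illustrative, not part of Swift):

    from swift.proxy.controllers.base import record_cache_op_metrics

    class DemoLogger(object):
        # stands in for Swift's statsd-enabled logger
        def increment(self, name):
            print(name)

    logger = DemoLogger()
    record_cache_op_metrics(logger, 'container', 'info', 'hit')
    # -> container.info.cache.hit
    record_cache_op_metrics(logger, 'container', 'shard_listing', 'miss')
    # -> container.shard_listing.cache.miss
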
@@ -886,6 +889,77 @@ def _get_info_from_caches(app, env, account, container=None):
     return info, cache_state
 
 
+def get_namespaces_from_cache(req, cache_key, skip_chance):
+    """
+    Get cached namespaces from infocache or memcache.
+
+    :param req: a :class:`swift.common.swob.Request` object.
+    :param cache_key: the cache key for both infocache and memcache.
+    :param skip_chance: the probability of skipping the memcache look-up.
+    :return: a tuple of (value, cache state). Value is an instance of
+        :class:`swift.common.utils.NamespaceBoundList` if a non-empty list is
+        found in memcache. Otherwise value is ``None``, for example if memcache
+        look-up was skipped, or no value was found, or an empty list was found.
+    """
+    # try get namespaces from infocache first
+    infocache = req.environ.setdefault('swift.infocache', {})
+    ns_bound_list = infocache.get(cache_key)
+    if ns_bound_list:
+        return ns_bound_list, 'infocache_hit'
+
+    # then try get them from memcache
+    memcache = cache_from_env(req.environ, True)
+    if not memcache:
+        return None, 'disabled'
+    if skip_chance and random.random() < skip_chance:
+        return None, 'skip'
+    try:
+        bounds = memcache.get(cache_key, raise_on_error=True)
+        cache_state = 'hit' if bounds else 'miss'
+    except MemcacheConnectionError:
+        bounds = None
+        cache_state = 'error'
+
+    if bounds:
+        if six.PY2:
+            # json.loads() in memcache.get will convert json 'string' to
+            # 'unicode' with python2, here we cast 'unicode' back to 'str'
+            bounds = [
+                [lower.encode('utf-8'), name.encode('utf-8')]
+                for lower, name in bounds]
+        ns_bound_list = NamespaceBoundList(bounds)
+        infocache[cache_key] = ns_bound_list
+    else:
+        ns_bound_list = None
+    return ns_bound_list, cache_state
+
+
+def set_namespaces_in_cache(req, cache_key, ns_bound_list, time):
+    """
+    Set a list of namespace bounds in infocache and memcache.
+
+    :param req: a :class:`swift.common.swob.Request` object.
+    :param cache_key: the cache key for both infocache and memcache.
+    :param ns_bound_list: a :class:`swift.common.utils.NamespaceBoundList`.
+    :param time: how long the namespaces should remain in memcache.
+    :return: the cache_state.
+    """
+    infocache = req.environ.setdefault('swift.infocache', {})
+    infocache[cache_key] = ns_bound_list
+    memcache = cache_from_env(req.environ, True)
+    if memcache and ns_bound_list:
+        try:
+            memcache.set(cache_key, ns_bound_list.bounds, time=time,
+                         raise_on_error=True)
+        except MemcacheConnectionError:
+            cache_state = 'set_error'
+        else:
+            cache_state = 'set'
+    else:
+        cache_state = 'disabled'
+    return cache_state
+
+
 def _prepare_pre_auth_info_request(env, path, swift_source):
     """
     Prepares a pre authed request to obtain info using a HEAD.
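
Note: these two new helpers centralize how the proxy caches shard namespaces, as a NamespaceBoundList, in both the per-request infocache and memcache. A hedged usage sketch, assuming a Swift install; the cache key and bounds values below are illustrative only:

    from swift.common.swob import Request
    from swift.common.utils import NamespaceBoundList
    from swift.proxy.controllers.base import (
        get_namespaces_from_cache, set_namespaces_in_cache)

    req = Request.blank('/v1/AUTH_test/c/o')
    cache_key = 'shard-updating-v2/AUTH_test/c'  # illustrative key
    ns_list, state = get_namespaces_from_cache(req, cache_key, skip_chance=0.1)
    if ns_list is None:
        # e.g. rebuild from a backend listing, then cache for ten minutes;
        # bounds are [lower, name] pairs, as stored in memcache
        ns_list = NamespaceBoundList([['', '.shards_AUTH_test/c_0']])
        set_namespaces_in_cache(req, cache_key, ns_list, time=600)
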
@@ -1065,6 +1139,14 @@ class ByteCountEnforcer(object):
 
 
 class GetterSource(object):
+    """
+    Encapsulates properties of a source from which a GET response is read.
+
+    :param app: a proxy app.
+    :param resp: an instance of ``HTTPResponse``.
+    :param node: a dict describing the node from which the response was
+        returned.
+    """
     __slots__ = ('app', 'resp', 'node', '_parts_iter')
 
     def __init__(self, app, resp, node):
@@ -1101,8 +1183,26 @@ class GetterSource(object):
 
 
 class GetterBase(object):
+    """
+    This base class provides helper methods for handling GET requests to
+    backend servers.
+
+    :param app: a proxy app.
+    :param req: an instance of ``swob.Request``.
+    :param node_iter: an iterator yielding nodes.
+    :param partition: partition.
+    :param policy: the policy instance, or None if Account or Container.
+    :param path: path for the request.
+    :param backend_headers: a dict of headers to be sent with backend requests.
+    :param node_timeout: the timeout value for backend requests.
+    :param resource_type: a string description of the type of resource being
+        accessed; ``resource type`` is used in logs and isn't necessarily the
+        server type.
+    :param logger: a logger instance.
+    """
     def __init__(self, app, req, node_iter, partition, policy,
-                 path, backend_headers, logger=None):
+                 path, backend_headers, node_timeout, resource_type,
+                 logger=None):
         self.app = app
         self.req = req
         self.node_iter = node_iter
@@ -1110,6 +1210,9 @@ class GetterBase(object):
         self.policy = policy
         self.path = path
         self.backend_headers = backend_headers
+        # resource type is used in logs and isn't necessarily the server type
+        self.resource_type = resource_type
+        self.node_timeout = node_timeout
         self.logger = logger or app.logger
         self.bytes_used_from_backend = 0
         self.source = None
@@ -1122,18 +1225,44 @@ class GetterBase(object):
         :return: ``True`` if ``self.source`` has been updated, ``False``
             otherwise.
         """
-        # Subclasses must implement this method
+        # Subclasses must implement this method, but _replace_source should be
+        # called to get a source installed
         raise NotImplementedError()
 
-    def _replace_source(self, err_msg):
-        # _find_source can modify self.source so stash current source
-        old_source = self.source
-        if not self._find_source():
-            return False
+    def _replace_source(self, err_msg=''):
+        if self.source:
+            self.app.error_occurred(self.source.node, err_msg)
+            self.source.close()
+        return self._find_source()
 
-        self.app.error_occurred(old_source.node, err_msg)
-        old_source.close()
-        return True
+    def _get_next_response_part(self):
+        # return the next part of the response body; there may only be one part
+        # unless it's a multipart/byteranges response
+        while True:
+            # the loop here is to resume if trying to parse
+            # multipart/byteranges response raises a ChunkReadTimeout
+            # and resets the source_parts_iter
+            try:
+                with WatchdogTimeout(self.app.watchdog, self.node_timeout,
+                                     ChunkReadTimeout):
+                    # If we don't have a multipart/byteranges response,
+                    # but just a 200 or a single-range 206, then this
+                    # performs no IO, and either just returns source or
+                    # raises StopIteration.
+                    # Otherwise, this call to next() performs IO when
+                    # we have a multipart/byteranges response, as it
+                    # will read the MIME boundary and part headers. In this
+                    # case, ChunkReadTimeout may also be raised.
+                    # If StopIteration is raised, it escapes and is
+                    # handled elsewhere.
+                    start_byte, end_byte, length, headers, part = next(
+                        self.source.parts_iter)
+                    return (start_byte, end_byte, length, headers, part)
+            except ChunkReadTimeout:
+                if not self._replace_source(
+                        'Trying to read next part of %s multi-part GET '
+                        '(retrying)' % self.resource_type):
+                    raise
 
     def fast_forward(self, num_bytes):
         """
@@ -1240,32 +1369,43 @@ class GetterBase(object):
 
 
 class GetOrHeadHandler(GetterBase):
+    """
+    Handles GET requests to backend servers.
+
+    :param app: a proxy app.
+    :param req: an instance of ``swob.Request``.
+    :param server_type: server type used in logging
+    :param node_iter: an iterator yielding nodes.
+    :param partition: partition.
+    :param path: path for the request.
+    :param backend_headers: a dict of headers to be sent with backend requests.
+    :param concurrency: number of requests to run concurrently.
+    :param policy: the policy instance, or None if Account or Container.
+    :param logger: a logger instance.
+    """
     def __init__(self, app, req, server_type, node_iter, partition, path,
-                 backend_headers, concurrency=1, policy=None,
-                 newest=None, logger=None):
+                 backend_headers, concurrency=1, policy=None, logger=None):
+        newest = config_true_value(req.headers.get('x-newest', 'f'))
+        if server_type == 'Object' and not newest:
+            node_timeout = app.recoverable_node_timeout
+        else:
+            node_timeout = app.node_timeout
         super(GetOrHeadHandler, self).__init__(
-            app=app, req=req, node_iter=node_iter,
-            partition=partition, policy=policy, path=path,
-            backend_headers=backend_headers, logger=logger)
+            app=app, req=req, node_iter=node_iter, partition=partition,
+            policy=policy, path=path, backend_headers=backend_headers,
+            node_timeout=node_timeout, resource_type=server_type.lower(),
+            logger=logger)
+        self.newest = newest
         self.server_type = server_type
         self.used_nodes = []
         self.used_source_etag = None
         self.concurrency = concurrency
         self.latest_404_timestamp = Timestamp(0)
-        if self.server_type == 'Object':
-            self.node_timeout = self.app.recoverable_node_timeout
-        else:
-            self.node_timeout = self.app.node_timeout
         policy_options = self.app.get_policy_options(self.policy)
         self.rebalance_missing_suppression_count = min(
             policy_options.rebalance_missing_suppression_count,
             node_iter.num_primary_nodes - 1)
 
-        if newest is None:
-            self.newest = config_true_value(req.headers.get('x-newest', 'f'))
-        else:
-            self.newest = newest
-
         # populated when finding source
         self.statuses = []
         self.reasons = []
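
Note: GetOrHeadHandler no longer accepts a newest argument; it derives it from the request's X-Newest header and fixes the backend timeout up front (recoverable_node_timeout for ordinary object GETs, node_timeout otherwise). A small sketch of that derivation, assuming a Swift install:

    from swift.common.swob import Request
    from swift.common.utils import config_true_value

    req = Request.blank('/v1/AUTH_test/c/o', headers={'X-Newest': 'true'})
    newest = config_true_value(req.headers.get('x-newest', 'f'))
    print(newest)  # True, so an object GET would use the full node_timeout
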
@@ -1276,31 +1416,6 @@ class GetOrHeadHandler(GetterBase):
         # populated from response headers
         self.start_byte = self.end_byte = self.length = None
 
-    def _get_next_response_part(self):
-        # return the next part of the response body; there may only be one part
-        # unless it's a multipart/byteranges response
-        while True:
-            try:
-                # This call to next() performs IO when we have a
-                # multipart/byteranges response; it reads the MIME
-                # boundary and part headers.
-                #
-                # If we don't have a multipart/byteranges response,
-                # but just a 200 or a single-range 206, then this
-                # performs no IO, and either just returns source or
-                # raises StopIteration.
-                with WatchdogTimeout(self.app.watchdog, self.node_timeout,
-                                     ChunkReadTimeout):
-                    # if StopIteration is raised, it escapes and is
-                    # handled elsewhere
-                    start_byte, end_byte, length, headers, part = next(
-                        self.source.parts_iter)
-                    return (start_byte, end_byte, length, headers, part)
-            except ChunkReadTimeout:
-                if not self._replace_source(
-                        'Trying to read object during GET (retrying)'):
-                    raise StopIteration()
-
     def _iter_bytes_from_response_part(self, part_file, nbytes):
         # yield chunks of bytes from a single response part; if an error
         # occurs, try to resume yielding bytes from a different source
@@ -1345,7 +1460,7 @@ class GetOrHeadHandler(GetterBase):
                     self.bytes_used_from_backend += len(chunk)
                     yield chunk
 
-    def _iter_parts_from_response(self, req):
+    def _iter_parts_from_response(self):
         # iterate over potentially multiple response body parts; for each
         # part, yield an iterator over the part's bytes
         try:
@@ -1370,20 +1485,17 @@ class GetOrHeadHandler(GetterBase):
                            'part_iter': part_iter}
                     self.pop_range()
             except StopIteration:
-                req.environ['swift.non_client_disconnect'] = True
+                self.req.environ['swift.non_client_disconnect'] = True
             finally:
                 if part_iter:
                     part_iter.close()
 
-        except ChunkReadTimeout:
-            self.app.exception_occurred(self.source.node, 'Object',
-                                        'Trying to read during GET')
-            raise
         except ChunkWriteTimeout:
             self.logger.info(
                 'Client did not read from proxy within %ss',
                 self.app.client_timeout)
-            self.logger.increment('client_timeouts')
+            self.logger.increment('%s.client_timeouts' %
+                                  self.server_type.lower())
         except GeneratorExit:
             warn = True
             req_range = self.backend_headers['Range']
@@ -1394,7 +1506,8 @@ class GetOrHeadHandler(GetterBase):
                     if end is not None and begin is not None:
                         if end - begin + 1 == self.bytes_used_from_backend:
                             warn = False
-            if not req.environ.get('swift.non_client_disconnect') and warn:
+            if (warn and
+                    not self.req.environ.get('swift.non_client_disconnect')):
                 self.logger.info('Client disconnected on read of %r',
                                  self.path)
             raise
@@ -1418,7 +1531,7 @@ class GetOrHeadHandler(GetterBase):
         else:
             return None
 
-    def _make_node_request(self, node, node_timeout, logger_thread_locals):
+    def _make_node_request(self, node, logger_thread_locals):
         # make a backend request; return True if the response is deemed good
         # (has an acceptable status code), useful (matches any previously
         # discovered etag) and sufficient (a single good response is
@@ -1426,6 +1539,7 @@ class GetOrHeadHandler(GetterBase):
         self.logger.thread_locals = logger_thread_locals
         if node in self.used_nodes:
             return False
+
         req_headers = dict(self.backend_headers)
         ip, port = get_ip_port(node, req_headers)
         start_node_timing = time.time()
@@ -1438,7 +1552,7 @@ class GetOrHeadHandler(GetterBase):
                     query_string=self.req.query_string)
             self.app.set_node_timing(node, time.time() - start_node_timing)
 
-            with Timeout(node_timeout):
+            with Timeout(self.node_timeout):
                 possible_source = conn.getresponse()
                 # See NOTE: swift_conn at top of file about this.
                 possible_source.swift_conn = conn
@@ -1530,14 +1644,10 @@ class GetOrHeadHandler(GetterBase):
 
         nodes = GreenthreadSafeIterator(self.node_iter)
 
-        node_timeout = self.app.node_timeout
-        if self.server_type == 'Object' and not self.newest:
-            node_timeout = self.app.recoverable_node_timeout
-
         pile = GreenAsyncPile(self.concurrency)
 
         for node in nodes:
-            pile.spawn(self._make_node_request, node, node_timeout,
+            pile.spawn(self._make_node_request, node,
                        self.logger.thread_locals)
             _timeout = self.app.get_policy_options(
                 self.policy).concurrency_timeout \
@@ -1573,13 +1683,12 @@ class GetOrHeadHandler(GetterBase):
             return True
         return False
 
-    def _make_app_iter(self, req):
+    def _make_app_iter(self):
         """
         Returns an iterator over the contents of the source (via its read
         func). There is also quite a bit of cleanup to ensure garbage
         collection works and the underlying socket of the source is closed.
 
-        :param req: incoming request object
         :return: an iterator that yields chunks of response body bytes
         """
 
@@ -1596,7 +1705,7 @@ class GetOrHeadHandler(GetterBase):
             # furnished one for us, so we'll just re-use it
             boundary = dict(content_type_attrs)["boundary"]
 
-        parts_iter = self._iter_parts_from_response(req)
+        parts_iter = self._iter_parts_from_response()
 
         def add_content_type(response_part):
             response_part["content_type"] = \
@@ -1607,15 +1716,15 @@ class GetOrHeadHandler(GetterBase):
             (add_content_type(pi) for pi in parts_iter),
             boundary, is_multipart, self.logger)
 
-    def get_working_response(self, req):
+    def get_working_response(self):
         res = None
-        if self._find_source():
-            res = Response(request=req)
+        if self._replace_source():
+            res = Response(request=self.req)
             res.status = self.source.resp.status
             update_headers(res, self.source.resp.getheaders())
-            if req.method == 'GET' and \
+            if self.req.method == 'GET' and \
                     self.source.resp.status in (HTTP_OK, HTTP_PARTIAL_CONTENT):
-                res.app_iter = self._make_app_iter(req)
+                res.app_iter = self._make_app_iter()
                 # See NOTE: swift_conn at top of file about this.
                 res.swift_conn = self.source.resp.swift_conn
             if not res.environ:
@@ -1644,6 +1753,7 @@ class NodeIter(object):
     may not, depending on how logging is configured, the vagaries of
     socket IO and eventlet, and the phase of the moon.)
 
+    :param server_type: one of 'account', 'container', or 'object'
    :param app: a proxy app
    :param ring: ring to get yield nodes from
    :param partition: ring partition to yield nodes for
@@ -1656,8 +1766,9 @@ class NodeIter(object):
         None for an account or container ring.
     """
 
-    def __init__(self, app, ring, partition, logger, request, node_iter=None,
-                 policy=None):
+    def __init__(self, server_type, app, ring, partition, logger, request,
+                 node_iter=None, policy=None):
+        self.server_type = server_type
         self.app = app
         self.ring = ring
         self.partition = partition
@@ -1704,12 +1815,14 @@ class NodeIter(object):
             return
         extra_handoffs = handoffs - self.expected_handoffs
         if extra_handoffs > 0:
-            self.logger.increment('handoff_count')
+            self.logger.increment('%s.handoff_count' %
+                                  self.server_type.lower())
             self.logger.warning(
                 'Handoff requested (%d)' % handoffs)
             if (extra_handoffs == self.num_primary_nodes):
                 # all the primaries were skipped, and handoffs didn't help
-                self.logger.increment('handoff_all_count')
+                self.logger.increment('%s.handoff_all_count' %
+                                      self.server_type.lower())
 
     def set_node_provider(self, callback):
         """
@@ -1786,9 +1899,10 @@ class Controller(object):
         self.trans_id = '-'
         self._allowed_methods = None
         self._private_methods = None
-        # adapt the app logger to prefix statsd metrics with the server type
-        self.logger = MetricsPrefixLoggerAdapter(
-            self.app.logger, {}, self.server_type.lower())
+
+    @property
+    def logger(self):
+        return self.app.logger
 
     @property
     def allowed_methods(self):
@@ -2006,9 +2120,8 @@ class Controller(object):
         :param node_iterator: optional node iterator.
         :returns: a swob.Response object
         """
-        nodes = GreenthreadSafeIterator(
-            node_iterator or NodeIter(self.app, ring, part, self.logger, req)
-        )
+        nodes = GreenthreadSafeIterator(node_iterator or NodeIter(
+            self.server_type.lower(), self.app, ring, part, self.logger, req))
         node_number = node_count or len(ring.get_part_nodes(part))
         pile = GreenAsyncPile(node_number)
 
@@ -2209,7 +2322,7 @@ class Controller(object):
                                    partition, path, backend_headers,
                                    concurrency, policy=policy,
                                    logger=self.logger)
-        res = handler.get_working_response(req)
+        res = handler.get_working_response()
 
         if not res:
             res = self.best_response(
@@ -2385,7 +2498,7 @@ class Controller(object):
         data = self._parse_listing_response(req, response)
         return data, response
 
-    def _parse_shard_ranges(self, req, listing, response):
+    def _parse_namespaces(self, req, listing, response):
         if listing is None:
             return None
 
@@ -2397,38 +2510,15 @@ class Controller(object):
             return None
 
         try:
-            return [ShardRange.from_dict(shard_range)
-                    for shard_range in listing]
+            # Note: a legacy container-server could return a list of
+            # ShardRanges, but that's ok: namespaces just need 'name', 'lower'
+            # and 'upper' keys. If we ever need to know we can look for a
+            # 'x-backend-record-shard-format' header from newer container
+            # servers.
+            return [Namespace(data['name'], data['lower'], data['upper'])
+                    for data in listing]
         except (ValueError, TypeError, KeyError) as err:
             self.logger.error(
-                "Failed to get shard ranges from %s: invalid data: %r",
+                "Failed to get namespaces from %s: invalid data: %r",
                 req.path_qs, err)
             return None
-
-    def _get_shard_ranges(
-            self, req, account, container, includes=None, states=None):
-        """
-        Fetch shard ranges from given `account/container`. If `includes` is
-        given then the shard range for that object name is requested, otherwise
-        all shard ranges are requested.
-
-        :param req: original Request instance.
-        :param account: account from which shard ranges should be fetched.
-        :param container: container from which shard ranges should be fetched.
-        :param includes: (optional) restricts the list of fetched shard ranges
-            to those which include the given name.
-        :param states: (optional) the states of shard ranges to be fetched.
-        :return: a list of instances of :class:`swift.common.utils.ShardRange`,
-            or None if there was a problem fetching the shard ranges
-        """
-        params = req.params.copy()
-        params.pop('limit', None)
-        params['format'] = 'json'
-        if includes:
-            params['includes'] = str_to_wsgi(includes)
-        if states:
-            params['states'] = states
-        headers = {'X-Backend-Record-Type': 'shard'}
-        listing, response = self._get_container_listing(
-            req, account, container, headers=headers, params=params)
-        return self._parse_shard_ranges(req, listing, response), response
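
Note: the removed _get_shard_ranges helper built full ShardRange objects; the renamed _parse_namespaces only reads the 'name', 'lower' and 'upper' keys and returns lightweight Namespace objects. A brief sketch with illustrative listing data:

    from swift.common.utils import Namespace

    listing = [{'name': '.shards_AUTH_test/c_0', 'lower': '', 'upper': 'm'},
               {'name': '.shards_AUTH_test/c_1', 'lower': 'm', 'upper': ''}]
    namespaces = [Namespace(data['name'], data['lower'], data['upper'])
                  for data in listing]
    # Extra keys from a legacy ShardRange dict would simply be ignored here.
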