swift 2.32.1__py2.py3-none-any.whl → 2.33.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. swift/account/server.py +1 -11
  2. swift/cli/info.py +28 -1
  3. swift-2.32.1.data/scripts/swift-recon-cron → swift/cli/recon_cron.py +4 -13
  4. swift/cli/reload.py +141 -0
  5. swift/common/daemon.py +12 -2
  6. swift/common/db.py +12 -8
  7. swift/common/http_protocol.py +76 -3
  8. swift/common/manager.py +18 -5
  9. swift/common/memcached.py +18 -12
  10. swift/common/middleware/proxy_logging.py +35 -27
  11. swift/common/middleware/s3api/acl_handlers.py +1 -1
  12. swift/common/middleware/s3api/controllers/__init__.py +3 -0
  13. swift/common/middleware/s3api/controllers/acl.py +3 -2
  14. swift/common/middleware/s3api/controllers/logging.py +2 -2
  15. swift/common/middleware/s3api/controllers/multi_upload.py +30 -6
  16. swift/common/middleware/s3api/controllers/object_lock.py +44 -0
  17. swift/common/middleware/s3api/s3api.py +4 -0
  18. swift/common/middleware/s3api/s3request.py +19 -12
  19. swift/common/middleware/s3api/s3response.py +13 -2
  20. swift/common/middleware/s3api/utils.py +1 -1
  21. swift/common/middleware/slo.py +395 -298
  22. swift/common/middleware/staticweb.py +45 -14
  23. swift/common/middleware/tempurl.py +132 -91
  24. swift/common/request_helpers.py +32 -8
  25. swift/common/storage_policy.py +1 -1
  26. swift/common/swob.py +5 -2
  27. swift/common/utils/__init__.py +230 -135
  28. swift/common/utils/timestamp.py +23 -2
  29. swift/common/wsgi.py +8 -0
  30. swift/container/backend.py +126 -21
  31. swift/container/replicator.py +42 -6
  32. swift/container/server.py +264 -145
  33. swift/container/sharder.py +50 -30
  34. swift/container/updater.py +1 -0
  35. swift/obj/auditor.py +2 -1
  36. swift/obj/diskfile.py +55 -19
  37. swift/obj/expirer.py +1 -13
  38. swift/obj/mem_diskfile.py +2 -1
  39. swift/obj/mem_server.py +1 -0
  40. swift/obj/replicator.py +2 -2
  41. swift/obj/server.py +12 -23
  42. swift/obj/updater.py +1 -0
  43. swift/obj/watchers/dark_data.py +72 -34
  44. swift/proxy/controllers/account.py +3 -2
  45. swift/proxy/controllers/base.py +217 -127
  46. swift/proxy/controllers/container.py +274 -289
  47. swift/proxy/controllers/obj.py +98 -141
  48. swift/proxy/server.py +2 -12
  49. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-info +3 -0
  50. swift-2.33.1.data/scripts/swift-recon-cron +24 -0
  51. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/AUTHORS +3 -1
  52. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/METADATA +4 -3
  53. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/RECORD +94 -91
  54. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/WHEEL +1 -1
  55. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/entry_points.txt +1 -0
  56. swift-2.33.1.dist-info/pbr.json +1 -0
  57. swift-2.32.1.dist-info/pbr.json +0 -1
  58. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-audit +0 -0
  59. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-auditor +0 -0
  60. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-info +0 -0
  61. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-reaper +0 -0
  62. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-replicator +0 -0
  63. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-server +0 -0
  64. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-config +0 -0
  65. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-auditor +0 -0
  66. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-reconciler +0 -0
  67. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-replicator +0 -0
  68. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-server +0 -0
  69. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-sharder +0 -0
  70. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-sync +0 -0
  71. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-updater +0 -0
  72. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-dispersion-populate +0 -0
  73. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-dispersion-report +0 -0
  74. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-drive-audit +0 -0
  75. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-form-signature +0 -0
  76. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-get-nodes +0 -0
  77. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-init +0 -0
  78. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-auditor +0 -0
  79. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-expirer +0 -0
  80. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-info +0 -0
  81. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-reconstructor +0 -0
  82. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-relinker +0 -0
  83. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-replicator +0 -0
  84. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-server +0 -0
  85. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-updater +0 -0
  86. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-oldies +0 -0
  87. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-orphans +0 -0
  88. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-proxy-server +0 -0
  89. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-recon +0 -0
  90. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-reconciler-enqueue +0 -0
  91. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-ring-builder +0 -0
  92. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-ring-builder-analyzer +0 -0
  93. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-ring-composer +0 -0
  94. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/LICENSE +0 -0
  95. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/top_level.txt +0 -0
swift/proxy/controllers/obj.py CHANGED
@@ -48,8 +48,7 @@ from swift.common.utils import (
  normalize_delete_at_timestamp, public, get_expirer_container,
  document_iters_to_http_response_body, parse_content_range,
  quorum_size, reiterate, close_if_possible, safe_json_loads, md5,
- ShardRange, find_namespace, cache_from_env, NamespaceBoundList,
- CooperativeIterator)
+ NamespaceBoundList, CooperativeIterator)
  from swift.common.bufferedhttp import http_connect
  from swift.common.constraints import check_metadata, check_object_creation
  from swift.common import constraints
@@ -64,19 +63,19 @@ from swift.common.http import (
  HTTP_SERVICE_UNAVAILABLE, HTTP_INSUFFICIENT_STORAGE,
  HTTP_PRECONDITION_FAILED, HTTP_CONFLICT, HTTP_UNPROCESSABLE_ENTITY,
  HTTP_REQUESTED_RANGE_NOT_SATISFIABLE, HTTP_NOT_FOUND)
- from swift.common.memcached import MemcacheConnectionError
  from swift.common.storage_policy import (POLICIES, REPL_POLICY, EC_POLICY,
  ECDriverError, PolicyError)
  from swift.proxy.controllers.base import Controller, delay_denial, \
  cors_validation, update_headers, bytes_to_skip, ByteCountEnforcer, \
  record_cache_op_metrics, get_cache_key, GetterBase, GetterSource, \
- is_good_source, NodeIter
+ is_good_source, NodeIter, get_namespaces_from_cache, \
+ set_namespaces_in_cache
  from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
  HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \
  HTTPServerError, HTTPServiceUnavailable, HTTPClientDisconnect, \
  HTTPUnprocessableEntity, Response, HTTPException, \
  HTTPRequestedRangeNotSatisfiable, Range, HTTPInternalServerError, \
- normalize_etag
+ normalize_etag, str_to_wsgi
  from swift.common.request_helpers import update_etag_is_at_header, \
  resolve_etag_is_at_header, validate_internal_obj, get_ip_port
 
@@ -201,8 +200,9 @@ class BaseObjectController(Controller):
  policy_options = self.app.get_policy_options(policy)
  is_local = policy_options.write_affinity_is_local_fn
  if is_local is None:
- return NodeIter(self.app, ring, partition, self.logger, request,
- policy=policy)
+ return NodeIter(
+ 'object', self.app, ring, partition, self.logger, request,
+ policy=policy)
 
  primary_nodes = ring.get_part_nodes(partition)
  handoff_nodes = ring.get_more_nodes(partition)
@@ -235,8 +235,9 @@ class BaseObjectController(Controller):
  (node for node in all_nodes if node not in preferred_nodes)
  )
 
- return NodeIter(self.app, ring, partition, self.logger, request,
- node_iter=node_iter, policy=policy)
+ return NodeIter(
+ 'object', self.app, ring, partition, self.logger, request,
+ node_iter=node_iter, policy=policy)
 
  def GETorHEAD(self, req):
  """Handle HTTP GET or HEAD requests."""
@@ -255,8 +256,9 @@ class BaseObjectController(Controller):
  return aresp
  partition = obj_ring.get_part(
  self.account_name, self.container_name, self.object_name)
- node_iter = NodeIter(self.app, obj_ring, partition, self.logger, req,
- policy=policy)
+ node_iter = NodeIter(
+ 'object', self.app, obj_ring, partition, self.logger, req,
+ policy=policy)
 
  resp = self._get_or_head_response(req, node_iter, partition, policy)
 
@@ -279,47 +281,32 @@ class BaseObjectController(Controller):
  """Handler for HTTP HEAD requests."""
  return self.GETorHEAD(req)
 
- def _get_cached_updating_namespaces(
- self, infocache, memcache, cache_key):
+ def _get_updating_namespaces(
+ self, req, account, container, includes=None):
  """
- Fetch cached updating namespaces of updating shard ranges from
- infocache and memcache.
+ Fetch namespaces in 'updating' states from given `account/container`.
+ If `includes` is given then the shard range for that object name is
+ requested, otherwise all namespaces are requested.
 
- :param infocache: the infocache instance.
- :param memcache: an instance of a memcache client,
- :class:`swift.common.memcached.MemcacheRing`.
- :param cache_key: the cache key for both infocache and memcache.
- :return: a tuple of (an instance of NamespaceBoundList, cache state)
- """
- # try get namespaces from infocache first
- namespace_list = infocache.get(cache_key)
- if namespace_list:
- return namespace_list, 'infocache_hit'
-
- # then try get them from memcache
- if not memcache:
- return None, 'disabled'
- skip_chance = self.app.container_updating_shard_ranges_skip_cache
- if skip_chance and random.random() < skip_chance:
- return None, 'skip'
- try:
- namespaces = memcache.get(cache_key, raise_on_error=True)
- cache_state = 'hit' if namespaces else 'miss'
- except MemcacheConnectionError:
- namespaces = None
- cache_state = 'error'
-
- if namespaces:
- if six.PY2:
- # json.loads() in memcache.get will convert json 'string' to
- # 'unicode' with python2, here we cast 'unicode' back to 'str'
- namespaces = [
- [lower.encode('utf-8'), name.encode('utf-8')]
- for lower, name in namespaces]
- namespace_list = NamespaceBoundList(namespaces)
- else:
- namespace_list = None
- return namespace_list, cache_state
+ :param req: original Request instance.
+ :param account: account from which namespaces should be fetched.
+ :param container: container from which namespaces should be fetched.
+ :param includes: (optional) restricts the list of fetched namespaces
+ to those which include the given name.
+ :return: a list of instances of :class:`swift.common.utils.Namespace`,
+ or None if there was a problem fetching the namespaces.
+ """
+ params = req.params.copy()
+ params.pop('limit', None)
+ params['format'] = 'json'
+ params['states'] = 'updating'
+ headers = {'X-Backend-Record-Type': 'shard',
+ 'X-Backend-Record-Shard-Format': 'namespace'}
+ if includes:
+ params['includes'] = str_to_wsgi(includes)
+ listing, response = self._get_container_listing(
+ req, account, container, headers=headers, params=params)
+ return self._parse_namespaces(req, listing, response), response
 
  def _get_update_shard_caching_disabled(self, req, account, container, obj):
  """
@@ -330,16 +317,17 @@ class BaseObjectController(Controller):
  :param account: account from which shard ranges should be fetched.
  :param container: container from which shard ranges should be fetched.
  :param obj: object getting updated.
- :return: an instance of :class:`swift.common.utils.ShardRange`,
+ :return: an instance of :class:`swift.common.utils.Namespace`,
  or None if the update should go back to the root
  """
  # legacy behavior requests container server for includes=obj
- shard_ranges, response = self._get_shard_ranges(
- req, account, container, states='updating', includes=obj)
+ namespaces, response = self._get_updating_namespaces(
+ req, account, container, includes=obj)
  record_cache_op_metrics(
- self.logger, 'shard_updating', 'disabled', response)
- # there will be only one shard range in the list if any
- return shard_ranges[0] if shard_ranges else None
+ self.logger, self.server_type.lower(), 'shard_updating',
+ 'disabled', response)
+ # there will be only one Namespace in the list if any
+ return namespaces[0] if namespaces else None
 
  def _get_update_shard(self, req, account, container, obj):
  """
@@ -353,7 +341,7 @@ class BaseObjectController(Controller):
  :param account: account from which shard ranges should be fetched.
  :param container: container from which shard ranges should be fetched.
  :param obj: object getting updated.
- :return: an instance of :class:`swift.common.utils.ShardRange`,
+ :return: an instance of :class:`swift.common.utils.Namespace`,
  or None if the update should go back to the root
  """
  if not self.app.recheck_updating_shard_ranges:
@@ -364,49 +352,43 @@ class BaseObjectController(Controller):
  # caching is enabled, try to get from caches
  response = None
  cache_key = get_cache_key(account, container, shard='updating')
- infocache = req.environ.setdefault('swift.infocache', {})
- memcache = cache_from_env(req.environ, True)
- cached_namespaces, cache_state = self._get_cached_updating_namespaces(
- infocache, memcache, cache_key)
- if cached_namespaces:
- # found cached namespaces in either infocache or memcache
- infocache[cache_key] = cached_namespaces
- namespace = cached_namespaces.get_namespace(obj)
- update_shard = ShardRange(
- name=namespace.name, timestamp=0, lower=namespace.lower,
- upper=namespace.upper)
- else:
- # pull full set of updating shard ranges from backend
- shard_ranges, response = self._get_shard_ranges(
- req, account, container, states='updating')
- if shard_ranges:
+ skip_chance = self.app.container_updating_shard_ranges_skip_cache
+ ns_bound_list, get_cache_state = get_namespaces_from_cache(
+ req, cache_key, skip_chance)
+ if not ns_bound_list:
+ # namespaces not found in either infocache or memcache so pull full
+ # set of updating shard ranges from backend
+ namespaces, response = self._get_updating_namespaces(
+ req, account, container)
+ if namespaces:
  # only store the list of namespace lower bounds and names into
  # infocache and memcache.
- cached_namespaces = NamespaceBoundList.parse(
- shard_ranges)
- infocache[cache_key] = cached_namespaces
- if memcache:
+ ns_bound_list = NamespaceBoundList.parse(namespaces)
+ set_cache_state = set_namespaces_in_cache(
+ req, cache_key, ns_bound_list,
+ self.app.recheck_updating_shard_ranges)
+ record_cache_op_metrics(
+ self.logger, self.server_type.lower(), 'shard_updating',
+ set_cache_state, None)
+ if set_cache_state == 'set':
  self.logger.info(
  'Caching updating shards for %s (%d shards)',
- cache_key, len(cached_namespaces.bounds))
- memcache.set(
- cache_key, cached_namespaces.bounds,
- time=self.app.recheck_updating_shard_ranges)
- update_shard = find_namespace(obj, shard_ranges or [])
+ cache_key, len(namespaces))
  record_cache_op_metrics(
- self.logger, 'shard_updating', cache_state, response)
- return update_shard
+ self.logger, self.server_type.lower(), 'shard_updating',
+ get_cache_state, response)
+ return ns_bound_list.get_namespace(obj) if ns_bound_list else None
 
  def _get_update_target(self, req, container_info):
  # find the sharded container to which we'll send the update
  db_state = container_info.get('sharding_state', 'unsharded')
  if db_state in ('sharded', 'sharding'):
- shard_range = self._get_update_shard(
+ update_shard_ns = self._get_update_shard(
  req, self.account_name, self.container_name, self.object_name)
- if shard_range:
+ if update_shard_ns:
  partition, nodes = self.app.container_ring.get_nodes(
- shard_range.account, shard_range.container)
- return partition, nodes, shard_range.name
+ update_shard_ns.account, update_shard_ns.container)
+ return partition, nodes, update_shard_ns.name
 
  return container_info['partition'], container_info['nodes'], None
 
@@ -461,14 +443,15 @@ class BaseObjectController(Controller):
  headers = [self.generate_request_headers(req, additional=req.headers)
  for _junk in range(n_outgoing)]
 
- def set_container_update(index, container):
+ def set_container_update(index, container_node):
+ ip, port = get_ip_port(container_node, headers[index])
  headers[index]['X-Container-Partition'] = container_partition
  headers[index]['X-Container-Host'] = csv_append(
  headers[index].get('X-Container-Host'),
- '%(ip)s:%(port)s' % container)
+ '%(ip)s:%(port)s' % {'ip': ip, 'port': port})
  headers[index]['X-Container-Device'] = csv_append(
  headers[index].get('X-Container-Device'),
- container['device'])
+ container_node['device'])
  if container_path:
  headers[index]['X-Backend-Quoted-Container-Path'] = quote(
  container_path)
@@ -481,11 +464,12 @@ class BaseObjectController(Controller):
  # will eat the update and move it as a misplaced object.
 
  def set_delete_at_headers(index, delete_at_node):
+ ip, port = get_ip_port(delete_at_node, headers[index])
  headers[index]['X-Delete-At-Container'] = delete_at_container
  headers[index]['X-Delete-At-Partition'] = delete_at_partition
  headers[index]['X-Delete-At-Host'] = csv_append(
  headers[index].get('X-Delete-At-Host'),
- '%(ip)s:%(port)s' % delete_at_node)
+ '%(ip)s:%(port)s' % {'ip': ip, 'port': port})
  headers[index]['X-Delete-At-Device'] = csv_append(
  headers[index].get('X-Delete-At-Device'),
  delete_at_node['device'])
@@ -1046,7 +1030,7 @@ class ReplicatedObjectController(BaseObjectController):
  if ml and bytes_transferred < ml:
  self.logger.warning(
  'Client disconnected without sending enough data')
- self.logger.increment('client_disconnects')
+ self.logger.increment('object.client_disconnects')
  raise HTTPClientDisconnect(request=req)
 
  trail_md = self._get_footers(req)
@@ -1061,14 +1045,14 @@ class ReplicatedObjectController(BaseObjectController):
  except ChunkReadTimeout as err:
  self.logger.warning(
  'ERROR Client read timeout (%ss)', err.seconds)
- self.logger.increment('client_timeouts')
+ self.logger.increment('object.client_timeouts')
  raise HTTPRequestTimeout(request=req)
  except HTTPException:
  raise
  except ChunkReadError:
  self.logger.warning(
  'Client disconnected without sending last chunk')
- self.logger.increment('client_disconnects')
+ self.logger.increment('object.client_disconnects')
  raise HTTPClientDisconnect(request=req)
  except Timeout:
  self.logger.exception(
@@ -2484,9 +2468,10 @@ class ECFragGetter(GetterBase):
  backend_headers, header_provider, logger_thread_locals,
  logger):
  super(ECFragGetter, self).__init__(
- app=app, req=req, node_iter=node_iter,
- partition=partition, policy=policy, path=path,
- backend_headers=backend_headers, logger=logger)
+ app=app, req=req, node_iter=node_iter, partition=partition,
+ policy=policy, path=path, backend_headers=backend_headers,
+ node_timeout=app.recoverable_node_timeout,
+ resource_type='EC fragment', logger=logger)
  self.header_provider = header_provider
  self.fragment_size = policy.fragment_size
  self.skip_bytes = 0
@@ -2494,39 +2479,13 @@ class ECFragGetter(GetterBase):
  self.status = self.reason = self.body = self.source_headers = None
  self._source_iter = None
 
- def _get_next_response_part(self):
- node_timeout = self.app.recoverable_node_timeout
-
- while True:
- # the loop here is to resume if trying to parse
- # multipart/byteranges response raises a ChunkReadTimeout
- # and resets the source_parts_iter
- try:
- with WatchdogTimeout(self.app.watchdog, node_timeout,
- ChunkReadTimeout):
- # If we don't have a multipart/byteranges response,
- # but just a 200 or a single-range 206, then this
- # performs no IO, and just returns source (or
- # raises StopIteration).
- # Otherwise, this call to next() performs IO when
- # we have a multipart/byteranges response; as it
- # will read the MIME boundary and part headers.
- start_byte, end_byte, length, headers, part = next(
- self.source.parts_iter)
- return (start_byte, end_byte, length, headers, part)
- except ChunkReadTimeout:
- if not self._replace_source(
- 'Trying to read next part of EC multi-part GET '
- '(retrying)'):
- raise
-
  def _iter_bytes_from_response_part(self, part_file, nbytes):
  buf = b''
  part_file = ByteCountEnforcer(part_file, nbytes)
  while True:
  try:
  with WatchdogTimeout(self.app.watchdog,
- self.app.recoverable_node_timeout,
+ self.node_timeout,
  ChunkReadTimeout):
  chunk = part_file.read(self.app.object_chunk_size)
  # NB: this append must be *inside* the context
@@ -2580,7 +2539,7 @@ class ECFragGetter(GetterBase):
  if not chunk:
  break
 
- def _iter_parts_from_response(self, req):
+ def _iter_parts_from_response(self):
  try:
  part_iter = None
  try:
@@ -2591,7 +2550,7 @@ class ECFragGetter(GetterBase):
  except StopIteration:
  # it seems this is the only way out of the loop; not
  # sure why the req.environ update is always needed
- req.environ['swift.non_client_disconnect'] = True
+ self.req.environ['swift.non_client_disconnect'] = True
  break
  # skip_bytes compensates for the backend request range
  # expansion done in _convert_range
@@ -2624,7 +2583,7 @@ class ECFragGetter(GetterBase):
  self.logger.warning(
  'Client did not read from proxy within %ss' %
  self.app.client_timeout)
- self.logger.increment('client_timeouts')
+ self.logger.increment('object.client_timeouts')
  except GeneratorExit:
  warn = True
  req_range = self.backend_headers['Range']
@@ -2635,7 +2594,8 @@ class ECFragGetter(GetterBase):
  if end is not None and begin is not None:
  if end - begin + 1 == self.bytes_used_from_backend:
  warn = False
- if not req.environ.get('swift.non_client_disconnect') and warn:
+ if (warn and
+ not self.req.environ.get('swift.non_client_disconnect')):
  self.logger.warning(
  'Client disconnected on read of EC frag %r', self.path)
  raise
@@ -2656,7 +2616,7 @@ class ECFragGetter(GetterBase):
  else:
  return HeaderKeyDict()
 
- def _make_node_request(self, node, node_timeout):
+ def _make_node_request(self, node):
  # make a backend request; return a response if it has an acceptable
  # status code, otherwise None
  self.logger.thread_locals = self.logger_thread_locals
@@ -2673,7 +2633,7 @@ class ECFragGetter(GetterBase):
  query_string=self.req.query_string)
  self.app.set_node_timing(node, time.time() - start_node_timing)
 
- with Timeout(node_timeout):
+ with Timeout(self.node_timeout):
  possible_source = conn.getresponse()
  # See NOTE: swift_conn at top of file about this.
  possible_source.swift_conn = conn
@@ -2729,9 +2689,7 @@ class ECFragGetter(GetterBase):
  def _source_gen(self):
  self.status = self.reason = self.body = self.source_headers = None
  for node in self.node_iter:
- source = self._make_node_request(
- node, self.app.recoverable_node_timeout)
-
+ source = self._make_node_request(node)
  if source:
  yield GetterSource(self.app, source, node)
  else:
@@ -2755,11 +2713,10 @@ class ECFragGetter(GetterBase):
  return True
  return False
 
- def response_parts_iter(self, req):
+ def response_parts_iter(self):
  """
  Create an iterator over a single fragment response body.
 
- :param req: a ``swob.Request``.
  :return: an interator that yields chunks of bytes from a fragment
  response body.
  """
@@ -2771,7 +2728,7 @@ class ECFragGetter(GetterBase):
  else:
  if source:
  self.source = source
- it = self._iter_parts_from_response(req)
+ it = self._iter_parts_from_response()
  return it
 
 
@@ -2791,7 +2748,7 @@ class ECObjectController(BaseObjectController):
  policy, req.swift_entity_path, backend_headers,
  header_provider, logger_thread_locals,
  self.logger)
- return (getter, getter.response_parts_iter(req))
+ return getter, getter.response_parts_iter()
 
  def _convert_range(self, req, policy):
  """
@@ -3226,7 +3183,7 @@ class ECObjectController(BaseObjectController):
  if ml and bytes_transferred < ml:
  self.logger.warning(
  'Client disconnected without sending enough data')
- self.logger.increment('client_disconnects')
+ self.logger.increment('object.client_disconnects')
  raise HTTPClientDisconnect(request=req)
 
  send_chunk(b'') # flush out any buffered data
@@ -3296,12 +3253,12 @@ class ECObjectController(BaseObjectController):
  except ChunkReadTimeout as err:
  self.logger.warning(
  'ERROR Client read timeout (%ss)', err.seconds)
- self.logger.increment('client_timeouts')
+ self.logger.increment('object.client_timeouts')
  raise HTTPRequestTimeout(request=req)
  except ChunkReadError:
  self.logger.warning(
  'Client disconnected without sending last chunk')
- self.logger.increment('client_disconnects')
+ self.logger.increment('object.client_disconnects')
  raise HTTPClientDisconnect(request=req)
  except HTTPException:
  raise
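
The obj.py hunks above replace the proxy's hand-rolled memcache handling for 'updating' shard ranges with the shared get_namespaces_from_cache / set_namespaces_in_cache helpers and a cached NamespaceBoundList, and the update target is now returned as a Namespace rather than a ShardRange. A minimal sketch of the resulting lookup flow, assuming the swift 2.33.x helper names shown in the hunks (the wrapper function name below is illustrative, not part of the package):

# Illustrative sketch only: mirrors the refactored _get_update_shard() logic above.
from swift.common.utils import NamespaceBoundList
from swift.proxy.controllers.base import (
    get_cache_key, get_namespaces_from_cache, set_namespaces_in_cache)

def lookup_update_namespace(controller, req, account, container, obj):
    # one cache key covers both swift.infocache and memcache
    cache_key = get_cache_key(account, container, shard='updating')
    skip_chance = controller.app.container_updating_shard_ranges_skip_cache
    ns_bound_list, cache_state = get_namespaces_from_cache(
        req, cache_key, skip_chance)
    if not ns_bound_list:
        # cache miss: fetch all 'updating' namespaces from the backend and
        # cache the compact list of lower bounds and names for later requests
        namespaces, _resp = controller._get_updating_namespaces(
            req, account, container)
        if namespaces:
            ns_bound_list = NamespaceBoundList.parse(namespaces)
            set_namespaces_in_cache(
                req, cache_key, ns_bound_list,
                controller.app.recheck_updating_shard_ranges)
    # the caller now receives a Namespace, not a full ShardRange
    return ns_bound_list.get_namespace(obj) if ns_bound_list else None
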
swift/proxy/server.py CHANGED
@@ -260,18 +260,8 @@ class Application(object):
  [os.path.join(swift_dir, 'mime.types')])
  self.account_autocreate = \
  config_true_value(conf.get('account_autocreate', 'no'))
- if conf.get('auto_create_account_prefix'):
- self.logger.warning('Option auto_create_account_prefix is '
- 'deprecated. Configure '
- 'auto_create_account_prefix under the '
- 'swift-constraints section of '
- 'swift.conf. This option will '
- 'be ignored in a future release.')
- self.auto_create_account_prefix = \
- conf['auto_create_account_prefix']
- else:
- self.auto_create_account_prefix = \
- constraints.AUTO_CREATE_ACCOUNT_PREFIX
+ self.auto_create_account_prefix = \
+ constraints.AUTO_CREATE_ACCOUNT_PREFIX
  self.expiring_objects_account = self.auto_create_account_prefix + \
  (conf.get('expiring_objects_account_name') or 'expiring_objects')
  self.expiring_objects_container_divisor = \
{swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-info CHANGED
@@ -47,6 +47,9 @@ if __name__ == '__main__':
  '-v', '--verbose', default=False, action="store_true",
  help="Show all shard ranges. By default, only the number of shard "
  "ranges is displayed if there are many shards.")
+ parser.add_option(
+ '--sync', '-s', default=False, action="store_true",
+ help="Output the contents of the incoming/outging sync tables")
 
  options, args = parser.parse_args()
 
swift-2.33.1.data/scripts/swift-recon-cron ADDED
@@ -0,0 +1,24 @@
+ #!python
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ # implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ swift-recon-cron.py
+ """
+
+ import sys
+
+ from swift.cli.recon_cron import main
+
+ if __name__ == '__main__':
+ sys.exit(main())
{swift-2.32.1.dist-info → swift-2.33.1.dist-info}/AUTHORS CHANGED
@@ -218,6 +218,7 @@ Ionuț Arțăriși (iartarisi@suse.cz)
  Iryoung Jeong (iryoung@gmail.com)
  its-not-a-bug-its-a-feature (david.cole@sohonet.com)
  Jaivish Kothari (jaivish.kothari@nectechnologies.in)
+ Jake Yip (jake.yip@ardc.edu.au)
  James E. Blair (jeblair@openstack.org)
  James Page (james.page@ubuntu.com)
  Jamie Lennox (jlennox@redhat.com)
@@ -257,6 +258,7 @@ Kazuhiro Miyahara (miyahara.kazuhiro@lab.ntt.co.jp)
  Ke Liang (ke.liang@easystack.cn)
  Kenichiro Matsuda (matsuda_kenichi@jp.fujitsu.com)
  Keshava Bharadwaj (kb.sankethi@gmail.com)
+ kim woo seok (rladntjr4@gmail.com)
  Kiyoung Jung (kiyoung.jung@kt.com)
  Koert van der Veer (koert@cloudvps.com)
  Konrad Kügler (swamblumat-eclipsebugs@yahoo.de)
@@ -404,7 +406,7 @@ Steve Kowalik (steven@wedontsleep.org)
  Steve Martinelli (stevemar@ca.ibm.com)
  Steven Lang (Steven.Lang@hgst.com)
  Sushil Kumar (sushil.kumar2@globallogic.com)
- Takashi Kajinami (tkajinam@redhat.com)
+ Takashi Kajinami (kajinamit@oss.nttdata.com)
  Takashi Natsume (takanattie@gmail.com)
  TheSriram (sriram@klusterkloud.com)
  Thiago da Silva (thiagodasilva@gmail.com)
{swift-2.32.1.dist-info → swift-2.33.1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: swift
- Version: 2.32.1
+ Version: 2.33.1
  Summary: OpenStack Object Storage
  Home-page: https://docs.openstack.org/swift/latest/
  Author: OpenStack
@@ -24,9 +24,10 @@ Classifier: Programming Language :: Python :: 3.7
  Classifier: Programming Language :: Python :: 3.8
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
  License-File: LICENSE
  License-File: AUTHORS
- Requires-Dist: eventlet >=0.25.0
+ Requires-Dist: eventlet !=0.34.3,>=0.25.0
  Requires-Dist: greenlet >=0.3.2
  Requires-Dist: PasteDeploy >=2.0.0
  Requires-Dist: lxml >=3.4.1
@@ -43,7 +44,7 @@ Provides-Extra: kms_keymaster
  Requires-Dist: oslo.config !=4.3.0,!=4.4.0,>=4.0.0 ; extra == 'kms_keymaster'
  Requires-Dist: castellan >=0.13.0 ; extra == 'kms_keymaster'
  Provides-Extra: test
- Requires-Dist: hacking <2.1.0,>=2.0 ; extra == 'test'
+ Requires-Dist: hacking <6.2.0,>=2.0 ; extra == 'test'
  Requires-Dist: coverage >=5.0.4 ; extra == 'test'
  Requires-Dist: pytest >=4.6.11 ; extra == 'test'
  Requires-Dist: pytest-cov >=2.12.1 ; extra == 'test'