swift 2.32.1__py2.py3-none-any.whl → 2.33.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95) hide show
  1. swift/account/server.py +1 -11
  2. swift/cli/info.py +28 -1
  3. swift-2.32.1.data/scripts/swift-recon-cron → swift/cli/recon_cron.py +4 -13
  4. swift/cli/reload.py +141 -0
  5. swift/common/daemon.py +12 -2
  6. swift/common/db.py +12 -8
  7. swift/common/http_protocol.py +76 -3
  8. swift/common/manager.py +18 -5
  9. swift/common/memcached.py +18 -12
  10. swift/common/middleware/proxy_logging.py +35 -27
  11. swift/common/middleware/s3api/acl_handlers.py +1 -1
  12. swift/common/middleware/s3api/controllers/__init__.py +3 -0
  13. swift/common/middleware/s3api/controllers/acl.py +3 -2
  14. swift/common/middleware/s3api/controllers/logging.py +2 -2
  15. swift/common/middleware/s3api/controllers/multi_upload.py +30 -6
  16. swift/common/middleware/s3api/controllers/object_lock.py +44 -0
  17. swift/common/middleware/s3api/s3api.py +4 -0
  18. swift/common/middleware/s3api/s3request.py +19 -12
  19. swift/common/middleware/s3api/s3response.py +13 -2
  20. swift/common/middleware/s3api/utils.py +1 -1
  21. swift/common/middleware/slo.py +395 -298
  22. swift/common/middleware/staticweb.py +45 -14
  23. swift/common/middleware/tempurl.py +132 -91
  24. swift/common/request_helpers.py +32 -8
  25. swift/common/storage_policy.py +1 -1
  26. swift/common/swob.py +5 -2
  27. swift/common/utils/__init__.py +230 -135
  28. swift/common/utils/timestamp.py +23 -2
  29. swift/common/wsgi.py +8 -0
  30. swift/container/backend.py +126 -21
  31. swift/container/replicator.py +42 -6
  32. swift/container/server.py +264 -145
  33. swift/container/sharder.py +50 -30
  34. swift/container/updater.py +1 -0
  35. swift/obj/auditor.py +2 -1
  36. swift/obj/diskfile.py +55 -19
  37. swift/obj/expirer.py +1 -13
  38. swift/obj/mem_diskfile.py +2 -1
  39. swift/obj/mem_server.py +1 -0
  40. swift/obj/replicator.py +2 -2
  41. swift/obj/server.py +12 -23
  42. swift/obj/updater.py +1 -0
  43. swift/obj/watchers/dark_data.py +72 -34
  44. swift/proxy/controllers/account.py +3 -2
  45. swift/proxy/controllers/base.py +217 -127
  46. swift/proxy/controllers/container.py +274 -289
  47. swift/proxy/controllers/obj.py +98 -141
  48. swift/proxy/server.py +2 -12
  49. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-info +3 -0
  50. swift-2.33.1.data/scripts/swift-recon-cron +24 -0
  51. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/AUTHORS +3 -1
  52. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/METADATA +4 -3
  53. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/RECORD +94 -91
  54. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/WHEEL +1 -1
  55. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/entry_points.txt +1 -0
  56. swift-2.33.1.dist-info/pbr.json +1 -0
  57. swift-2.32.1.dist-info/pbr.json +0 -1
  58. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-audit +0 -0
  59. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-auditor +0 -0
  60. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-info +0 -0
  61. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-reaper +0 -0
  62. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-replicator +0 -0
  63. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-server +0 -0
  64. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-config +0 -0
  65. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-auditor +0 -0
  66. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-reconciler +0 -0
  67. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-replicator +0 -0
  68. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-server +0 -0
  69. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-sharder +0 -0
  70. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-sync +0 -0
  71. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-updater +0 -0
  72. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-dispersion-populate +0 -0
  73. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-dispersion-report +0 -0
  74. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-drive-audit +0 -0
  75. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-form-signature +0 -0
  76. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-get-nodes +0 -0
  77. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-init +0 -0
  78. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-auditor +0 -0
  79. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-expirer +0 -0
  80. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-info +0 -0
  81. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-reconstructor +0 -0
  82. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-relinker +0 -0
  83. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-replicator +0 -0
  84. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-server +0 -0
  85. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-updater +0 -0
  86. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-oldies +0 -0
  87. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-orphans +0 -0
  88. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-proxy-server +0 -0
  89. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-recon +0 -0
  90. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-reconciler-enqueue +0 -0
  91. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-ring-builder +0 -0
  92. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-ring-builder-analyzer +0 -0
  93. {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-ring-composer +0 -0
  94. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/LICENSE +0 -0
  95. {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/top_level.txt +0 -0
@@ -114,6 +114,7 @@ from swift.common.utils.timestamp import ( # noqa
114
114
  EPOCH,
115
115
  last_modified_date_to_timestamp,
116
116
  normalize_delete_at_timestamp,
117
+ UTC,
117
118
  )
118
119
  from swift.common.utils.ipaddrs import ( # noqa
119
120
  is_valid_ip,
@@ -170,6 +171,11 @@ LOG_LINE_DEFAULT_FORMAT = '{remote_addr} - - [{time.d}/{time.b}/{time.Y}' \
170
171
  '{trans_time:.4f} "{additional_info}" {pid} ' \
171
172
  '{policy_index}'
172
173
  DEFAULT_LOCK_TIMEOUT = 10
174
+ # this is coupled with object-server.conf's network_chunk_size; if someone is
175
+ # running that unreasonably small they may find this number inefficient, but in
176
+ # the more likely case they've increased the value to optimize high throughput
177
+ # transfers this will still cut off the transfer after the first chunk.
178
+ DEFAULT_DRAIN_LIMIT = 65536
173
179
 
174
180
 
175
181
  class InvalidHashPathConfigError(ValueError):
@@ -275,7 +281,7 @@ def backward(f, blocksize=4096):
275
281
 
276
282
 
277
283
  # Used when reading config values
278
- TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))
284
+ TRUE_VALUES = {'true', '1', 'yes', 'on', 't', 'y'}
279
285
 
280
286
 
281
287
  def non_negative_float(value):
@@ -516,22 +522,6 @@ def get_policy_index(req_headers, res_headers):
516
522
  return str(policy_index) if policy_index is not None else None
517
523
 
518
524
 
519
- class _UTC(datetime.tzinfo):
520
- """
521
- A tzinfo class for datetime objects that returns a 0 timedelta (UTC time)
522
- """
523
-
524
- def dst(self, dt):
525
- return datetime.timedelta(0)
526
- utcoffset = dst
527
-
528
- def tzname(self, dt):
529
- return 'UTC'
530
-
531
-
532
- UTC = _UTC()
533
-
534
-
535
525
  class LogStringFormatter(string.Formatter):
536
526
  def __init__(self, default='', quote=False):
537
527
  super(LogStringFormatter, self).__init__()
@@ -830,13 +820,14 @@ class FileLikeIter(object):
830
820
  self.closed = True
831
821
 
832
822
 
833
- def fs_has_free_space(fs_path, space_needed, is_percent):
823
+ def fs_has_free_space(fs_path_or_fd, space_needed, is_percent):
834
824
  """
835
825
  Check to see whether or not a filesystem has the given amount of space
836
826
  free. Unlike fallocate(), this does not reserve any space.
837
827
 
838
- :param fs_path: path to a file or directory on the filesystem; typically
839
- the path to the filesystem's mount point
828
+ :param fs_path_or_fd: path to a file or directory on the filesystem, or an
829
+ open file descriptor; if a directory, typically the path to the
830
+ filesystem's mount point
840
831
 
841
832
  :param space_needed: minimum bytes or percentage of free space
842
833
 
@@ -849,7 +840,10 @@ def fs_has_free_space(fs_path, space_needed, is_percent):
849
840
 
850
841
  :raises OSError: if fs_path does not exist
851
842
  """
852
- st = os.statvfs(fs_path)
843
+ if isinstance(fs_path_or_fd, int):
844
+ st = os.fstatvfs(fs_path_or_fd)
845
+ else:
846
+ st = os.statvfs(fs_path_or_fd)
853
847
  free_bytes = st.f_frsize * st.f_bavail
854
848
  if is_percent:
855
849
  size_bytes = st.f_frsize * st.f_blocks
@@ -1495,7 +1489,7 @@ class StatsdClient(object):
1495
1489
 
1496
1490
  def _timing(self, metric, timing_ms, sample_rate):
1497
1491
  # This method was added to disaggregate timing metrics when testing
1498
- return self._send(metric, timing_ms, 'ms', sample_rate)
1492
+ return self._send(metric, round(timing_ms, 4), 'ms', sample_rate)
1499
1493
 
1500
1494
  def timing(self, metric, timing_ms, sample_rate=None):
1501
1495
  return self._timing(metric, timing_ms, sample_rate)
@@ -1577,28 +1571,23 @@ class SwiftLoggerAdapter(logging.LoggerAdapter):
1577
1571
  # py3 does this for us already; add it for py2
1578
1572
  return self.logger.name
1579
1573
 
1580
- def get_metric_name(self, metric):
1581
- # subclasses may override this method to annotate the metric name
1582
- return metric
1574
+ def update_stats(self, *a, **kw):
1575
+ return self.logger.update_stats(*a, **kw)
1583
1576
 
1584
- def update_stats(self, metric, *a, **kw):
1585
- return self.logger.update_stats(self.get_metric_name(metric), *a, **kw)
1577
+ def increment(self, *a, **kw):
1578
+ return self.logger.increment(*a, **kw)
1586
1579
 
1587
- def increment(self, metric, *a, **kw):
1588
- return self.logger.increment(self.get_metric_name(metric), *a, **kw)
1580
+ def decrement(self, *a, **kw):
1581
+ return self.logger.decrement(*a, **kw)
1589
1582
 
1590
- def decrement(self, metric, *a, **kw):
1591
- return self.logger.decrement(self.get_metric_name(metric), *a, **kw)
1583
+ def timing(self, *a, **kw):
1584
+ return self.logger.timing(*a, **kw)
1592
1585
 
1593
- def timing(self, metric, *a, **kw):
1594
- return self.logger.timing(self.get_metric_name(metric), *a, **kw)
1586
+ def timing_since(self, *a, **kw):
1587
+ return self.logger.timing_since(*a, **kw)
1595
1588
 
1596
- def timing_since(self, metric, *a, **kw):
1597
- return self.logger.timing_since(self.get_metric_name(metric), *a, **kw)
1598
-
1599
- def transfer_rate(self, metric, *a, **kw):
1600
- return self.logger.transfer_rate(
1601
- self.get_metric_name(metric), *a, **kw)
1589
+ def transfer_rate(self, *a, **kw):
1590
+ return self.logger.transfer_rate(*a, **kw)
1602
1591
 
1603
1592
  @property
1604
1593
  def thread_locals(self):
@@ -1635,27 +1624,6 @@ class PrefixLoggerAdapter(SwiftLoggerAdapter):
1635
1624
  return (msg, kwargs)
1636
1625
 
1637
1626
 
1638
- class MetricsPrefixLoggerAdapter(SwiftLoggerAdapter):
1639
- """
1640
- Adds a prefix to all Statsd metrics' names.
1641
- """
1642
-
1643
- def __init__(self, logger, extra, metric_prefix):
1644
- """
1645
- :param logger: an instance of logging.Logger
1646
- :param extra: a dict-like object
1647
- :param metric_prefix: A prefix that will be added to the start of each
1648
- metric name such that the metric name is transformed to:
1649
- ``<metric_prefix>.<metric name>``. Note that the logger's
1650
- StatsdClient also adds its configured prefix to metric names.
1651
- """
1652
- super(MetricsPrefixLoggerAdapter, self).__init__(logger, extra)
1653
- self.metric_prefix = metric_prefix
1654
-
1655
- def get_metric_name(self, metric):
1656
- return '%s.%s' % (self.metric_prefix, metric)
1657
-
1658
-
1659
1627
  # double inheritance to support property with setter
1660
1628
  class LogAdapter(logging.LoggerAdapter, object):
1661
1629
  """
@@ -3425,7 +3393,7 @@ def put_recon_cache_entry(cache_entry, key, item):
3425
3393
 
3426
3394
  If ``item`` is an empty dict then any existing ``key`` in ``cache_entry``
3427
3395
  will be deleted. Similarly if ``item`` is a dict and any of its values are
3428
- empty dicts then the corrsponsing key will be deleted from the nested dict
3396
+ empty dicts then the corresponding key will be deleted from the nested dict
3429
3397
  in ``cache_entry``.
3430
3398
 
3431
3399
  We use nested recon cache entries when the object auditor
@@ -3704,27 +3672,66 @@ def csv_append(csv_string, item):
3704
3672
  return item
3705
3673
 
3706
3674
 
3707
- class CloseableChain(object):
3675
+ class ClosingIterator(object):
3708
3676
  """
3709
- Like itertools.chain, but with a close method that will attempt to invoke
3710
- its sub-iterators' close methods, if any.
3677
+ Wrap another iterator and close it, if possible, on completion/exception.
3678
+
3679
+ If other closeable objects are given then they will also be closed when
3680
+ this iterator is closed.
3681
+
3682
+ This is particularly useful for ensuring a generator properly closes its
3683
+ resources, even if the generator was never started.
3684
+
3685
+ This class may be subclassed to override the behavior of
3686
+ ``_get_next_item``.
3687
+
3688
+ :param iterable: iterator to wrap.
3689
+ :param other_closeables: other resources to attempt to close.
3711
3690
  """
3691
+ __slots__ = ('closeables', 'wrapped_iter', 'closed')
3712
3692
 
3713
- def __init__(self, *iterables):
3714
- self.iterables = iterables
3715
- self.chained_iter = itertools.chain(*self.iterables)
3693
+ def __init__(self, iterable, other_closeables=None):
3694
+ self.closeables = [iterable]
3695
+ if other_closeables:
3696
+ self.closeables.extend(other_closeables)
3697
+ # this is usually, but not necessarily, the same object
3698
+ self.wrapped_iter = iter(iterable)
3699
+ self.closed = False
3716
3700
 
3717
3701
  def __iter__(self):
3718
3702
  return self
3719
3703
 
3704
+ def _get_next_item(self):
3705
+ return next(self.wrapped_iter)
3706
+
3720
3707
  def __next__(self):
3721
- return next(self.chained_iter)
3708
+ try:
3709
+ return self._get_next_item()
3710
+ except Exception:
3711
+ # note: if wrapped_iter is a generator then the exception
3712
+ # already caused it to exit (without raising a GeneratorExit)
3713
+ # but we still need to close any other closeables.
3714
+ self.close()
3715
+ raise
3722
3716
 
3723
3717
  next = __next__ # py2
3724
3718
 
3725
3719
  def close(self):
3726
- for it in self.iterables:
3727
- close_if_possible(it)
3720
+ if not self.closed:
3721
+ for wrapped in self.closeables:
3722
+ close_if_possible(wrapped)
3723
+ self.closed = True
3724
+
3725
+
3726
+ class CloseableChain(ClosingIterator):
3727
+ """
3728
+ Like itertools.chain, but with a close method that will attempt to invoke
3729
+ its sub-iterators' close methods, if any.
3730
+ """
3731
+
3732
+ def __init__(self, *iterables):
3733
+ chained_iter = itertools.chain(*iterables)
3734
+ super(CloseableChain, self).__init__(chained_iter, iterables)
3728
3735
 
3729
3736
 
3730
3737
  def reiterate(iterable):
@@ -4040,7 +4047,7 @@ def closing_if_possible(maybe_closable):
4040
4047
  close_if_possible(maybe_closable)
4041
4048
 
4042
4049
 
4043
- def drain_and_close(response_or_app_iter):
4050
+ def drain_and_close(response_or_app_iter, read_limit=None):
4044
4051
  """
4045
4052
  Drain and close a swob or WSGI response.
4046
4053
 
@@ -4050,9 +4057,26 @@ def drain_and_close(response_or_app_iter):
4050
4057
  app_iter = getattr(response_or_app_iter, 'app_iter', response_or_app_iter)
4051
4058
  if app_iter is None: # for example, if we used the Response.body property
4052
4059
  return
4053
- for _chunk in app_iter:
4054
- pass
4055
- close_if_possible(app_iter)
4060
+ bytes_read = 0
4061
+ with closing_if_possible(app_iter):
4062
+ for chunk in app_iter:
4063
+ bytes_read += len(chunk)
4064
+ if read_limit is not None and bytes_read >= read_limit:
4065
+ break
4066
+
4067
+
4068
+ def friendly_close(resp):
4069
+ """
4070
+ Close a swob or WSGI response and maybe drain it.
4071
+
4072
+ It's basically free to "read" a HEAD or HTTPException response - the bytes
4073
+ are probably already in our network buffers. For a larger response we
4074
+ could possibly burn a lot of CPU/network trying to drain an un-used
4075
+ response. This method will read up to DEFAULT_DRAIN_LIMIT bytes to avoid
4076
+ logging a 499 in the proxy when it would otherwise be easy to just throw
4077
+ away the small/empty body.
4078
+ """
4079
+ return drain_and_close(resp, read_limit=DEFAULT_DRAIN_LIMIT)
4056
4080
 
4057
4081
 
4058
4082
  _rfc_token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+'
@@ -4396,6 +4420,47 @@ def document_iters_to_multipart_byteranges(ranges_iter, boundary):
4396
4420
  yield terminator
4397
4421
 
4398
4422
 
4423
+ class StringAlong(ClosingIterator):
4424
+ """
4425
+ This iterator wraps and iterates over a first iterator until it stops, and
4426
+ then iterates a second iterator, expecting it to stop immediately. This
4427
+ "stringing along" of the second iterator is useful when the exit of the
4428
+ second iterator must be delayed until the first iterator has stopped. For
4429
+ example, when the second iterator has already yielded its item(s) but
4430
+ has resources that mustn't be garbage collected until the first iterator
4431
+ has stopped.
4432
+
4433
+ The second iterator is expected to have no more items and raise
4434
+ StopIteration when called. If this is not the case then
4435
+ ``unexpected_items_func`` is called.
4436
+
4437
+ :param iterable: a first iterator that is wrapped and iterated.
4438
+ :param other_iter: a second iterator that is stopped once the first
4439
+ iterator has stopped.
4440
+ :param unexpected_items_func: a no-arg function that will be called if the
4441
+ second iterator is found to have remaining items.
4442
+ """
4443
+ __slots__ = ('other_iter', 'unexpected_items_func')
4444
+
4445
+ def __init__(self, iterable, other_iter, unexpected_items_func):
4446
+ super(StringAlong, self).__init__(iterable, [other_iter])
4447
+ self.other_iter = other_iter
4448
+ self.unexpected_items_func = unexpected_items_func
4449
+
4450
+ def _get_next_item(self):
4451
+ try:
4452
+ return super(StringAlong, self)._get_next_item()
4453
+ except StopIteration:
4454
+ try:
4455
+ next(self.other_iter)
4456
+ except StopIteration:
4457
+ pass
4458
+ else:
4459
+ self.unexpected_items_func()
4460
+ finally:
4461
+ raise
4462
+
4463
+
4399
4464
  def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
4400
4465
  logger):
4401
4466
  """
@@ -4445,20 +4510,11 @@ def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
4445
4510
  # ranges_iter has a finally block that calls close_swift_conn, and
4446
4511
  # so if that finally block fires before we read response_body_iter,
4447
4512
  # there's nothing there.
4448
- def string_along(useful_iter, useless_iter_iter, logger):
4449
- with closing_if_possible(useful_iter):
4450
- for x in useful_iter:
4451
- yield x
4452
-
4453
- try:
4454
- next(useless_iter_iter)
4455
- except StopIteration:
4456
- pass
4457
- else:
4458
- logger.warning(
4459
- "More than one part in a single-part response?")
4460
-
4461
- return string_along(response_body_iter, ranges_iter, logger)
4513
+ result = StringAlong(
4514
+ response_body_iter, ranges_iter,
4515
+ lambda: logger.warning(
4516
+ "More than one part in a single-part response?"))
4517
+ return result
4462
4518
 
4463
4519
 
4464
4520
  def multipart_byteranges_to_document_iters(input_file, boundary,
@@ -4562,13 +4618,14 @@ class Namespace(object):
4562
4618
  A Namespace encapsulates parameters that define a range of the object
4563
4619
  namespace.
4564
4620
 
4565
- :param name: the name of the ``Namespace``.
4621
+ :param name: the name of the ``Namespace``; this SHOULD take the form of a
4622
+ path to a container i.e. <account_name>/<container_name>.
4566
4623
  :param lower: the lower bound of object names contained in the namespace;
4567
4624
  the lower bound *is not* included in the namespace.
4568
4625
  :param upper: the upper bound of object names contained in the namespace;
4569
4626
  the upper bound *is* included in the namespace.
4570
4627
  """
4571
- __slots__ = ('_lower', '_upper', 'name')
4628
+ __slots__ = ('_lower', '_upper', '_name')
4572
4629
 
4573
4630
  @functools.total_ordering
4574
4631
  class MaxBound(NamespaceOuterBound):
@@ -4588,9 +4645,14 @@ class Namespace(object):
4588
4645
  def __init__(self, name, lower, upper):
4589
4646
  self._lower = Namespace.MIN
4590
4647
  self._upper = Namespace.MAX
4648
+ # We deliberately do not validate that the name has the form 'a/c'
4649
+ # because we want Namespace instantiation to be fast. Namespaces are
4650
+ # typically created using state that has previously been serialized
4651
+ # from a ShardRange instance, and the ShardRange will have validated
4652
+ # the name format.
4653
+ self._name = self._encode(name)
4591
4654
  self.lower = lower
4592
4655
  self.upper = upper
4593
- self.name = name
4594
4656
 
4595
4657
  def __iter__(self):
4596
4658
  yield 'name', str(self.name)
@@ -4618,7 +4680,7 @@ class Namespace(object):
4618
4680
  def __gt__(self, other):
4619
4681
  # a Namespace is greater than other if its entire namespace is greater
4620
4682
  # than other; if other is another Namespace that implies that this
4621
- # Namespace's lower must be less greater than or equal to the other
4683
+ # Namespace's lower must be greater than or equal to the other
4622
4684
  # Namespace's upper
4623
4685
  if self.lower == Namespace.MIN:
4624
4686
  return False
@@ -4663,6 +4725,21 @@ class Namespace(object):
4663
4725
  raise TypeError('must be a string type')
4664
4726
  return self._encode(bound)
4665
4727
 
4728
+ @property
4729
+ def account(self):
4730
+ return self._name.split('/')[0]
4731
+
4732
+ @property
4733
+ def container(self):
4734
+ # note: this may raise an IndexError if name does not have the expected
4735
+ # form 'a/c'; that is a deliberate trade-off against the overhead of
4736
+ # validating the name every time a Namespace is instantiated.
4737
+ return self._name.split('/')[1]
4738
+
4739
+ @property
4740
+ def name(self):
4741
+ return self._name
4742
+
4666
4743
  @property
4667
4744
  def lower(self):
4668
4745
  return self._lower
@@ -4988,7 +5065,7 @@ class ShardRange(Namespace):
4988
5065
  range record and the most recent version of an attribute should be
4989
5066
  persisted.
4990
5067
 
4991
- :param name: the name of the shard range; this should take the form of a
5068
+ :param name: the name of the shard range; this MUST take the form of a
4992
5069
  path to a container i.e. <account_name>/<container_name>.
4993
5070
  :param timestamp: a timestamp that represents the time at which the
4994
5071
  shard range's ``lower``, ``upper`` or ``deleted`` attributes were
@@ -5046,7 +5123,6 @@ class ShardRange(Namespace):
5046
5123
  CLEAVING_STATES = SHRINKING_STATES + SHARDING_STATES
5047
5124
 
5048
5125
  __slots__ = (
5049
- 'account', 'container',
5050
5126
  '_timestamp', '_meta_timestamp', '_state_timestamp', '_epoch',
5051
5127
  '_deleted', '_state', '_count', '_bytes',
5052
5128
  '_tombstones', '_reported')
@@ -5057,12 +5133,12 @@ class ShardRange(Namespace):
5057
5133
  deleted=False, state=None, state_timestamp=None, epoch=None,
5058
5134
  reported=False, tombstones=-1, **kwargs):
5059
5135
  super(ShardRange, self).__init__(name=name, lower=lower, upper=upper)
5060
- self.account = self.container = self._timestamp = \
5061
- self._meta_timestamp = self._state_timestamp = self._epoch = None
5136
+ self._validate_name(self.name)
5137
+ self._timestamp = self._meta_timestamp = self._state_timestamp = \
5138
+ self._epoch = None
5062
5139
  self._deleted = False
5063
5140
  self._state = None
5064
5141
 
5065
- self.name = name
5066
5142
  self.timestamp = timestamp
5067
5143
  self.deleted = deleted
5068
5144
  self.object_count = object_count
@@ -5076,11 +5152,18 @@ class ShardRange(Namespace):
5076
5152
 
5077
5153
  @classmethod
5078
5154
  def sort_key(cls, sr):
5155
+ return cls.sort_key_order(sr.name, sr.lower, sr.upper, sr.state)
5156
+
5157
+ @staticmethod
5158
+ def sort_key_order(name, lower, upper, state):
5159
+ # Use Namespace.MaxBound() for upper bound '', this will allow this
5160
+ # record to be sorted correctly by upper.
5161
+ upper = upper if upper else Namespace.MaxBound()
5079
5162
  # defines the sort order for shard ranges
5080
5163
  # note if this ever changes to *not* sort by upper first then it breaks
5081
5164
  # a key assumption for bisect, which is used by utils.find_namespace
5082
5165
  # with shard ranges.
5083
- return sr.upper, sr.state, sr.lower, sr.name
5166
+ return upper, state, lower, name
5084
5167
 
5085
5168
  def is_child_of(self, parent):
5086
5169
  """
@@ -5221,16 +5304,24 @@ class ShardRange(Namespace):
5221
5304
 
5222
5305
  @property
5223
5306
  def name(self):
5224
- return '%s/%s' % (self.account, self.container)
5225
-
5226
- @name.setter
5227
- def name(self, path):
5228
- path = self._encode(path)
5229
- if not path or len(path.split('/')) != 2 or not all(path.split('/')):
5307
+ return self._name
5308
+
5309
+ @staticmethod
5310
+ def _validate_name(name):
5311
+ # Validate the name format is 'a/c'. The ShardRange class is typically
5312
+ # used when shard state is created (e.g. by the sharder or
5313
+ # swift-manage-shard-ranges), but it is not typically used in
5314
+ # performance sensitive paths (e.g. listing namespaces), so we can
5315
+ # afford the overhead of being more defensive here.
5316
+ if not name or len(name.split('/')) != 2 or not all(name.split('/')):
5230
5317
  raise ValueError(
5231
5318
  "Name must be of the form '<account>/<container>', got %r" %
5232
- path)
5233
- self.account, self.container = path.split('/')
5319
+ name)
5320
+ return name
5321
+
5322
+ @name.setter
5323
+ def name(self, name):
5324
+ self._name = self._validate_name(self._encode(name))
5234
5325
 
5235
5326
  @property
5236
5327
  def timestamp(self):
@@ -5533,7 +5624,7 @@ class ShardRangeList(UserList):
5533
5624
  def __getitem__(self, index):
5534
5625
  # workaround for py3 - not needed for py2.7,py3.8
5535
5626
  result = self.data[index]
5536
- return ShardRangeList(result) if type(result) == list else result
5627
+ return ShardRangeList(result) if type(result) is list else result
5537
5628
 
5538
5629
  @property
5539
5630
  def lower(self):
@@ -6228,17 +6319,28 @@ def get_db_files(db_path):
6228
6319
  return sorted(results)
6229
6320
 
6230
6321
 
6231
- def systemd_notify(logger=None):
6322
+ def systemd_notify(logger=None, msg=b"READY=1"):
6232
6323
  """
6233
- Notify the service manager that started this process, if it is
6234
- systemd-compatible, that this process correctly started. To do so,
6235
- it communicates through a Unix socket stored in environment variable
6236
- NOTIFY_SOCKET. More information can be found in systemd documentation:
6324
+ Send systemd-compatible notifications.
6325
+
6326
+ Notify the service manager that started this process, if it has set the
6327
+ NOTIFY_SOCKET environment variable. For example, systemd will set this
6328
+ when the unit has ``Type=notify``. More information can be found in
6329
+ systemd documentation:
6237
6330
  https://www.freedesktop.org/software/systemd/man/sd_notify.html
6238
6331
 
6332
+ Common messages include::
6333
+
6334
+ READY=1
6335
+ RELOADING=1
6336
+ STOPPING=1
6337
+ STATUS=<some string>
6338
+
6239
6339
  :param logger: a logger object
6340
+ :param msg: the message to send
6240
6341
  """
6241
- msg = b'READY=1'
6342
+ if not isinstance(msg, bytes):
6343
+ msg = msg.encode('utf8')
6242
6344
  notify_socket = os.getenv('NOTIFY_SOCKET')
6243
6345
  if notify_socket:
6244
6346
  if notify_socket.startswith('@'):
@@ -6249,7 +6351,6 @@ def systemd_notify(logger=None):
6249
6351
  try:
6250
6352
  sock.connect(notify_socket)
6251
6353
  sock.sendall(msg)
6252
- del os.environ['NOTIFY_SOCKET']
6253
6354
  except EnvironmentError:
6254
6355
  if logger:
6255
6356
  logger.debug("Systemd notification failed", exc_info=True)
@@ -6320,7 +6421,7 @@ class Watchdog(object):
6320
6421
  :param key: timeout id, as returned by start()
6321
6422
  """
6322
6423
  try:
6323
- del(self._timeouts[key])
6424
+ del self._timeouts[key]
6324
6425
  except KeyError:
6325
6426
  pass
6326
6427
 
@@ -6385,7 +6486,7 @@ class WatchdogTimeout(object):
6385
6486
  self.watchdog.stop(self.key)
6386
6487
 
6387
6488
 
6388
- class CooperativeIterator(object):
6489
+ class CooperativeIterator(ClosingIterator):
6389
6490
  """
6390
6491
  Wrapper to make a deliberate periodic call to ``sleep()`` while iterating
6391
6492
  over wrapped iterator, providing an opportunity to switch greenthreads.
@@ -6405,26 +6506,20 @@ class CooperativeIterator(object):
6405
6506
 
6406
6507
  :param iterable: iterator to wrap.
6407
6508
  :param period: number of items yielded from this iterator between calls to
6408
- ``sleep()``.
6509
+ ``sleep()``; a negative value or 0 mean that cooperative sleep will be
6510
+ disabled.
6409
6511
  """
6410
- __slots__ = ('period', 'count', 'wrapped_iter')
6512
+ __slots__ = ('period', 'count')
6411
6513
 
6412
6514
  def __init__(self, iterable, period=5):
6413
- self.wrapped_iter = iterable
6515
+ super(CooperativeIterator, self).__init__(iterable)
6414
6516
  self.count = 0
6415
- self.period = period
6416
-
6417
- def __iter__(self):
6418
- return self
6419
-
6420
- def next(self):
6421
- if self.count >= self.period:
6422
- self.count = 0
6423
- sleep()
6424
- self.count += 1
6425
- return next(self.wrapped_iter)
6426
-
6427
- __next__ = next
6428
-
6429
- def close(self):
6430
- close_if_possible(self.wrapped_iter)
6517
+ self.period = max(0, period or 0)
6518
+
6519
+ def _get_next_item(self):
6520
+ if self.period:
6521
+ if self.count >= self.period:
6522
+ self.count = 0
6523
+ sleep()
6524
+ self.count += 1
6525
+ return super(CooperativeIterator, self)._get_next_item()
@@ -18,6 +18,7 @@
18
18
  import datetime
19
19
  import functools
20
20
  import math
21
+ import sys
21
22
  import time
22
23
 
23
24
  import six
@@ -189,12 +190,14 @@ class Timestamp(object):
189
190
  elif us < 0:
190
191
  t -= 1
191
192
  us += 1000000
192
- dt = datetime.datetime.utcfromtimestamp(t)
193
+ dt = datetime.datetime.fromtimestamp(t, UTC)
193
194
  dt = dt.replace(microsecond=us)
194
195
  else:
195
- dt = datetime.datetime.utcfromtimestamp(t)
196
+ dt = datetime.datetime.fromtimestamp(t, UTC)
196
197
 
197
198
  isoformat = dt.isoformat()
199
+ # need to drop tzinfo
200
+ isoformat = isoformat[:isoformat.index('+')]
198
201
  # python isoformat() doesn't include msecs when zero
199
202
  if len(isoformat) < len("1970-01-01T00:00:00.000000"):
200
203
  isoformat += ".000000"
@@ -397,3 +400,21 @@ def normalize_delete_at_timestamp(timestamp, high_precision=False):
397
400
  """
398
401
  fmt = '%016.5f' if high_precision else '%010d'
399
402
  return fmt % min(max(0, float(timestamp)), 9999999999.99999)
403
+
404
+
405
+ if sys.version_info < (3, 11):
406
+ class _UTC(datetime.tzinfo):
407
+ """
408
+ A tzinfo class for datetimes that returns a 0 timedelta (UTC time)
409
+ """
410
+
411
+ def dst(self, dt):
412
+ return datetime.timedelta(0)
413
+ utcoffset = dst
414
+
415
+ def tzname(self, dt):
416
+ return 'UTC'
417
+
418
+ UTC = _UTC()
419
+ else:
420
+ from datetime import UTC