swift 2.32.0__py2.py3-none-any.whl → 2.34.0__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swift/account/auditor.py +11 -0
- swift/account/reaper.py +11 -1
- swift/account/replicator.py +22 -0
- swift/account/server.py +13 -12
- swift-2.32.0.data/scripts/swift-account-audit → swift/cli/account_audit.py +6 -2
- swift-2.32.0.data/scripts/swift-config → swift/cli/config.py +1 -1
- swift-2.32.0.data/scripts/swift-dispersion-populate → swift/cli/dispersion_populate.py +6 -2
- swift-2.32.0.data/scripts/swift-drive-audit → swift/cli/drive_audit.py +12 -3
- swift-2.32.0.data/scripts/swift-get-nodes → swift/cli/get_nodes.py +6 -2
- swift/cli/info.py +131 -3
- swift-2.32.0.data/scripts/swift-oldies → swift/cli/oldies.py +6 -3
- swift-2.32.0.data/scripts/swift-orphans → swift/cli/orphans.py +7 -2
- swift-2.32.0.data/scripts/swift-recon-cron → swift/cli/recon_cron.py +9 -18
- swift-2.32.0.data/scripts/swift-reconciler-enqueue → swift/cli/reconciler_enqueue.py +2 -3
- swift/cli/relinker.py +1 -1
- swift/cli/reload.py +141 -0
- swift/cli/ringbuilder.py +24 -0
- swift/common/daemon.py +12 -2
- swift/common/db.py +14 -9
- swift/common/db_auditor.py +2 -2
- swift/common/db_replicator.py +6 -0
- swift/common/exceptions.py +12 -0
- swift/common/http_protocol.py +76 -3
- swift/common/manager.py +120 -5
- swift/common/memcached.py +24 -25
- swift/common/middleware/account_quotas.py +144 -43
- swift/common/middleware/backend_ratelimit.py +166 -24
- swift/common/middleware/catch_errors.py +1 -3
- swift/common/middleware/cname_lookup.py +3 -5
- swift/common/middleware/container_sync.py +6 -10
- swift/common/middleware/crypto/crypto_utils.py +4 -5
- swift/common/middleware/crypto/decrypter.py +4 -5
- swift/common/middleware/crypto/kms_keymaster.py +2 -1
- swift/common/middleware/proxy_logging.py +57 -43
- swift/common/middleware/ratelimit.py +6 -7
- swift/common/middleware/recon.py +6 -7
- swift/common/middleware/s3api/acl_handlers.py +10 -1
- swift/common/middleware/s3api/controllers/__init__.py +3 -0
- swift/common/middleware/s3api/controllers/acl.py +3 -2
- swift/common/middleware/s3api/controllers/logging.py +2 -2
- swift/common/middleware/s3api/controllers/multi_upload.py +31 -15
- swift/common/middleware/s3api/controllers/obj.py +20 -1
- swift/common/middleware/s3api/controllers/object_lock.py +44 -0
- swift/common/middleware/s3api/s3api.py +6 -0
- swift/common/middleware/s3api/s3request.py +190 -74
- swift/common/middleware/s3api/s3response.py +48 -8
- swift/common/middleware/s3api/s3token.py +2 -2
- swift/common/middleware/s3api/utils.py +2 -1
- swift/common/middleware/slo.py +508 -310
- swift/common/middleware/staticweb.py +45 -14
- swift/common/middleware/tempauth.py +6 -4
- swift/common/middleware/tempurl.py +134 -93
- swift/common/middleware/x_profile/exceptions.py +1 -4
- swift/common/middleware/x_profile/html_viewer.py +9 -10
- swift/common/middleware/x_profile/profile_model.py +1 -2
- swift/common/middleware/xprofile.py +1 -2
- swift/common/request_helpers.py +101 -8
- swift/common/statsd_client.py +207 -0
- swift/common/storage_policy.py +1 -1
- swift/common/swob.py +5 -2
- swift/common/utils/__init__.py +331 -1774
- swift/common/utils/base.py +138 -0
- swift/common/utils/config.py +443 -0
- swift/common/utils/logs.py +999 -0
- swift/common/utils/timestamp.py +23 -2
- swift/common/wsgi.py +19 -3
- swift/container/auditor.py +11 -0
- swift/container/backend.py +136 -31
- swift/container/reconciler.py +11 -2
- swift/container/replicator.py +64 -7
- swift/container/server.py +276 -146
- swift/container/sharder.py +86 -42
- swift/container/sync.py +11 -1
- swift/container/updater.py +12 -2
- swift/obj/auditor.py +20 -3
- swift/obj/diskfile.py +63 -25
- swift/obj/expirer.py +154 -47
- swift/obj/mem_diskfile.py +2 -1
- swift/obj/mem_server.py +1 -0
- swift/obj/reconstructor.py +28 -4
- swift/obj/replicator.py +63 -24
- swift/obj/server.py +76 -59
- swift/obj/updater.py +12 -2
- swift/obj/watchers/dark_data.py +72 -34
- swift/proxy/controllers/account.py +3 -2
- swift/proxy/controllers/base.py +254 -148
- swift/proxy/controllers/container.py +274 -289
- swift/proxy/controllers/obj.py +120 -166
- swift/proxy/server.py +17 -13
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/AUTHORS +14 -4
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/METADATA +9 -7
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/RECORD +97 -120
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/entry_points.txt +39 -0
- swift-2.34.0.dist-info/pbr.json +1 -0
- swift-2.32.0.data/scripts/swift-account-auditor +0 -23
- swift-2.32.0.data/scripts/swift-account-info +0 -52
- swift-2.32.0.data/scripts/swift-account-reaper +0 -23
- swift-2.32.0.data/scripts/swift-account-replicator +0 -34
- swift-2.32.0.data/scripts/swift-account-server +0 -23
- swift-2.32.0.data/scripts/swift-container-auditor +0 -23
- swift-2.32.0.data/scripts/swift-container-info +0 -56
- swift-2.32.0.data/scripts/swift-container-reconciler +0 -21
- swift-2.32.0.data/scripts/swift-container-replicator +0 -34
- swift-2.32.0.data/scripts/swift-container-server +0 -23
- swift-2.32.0.data/scripts/swift-container-sharder +0 -37
- swift-2.32.0.data/scripts/swift-container-sync +0 -23
- swift-2.32.0.data/scripts/swift-container-updater +0 -23
- swift-2.32.0.data/scripts/swift-dispersion-report +0 -24
- swift-2.32.0.data/scripts/swift-form-signature +0 -20
- swift-2.32.0.data/scripts/swift-init +0 -119
- swift-2.32.0.data/scripts/swift-object-auditor +0 -29
- swift-2.32.0.data/scripts/swift-object-expirer +0 -33
- swift-2.32.0.data/scripts/swift-object-info +0 -60
- swift-2.32.0.data/scripts/swift-object-reconstructor +0 -33
- swift-2.32.0.data/scripts/swift-object-relinker +0 -23
- swift-2.32.0.data/scripts/swift-object-replicator +0 -37
- swift-2.32.0.data/scripts/swift-object-server +0 -27
- swift-2.32.0.data/scripts/swift-object-updater +0 -23
- swift-2.32.0.data/scripts/swift-proxy-server +0 -23
- swift-2.32.0.data/scripts/swift-recon +0 -24
- swift-2.32.0.data/scripts/swift-ring-builder +0 -37
- swift-2.32.0.data/scripts/swift-ring-builder-analyzer +0 -22
- swift-2.32.0.data/scripts/swift-ring-composer +0 -22
- swift-2.32.0.dist-info/pbr.json +0 -1
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/LICENSE +0 -0
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/WHEEL +0 -0
- {swift-2.32.0.dist-info → swift-2.34.0.dist-info}/top_level.txt +0 -0
swift/common/utils/timestamp.py
CHANGED
@@ -18,6 +18,7 @@
 import datetime
 import functools
 import math
+import sys
 import time
 
 import six
@@ -189,12 +190,14 @@ class Timestamp(object):
            elif us < 0:
                t -= 1
                us += 1000000
-            dt = datetime.datetime.utcfromtimestamp(t)
+            dt = datetime.datetime.fromtimestamp(t, UTC)
            dt = dt.replace(microsecond=us)
        else:
-            dt = datetime.datetime.utcfromtimestamp(t)
+            dt = datetime.datetime.fromtimestamp(t, UTC)
 
        isoformat = dt.isoformat()
+        # need to drop tzinfo
+        isoformat = isoformat[:isoformat.index('+')]
        # python isoformat() doesn't include msecs when zero
        if len(isoformat) < len("1970-01-01T00:00:00.000000"):
            isoformat += ".000000"
@@ -397,3 +400,21 @@ def normalize_delete_at_timestamp(timestamp, high_precision=False):
     """
     fmt = '%016.5f' if high_precision else '%010d'
     return fmt % min(max(0, float(timestamp)), 9999999999.99999)
+
+
+if sys.version_info < (3, 11):
+    class _UTC(datetime.tzinfo):
+        """
+        A tzinfo class for datetimes that returns a 0 timedelta (UTC time)
+        """
+
+        def dst(self, dt):
+            return datetime.timedelta(0)
+        utcoffset = dst
+
+        def tzname(self, dt):
+            return 'UTC'
+
+    UTC = _UTC()
+else:
+    from datetime import UTC
swift/common/wsgi.py
CHANGED
@@ -198,9 +198,14 @@ def get_socket(conf):
             sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)),
                           family=address_family)
             if 'cert_file' in conf:
+                if six.PY2:
+                    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+                else:
+                    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
+                context.verify_mode = ssl.CERT_NONE
+                context.load_cert_chain(conf['cert_file'], conf['key_file'])
                 warn_ssl = True
-                sock = ssl.wrap_socket(sock, certfile=conf['cert_file'],
-                                       keyfile=conf['key_file'])
+                sock = context.wrap_socket(sock, server_side=True)
         except socket.error as err:
             if err.args[0] != errno.EADDRINUSE:
                 raise
@@ -460,7 +465,10 @@ def run_server(conf, logger, sock, global_conf=None, ready_callback=None,
     except socket.error as err:
         if err.errno != errno.EINVAL:
             raise
-    pool.waitall()
+    finally:
+        pool.waitall()
+        if hasattr(app._pipeline_final_app, 'watchdog'):
+            app._pipeline_final_app.watchdog.kill()
 
 
 class StrategyBase(object):
@@ -487,6 +495,8 @@ class StrategyBase(object):
         capture_stdio(self.logger)
         drop_privileges(self.conf.get('user', 'swift'))
         del self.tracking_data  # children don't need to track siblings
+        # only MAINPID should be sending systemd notifications
+        os.environ.pop('NOTIFY_SOCKET', None)
 
     def shutdown_sockets(self):
         """
@@ -888,6 +898,7 @@ def run_wsgi(conf_path, app_section, *args, **kwargs):
         run_server(conf, logger, no_fork_sock, global_conf=global_conf,
                    ready_callback=strategy.signal_ready,
                    allow_modify_pipeline=allow_modify_pipeline)
+        systemd_notify(logger, "STOPPING=1")
         return 0
 
     def stop_with_signal(signum, *args):
@@ -981,8 +992,10 @@ def run_wsgi(conf_path, app_section, *args, **kwargs):
     else:
         logger.notice('%s received (%s)', signame, os.getpid())
     if running_context[1] == signal.SIGTERM:
+        systemd_notify(logger, "STOPPING=1")
         os.killpg(0, signal.SIGTERM)
     elif running_context[1] == signal.SIGUSR1:
+        systemd_notify(logger, "RELOADING=1")
         # set up a pipe, fork off a child to handle cleanup later,
         # and rexec ourselves with an environment variable set which will
         # indicate which fd (one of the pipe ends) to write a byte to
@@ -1041,6 +1054,9 @@ def run_wsgi(conf_path, app_section, *args, **kwargs):
             os.close(read_fd)
         except Exception:
             pass
+    else:
+        # SIGHUP or, less likely, run in "once" mode
+        systemd_notify(logger, "STOPPING=1")
 
     strategy.shutdown_sockets()
     signal.signal(signal.SIGTERM, signal.SIG_IGN)
swift/container/auditor.py
CHANGED
@@ -15,7 +15,9 @@
 
 
 from swift.container.backend import ContainerBroker
+from swift.common.daemon import run_daemon
 from swift.common.db_auditor import DatabaseAuditor
+from swift.common.utils import parse_options
 
 
 class ContainerAuditor(DatabaseAuditor):
@@ -26,3 +28,12 @@ class ContainerAuditor(DatabaseAuditor):
 
     def _audit(self, job, broker):
         return None
+
+
+def main():
+    conf_file, options = parse_options(once=True)
+    run_daemon(ContainerAuditor, conf_file, **options)
+
+
+if __name__ == '__main__':
+    main()
swift/container/backend.py
CHANGED
@@ -941,16 +941,16 @@ class ContainerBroker(DatabaseBroker):
             self._do_get_info_query(conn)
 
     def _get_alternate_object_stats(self):
-
-        if
+        db_state = self.get_db_state()
+        if db_state == SHARDING:
             other_info = self.get_brokers()[0]._get_info()
             stats = {'object_count': other_info['object_count'],
                      'bytes_used': other_info['bytes_used']}
-        elif
+        elif db_state == SHARDED and self.is_root_container():
             stats = self.get_shard_usage()
         else:
             stats = {}
-        return
+        return db_state, stats
 
     def get_info(self):
         """
@@ -1685,6 +1685,125 @@ class ContainerBroker(DatabaseBroker):
             if ('no such table: %s' % SHARD_RANGE_TABLE) not in str(err):
                 raise
 
+    def _make_filler_shard_range(self, namespaces, marker, end_marker):
+        if namespaces and namespaces[-1].upper == Namespace.MAX:
+            return None
+
+        # Insert a modified copy of own shard range to fill any gap between the
+        # end of any found and the upper bound of own shard range. Gaps
+        # enclosed within the found shard ranges are not filled.
+        own_shard_range = self.get_own_shard_range()
+        if namespaces:
+            last_upper = namespaces[-1].upper
+        else:
+            last_upper = max(marker or own_shard_range.lower,
+                             own_shard_range.lower)
+        required_upper = min(end_marker or own_shard_range.upper,
+                             own_shard_range.upper)
+        if required_upper > last_upper:
+            filler_sr = own_shard_range
+            filler_sr.lower = last_upper
+            filler_sr.upper = required_upper
+            return filler_sr
+        else:
+            return None
+
+    def get_namespaces(self, marker=None, end_marker=None, includes=None,
+                       reverse=False, states=None, fill_gaps=False):
+        """
+        Returns a list of persisted namespaces per input parameters.
+
+        :param marker: restricts the returned list to shard ranges whose
+            namespace includes or is greater than the marker value. If
+            ``reverse=True`` then ``marker`` is treated as ``end_marker``.
+            ``marker`` is ignored if ``includes`` is specified.
+        :param end_marker: restricts the returned list to shard ranges whose
+            namespace includes or is less than the end_marker value. If
+            ``reverse=True`` then ``end_marker`` is treated as ``marker``.
+            ``end_marker`` is ignored if ``includes`` is specified.
+        :param includes: restricts the returned list to the shard range that
+            includes the given value; if ``includes`` is specified then
+            ``fill_gaps``, ``marker`` and ``end_marker`` are ignored.
+        :param reverse: reverse the result order.
+        :param states: if specified, restricts the returned list to namespaces
+            that have one of the given states; should be a list of ints.
+        :param fill_gaps: if True, insert a modified copy of own shard range to
+            fill any gap between the end of any found shard ranges and the
+            upper bound of own shard range. Gaps enclosed within the found
+            shard ranges are not filled.
+        :return: a list of Namespace objects.
+        """
+        if includes is None and (marker == Namespace.MAX
+                                 or end_marker == Namespace.MIN):
+            return []
+
+        if reverse:
+            marker, end_marker = end_marker, marker
+        if marker and end_marker and marker >= end_marker:
+            return []
+
+        included_states = set(states) if states else None
+        with self.get() as conn:
+            # Namespace only needs 'name', 'lower' and 'upper', but the query
+            # also need to include 'state' to be used when subesequently
+            # sorting the rows. And the sorting can't be done within SQLite
+            # since the value for maximum upper bound is an empty string.
+
+            conditions = ['deleted = 0', 'name != ?']
+            params = [self.path]
+            if included_states:
+                conditions.append('state in (%s)' % ','.join(
+                    '?' * len(included_states)))
+                params.extend(included_states)
+            if includes is None:
+                if end_marker:
+                    conditions.append('lower < ?')
+                    params.append(end_marker)
+                if marker:
+                    conditions.append("(upper = '' OR upper > ?)")
+                    params.append(marker)
+            else:
+                conditions.extend(('lower < ?', "(upper = '' OR upper >= ?)"))
+                params.extend((includes, includes))
+            condition = ' WHERE ' + ' AND '.join(conditions)
+            sql = '''
+            SELECT name, lower, upper, state FROM %s%s
+            ''' % (SHARD_RANGE_TABLE, condition)
+            try:
+                data = conn.execute(sql, params)
+                data.row_factory = None
+                namespaces = [row for row in data]
+            except sqlite3.OperationalError as err:
+                if ('no such table: %s' % SHARD_RANGE_TABLE) in str(err):
+                    return []
+                else:
+                    raise
+
+        # Sort those namespaces in order, note that each namespace record also
+        # include additional attribute 'state'.
+        def sort_key(namespace):
+            return ShardRange.sort_key_order(name=namespace[0],
+                                             lower=namespace[1],
+                                             upper=namespace[2],
+                                             state=namespace[3])
+        namespaces.sort(key=sort_key)
+        # Convert the record tuples to Namespace objects.
+        namespaces = [Namespace(row[0], row[1], row[2]) for row in namespaces]
+        if includes:
+            return namespaces[:1] if namespaces else []
+
+        if fill_gaps:
+            filler_sr = self._make_filler_shard_range(
+                namespaces, marker, end_marker)
+            if filler_sr:
+                namespaces.append(Namespace(filler_sr.name,
+                                            filler_sr.lower,
+                                            filler_sr.upper))
+        if reverse:
+            namespaces.reverse()
+
+        return namespaces
+
     def _get_shard_range_rows(self, connection=None, marker=None,
                               end_marker=None, includes=None,
                               include_deleted=False, states=None,
@@ -1709,8 +1828,8 @@ class ContainerBroker(DatabaseBroker):
             ``marker`` and ``end_marker`` are ignored, but other constraints
             are applied (e.g. ``exclude_others`` and ``include_deleted``).
         :param include_deleted: include rows marked as deleted.
-        :param states: include only rows matching the given
-
+        :param states: include only rows matching the given states; should be
+            a list of ints.
         :param include_own: boolean that governs whether the row whose name
             matches the broker's path is included in the returned list. If
             True, that row is included unless it is excluded by other
@@ -1734,11 +1853,7 @@ class ContainerBroker(DatabaseBroker):
         if exclude_others and not include_own:
             return []
 
-        included_states = set()
-        if isinstance(states, (list, tuple, set)):
-            included_states.update(states)
-        elif states is not None:
-            included_states.add(states)
+        included_states = set(states) if states else None
 
         # defaults to be used when legacy db's are missing columns
         default_values = {'reported': 0,
@@ -1868,8 +1983,7 @@ class ContainerBroker(DatabaseBroker):
         :param reverse: reverse the result order.
         :param include_deleted: include items that have the delete marker set.
         :param states: if specified, restricts the returned list to shard
-            ranges that have the given
-            single int.
+            ranges that have one of the given states; should be a list of ints.
         :param include_own: boolean that governs whether the row whose name
             matches the broker's path is included in the returned list. If
             True, that row is included unless it is excluded by other
@@ -1906,18 +2020,9 @@ class ContainerBroker(DatabaseBroker):
             return shard_ranges[:1] if shard_ranges else []
 
         if fill_gaps:
-            own_shard_range = self.get_own_shard_range()
-            if shard_ranges:
-                last_upper = shard_ranges[-1].upper
-            else:
-                last_upper = max(marker or own_shard_range.lower,
-                                 own_shard_range.lower)
-            required_upper = min(end_marker or own_shard_range.upper,
-                                 own_shard_range.upper)
-            if required_upper > last_upper:
-                filler_sr = own_shard_range
-                filler_sr.lower = last_upper
-                filler_sr.upper = required_upper
+            filler_sr = self._make_filler_shard_range(
+                shard_ranges, marker, end_marker)
+            if filler_sr:
                 shard_ranges.append(filler_sr)
 
         if reverse:
@@ -2039,10 +2144,10 @@ class ContainerBroker(DatabaseBroker):
             self.logger.warning("Container '%s' cannot be set to sharding "
                                 "state: missing epoch", self.path)
             return False
-
-        if not
+        db_state = self.get_db_state()
+        if not db_state == UNSHARDED:
             self.logger.warning("Container '%s' cannot be set to sharding "
-                                "state while in %s state", self.path,
+                                "state while in %s state", self.path, db_state)
             return False
 
         info = self.get_info()
@@ -2120,11 +2225,11 @@ class ContainerBroker(DatabaseBroker):
         :return: True if the retiring DB was successfully unlinked, False
             otherwise.
         """
-
-        if not
+        db_state = self.get_db_state()
+        if not db_state == SHARDING:
             self.logger.warning("Container %r cannot be set to sharded "
                                 "state while in %s state",
-                                self.path,
+                                self.path, db_state)
             return False
 
         self.reload_db_files()
swift/container/reconciler.py
CHANGED
@@ -22,7 +22,7 @@ from eventlet import GreenPile, GreenPool, Timeout
 import six
 
 from swift.common import constraints
-from swift.common.daemon import Daemon
+from swift.common.daemon import Daemon, run_daemon
 from swift.common.direct_client import (
     direct_head_container, direct_delete_container_object,
     direct_put_container_object, ClientException)
@@ -31,7 +31,7 @@ from swift.common.request_helpers import MISPLACED_OBJECTS_ACCOUNT, \
     USE_REPLICATION_NETWORK_HEADER
 from swift.common.utils import get_logger, split_path, majority_size, \
     FileLikeIter, Timestamp, last_modified_date_to_timestamp, \
-    LRUCache, decode_timestamps, hash_path
+    LRUCache, decode_timestamps, hash_path, parse_options
 from swift.common.storage_policy import POLICIES
 
 MISPLACED_OBJECTS_CONTAINER_DIVISOR = 3600  # 1 hour
@@ -860,3 +860,12 @@ class ContainerReconciler(Daemon):
             self.stats = defaultdict(int)
             self.logger.info('sleeping between intervals (%ss)', self.interval)
             time.sleep(self.interval)
+
+
+def main():
+    conf_file, options = parse_options(once=True)
+    run_daemon(ContainerReconciler, conf_file, **options)
+
+
+if __name__ == '__main__':
+    main()
swift/container/replicator.py
CHANGED
@@ -17,18 +17,51 @@ import os
 import json
 from collections import defaultdict
 from eventlet import Timeout
+import optparse
 from random import choice
 
 from swift.container.sync_store import ContainerSyncStore
-from swift.container.backend import ContainerBroker, DATADIR, SHARDED
+from swift.container.backend import ContainerBroker, DATADIR, SHARDED, \
+    merge_shards
 from swift.container.reconciler import (
     MISPLACED_OBJECTS_ACCOUNT, incorrect_policy_index,
     get_reconciler_container_name, get_row_to_q_entry_translator)
 from swift.common import db_replicator
+from swift.common.daemon import run_daemon
 from swift.common.storage_policy import POLICIES
 from swift.common.swob import HTTPOk, HTTPAccepted
 from swift.common.http import is_success
-from swift.common.utils import Timestamp, majority_size, get_db_files
+from swift.common.utils import Timestamp, majority_size, get_db_files, \
+    parse_options
+
+
+def check_merge_own_shard_range(shards, broker, logger, source):
+    """
+    If broker has own_shard_range *with an epoch* then filter out an
+    own_shard_range *without an epoch*, and log a warning about it.
+
+    :param shards: a list of candidate ShardRanges to merge
+    :param broker: a ContainerBroker
+    :param logger: a logger
+    :param source: string to log as source of shards
+    :return: a list of ShardRanges to actually merge
+    """
+    # work-around for https://bugs.launchpad.net/swift/+bug/1980451
+    own_sr = broker.get_own_shard_range()
+    if own_sr.epoch is None:
+        return shards
+    to_merge = []
+    for shard in shards:
+        if shard['name'] == own_sr.name and not shard['epoch']:
+            shard_copy = dict(shard)
+            new_content = merge_shards(shard_copy, dict(own_sr))
+            if new_content and shard_copy['epoch'] is None:
+                logger.warning(
+                    'Ignoring remote osr w/o epoch, own_sr: %r, remote_sr: %r,'
+                    ' source: %s', dict(own_sr), shard, source)
+                continue
+        to_merge.append(shard)
+    return to_merge
 
 
 class ContainerReplicator(db_replicator.Replicator):
@@ -138,8 +171,10 @@ class ContainerReplicator(db_replicator.Replicator):
         with Timeout(self.node_timeout):
             response = http.replicate('get_shard_ranges')
         if response and is_success(response.status):
-
-
+            shards = json.loads(response.data.decode('ascii'))
+            shards = check_merge_own_shard_range(
+                shards, broker, self.logger, '%s%s' % (http.host, http.path))
+            broker.merge_shard_ranges(shards)
 
     def find_local_handoff_for_part(self, part):
         """
@@ -394,13 +429,35 @@ class ContainerReplicatorRpc(db_replicator.ReplicatorRpc):
     def _post_rsync_then_merge_hook(self, existing_broker, new_broker):
         # Note the following hook will need to change to using a pointer and
         # limit in the future.
-
-
+        shards = existing_broker.get_all_shard_range_data()
+        shards = check_merge_own_shard_range(
+            shards, new_broker, self.logger, 'rsync')
+        new_broker.merge_shard_ranges(shards)
 
     def merge_shard_ranges(self, broker, args):
-
+        shards = check_merge_own_shard_range(
+            args[0], broker, self.logger, 'repl_req')
+        broker.merge_shard_ranges(shards)
         return HTTPAccepted()
 
     def get_shard_ranges(self, broker, args):
         return HTTPOk(headers={'Content-Type': 'application/json'},
                       body=json.dumps(broker.get_all_shard_range_data()))
+
+
+def main():
+    parser = optparse.OptionParser("%prog CONFIG [options]")
+    parser.add_option('-d', '--devices',
+                      help=('Replicate only given devices. '
+                            'Comma-separated list. '
+                            'Only has effect if --once is used.'))
+    parser.add_option('-p', '--partitions',
+                      help=('Replicate only given partitions. '
+                            'Comma-separated list. '
+                            'Only has effect if --once is used.'))
+    conf_file, options = parse_options(parser=parser, once=True)
+    run_daemon(ContainerReplicator, conf_file, **options)
+
+
+if __name__ == '__main__':
+    main()