swift 2.32.1-py2.py3-none-any.whl → 2.33.1-py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swift/account/server.py +1 -11
- swift/cli/info.py +28 -1
- swift-2.32.1.data/scripts/swift-recon-cron → swift/cli/recon_cron.py +4 -13
- swift/cli/reload.py +141 -0
- swift/common/daemon.py +12 -2
- swift/common/db.py +12 -8
- swift/common/http_protocol.py +76 -3
- swift/common/manager.py +18 -5
- swift/common/memcached.py +18 -12
- swift/common/middleware/proxy_logging.py +35 -27
- swift/common/middleware/s3api/acl_handlers.py +1 -1
- swift/common/middleware/s3api/controllers/__init__.py +3 -0
- swift/common/middleware/s3api/controllers/acl.py +3 -2
- swift/common/middleware/s3api/controllers/logging.py +2 -2
- swift/common/middleware/s3api/controllers/multi_upload.py +30 -6
- swift/common/middleware/s3api/controllers/object_lock.py +44 -0
- swift/common/middleware/s3api/s3api.py +4 -0
- swift/common/middleware/s3api/s3request.py +19 -12
- swift/common/middleware/s3api/s3response.py +13 -2
- swift/common/middleware/s3api/utils.py +1 -1
- swift/common/middleware/slo.py +395 -298
- swift/common/middleware/staticweb.py +45 -14
- swift/common/middleware/tempurl.py +132 -91
- swift/common/request_helpers.py +32 -8
- swift/common/storage_policy.py +1 -1
- swift/common/swob.py +5 -2
- swift/common/utils/__init__.py +230 -135
- swift/common/utils/timestamp.py +23 -2
- swift/common/wsgi.py +8 -0
- swift/container/backend.py +126 -21
- swift/container/replicator.py +42 -6
- swift/container/server.py +264 -145
- swift/container/sharder.py +50 -30
- swift/container/updater.py +1 -0
- swift/obj/auditor.py +2 -1
- swift/obj/diskfile.py +55 -19
- swift/obj/expirer.py +1 -13
- swift/obj/mem_diskfile.py +2 -1
- swift/obj/mem_server.py +1 -0
- swift/obj/replicator.py +2 -2
- swift/obj/server.py +12 -23
- swift/obj/updater.py +1 -0
- swift/obj/watchers/dark_data.py +72 -34
- swift/proxy/controllers/account.py +3 -2
- swift/proxy/controllers/base.py +217 -127
- swift/proxy/controllers/container.py +274 -289
- swift/proxy/controllers/obj.py +98 -141
- swift/proxy/server.py +2 -12
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-info +3 -0
- swift-2.33.1.data/scripts/swift-recon-cron +24 -0
- {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/AUTHORS +3 -1
- {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/METADATA +4 -3
- {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/RECORD +94 -91
- {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/WHEEL +1 -1
- {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/entry_points.txt +1 -0
- swift-2.33.1.dist-info/pbr.json +1 -0
- swift-2.32.1.dist-info/pbr.json +0 -1
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-audit +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-auditor +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-info +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-reaper +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-replicator +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-account-server +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-config +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-auditor +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-reconciler +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-replicator +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-server +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-sharder +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-sync +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-container-updater +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-dispersion-populate +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-dispersion-report +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-drive-audit +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-form-signature +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-get-nodes +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-init +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-auditor +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-expirer +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-info +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-reconstructor +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-relinker +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-replicator +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-server +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-object-updater +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-oldies +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-orphans +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-proxy-server +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-recon +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-reconciler-enqueue +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-ring-builder +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-ring-builder-analyzer +0 -0
- {swift-2.32.1.data → swift-2.33.1.data}/scripts/swift-ring-composer +0 -0
- {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/LICENSE +0 -0
- {swift-2.32.1.dist-info → swift-2.33.1.dist-info}/top_level.txt +0 -0
swift/container/sharder.py
CHANGED
@@ -50,6 +50,8 @@ CLEAVE_SUCCESS = 0
 CLEAVE_FAILED = 1
 CLEAVE_EMPTY = 2

+DEFAULT_PERIODIC_WARNINGS_INTERVAL = 24 * 3600
+

 def sharding_enabled(broker):
     # NB all shards will by default have been created with
@@ -852,18 +854,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
         ContainerReplicator.__init__(self, conf, logger=logger)
         ContainerSharderConf.__init__(self, conf)
         ContainerSharderConf.validate_conf(self)
-        if conf.get('auto_create_account_prefix'):
-            self.logger.warning('Option auto_create_account_prefix is '
-                                'deprecated. Configure '
-                                'auto_create_account_prefix under the '
-                                'swift-constraints section of '
-                                'swift.conf. This option will '
-                                'be ignored in a future release.')
-            auto_create_account_prefix = \
-                self.conf['auto_create_account_prefix']
-        else:
-            auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX
-        self.shards_account_prefix = (auto_create_account_prefix + 'shards_')
+        self.shards_account_prefix = (AUTO_CREATE_ACCOUNT_PREFIX + 'shards_')
         self.sharding_candidates = []
         self.shrinking_candidates = []
         replica_count = self.ring.replica_count
@@ -908,9 +899,13 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
                 (internal_client_conf_path, err))
         self.stats_interval = float(conf.get('stats_interval', '3600'))
         self.reported = 0
+        self.periodic_warnings_interval = float(
+            conf.get('periodic_warnings_interval',
+                     DEFAULT_PERIODIC_WARNINGS_INTERVAL))
+        self.periodic_warnings_start = time.time()
+        self.periodic_warnings = set()

-    def _format_log_msg(self, broker, msg, *args):
-        # make best effort to include broker properties...
+    def _get_broker_details(self, broker):
         try:
             db_file = broker.db_file
         except Exception:  # noqa
@@ -919,7 +914,11 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
             path = broker.path
         except Exception:  # noqa
             path = ''
+        return db_file, path

+    def _format_log_msg(self, broker, msg, *args):
+        # make best effort to include broker properties...
+        db_file, path = self._get_broker_details(broker)
         if args:
             msg = msg % args
         return '%s, path: %s, db: %s' % (msg, quote(path), db_file)
@@ -939,6 +938,19 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
     def warning(self, broker, msg, *args, **kwargs):
         self._log(logging.WARNING, broker, msg, *args, **kwargs)

+    def periodic_warning(self, broker, msg, *args, **kwargs):
+        now = time.time()
+        if now - self.periodic_warnings_start >= \
+                self.periodic_warnings_interval:
+            self.periodic_warnings.clear()
+            self.periodic_warnings_start = now
+
+        db_file, path = self._get_broker_details(broker)
+        key = (db_file, msg)
+        if key not in self.periodic_warnings:
+            self.periodic_warnings.add(key)
+            self._log(logging.WARNING, broker, msg, *args, **kwargs)
+
     def error(self, broker, msg, *args, **kwargs):
         self._log(logging.ERROR, broker, msg, *args, **kwargs)

@@ -1156,6 +1168,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
         params = params or {}
         params.setdefault('format', 'json')
         headers = {'X-Backend-Record-Type': 'shard',
+                   'X-Backend-Record-Shard-Format': 'full',
                    'X-Backend-Override-Deleted': 'true',
                    'X-Backend-Include-Deleted': str(include_deleted)}
         if newest:
@@ -1296,7 +1309,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
                 # Shrinking is how we resolve overlaps; we've got to
                 # allow multiple shards in that state
                 continue
-            shard_ranges = broker.get_shard_ranges(states=state)
+            shard_ranges = broker.get_shard_ranges(states=[state])
             # Transient overlaps can occur during the period immediately after
             # sharding if a root learns about new child shards before it learns
             # that the parent has sharded. These overlaps are normally
@@ -1549,8 +1562,8 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
         if broker.is_deleted():
             if broker.is_old_enough_to_reclaim(time.time(), self.reclaim_age) \
                     and not broker.is_empty_enough_to_reclaim():
-                self.warning(
-                    broker, 'Reclaimable db stuck waiting for shrinking')
+                self.periodic_warning(
+                    broker, 'Reclaimable db stuck waiting for shrinking')
             # if the container has been marked as deleted, all metadata will
             # have been erased so no point auditing. But we want it to pass, in
             # case any objects exist inside it.
@@ -1678,11 +1691,15 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
                               dest_broker, node_id, info):
         success, responses = self._replicate_object(
             part, dest_broker.db_file, node_id)
+        replication_successes = responses.count(True)
         quorum = quorum_size(self.ring.replica_count)
-        if not success and responses.count(True) < quorum:
-            self.warning(
-                broker, 'Failed to sufficiently replicate misplaced objects: '
-                '%s successes, %s required', responses.count(True), quorum)
+        if not success and replication_successes < quorum:
+            self.warning(
+                broker, 'Failed to sufficiently replicate misplaced objects '
+                'shard %s in state %s: %s successes, %s required '
+                '(not removing objects), shard db: %s',
+                dest_shard_range.name, dest_shard_range.state_text,
+                replication_successes, quorum, dest_broker.db_file)
             return False

         if broker.get_info()['id'] != info['id']:
@@ -1700,9 +1717,9 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
             success = True

         if not success:
-            self.warning(broker,
-                         'Refused to remove misplaced objects for dest %s',
-                         dest_shard_range)
+            self.warning(broker, 'Refused to remove misplaced objects for '
+                         'dest %s in state %s',
+                         dest_shard_range.name, dest_shard_range.state_text)
         return success

     def _move_objects(self, src_broker, src_shard_range, policy_index,
@@ -1919,7 +1936,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
         # Create shard containers that are ready to receive redirected object
         # updates. Do this now, so that redirection can begin immediately
         # without waiting for cleaving to complete.
-        found_ranges = broker.get_shard_ranges(states=ShardRange.FOUND)
+        found_ranges = broker.get_shard_ranges(states=[ShardRange.FOUND])
         created_ranges = []
         for shard_range in found_ranges:
             self._increment_stat('created', 'attempted')
@@ -2059,10 +2076,13 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
                 # insufficient replication or replication not even attempted;
                 # break because we don't want to progress the cleave cursor
                 # until each shard range has been successfully cleaved
-                self.warning(
-                    broker, 'Failed to sufficiently replicate cleaved shard '
-                    '%s: %s successes, %s required', shard_broker.path,
-                    replication_successes, replication_quorum)
+                self.warning(
+                    broker, 'Failed to sufficiently replicate cleaved shard '
+                    '%s in state %s: %s successes, %s required, '
+                    'shard db: %s',
+                    shard_broker.path, shard_range.state_text,
+                    replication_successes, replication_quorum,
+                    shard_broker.db_file)
                 self._increment_stat('cleaved', 'failure', statsd=True)
                 result = CLEAVE_FAILED
             else:
@@ -2214,7 +2234,7 @@ class ContainerSharder(ContainerSharderConf, ContainerReplicator):
             else:
                 own_shard_range.update_state(ShardRange.SHARDED)
                 modified_shard_ranges = broker.get_shard_ranges(
-                    states=ShardRange.CLEAVED)
+                    states=[ShardRange.CLEAVED])
                 for sr in modified_shard_ranges:
                     sr.update_state(ShardRange.ACTIVE)
                 if (not broker.is_root_container() and not
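
The sharder changes above add a throttled logging channel: `periodic_warning()` records a `(db_file, msg)` key in a set and emits the warning only the first time that key is seen within each `periodic_warnings_interval` window (24 hours by default), clearing the set when a new window starts. Below is a minimal standalone sketch of that de-duplication pattern; the class name and the example path are illustrative, not part of Swift.

```python
import time
import logging


class PeriodicWarnings(object):
    """Sketch of the de-duplication behind periodic_warning():
    a given (key, message) pair is logged at most once per interval."""

    def __init__(self, interval=24 * 3600):
        self.interval = interval
        self.window_start = time.time()
        self.seen = set()

    def warn(self, key, msg):
        now = time.time()
        if now - self.window_start >= self.interval:
            # new window: forget what has already been warned about
            self.seen.clear()
            self.window_start = now
        if (key, msg) not in self.seen:
            self.seen.add((key, msg))
            logging.warning('%s, db: %s', msg, key)


# repeated calls within the same window emit one log line per key/message
warnings = PeriodicWarnings(interval=3600)
for _ in range(5):
    warnings.warn('/srv/node/d1/containers/0/abc/abc.db',
                  'Reclaimable db stuck waiting for shrinking')
```
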
swift/container/updater.py
CHANGED
swift/obj/auditor.py
CHANGED
@@ -24,7 +24,7 @@ from contextlib import closing
 from eventlet import Timeout

 from swift.obj import diskfile, replicator
-from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist,\
+from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
     DiskFileDeleted, DiskFileExpired, QuarantineRequest
 from swift.common.daemon import Daemon
 from swift.common.storage_policy import POLICIES
@@ -368,6 +368,7 @@ class ObjectAuditor(Daemon):
             return pid
         else:
             signal.signal(signal.SIGTERM, signal.SIG_DFL)
+            os.environ.pop('NOTIFY_SOCKET', None)
             if zero_byte_fps:
                 kwargs['zero_byte_fps'] = self.conf_zero_byte_fps
             if sleep_between_zbf_scanner:
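
The auditor change above clears `NOTIFY_SOCKET` in the forked worker process. Assuming the intent is the usual systemd pattern, only the parent daemon should report status over the notification socket, so children drop the variable before doing any work. A hedged sketch of that pattern (the `do_audit_pass` callable is hypothetical):

```python
import os


def fork_worker(do_audit_pass):
    """Fork a worker that cannot accidentally talk to systemd's notify
    socket: with NOTIFY_SOCKET removed, sd_notify-style calls become no-ops."""
    pid = os.fork()
    if pid == 0:
        os.environ.pop('NOTIFY_SOCKET', None)  # child only
        do_audit_pass()
        os._exit(0)
    return pid  # parent keeps NOTIFY_SOCKET and reports readiness itself
```
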
swift/obj/diskfile.py
CHANGED
@@ -65,7 +65,8 @@ from swift.common.utils import mkdirs, Timestamp, \
     get_md5_socket, F_SETPIPE_SZ, decode_timestamps, encode_timestamps, \
     MD5_OF_EMPTY_STRING, link_fd_to_path, \
     O_TMPFILE, makedirs_count, replace_partition_in_path, remove_directory, \
-    md5, is_file_older, non_negative_float
+    md5, is_file_older, non_negative_float, config_fallocate_value, \
+    fs_has_free_space, CooperativeIterator
 from swift.common.splice import splice, tee
 from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
     DiskFileCollision, DiskFileNoSpace, DiskFileDeviceUnavailable, \
@@ -428,19 +429,23 @@ def consolidate_hashes(partition_dir):
     with lock_path(partition_dir):
         hashes = read_hashes(partition_dir)

-        found_invalidation_entry = False
+        found_invalidation_entry = hashes_updated = False
         try:
             with open(invalidations_file, 'r') as inv_fh:
                 for line in inv_fh:
                     found_invalidation_entry = True
                     suffix = line.strip()
+                    if not valid_suffix(suffix):
+                        continue
+                    hashes_updated = True
                     hashes[suffix] = None
         except (IOError, OSError) as e:
             if e.errno != errno.ENOENT:
                 raise

-        if found_invalidation_entry:
+        if hashes_updated:
             write_hashes(partition_dir, hashes)
+        if found_invalidation_entry:
             # Now that all the invalidations are reflected in hashes.pkl, it's
             # safe to clear out the invalidations file.
             with open(invalidations_file, 'wb') as inv_fh:
@@ -751,6 +756,8 @@ class BaseDiskFileManager(object):
             replication_concurrency_per_device)
         self.replication_lock_timeout = int(conf.get(
             'replication_lock_timeout', 15))
+        self.fallocate_reserve, self.fallocate_is_percent = \
+            config_fallocate_value(conf.get('fallocate_reserve', '1%'))

         self.use_splice = False
         self.pipe_size = None
@@ -1793,10 +1800,12 @@ class BaseDiskFileWriter(object):
     :param bytes_per_sync: number bytes written between sync calls
     :param diskfile: the diskfile creating this DiskFileWriter instance
     :param next_part_power: the next partition power to be used
+    :param extension: the file extension to be used; may be used internally
+        to distinguish between PUT/POST/DELETE operations
     """

     def __init__(self, name, datadir, size, bytes_per_sync, diskfile,
-                 next_part_power):
+                 next_part_power, extension='.data'):
         # Parameter tracking
         self._name = name
         self._datadir = datadir
@@ -1807,11 +1816,11 @@ class BaseDiskFileWriter(object):
         self._bytes_per_sync = bytes_per_sync
         self._diskfile = diskfile
         self.next_part_power = next_part_power
+        self._extension = extension

         # Internal attributes
         self._upload_size = 0
         self._last_sync = 0
-        self._extension = '.data'
         self._put_succeeded = False

     @property
@@ -1856,13 +1865,26 @@ class BaseDiskFileWriter(object):
                 # No more inodes in filesystem
                 raise DiskFileNoSpace()
             raise
-        if self._size:
+        if self._extension == '.ts':
+            # DELETEs always bypass any free-space reserve checks
+            pass
+        elif self._size:
             try:
                 fallocate(self._fd, self._size)
             except OSError as err:
                 if err.errno in (errno.ENOSPC, errno.EDQUOT):
                     raise DiskFileNoSpace()
                 raise
+        else:
+            # If we don't know the size (i.e. self._size is None) or the size
+            # is known to be zero, we still want to block writes once we're
+            # past the reserve threshold.
+            if not fs_has_free_space(
+                self._fd,
+                self.manager.fallocate_reserve,
+                self.manager.fallocate_is_percent
+            ):
+                raise DiskFileNoSpace()
         return self

     def close(self):
@@ -2088,11 +2110,13 @@ class BaseDiskFileReader(object):
     :param pipe_size: size of pipe buffer used in zero-copy operations
     :param diskfile: the diskfile creating this DiskFileReader instance
     :param keep_cache: should resulting reads be kept in the buffer cache
+    :param cooperative_period: the period parameter when does cooperative
+        yielding during file read
     """
     def __init__(self, fp, data_file, obj_size, etag,
                  disk_chunk_size, keep_cache_size, device_path, logger,
                  quarantine_hook, use_splice, pipe_size, diskfile,
-                 keep_cache=False):
+                 keep_cache=False, cooperative_period=0):
         # Parameter tracking
         self._fp = fp
         self._data_file = data_file
@@ -2111,6 +2135,7 @@ class BaseDiskFileReader(object):
             self._keep_cache = obj_size < keep_cache_size
         else:
             self._keep_cache = False
+        self._cooperative_period = cooperative_period

         # Internal Attributes
         self._iter_etag = None
@@ -2135,6 +2160,10 @@ class BaseDiskFileReader(object):
             self._iter_etag.update(chunk)

     def __iter__(self):
+        return CooperativeIterator(
+            self._inner_iter(), period=self._cooperative_period)
+
+    def _inner_iter(self):
         """Returns an iterator over the data file."""
         try:
             dropped_cache = 0
@@ -2953,7 +2982,7 @@ class BaseDiskFile(object):
         with self.open(current_time=current_time):
             return self.get_metadata()

-    def reader(self, keep_cache=False,
+    def reader(self, keep_cache=False, cooperative_period=0,
                _quarantine_hook=lambda m: None):
         """
         Return a :class:`swift.common.swob.Response` class compatible
@@ -2965,6 +2994,8 @@ class BaseDiskFile(object):

         :param keep_cache: caller's preference for keeping data read in the
                            OS buffer cache
+        :param cooperative_period: the period parameter for cooperative
+                                   yielding during file read
         :param _quarantine_hook: 1-arg callable called when obj quarantined;
                                  the arg is the reason for quarantine.
                                  Default is to ignore it.
@@ -2976,19 +3007,23 @@ class BaseDiskFile(object):
             self._metadata['ETag'], self._disk_chunk_size,
             self._manager.keep_cache_size, self._device_path, self._logger,
             use_splice=self._use_splice, quarantine_hook=_quarantine_hook,
-            pipe_size=self._pipe_size, diskfile=self, keep_cache=keep_cache)
+            pipe_size=self._pipe_size, diskfile=self, keep_cache=keep_cache,
+            cooperative_period=cooperative_period)
         # At this point the reader object is now responsible for closing
         # the file pointer.
         self._fp = None
         return dr

-    def writer(self, size=None):
+    def _writer(self, size, extension):
         return self.writer_cls(self._name, self._datadir, size,
                                self._bytes_per_sync, self,
-                               self.next_part_power)
+                               self.next_part_power, extension=extension)
+
+    def writer(self, size=None):
+        return self._writer(size, '.data')

     @contextmanager
-    def create(self, size=None):
+    def create(self, size=None, extension='.data'):
         """
         Context manager to create a file. We create a temporary file first, and
         then return a DiskFileWriter object to encapsulate the state.
@@ -3001,9 +3036,11 @@ class BaseDiskFile(object):

         :param size: optional initial size of file to explicitly allocate on
                      disk
+        :param extension: file extension to use for the newly-created file;
+                          defaults to ``.data`` for the sake of tests
         :raises DiskFileNoSpace: if a size is specified and allocation fails
         """
-        dfw = self.writer(size)
+        dfw = self._writer(size, extension)
         try:
             yield dfw.open()
         finally:
@@ -3019,8 +3056,7 @@ class BaseDiskFile(object):
         :raises DiskFileError: this implementation will raise the same
                                errors as the `create()` method.
         """
-        with self.create() as writer:
-            writer._extension = '.meta'
+        with self.create(extension='.meta') as writer:
             writer.put(metadata)

     def delete(self, timestamp):
@@ -3042,8 +3078,7 @@ class BaseDiskFile(object):
         """
         # this is dumb, only tests send in strings
         timestamp = Timestamp(timestamp)
-        with self.create() as deleter:
-            deleter._extension = '.ts'
+        with self.create(extension='.ts') as deleter:
             deleter.put({'X-Timestamp': timestamp.internal})


@@ -3136,11 +3171,12 @@ class ECDiskFileReader(BaseDiskFileReader):
     def __init__(self, fp, data_file, obj_size, etag,
                  disk_chunk_size, keep_cache_size, device_path, logger,
                  quarantine_hook, use_splice, pipe_size, diskfile,
-                 keep_cache=False):
+                 keep_cache=False, cooperative_period=0):
         super(ECDiskFileReader, self).__init__(
             fp, data_file, obj_size, etag,
             disk_chunk_size, keep_cache_size, device_path, logger,
-            quarantine_hook, use_splice, pipe_size, diskfile, keep_cache)
+            quarantine_hook, use_splice, pipe_size, diskfile, keep_cache,
+            cooperative_period)
         self.frag_buf = None
         self.frag_offset = 0
         self.frag_size = self._diskfile.policy.fragment_size
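
The diskfile changes wire a `cooperative_period` through the reader stack and wrap the chunk iterator in `CooperativeIterator`. The sketch below shows the general idea of such a wrapper, assuming it yields the greenthread every `period` chunks so a single large GET cannot monopolize the server; with the default `cooperative_period = 0` it passes chunks straight through. This is an illustrative re-implementation, not the `CooperativeIterator` shipped in `swift.common.utils`.

```python
import eventlet


class YieldingIterator(object):
    """Illustrative cooperative wrapper: yield control to other greenthreads
    after every `period` chunks; period=0 disables yielding entirely."""

    def __init__(self, iterable, period=5):
        self.wrapped_iter = iter(iterable)
        self.period = period
        self.count = 0

    def __iter__(self):
        return self

    def __next__(self):
        if self.period:
            if self.count >= self.period:
                self.count = 0
                eventlet.sleep()  # cooperative yield
            self.count += 1
        return next(self.wrapped_iter)


# usage sketch: stream a file in 64 KiB chunks, yielding every 5 chunks
def iter_chunks(fp, chunk_size=65536):
    while True:
        chunk = fp.read(chunk_size)
        if not chunk:
            break
        yield chunk

# reader = YieldingIterator(iter_chunks(open(path, 'rb')), period=5)
```
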
swift/obj/expirer.py
CHANGED
@@ -114,19 +114,7 @@ class ObjectExpirer(Daemon):
         self.reclaim_age = int(conf.get('reclaim_age', 604800))

     def read_conf_for_queue_access(self, swift):
-        if self.conf.get('auto_create_account_prefix'):
-            self.logger.warning('Option auto_create_account_prefix is '
-                                'deprecated. Configure '
-                                'auto_create_account_prefix under the '
-                                'swift-constraints section of '
-                                'swift.conf. This option will '
-                                'be ignored in a future release.')
-            auto_create_account_prefix = \
-                self.conf['auto_create_account_prefix']
-        else:
-            auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX
-
-        self.expiring_objects_account = auto_create_account_prefix + \
+        self.expiring_objects_account = AUTO_CREATE_ACCOUNT_PREFIX + \
             (self.conf.get('expiring_objects_account_name') or
              'expiring_objects')

swift/obj/mem_diskfile.py
CHANGED
@@ -426,13 +426,14 @@ class DiskFile(object):
         with self.open(current_time=current_time):
             return self.get_metadata()

-    def reader(self, keep_cache=False):
+    def reader(self, keep_cache=False, cooperative_period=0):
         """
         Return a swift.common.swob.Response class compatible "app_iter"
         object. The responsibility of closing the open file is passed to the
         DiskFileReader object.

         :param keep_cache:
+        :param cooperative_period:
         """
         dr = DiskFileReader(self._name, self._fp,
                             int(self._metadata['Content-Length']),
swift/obj/mem_server.py
CHANGED
swift/obj/replicator.py
CHANGED
@@ -493,7 +493,7 @@ class ObjectReplicator(Daemon):
                 return False
         return True

-    def update_deleted(self, job):
+    def revert(self, job):
         """
         High-level method that replicates a single partition that doesn't
         belong on this node.
@@ -993,7 +993,7 @@ class ObjectReplicator(Daemon):
                 except OSError:
                     continue
             if job['delete']:
-                self.run_pool.spawn(self.update_deleted, job)
+                self.run_pool.spawn(self.revert, job)
             else:
                 self.run_pool.spawn(self.update, job)
             current_nodes = None
swift/obj/server.py
CHANGED
@@ -42,7 +42,7 @@ from swift.common.exceptions import ConnectionTimeout, DiskFileQuarantined, \
     DiskFileNotExist, DiskFileCollision, DiskFileNoSpace, DiskFileDeleted, \
     DiskFileDeviceUnavailable, DiskFileExpired, ChunkReadTimeout, \
     ChunkReadError, DiskFileXattrNotSupported
-from swift.common.request_helpers import \
+from swift.common.request_helpers import resolve_ignore_range_header, \
     OBJECT_SYSMETA_CONTAINER_UPDATE_OVERRIDE_PREFIX
 from swift.obj import ssync_receiver
 from swift.common.http import is_success, HTTP_MOVED_PERMANENTLY
@@ -152,6 +152,7 @@ class ObjectController(BaseStorageServer):
             config_true_value(conf.get('keep_cache_private', 'false'))
         self.keep_cache_slo_manifest = \
             config_true_value(conf.get('keep_cache_slo_manifest', 'false'))
+        self.cooperative_period = int(conf.get("cooperative_period", 0))

         default_allowed_headers = '''
             content-disposition,
@@ -173,18 +174,8 @@ class ObjectController(BaseStorageServer):
         for header in extra_allowed_headers:
             if header not in RESERVED_DATAFILE_META:
                 self.allowed_headers.add(header)
-        if conf.get('auto_create_account_prefix'):
-            self.logger.warning('Option auto_create_account_prefix is '
-                                'deprecated. Configure '
-                                'auto_create_account_prefix under the '
-                                'swift-constraints section of '
-                                'swift.conf. This option will '
-                                'be ignored in a future release.')
-            self.auto_create_account_prefix = \
-                conf['auto_create_account_prefix']
-        else:
-            self.auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX

+        self.auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX
         self.expiring_objects_account = self.auto_create_account_prefix + \
             (conf.get('expiring_objects_account_name') or 'expiring_objects')
         self.expiring_objects_container_divisor = \
@@ -1090,14 +1081,7 @@ class ObjectController(BaseStorageServer):
         try:
             with disk_file.open(current_time=req_timestamp):
                 metadata = disk_file.get_metadata()
-                ignore_range_headers = set(
-                    h.strip().lower()
-                    for h in request.headers.get(
-                        'X-Backend-Ignore-Range-If-Metadata-Present',
-                        '').split(','))
-                if ignore_range_headers.intersection(
-                        h.lower() for h in metadata):
-                    request.headers.pop('Range', None)
+                resolve_ignore_range_header(request, metadata)
                 obj_size = int(metadata['Content-Length'])
                 file_x_ts = Timestamp(metadata['X-Timestamp'])
                 keep_cache = (
@@ -1114,10 +1098,15 @@ class ObjectController(BaseStorageServer):
                     )
                 )
                 conditional_etag = resolve_etag_is_at_header(request, metadata)
+                app_iter = disk_file.reader(
+                    keep_cache=keep_cache,
+                    cooperative_period=self.cooperative_period,
+                )
                 response = Response(
-                    app_iter=disk_file.reader(keep_cache=keep_cache),
-                    request=request, conditional_response=True,
-                    conditional_etag=conditional_etag)
+                    app_iter=app_iter, request=request,
+                    conditional_response=True,
+                    conditional_etag=conditional_etag,
+                )
                 response.headers['Content-Type'] = metadata.get(
                     'Content-Type', 'application/octet-stream')
                 for key, value in metadata.items():
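
The object server GET path above replaces its inline Range-suppression logic with the shared `resolve_ignore_range_header()` helper, now imported from `swift.common.request_helpers`. The sketch below is reconstructed from the removed lines; the packaged helper may differ in details such as validation.

```python
def resolve_ignore_range_header(request, metadata):
    """Drop the Range header when the object's metadata contains any header
    named in X-Backend-Ignore-Range-If-Metadata-Present."""
    ignore_range_headers = set(
        h.strip().lower()
        for h in request.headers.get(
            'X-Backend-Ignore-Range-If-Metadata-Present', '').split(','))
    if ignore_range_headers.intersection(h.lower() for h in metadata):
        request.headers.pop('Range', None)
```
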
swift/obj/updater.py
CHANGED