swift 2.23.3-py3-none-any.whl → 2.35.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swift/__init__.py +29 -50
- swift/account/auditor.py +21 -118
- swift/account/backend.py +33 -28
- swift/account/reaper.py +37 -28
- swift/account/replicator.py +22 -0
- swift/account/server.py +60 -26
- swift/account/utils.py +28 -11
- swift-2.23.3.data/scripts/swift-account-audit → swift/cli/account_audit.py +23 -13
- swift-2.23.3.data/scripts/swift-config → swift/cli/config.py +2 -2
- swift/cli/container_deleter.py +5 -11
- swift-2.23.3.data/scripts/swift-dispersion-populate → swift/cli/dispersion_populate.py +8 -7
- swift/cli/dispersion_report.py +10 -9
- swift-2.23.3.data/scripts/swift-drive-audit → swift/cli/drive_audit.py +63 -21
- swift/cli/form_signature.py +3 -7
- swift-2.23.3.data/scripts/swift-get-nodes → swift/cli/get_nodes.py +8 -2
- swift/cli/info.py +154 -14
- swift/cli/manage_shard_ranges.py +705 -37
- swift-2.23.3.data/scripts/swift-oldies → swift/cli/oldies.py +25 -14
- swift-2.23.3.data/scripts/swift-orphans → swift/cli/orphans.py +7 -3
- swift/cli/recon.py +196 -67
- swift-2.23.3.data/scripts/swift-recon-cron → swift/cli/recon_cron.py +17 -20
- swift-2.23.3.data/scripts/swift-reconciler-enqueue → swift/cli/reconciler_enqueue.py +2 -3
- swift/cli/relinker.py +807 -126
- swift/cli/reload.py +135 -0
- swift/cli/ringbuilder.py +217 -20
- swift/cli/ringcomposer.py +0 -1
- swift/cli/shard-info.py +4 -3
- swift/common/base_storage_server.py +9 -20
- swift/common/bufferedhttp.py +48 -74
- swift/common/constraints.py +20 -15
- swift/common/container_sync_realms.py +9 -11
- swift/common/daemon.py +25 -8
- swift/common/db.py +195 -128
- swift/common/db_auditor.py +168 -0
- swift/common/db_replicator.py +95 -55
- swift/common/digest.py +141 -0
- swift/common/direct_client.py +144 -33
- swift/common/error_limiter.py +93 -0
- swift/common/exceptions.py +25 -1
- swift/common/header_key_dict.py +2 -9
- swift/common/http_protocol.py +373 -0
- swift/common/internal_client.py +129 -59
- swift/common/linkat.py +3 -4
- swift/common/manager.py +284 -67
- swift/common/memcached.py +390 -145
- swift/common/middleware/__init__.py +4 -0
- swift/common/middleware/account_quotas.py +211 -46
- swift/common/middleware/acl.py +3 -8
- swift/common/middleware/backend_ratelimit.py +230 -0
- swift/common/middleware/bulk.py +22 -34
- swift/common/middleware/catch_errors.py +1 -3
- swift/common/middleware/cname_lookup.py +6 -11
- swift/common/middleware/container_quotas.py +1 -1
- swift/common/middleware/container_sync.py +39 -17
- swift/common/middleware/copy.py +12 -0
- swift/common/middleware/crossdomain.py +22 -9
- swift/common/middleware/crypto/__init__.py +2 -1
- swift/common/middleware/crypto/crypto_utils.py +11 -15
- swift/common/middleware/crypto/decrypter.py +28 -11
- swift/common/middleware/crypto/encrypter.py +12 -17
- swift/common/middleware/crypto/keymaster.py +8 -15
- swift/common/middleware/crypto/kms_keymaster.py +2 -1
- swift/common/middleware/dlo.py +15 -11
- swift/common/middleware/domain_remap.py +5 -4
- swift/common/middleware/etag_quoter.py +128 -0
- swift/common/middleware/formpost.py +73 -70
- swift/common/middleware/gatekeeper.py +8 -1
- swift/common/middleware/keystoneauth.py +33 -3
- swift/common/middleware/list_endpoints.py +4 -4
- swift/common/middleware/listing_formats.py +85 -49
- swift/common/middleware/memcache.py +4 -95
- swift/common/middleware/name_check.py +3 -2
- swift/common/middleware/proxy_logging.py +160 -92
- swift/common/middleware/ratelimit.py +17 -10
- swift/common/middleware/read_only.py +6 -4
- swift/common/middleware/recon.py +59 -22
- swift/common/middleware/s3api/acl_handlers.py +25 -3
- swift/common/middleware/s3api/acl_utils.py +6 -1
- swift/common/middleware/s3api/controllers/__init__.py +6 -0
- swift/common/middleware/s3api/controllers/acl.py +3 -2
- swift/common/middleware/s3api/controllers/bucket.py +242 -137
- swift/common/middleware/s3api/controllers/logging.py +2 -2
- swift/common/middleware/s3api/controllers/multi_delete.py +43 -20
- swift/common/middleware/s3api/controllers/multi_upload.py +219 -133
- swift/common/middleware/s3api/controllers/obj.py +112 -8
- swift/common/middleware/s3api/controllers/object_lock.py +44 -0
- swift/common/middleware/s3api/controllers/s3_acl.py +2 -2
- swift/common/middleware/s3api/controllers/tagging.py +57 -0
- swift/common/middleware/s3api/controllers/versioning.py +36 -7
- swift/common/middleware/s3api/etree.py +22 -9
- swift/common/middleware/s3api/exception.py +0 -4
- swift/common/middleware/s3api/s3api.py +113 -41
- swift/common/middleware/s3api/s3request.py +384 -218
- swift/common/middleware/s3api/s3response.py +126 -23
- swift/common/middleware/s3api/s3token.py +16 -17
- swift/common/middleware/s3api/schema/delete.rng +1 -1
- swift/common/middleware/s3api/subresource.py +7 -10
- swift/common/middleware/s3api/utils.py +27 -10
- swift/common/middleware/slo.py +665 -358
- swift/common/middleware/staticweb.py +64 -37
- swift/common/middleware/symlink.py +51 -18
- swift/common/middleware/tempauth.py +76 -58
- swift/common/middleware/tempurl.py +191 -173
- swift/common/middleware/versioned_writes/__init__.py +51 -0
- swift/common/middleware/{versioned_writes.py → versioned_writes/legacy.py} +27 -26
- swift/common/middleware/versioned_writes/object_versioning.py +1482 -0
- swift/common/middleware/x_profile/exceptions.py +1 -4
- swift/common/middleware/x_profile/html_viewer.py +18 -19
- swift/common/middleware/x_profile/profile_model.py +1 -2
- swift/common/middleware/xprofile.py +10 -10
- swift-2.23.3.data/scripts/swift-container-server → swift/common/recon.py +13 -8
- swift/common/registry.py +147 -0
- swift/common/request_helpers.py +324 -57
- swift/common/ring/builder.py +67 -25
- swift/common/ring/composite_builder.py +1 -1
- swift/common/ring/ring.py +177 -51
- swift/common/ring/utils.py +1 -1
- swift/common/splice.py +10 -6
- swift/common/statsd_client.py +205 -0
- swift/common/storage_policy.py +49 -44
- swift/common/swob.py +86 -102
- swift/common/{utils.py → utils/__init__.py} +2163 -2772
- swift/common/utils/base.py +131 -0
- swift/common/utils/config.py +433 -0
- swift/common/utils/ipaddrs.py +256 -0
- swift/common/utils/libc.py +345 -0
- swift/common/utils/logs.py +859 -0
- swift/common/utils/timestamp.py +412 -0
- swift/common/wsgi.py +553 -535
- swift/container/auditor.py +14 -100
- swift/container/backend.py +490 -231
- swift/container/reconciler.py +126 -37
- swift/container/replicator.py +96 -22
- swift/container/server.py +358 -165
- swift/container/sharder.py +1540 -684
- swift/container/sync.py +94 -88
- swift/container/updater.py +53 -32
- swift/obj/auditor.py +153 -35
- swift/obj/diskfile.py +466 -217
- swift/obj/expirer.py +406 -124
- swift/obj/mem_diskfile.py +7 -4
- swift/obj/mem_server.py +1 -0
- swift/obj/reconstructor.py +523 -262
- swift/obj/replicator.py +249 -188
- swift/obj/server.py +207 -122
- swift/obj/ssync_receiver.py +145 -85
- swift/obj/ssync_sender.py +113 -54
- swift/obj/updater.py +652 -139
- swift/obj/watchers/__init__.py +0 -0
- swift/obj/watchers/dark_data.py +213 -0
- swift/proxy/controllers/account.py +11 -11
- swift/proxy/controllers/base.py +848 -604
- swift/proxy/controllers/container.py +433 -92
- swift/proxy/controllers/info.py +3 -2
- swift/proxy/controllers/obj.py +1000 -489
- swift/proxy/server.py +185 -112
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/AUTHORS +58 -11
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/METADATA +51 -56
- swift-2.35.0.dist-info/RECORD +201 -0
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/WHEEL +1 -1
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/entry_points.txt +43 -0
- swift-2.35.0.dist-info/pbr.json +1 -0
- swift/locale/de/LC_MESSAGES/swift.po +0 -1216
- swift/locale/en_GB/LC_MESSAGES/swift.po +0 -1207
- swift/locale/es/LC_MESSAGES/swift.po +0 -1085
- swift/locale/fr/LC_MESSAGES/swift.po +0 -909
- swift/locale/it/LC_MESSAGES/swift.po +0 -894
- swift/locale/ja/LC_MESSAGES/swift.po +0 -965
- swift/locale/ko_KR/LC_MESSAGES/swift.po +0 -964
- swift/locale/pt_BR/LC_MESSAGES/swift.po +0 -881
- swift/locale/ru/LC_MESSAGES/swift.po +0 -891
- swift/locale/tr_TR/LC_MESSAGES/swift.po +0 -832
- swift/locale/zh_CN/LC_MESSAGES/swift.po +0 -833
- swift/locale/zh_TW/LC_MESSAGES/swift.po +0 -838
- swift-2.23.3.data/scripts/swift-account-auditor +0 -23
- swift-2.23.3.data/scripts/swift-account-info +0 -51
- swift-2.23.3.data/scripts/swift-account-reaper +0 -23
- swift-2.23.3.data/scripts/swift-account-replicator +0 -34
- swift-2.23.3.data/scripts/swift-account-server +0 -23
- swift-2.23.3.data/scripts/swift-container-auditor +0 -23
- swift-2.23.3.data/scripts/swift-container-info +0 -55
- swift-2.23.3.data/scripts/swift-container-reconciler +0 -21
- swift-2.23.3.data/scripts/swift-container-replicator +0 -34
- swift-2.23.3.data/scripts/swift-container-sharder +0 -37
- swift-2.23.3.data/scripts/swift-container-sync +0 -23
- swift-2.23.3.data/scripts/swift-container-updater +0 -23
- swift-2.23.3.data/scripts/swift-dispersion-report +0 -24
- swift-2.23.3.data/scripts/swift-form-signature +0 -20
- swift-2.23.3.data/scripts/swift-init +0 -119
- swift-2.23.3.data/scripts/swift-object-auditor +0 -29
- swift-2.23.3.data/scripts/swift-object-expirer +0 -33
- swift-2.23.3.data/scripts/swift-object-info +0 -60
- swift-2.23.3.data/scripts/swift-object-reconstructor +0 -33
- swift-2.23.3.data/scripts/swift-object-relinker +0 -41
- swift-2.23.3.data/scripts/swift-object-replicator +0 -37
- swift-2.23.3.data/scripts/swift-object-server +0 -27
- swift-2.23.3.data/scripts/swift-object-updater +0 -23
- swift-2.23.3.data/scripts/swift-proxy-server +0 -23
- swift-2.23.3.data/scripts/swift-recon +0 -24
- swift-2.23.3.data/scripts/swift-ring-builder +0 -24
- swift-2.23.3.data/scripts/swift-ring-builder-analyzer +0 -22
- swift-2.23.3.data/scripts/swift-ring-composer +0 -22
- swift-2.23.3.dist-info/RECORD +0 -220
- swift-2.23.3.dist-info/pbr.json +0 -1
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/LICENSE +0 -0
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/top_level.txt +0 -0
swift/common/db_auditor.py
ADDED
@@ -0,0 +1,168 @@
+# Copyright (c) 2010-2018 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import time
+from random import random
+
+from eventlet import Timeout
+
+import swift.common.db
+from swift.common.utils import get_logger, audit_location_generator, \
+    config_true_value, dump_recon_cache, EventletRateLimiter
+from swift.common.daemon import Daemon
+from swift.common.exceptions import DatabaseAuditorException
+from swift.common.recon import DEFAULT_RECON_CACHE_PATH, \
+    server_type_to_recon_file
+
+
+class DatabaseAuditor(Daemon):
+    """Base Database Auditor."""
+
+    @property
+    def rcache(self):
+        return os.path.join(
+            self.recon_cache_path,
+            server_type_to_recon_file(self.server_type))
+
+    @property
+    def server_type(self):
+        raise NotImplementedError
+
+    @property
+    def broker_class(self):
+        raise NotImplementedError
+
+    def __init__(self, conf, logger=None):
+        self.conf = conf
+        self.logger = logger or get_logger(conf, log_route='{}-auditor'.format(
+            self.server_type))
+        self.devices = conf.get('devices', '/srv/node')
+        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
+        self.interval = float(conf.get('interval', 1800))
+        self.logging_interval = 3600  # once an hour
+        self.passes = 0
+        self.failures = 0
+        self.max_dbs_per_second = \
+            float(conf.get('{}s_per_second'.format(self.server_type), 200))
+        self.rate_limiter = EventletRateLimiter(self.max_dbs_per_second)
+        swift.common.db.DB_PREALLOCATION = \
+            config_true_value(conf.get('db_preallocation', 'f'))
+        self.recon_cache_path = conf.get('recon_cache_path',
+                                         DEFAULT_RECON_CACHE_PATH)
+        self.datadir = '{}s'.format(self.server_type)
+
+    def _one_audit_pass(self, reported):
+        all_locs = audit_location_generator(self.devices, self.datadir, '.db',
+                                            mount_check=self.mount_check,
+                                            logger=self.logger)
+        for path, device, partition in all_locs:
+            self.audit(path)
+            if time.time() - reported >= self.logging_interval:
+                self.logger.info(
+                    'Since %(time)s: %(server_type)s audits: %(pass)s '
+                    'passed audit, %(fail)s failed audit',
+                    {'time': time.ctime(reported),
+                     'pass': self.passes,
+                     'fail': self.failures,
+                     'server_type': self.server_type})
+                dump_recon_cache(
+                    {'{}_audits_since'.format(self.server_type): reported,
+                     '{}_audits_passed'.format(self.server_type): self.passes,
+                     '{}_audits_failed'.format(self.server_type):
+                         self.failures},
+                    self.rcache, self.logger)
+                reported = time.time()
+                self.passes = 0
+                self.failures = 0
+            self.rate_limiter.wait()
+        return reported
+
+    def run_forever(self, *args, **kwargs):
+        """Run the database audit until stopped."""
+        reported = time.time()
+        time.sleep(random() * self.interval)
+        while True:
+            self.logger.info(
+                'Begin %s audit pass.', self.server_type)
+            begin = time.time()
+            try:
+                reported = self._one_audit_pass(reported)
+            except (Exception, Timeout):
+                self.logger.increment('errors')
+                self.logger.exception('ERROR auditing')
+            elapsed = time.time() - begin
+            self.logger.info(
+                '%(server_type)s audit pass completed: %(elapsed).02fs',
+                {'elapsed': elapsed, 'server_type': self.server_type.title()})
+            dump_recon_cache({
+                '{}_auditor_pass_completed'.format(self.server_type): elapsed},
+                self.rcache, self.logger)
+            if elapsed < self.interval:
+                time.sleep(self.interval - elapsed)
+
+    def run_once(self, *args, **kwargs):
+        """Run the database audit once."""
+        self.logger.info(
+            'Begin %s audit "once" mode', self.server_type)
+        begin = reported = time.time()
+        self._one_audit_pass(reported)
+        elapsed = time.time() - begin
+        self.logger.info(
+            '%(server_type)s audit "once" mode completed: %(elapsed).02fs',
+            {'elapsed': elapsed, 'server_type': self.server_type.title()})
+        dump_recon_cache(
+            {'{}_auditor_pass_completed'.format(self.server_type): elapsed},
+            self.rcache, self.logger)
+
+    def audit(self, path):
+        """
+        Audits the given database path
+
+        :param path: the path to a db
+        """
+        start_time = time.time()
+        try:
+            broker = self.broker_class(path, logger=self.logger)
+            if not broker.is_deleted():
+                info = broker.get_info()
+                err = self._audit(info, broker)
+                if err:
+                    raise err
+                self.logger.increment('passes')
+                self.passes += 1
+                self.logger.debug('Audit passed for %s', broker)
+        except DatabaseAuditorException as e:
+            self.logger.increment('failures')
+            self.failures += 1
+            self.logger.error('Audit Failed for %(path)s: %(err)s',
+                              {'path': path, 'err': str(e)})
+        except (Exception, Timeout):
+            self.logger.increment('failures')
+            self.failures += 1
+            self.logger.exception(
+                'ERROR Could not get %(server_type)s info %(path)s',
+                {'server_type': self.server_type, 'path': path})
+        self.logger.timing_since('timing', start_time)
+
+    def _audit(self, info, broker):
+        """
+        Run any additional audit checks in sub auditor classes
+
+        :param info: The DB <account/container>_info
+        :param broker: The broker
+        :return: None on success, otherwise an exception to throw.
+        """
+        raise NotImplementedError
swift/common/db_replicator.py
CHANGED
@@ -23,7 +23,6 @@ import uuid
 import errno
 import re
 from contextlib import contextmanager
-from swift import gettext_ as _

 from eventlet import GreenPool, sleep, Timeout
 from eventlet.green import subprocess
@@ -34,7 +33,7 @@ from swift.common.utils import get_logger, whataremyips, storage_directory, \
     renamer, mkdirs, lock_parent_directory, config_true_value, \
     unlink_older_than, dump_recon_cache, rsync_module_interpolation, \
     parse_override_options, round_robin_iter, Everything, get_db_files, \
-    parse_db_filename, quote, RateLimitedIterator
+    parse_db_filename, quote, RateLimitedIterator, config_auto_int_value
 from swift.common import ring
 from swift.common.ring.utils import is_local_device
 from swift.common.http import HTTP_NOT_FOUND, HTTP_INSUFFICIENT_STORAGE, \
@@ -44,6 +43,8 @@ from swift.common.exceptions import DriveNotMounted
 from swift.common.daemon import Daemon
 from swift.common.swob import Response, HTTPNotFound, HTTPNoContent, \
     HTTPAccepted, HTTPBadRequest
+from swift.common.recon import DEFAULT_RECON_CACHE_PATH, \
+    server_type_to_recon_file


 DEBUG_TIMINGS_THRESHOLD = 10
@@ -172,8 +173,9 @@ class ReplConnection(BufferedHTTPConnection):
             response.data = response.read()
             return response
         except (Exception, Timeout):
+            self.close()
             self.logger.exception(
-
+                'ERROR reading HTTP response from %s', self.node)
             return None


@@ -193,19 +195,27 @@ class Replicator(Daemon):
         self.cpool = GreenPool(size=concurrency)
         swift_dir = conf.get('swift_dir', '/etc/swift')
         self.ring = ring.Ring(swift_dir, ring_name=self.server_type)
-        self._local_device_ids =
+        self._local_device_ids = {}
         self.per_diff = int(conf.get('per_diff', 1000))
         self.max_diffs = int(conf.get('max_diffs') or 100)
-        self.interval =
-
-        if 'run_pause' in conf
-
-
-
-
-
-
+        self.interval = float(conf.get('interval') or
+                              conf.get('run_pause') or 30)
+        if 'run_pause' in conf:
+            if 'interval' in conf:
+                self.logger.warning(
+                    'Option %(type)s-replicator/run_pause is deprecated '
+                    'and %(type)s-replicator/interval is already configured. '
+                    'You can safely remove run_pause; it is now ignored and '
+                    'will be removed in a future version.'
+                    % {'type': self.server_type})
+            else:
+                self.logger.warning(
+                    'Option %(type)s-replicator/run_pause is deprecated '
+                    'and will be removed in a future version. '
+                    'Update your configuration to use option '
+                    '%(type)s-replicator/interval.'
+                    % {'type': self.server_type})
         self.databases_per_second = float(
             conf.get('databases_per_second', 50))
         self.node_timeout = float(conf.get('node_timeout', 10))
         self.conn_timeout = float(conf.get('conn_timeout', 0.5))
@@ -217,15 +227,25 @@ class Replicator(Daemon):
         self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
         swift.common.db.DB_PREALLOCATION = \
             config_true_value(conf.get('db_preallocation', 'f'))
+        swift.common.db.QUERY_LOGGING = \
+            config_true_value(conf.get('db_query_logging', 'f'))
         self._zero_stats()
         self.recon_cache_path = conf.get('recon_cache_path',
-
-        self.recon_replicator =
+                                         DEFAULT_RECON_CACHE_PATH)
+        self.recon_replicator = server_type_to_recon_file(self.server_type)
         self.rcache = os.path.join(self.recon_cache_path,
                                    self.recon_replicator)
         self.extract_device_re = re.compile('%s%s([^%s]+)' % (
             self.root, os.path.sep, os.path.sep))
         self.handoffs_only = config_true_value(conf.get('handoffs_only', 'no'))
+        self.handoff_delete = config_auto_int_value(
+            conf.get('handoff_delete', 'auto'), 0)
+        if self.handoff_delete >= self.ring.replica_count:
+            self.logger.warning(
+                'handoff_delete=%d is too high to have an effect on a ring '
+                'with replica count %d. Disabling.',
+                self.handoff_delete, self.ring.replica_count)
+            self.handoff_delete = 0

     def _zero_stats(self):
         """Zero out the stats."""
@@ -239,15 +259,15 @@ class Replicator(Daemon):
         """Report the current stats to the logs."""
         now = time.time()
         self.logger.info(
-
-
+            'Attempted to replicate %(count)d dbs in %(time).5f seconds '
+            '(%(rate).5f/s)',
             {'count': self.stats['attempted'],
              'time': now - self.stats['start'],
              'rate': self.stats['attempted'] /
                 (now - self.stats['start'] + 0.0000001)})
-        self.logger.info(
-        self.logger.info(
-
+        self.logger.info('Removed %(remove)d dbs', self.stats)
+        self.logger.info('%(success)s successes, %(failure)s failures',
+                         self.stats)
         dump_recon_cache(
             {'replication_stats': self.stats,
              'replication_time': now - self.stats['start'],
@@ -293,7 +313,7 @@ class Replicator(Daemon):
         proc = subprocess.Popen(popen_args)
         proc.communicate()
         if proc.returncode != 0:
-            self.logger.error(
+            self.logger.error('ERROR rsync failed with %(code)s: %(args)s',
                               {'code': proc.returncode, 'args': popen_args})
         return proc.returncode == 0

@@ -426,7 +446,7 @@ class Replicator(Daemon):
         Make an http_connection using ReplConnection

         :param node: node dictionary from the ring
-        :param partition: partition
+        :param partition: partition to send in the url
         :param db_file: DB file

         :returns: ReplConnection object
@@ -542,7 +562,13 @@ class Replicator(Daemon):
             reason = '%s new rows' % max_row_delta
             self.logger.debug(log_template, reason)
             return True
-        if
+        if self.handoff_delete:
+            # delete handoff if we have had handoff_delete successes
+            successes_count = len([resp for resp in responses if resp])
+            delete_handoff = successes_count >= self.handoff_delete
+        else:
+            delete_handoff = responses and all(responses)
+        if not delete_handoff:
             reason = '%s/%s success' % (responses.count(True), len(responses))
             self.logger.debug(log_template, reason)
             return True
@@ -556,6 +582,12 @@ class Replicator(Daemon):
         self.logger.debug('Successfully deleted db %s', broker.db_file)
         return True

+    def _reclaim(self, broker, now=None):
+        if not now:
+            now = time.time()
+        return broker.reclaim(now - self.reclaim_age,
+                              now - (self.reclaim_age * 2))
+
     def _replicate_object(self, partition, object_file, node_id):
         """
         Replicate the db, choosing method based on whether or not it
@@ -579,9 +611,9 @@ class Replicator(Daemon):
         shouldbehere = True
         responses = []
         try:
-            broker = self.brokerclass(object_file, pending_timeout=30
-
-
+            broker = self.brokerclass(object_file, pending_timeout=30,
+                                      logger=self.logger)
+            self._reclaim(broker, now)
             info = broker.get_replication_info()
             bpart = self.ring.get_part(
                 info['account'], info.get('container'))
@@ -598,10 +630,10 @@ class Replicator(Daemon):
                 'replicate out and remove.' % (object_file, name, bpart))
         except (Exception, Timeout) as e:
             if 'no such table' in str(e):
-                self.logger.error(
+                self.logger.error('Quarantining DB %s', object_file)
                 quarantine_db(broker.db_file, broker.db_type)
             else:
-                self.logger.exception(
+                self.logger.exception('ERROR reading db %s', object_file)
             nodes = self.ring.get_part_nodes(int(partition))
             self._add_failure_stats([(failure_dev['replication_ip'],
                                       failure_dev['device'])
@@ -653,13 +685,13 @@ class Replicator(Daemon):
                     repl_nodes.append(next(more_nodes))
                 except StopIteration:
                     self.logger.error(
-
-
+                        'ERROR There are not enough handoff nodes to reach '
+                        'replica count for partition %s',
                         partition)
-                self.logger.error(
+                self.logger.error('ERROR Remote drive not mounted %s', node)
             except (Exception, Timeout):
-                self.logger.exception(
-
+                self.logger.exception('ERROR syncing %(file)s with node'
+                                      ' %(node)s',
                                       {'file': object_file, 'node': node})
             if not success:
                 failure_devs_info.add((node['replication_ip'], node['device']))
@@ -692,16 +724,22 @@ class Replicator(Daemon):
             suf_dir = os.path.dirname(hash_dir)
             with lock_parent_directory(object_file):
                 shutil.rmtree(hash_dir, True)
-            try:
-                os.rmdir(suf_dir)
-            except OSError as err:
-                if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
-                    self.logger.exception(
-                        _('ERROR while trying to clean up %s') % suf_dir)
-                    return False
             self.stats['remove'] += 1
             device_name = self.extract_device(object_file)
             self.logger.increment('removes.' + device_name)
+
+            for parent_dir in (suf_dir, os.path.dirname(suf_dir)):
+                try:
+                    os.rmdir(parent_dir)
+                except OSError as err:
+                    if err.errno == errno.ENOTEMPTY:
+                        break
+                    elif err.errno == errno.ENOENT:
+                        continue
+                    else:
+                        self.logger.exception(
+                            'ERROR while trying to clean up %s', parent_dir)
+                        return False
         return True

     def extract_device(self, object_file):
@@ -752,16 +790,17 @@ class Replicator(Daemon):
         dirs = []
         ips = whataremyips(self.bind_ip)
         if not ips:
-            self.logger.error(
+            self.logger.error('ERROR Failed to get my own IPs?')
             return

-        if self.handoffs_only:
+        if self.handoffs_only or self.handoff_delete:
             self.logger.warning(
-                'Starting replication pass with handoffs_only
-                '
-                '
+                'Starting replication pass with handoffs_only '
+                'and/or handoffs_delete enabled. '
+                'These modes are not intended for normal '
+                'operation; use these options with care.')

-        self._local_device_ids =
+        self._local_device_ids = {}
         found_local = False
         for node in self.ring.devs:
             if node and is_local_device(ips, self.port,
@@ -788,7 +827,7 @@ class Replicator(Daemon):
                     time.time() - self.reclaim_age)
             datadir = os.path.join(self.root, node['device'], self.datadir)
             if os.path.isdir(datadir):
-                self._local_device_ids
+                self._local_device_ids[node['id']] = node
                 part_filt = self._partition_dir_filter(
                     node['id'], partitions_to_replicate)
                 dirs.append((datadir, node['id'], part_filt))
@@ -796,16 +835,17 @@ class Replicator(Daemon):
             self.logger.error("Can't find itself %s with port %s in ring "
                               "file, not replicating",
                               ", ".join(ips), self.port)
-        self.logger.info(
+        self.logger.info('Beginning replication run')
         for part, object_file, node_id in self.roundrobin_datadirs(dirs):
             self.cpool.spawn_n(
                 self._replicate_object, part, object_file, node_id)
         self.cpool.waitall()
-        self.logger.info(
-        if self.handoffs_only:
+        self.logger.info('Replication run OVER')
+        if self.handoffs_only or self.handoff_delete:
             self.logger.warning(
-                'Finished replication pass with handoffs_only
-                'If
+                'Finished replication pass with handoffs_only and/or '
+                'handoffs_delete enabled. If these are no longer required, '
+                'disable them.')
         self._report_stats()

     def run_forever(self, *args, **kwargs):
@@ -818,7 +858,7 @@ class Replicator(Daemon):
             try:
                 self.run_once()
             except (Exception, Timeout):
-                self.logger.exception(
+                self.logger.exception('ERROR trying to replicate')
             elapsed = time.time() - begin
             if elapsed < self.interval:
                 sleep(self.interval - elapsed)
@@ -922,7 +962,7 @@ class ReplicatorRpc(object):
             info = self._get_synced_replication_info(broker, remote_info)
         except (Exception, Timeout) as e:
             if 'no such table' in str(e):
-                self.logger.error(
+                self.logger.error("Quarantining DB %s", broker)
                 quarantine_db(broker.db_file, broker.db_type)
                 return HTTPNotFound()
             raise
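Much of the behavioural change above comes from the new handoff_delete option: it is read with config_auto_int_value (so the default 'auto' maps to 0), disabled if it is not below the ring's replica count, and then used when deciding whether a handoff database may be removed after a replication pass. A small standalone sketch of that decision, for illustration only (the function name is not part of swift):

def can_delete_handoff(responses, handoff_delete=0):
    """responses: per-node success booleans from one replication pass."""
    if handoff_delete:
        # delete once at least handoff_delete replications succeeded
        successes = len([resp for resp in responses if resp])
        return successes >= handoff_delete
    # default behaviour: every configured replica must have succeeded
    return bool(responses) and all(responses)


# two of three replicas acknowledged the DB:
assert can_delete_handoff([True, True, False], handoff_delete=2)
assert not can_delete_handoff([True, True, False])  # default: need all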
swift/common/digest.py
ADDED
@@ -0,0 +1,141 @@
+# Copyright (c) 2022 NVIDIA
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import binascii
+import hmac
+
+from swift.common.utils import strict_b64decode
+
+
+DEFAULT_ALLOWED_DIGESTS = 'sha1 sha256 sha512'
+DEPRECATED_DIGESTS = {'sha1'}
+SUPPORTED_DIGESTS = set(DEFAULT_ALLOWED_DIGESTS.split()) | DEPRECATED_DIGESTS
+
+
+def get_hmac(request_method, path, expires, key, digest="sha1",
+             ip_range=None):
+    """
+    Returns the hexdigest string of the HMAC (see RFC 2104) for
+    the request.
+
+    :param request_method: Request method to allow.
+    :param path: The path to the resource to allow access to.
+    :param expires: Unix timestamp as an int for when the URL
+                    expires.
+    :param key: HMAC shared secret.
+    :param digest: constructor or the string name for the digest to use in
+                   calculating the HMAC
+                   Defaults to SHA1
+    :param ip_range: The ip range from which the resource is allowed
+                     to be accessed. We need to put the ip_range as the
+                     first argument to hmac to avoid manipulation of the path
+                     due to newlines being valid in paths
+                     e.g. /v1/a/c/o\\n127.0.0.1
+    :returns: hexdigest str of the HMAC for the request using the specified
+              digest algorithm.
+    """
+    # These are the three mandatory fields.
+    parts = [request_method, str(expires), path]
+    formats = [b"%s", b"%s", b"%s"]
+
+    if ip_range:
+        parts.insert(0, ip_range)
+        formats.insert(0, b"ip=%s")
+
+    if isinstance(key, str):
+        key = key.encode('utf8')
+
+    message = b'\n'.join(
+        fmt % (part if isinstance(part, bytes)
+               else part.encode("utf-8"))
+        for fmt, part in zip(formats, parts))
+
+    return hmac.new(key, message, digest).hexdigest()
+
+
+def get_allowed_digests(conf_digests, logger=None):
+    """
+    Pulls out 'allowed_digests' from the supplied conf. Then compares them with
+    the list of supported and deprecated digests and returns whatever remain.
+
+    When something is unsupported or deprecated it'll log a warning.
+
+    :param conf_digests: iterable of allowed digests. If empty, defaults to
+                         DEFAULT_ALLOWED_DIGESTS.
+    :param logger: optional logger; if provided, use it issue deprecation
+                   warnings
+    :returns: A set of allowed digests that are supported and a set of
+              deprecated digests.
+    :raises: ValueError, if there are no digests left to return.
+    """
+    allowed_digests = set(digest.lower() for digest in conf_digests)
+    if not allowed_digests:
+        allowed_digests = SUPPORTED_DIGESTS
+
+    not_supported = allowed_digests - SUPPORTED_DIGESTS
+    if not_supported:
+        if logger:
+            logger.warning('The following digest algorithms are configured '
+                           'but not supported: %s', ', '.join(not_supported))
+        allowed_digests -= not_supported
+    deprecated = allowed_digests & DEPRECATED_DIGESTS
+    if deprecated and logger:
+        if not conf_digests:
+            logger.warning('The following digest algorithms are allowed by '
+                           'default but deprecated: %s. Support will be '
+                           'disabled by default in a future release, and '
+                           'later removed entirely.', ', '.join(deprecated))
+        else:
+            logger.warning('The following digest algorithms are configured '
                           'but deprecated: %s. Support will be removed in a '
+                           'future release.', ', '.join(deprecated))
+    if not allowed_digests:
+        raise ValueError('No valid digest algorithms are configured')
+
+    return allowed_digests, deprecated
+
+
+def extract_digest_and_algorithm(value):
+    """
+    Returns a tuple of (digest_algorithm, hex_encoded_digest)
+    from a client-provided string of the form::
+
+        <hex-encoded digest>
+
+    or::
+
+        <algorithm>:<base64-encoded digest>
+
+    Note that hex-encoded strings must use one of sha1, sha256, or sha512.
+
+    :raises: ValueError on parse failures
+    """
+    if ':' in value:
+        algo, value = value.split(':', 1)
+        # accept both standard and url-safe base64
+        if ('-' in value or '_' in value) and not (
+                '+' in value or '/' in value):
+            value = value.replace('-', '+').replace('_', '/')
+        value = binascii.hexlify(
+            strict_b64decode(value + '==')).decode('ascii')
+    else:
+        binascii.unhexlify(value)  # make sure it decodes
+        algo = {
+            40: 'sha1',
+            64: 'sha256',
+            128: 'sha512',
+        }.get(len(value))
+        if not algo:
+            raise ValueError('Bad digest length')
+    return algo, value