swift-2.23.3-py3-none-any.whl → swift-2.35.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swift/__init__.py +29 -50
- swift/account/auditor.py +21 -118
- swift/account/backend.py +33 -28
- swift/account/reaper.py +37 -28
- swift/account/replicator.py +22 -0
- swift/account/server.py +60 -26
- swift/account/utils.py +28 -11
- swift-2.23.3.data/scripts/swift-account-audit → swift/cli/account_audit.py +23 -13
- swift-2.23.3.data/scripts/swift-config → swift/cli/config.py +2 -2
- swift/cli/container_deleter.py +5 -11
- swift-2.23.3.data/scripts/swift-dispersion-populate → swift/cli/dispersion_populate.py +8 -7
- swift/cli/dispersion_report.py +10 -9
- swift-2.23.3.data/scripts/swift-drive-audit → swift/cli/drive_audit.py +63 -21
- swift/cli/form_signature.py +3 -7
- swift-2.23.3.data/scripts/swift-get-nodes → swift/cli/get_nodes.py +8 -2
- swift/cli/info.py +154 -14
- swift/cli/manage_shard_ranges.py +705 -37
- swift-2.23.3.data/scripts/swift-oldies → swift/cli/oldies.py +25 -14
- swift-2.23.3.data/scripts/swift-orphans → swift/cli/orphans.py +7 -3
- swift/cli/recon.py +196 -67
- swift-2.23.3.data/scripts/swift-recon-cron → swift/cli/recon_cron.py +17 -20
- swift-2.23.3.data/scripts/swift-reconciler-enqueue → swift/cli/reconciler_enqueue.py +2 -3
- swift/cli/relinker.py +807 -126
- swift/cli/reload.py +135 -0
- swift/cli/ringbuilder.py +217 -20
- swift/cli/ringcomposer.py +0 -1
- swift/cli/shard-info.py +4 -3
- swift/common/base_storage_server.py +9 -20
- swift/common/bufferedhttp.py +48 -74
- swift/common/constraints.py +20 -15
- swift/common/container_sync_realms.py +9 -11
- swift/common/daemon.py +25 -8
- swift/common/db.py +195 -128
- swift/common/db_auditor.py +168 -0
- swift/common/db_replicator.py +95 -55
- swift/common/digest.py +141 -0
- swift/common/direct_client.py +144 -33
- swift/common/error_limiter.py +93 -0
- swift/common/exceptions.py +25 -1
- swift/common/header_key_dict.py +2 -9
- swift/common/http_protocol.py +373 -0
- swift/common/internal_client.py +129 -59
- swift/common/linkat.py +3 -4
- swift/common/manager.py +284 -67
- swift/common/memcached.py +390 -145
- swift/common/middleware/__init__.py +4 -0
- swift/common/middleware/account_quotas.py +211 -46
- swift/common/middleware/acl.py +3 -8
- swift/common/middleware/backend_ratelimit.py +230 -0
- swift/common/middleware/bulk.py +22 -34
- swift/common/middleware/catch_errors.py +1 -3
- swift/common/middleware/cname_lookup.py +6 -11
- swift/common/middleware/container_quotas.py +1 -1
- swift/common/middleware/container_sync.py +39 -17
- swift/common/middleware/copy.py +12 -0
- swift/common/middleware/crossdomain.py +22 -9
- swift/common/middleware/crypto/__init__.py +2 -1
- swift/common/middleware/crypto/crypto_utils.py +11 -15
- swift/common/middleware/crypto/decrypter.py +28 -11
- swift/common/middleware/crypto/encrypter.py +12 -17
- swift/common/middleware/crypto/keymaster.py +8 -15
- swift/common/middleware/crypto/kms_keymaster.py +2 -1
- swift/common/middleware/dlo.py +15 -11
- swift/common/middleware/domain_remap.py +5 -4
- swift/common/middleware/etag_quoter.py +128 -0
- swift/common/middleware/formpost.py +73 -70
- swift/common/middleware/gatekeeper.py +8 -1
- swift/common/middleware/keystoneauth.py +33 -3
- swift/common/middleware/list_endpoints.py +4 -4
- swift/common/middleware/listing_formats.py +85 -49
- swift/common/middleware/memcache.py +4 -95
- swift/common/middleware/name_check.py +3 -2
- swift/common/middleware/proxy_logging.py +160 -92
- swift/common/middleware/ratelimit.py +17 -10
- swift/common/middleware/read_only.py +6 -4
- swift/common/middleware/recon.py +59 -22
- swift/common/middleware/s3api/acl_handlers.py +25 -3
- swift/common/middleware/s3api/acl_utils.py +6 -1
- swift/common/middleware/s3api/controllers/__init__.py +6 -0
- swift/common/middleware/s3api/controllers/acl.py +3 -2
- swift/common/middleware/s3api/controllers/bucket.py +242 -137
- swift/common/middleware/s3api/controllers/logging.py +2 -2
- swift/common/middleware/s3api/controllers/multi_delete.py +43 -20
- swift/common/middleware/s3api/controllers/multi_upload.py +219 -133
- swift/common/middleware/s3api/controllers/obj.py +112 -8
- swift/common/middleware/s3api/controllers/object_lock.py +44 -0
- swift/common/middleware/s3api/controllers/s3_acl.py +2 -2
- swift/common/middleware/s3api/controllers/tagging.py +57 -0
- swift/common/middleware/s3api/controllers/versioning.py +36 -7
- swift/common/middleware/s3api/etree.py +22 -9
- swift/common/middleware/s3api/exception.py +0 -4
- swift/common/middleware/s3api/s3api.py +113 -41
- swift/common/middleware/s3api/s3request.py +384 -218
- swift/common/middleware/s3api/s3response.py +126 -23
- swift/common/middleware/s3api/s3token.py +16 -17
- swift/common/middleware/s3api/schema/delete.rng +1 -1
- swift/common/middleware/s3api/subresource.py +7 -10
- swift/common/middleware/s3api/utils.py +27 -10
- swift/common/middleware/slo.py +665 -358
- swift/common/middleware/staticweb.py +64 -37
- swift/common/middleware/symlink.py +51 -18
- swift/common/middleware/tempauth.py +76 -58
- swift/common/middleware/tempurl.py +191 -173
- swift/common/middleware/versioned_writes/__init__.py +51 -0
- swift/common/middleware/{versioned_writes.py → versioned_writes/legacy.py} +27 -26
- swift/common/middleware/versioned_writes/object_versioning.py +1482 -0
- swift/common/middleware/x_profile/exceptions.py +1 -4
- swift/common/middleware/x_profile/html_viewer.py +18 -19
- swift/common/middleware/x_profile/profile_model.py +1 -2
- swift/common/middleware/xprofile.py +10 -10
- swift-2.23.3.data/scripts/swift-container-server → swift/common/recon.py +13 -8
- swift/common/registry.py +147 -0
- swift/common/request_helpers.py +324 -57
- swift/common/ring/builder.py +67 -25
- swift/common/ring/composite_builder.py +1 -1
- swift/common/ring/ring.py +177 -51
- swift/common/ring/utils.py +1 -1
- swift/common/splice.py +10 -6
- swift/common/statsd_client.py +205 -0
- swift/common/storage_policy.py +49 -44
- swift/common/swob.py +86 -102
- swift/common/{utils.py → utils/__init__.py} +2163 -2772
- swift/common/utils/base.py +131 -0
- swift/common/utils/config.py +433 -0
- swift/common/utils/ipaddrs.py +256 -0
- swift/common/utils/libc.py +345 -0
- swift/common/utils/logs.py +859 -0
- swift/common/utils/timestamp.py +412 -0
- swift/common/wsgi.py +553 -535
- swift/container/auditor.py +14 -100
- swift/container/backend.py +490 -231
- swift/container/reconciler.py +126 -37
- swift/container/replicator.py +96 -22
- swift/container/server.py +358 -165
- swift/container/sharder.py +1540 -684
- swift/container/sync.py +94 -88
- swift/container/updater.py +53 -32
- swift/obj/auditor.py +153 -35
- swift/obj/diskfile.py +466 -217
- swift/obj/expirer.py +406 -124
- swift/obj/mem_diskfile.py +7 -4
- swift/obj/mem_server.py +1 -0
- swift/obj/reconstructor.py +523 -262
- swift/obj/replicator.py +249 -188
- swift/obj/server.py +207 -122
- swift/obj/ssync_receiver.py +145 -85
- swift/obj/ssync_sender.py +113 -54
- swift/obj/updater.py +652 -139
- swift/obj/watchers/__init__.py +0 -0
- swift/obj/watchers/dark_data.py +213 -0
- swift/proxy/controllers/account.py +11 -11
- swift/proxy/controllers/base.py +848 -604
- swift/proxy/controllers/container.py +433 -92
- swift/proxy/controllers/info.py +3 -2
- swift/proxy/controllers/obj.py +1000 -489
- swift/proxy/server.py +185 -112
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/AUTHORS +58 -11
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/METADATA +51 -56
- swift-2.35.0.dist-info/RECORD +201 -0
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/WHEEL +1 -1
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/entry_points.txt +43 -0
- swift-2.35.0.dist-info/pbr.json +1 -0
- swift/locale/de/LC_MESSAGES/swift.po +0 -1216
- swift/locale/en_GB/LC_MESSAGES/swift.po +0 -1207
- swift/locale/es/LC_MESSAGES/swift.po +0 -1085
- swift/locale/fr/LC_MESSAGES/swift.po +0 -909
- swift/locale/it/LC_MESSAGES/swift.po +0 -894
- swift/locale/ja/LC_MESSAGES/swift.po +0 -965
- swift/locale/ko_KR/LC_MESSAGES/swift.po +0 -964
- swift/locale/pt_BR/LC_MESSAGES/swift.po +0 -881
- swift/locale/ru/LC_MESSAGES/swift.po +0 -891
- swift/locale/tr_TR/LC_MESSAGES/swift.po +0 -832
- swift/locale/zh_CN/LC_MESSAGES/swift.po +0 -833
- swift/locale/zh_TW/LC_MESSAGES/swift.po +0 -838
- swift-2.23.3.data/scripts/swift-account-auditor +0 -23
- swift-2.23.3.data/scripts/swift-account-info +0 -51
- swift-2.23.3.data/scripts/swift-account-reaper +0 -23
- swift-2.23.3.data/scripts/swift-account-replicator +0 -34
- swift-2.23.3.data/scripts/swift-account-server +0 -23
- swift-2.23.3.data/scripts/swift-container-auditor +0 -23
- swift-2.23.3.data/scripts/swift-container-info +0 -55
- swift-2.23.3.data/scripts/swift-container-reconciler +0 -21
- swift-2.23.3.data/scripts/swift-container-replicator +0 -34
- swift-2.23.3.data/scripts/swift-container-sharder +0 -37
- swift-2.23.3.data/scripts/swift-container-sync +0 -23
- swift-2.23.3.data/scripts/swift-container-updater +0 -23
- swift-2.23.3.data/scripts/swift-dispersion-report +0 -24
- swift-2.23.3.data/scripts/swift-form-signature +0 -20
- swift-2.23.3.data/scripts/swift-init +0 -119
- swift-2.23.3.data/scripts/swift-object-auditor +0 -29
- swift-2.23.3.data/scripts/swift-object-expirer +0 -33
- swift-2.23.3.data/scripts/swift-object-info +0 -60
- swift-2.23.3.data/scripts/swift-object-reconstructor +0 -33
- swift-2.23.3.data/scripts/swift-object-relinker +0 -41
- swift-2.23.3.data/scripts/swift-object-replicator +0 -37
- swift-2.23.3.data/scripts/swift-object-server +0 -27
- swift-2.23.3.data/scripts/swift-object-updater +0 -23
- swift-2.23.3.data/scripts/swift-proxy-server +0 -23
- swift-2.23.3.data/scripts/swift-recon +0 -24
- swift-2.23.3.data/scripts/swift-ring-builder +0 -24
- swift-2.23.3.data/scripts/swift-ring-builder-analyzer +0 -22
- swift-2.23.3.data/scripts/swift-ring-composer +0 -22
- swift-2.23.3.dist-info/RECORD +0 -220
- swift-2.23.3.dist-info/pbr.json +0 -1
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/LICENSE +0 -0
- {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/top_level.txt +0 -0
swift/obj/replicator.py
CHANGED
@@ -14,6 +14,7 @@
 # limitations under the License.
 
 from collections import defaultdict
+from optparse import OptionParser
 import os
 import errno
 from os.path import isdir, isfile, join, dirname
@@ -21,9 +22,7 @@ import random
 import shutil
 import time
 import itertools
-
-import six.moves.cPickle as pickle
-from swift import gettext_ as _
+import pickle  # nosec: B403
 
 import eventlet
 from eventlet import GreenPool, queue, tpool, Timeout, sleep
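The REPLICATE verb exchanges suffix-hash maps as pickled payloads, which is what the `# nosec` annotations wave off for bandit. A minimal sketch of that exchange, with invented suffixes and hashes:

```python
# Sketch of the suffix-hash exchange that motivates the pickle import: a
# REPLICATE response body is a pickled {suffix: hash} map that the
# replicator compares against its local map. Values here are invented.
import pickle

local_hash = {'abc': 'hash-1', '1f0': 'hash-2'}
remote_hash_map = {'abc': 'hash-1', '1f0': 'hash-OTHER'}

# What an object server would send back in the REPLICATE response body:
payload = pickle.dumps(remote_hash_map, protocol=2)

# What the replicator does with it; loading untrusted pickle is what
# bandit's B301 flags, hence the `# nosec` in the real code.
remote_hash = pickle.loads(payload)  # nosec: B301
suffixes_to_sync = [suffix for suffix in local_hash
                    if local_hash[suffix] != remote_hash.get(suffix)]
print(suffixes_to_sync)  # ['1f0']
```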
@@ -32,17 +31,19 @@ from eventlet.green import subprocess
 from swift.common.constraints import check_drive
 from swift.common.ring.utils import is_local_device
 from swift.common.utils import whataremyips, unlink_older_than, \
-    compute_eta, get_logger, dump_recon_cache, \
+    compute_eta, get_logger, dump_recon_cache, parse_options, \
     rsync_module_interpolation, mkdirs, config_true_value, \
-    config_auto_int_value, storage_directory, \
-    load_recon_cache, PrefixLoggerAdapter, parse_override_options, \
-    distribute_evenly
+    config_auto_int_value, storage_directory, load_recon_cache, EUCLEAN, \
+    parse_override_options, distribute_evenly, listdir, node_to_string, \
+    get_prefixed_logger
 from swift.common.bufferedhttp import http_connect
-from swift.common.daemon import Daemon
+from swift.common.daemon import Daemon, run_daemon
 from swift.common.http import HTTP_OK, HTTP_INSUFFICIENT_STORAGE
+from swift.common.recon import RECON_OBJECT_FILE, DEFAULT_RECON_CACHE_PATH
 from swift.obj import ssync_sender
 from swift.obj.diskfile import get_data_dir, get_tmp_dir, DiskFileRouter
 from swift.common.storage_policy import POLICIES, REPL_POLICY
+from swift.common.exceptions import PartitionLockTimeout
 
 DEFAULT_RSYNC_TIMEOUT = 900
 
@@ -125,33 +126,41 @@ class ObjectReplicator(Daemon):
     def __init__(self, conf, logger=None):
         """
         :param conf: configuration object obtained from ConfigParser
-        :param logger: logging object
+        :param logger: an instance of ``SwiftLogAdapter``.
         """
         self.conf = conf
-        self.logger = PrefixLoggerAdapter(
-            logger or get_logger(conf, log_route='object-replicator'), {})
+        self.logger = \
+            logger or get_logger(conf, log_route='object-replicator')
         self.devices_dir = conf.get('devices', '/srv/node')
         self.mount_check = config_true_value(conf.get('mount_check', 'true'))
         self.swift_dir = conf.get('swift_dir', '/etc/swift')
-        self.bind_ip = conf.get('bind_ip', '0.0.0.0')
+        self.ring_ip = conf.get('ring_ip', conf.get('bind_ip', '0.0.0.0'))
         self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
         self.port = None if self.servers_per_port else \
             int(conf.get('bind_port', 6200))
         self.concurrency = int(conf.get('concurrency', 1))
         self.replicator_workers = int(conf.get('replicator_workers', 0))
-        self.stats_interval = int(conf.get('stats_interval', '300'))
-        self.ring_check_interval = int(conf.get('ring_check_interval', 15))
+        self.policies = [policy for policy in POLICIES
+                         if policy.policy_type == REPL_POLICY]
+        self.stats_interval = float(conf.get('stats_interval', '300'))
+        self.ring_check_interval = float(conf.get('ring_check_interval', 15))
         self.next_check = time.time() + self.ring_check_interval
         self.replication_cycle = random.randint(0, 9)
         self.partition_times = []
-        self.interval = int(conf.get('interval') or
-                            conf.get('run_pause') or 30)
-        if 'run_pause' in conf and 'interval' not in conf:
-            self.logger.warning('Option object-replicator/run_pause '
-                                'is deprecated and will be removed in a '
-                                'future version. Update your '
-                                'configuration to use option '
-                                'object-replicator/interval.')
+        self.interval = float(conf.get('interval') or
+                              conf.get('run_pause') or 30)
+        if 'run_pause' in conf:
+            if 'interval' in conf:
+                self.logger.warning(
+                    'Option object-replicator/run_pause is deprecated and '
+                    'object-replicator/interval is already configured. You '
+                    'can safely remove run_pause; it is now ignored and will '
+                    'be removed in a future version.')
+            else:
+                self.logger.warning(
+                    'Option object-replicator/run_pause is deprecated and '
+                    'will be removed in a future version. Update your '
+                    'configuration to use option object-replicator/interval.')
         self.rsync_timeout = int(conf.get('rsync_timeout',
                                           DEFAULT_RSYNC_TIMEOUT))
         self.rsync_io_timeout = conf.get('rsync_io_timeout', '30')
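The deprecation handling above keeps `interval` authoritative and honors the legacy `run_pause` only as a fallback. A minimal sketch of the precedence chain, with plain dicts standing in for the parsed config section:

```python
# Sketch of the option precedence above: `interval` wins, the deprecated
# `run_pause` is only a fallback, and 30 seconds is the default.
def effective_interval(conf):
    return float(conf.get('interval') or conf.get('run_pause') or 30)

assert effective_interval({}) == 30.0
assert effective_interval({'run_pause': '60'}) == 60.0
assert effective_interval({'interval': '15', 'run_pause': '60'}) == 15.0
```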
@@ -163,8 +172,8 @@ class ObjectReplicator(Daemon):
         self.rsync_module = '{replication_ip}::object'
         self.http_timeout = int(conf.get('http_timeout', 60))
         self.recon_cache_path = conf.get('recon_cache_path',
-                                         '/var/cache/swift')
-        self.rcache = os.path.join(self.recon_cache_path, "object.recon")
+                                         DEFAULT_RECON_CACHE_PATH)
+        self.rcache = os.path.join(self.recon_cache_path, RECON_OBJECT_FILE)
         self._next_rcache_update = time.time() + self.stats_interval
         self.conn_timeout = float(conf.get('conn_timeout', 0.5))
         self.node_timeout = float(conf.get('node_timeout', 10))
|
|
173
182
|
self.default_headers = {
|
174
183
|
'Content-Length': '0',
|
175
184
|
'user-agent': 'object-replicator %s' % os.getpid()}
|
185
|
+
self.log_rsync_transfers = config_true_value(
|
186
|
+
conf.get('log_rsync_transfers', True))
|
176
187
|
self.rsync_error_log_line_length = \
|
177
188
|
int(conf.get('rsync_error_log_line_length', 0))
|
178
189
|
self.handoffs_first = config_true_value(conf.get('handoffs_first',
|
@@ -184,9 +195,16 @@ class ObjectReplicator(Daemon):
|
|
184
195
|
'operation, please disable handoffs_first and '
|
185
196
|
'handoff_delete before the next '
|
186
197
|
'normal rebalance')
|
198
|
+
if all(self.load_object_ring(p).replica_count <= self.handoff_delete
|
199
|
+
for p in self.policies):
|
200
|
+
self.logger.warning('No storage policies found for which '
|
201
|
+
'handoff_delete=%d would have an effect. '
|
202
|
+
'Disabling.', self.handoff_delete)
|
203
|
+
self.handoff_delete = 0
|
187
204
|
self.is_multiprocess_worker = None
|
188
205
|
self._df_router = DiskFileRouter(conf, self.logger)
|
189
206
|
self._child_process_reaper_queue = queue.LightQueue()
|
207
|
+
self.rings_mtime = None
|
190
208
|
|
191
209
|
def _zero_stats(self):
|
192
210
|
self.stats_for_dev = defaultdict(Stats)
|
@@ -196,21 +214,12 @@ class ObjectReplicator(Daemon):
|
|
196
214
|
return sum(self.stats_for_dev.values(), Stats())
|
197
215
|
|
198
216
|
def _emplace_log_prefix(self, worker_index):
|
199
|
-
self.logger
|
200
|
-
|
201
|
-
|
202
|
-
|
203
|
-
|
204
|
-
|
205
|
-
my_replication_ips = set()
|
206
|
-
ips = whataremyips()
|
207
|
-
for policy in POLICIES:
|
208
|
-
self.load_object_ring(policy)
|
209
|
-
for local_dev in [dev for dev in policy.object_ring.devs
|
210
|
-
if dev and dev['replication_ip'] in ips and
|
211
|
-
dev['replication_port'] == self.port]:
|
212
|
-
my_replication_ips.add(local_dev['replication_ip'])
|
213
|
-
return list(my_replication_ips)
|
217
|
+
self.logger = get_prefixed_logger(
|
218
|
+
self.logger, "[worker %d/%d pid=%d] " % (
|
219
|
+
worker_index + 1,
|
220
|
+
# use 1-based indexing for more readable logs
|
221
|
+
self.replicator_workers,
|
222
|
+
os.getpid()))
|
214
223
|
|
215
224
|
def _child_process_reaper(self):
|
216
225
|
"""
|
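`get_prefixed_logger` returns a fresh adapter instead of mutating shared logger state, which is why `_emplace_log_prefix` now rebinds `self.logger` rather than calling `set_prefix`. A rough stdlib stand-in for the idea; Swift's real helper lives in `swift.common.utils`, so the class below is only illustrative:

```python
# Rough stdlib stand-in for get_prefixed_logger(): a new adapter object
# carries the worker prefix, so rebinding self.logger never mutates a
# logger shared with other workers.
import logging

class PrefixAdapter(logging.LoggerAdapter):
    def process(self, msg, kwargs):
        return self.extra['prefix'] + msg, kwargs

logging.basicConfig(level=logging.INFO)
base = logging.getLogger('object-replicator')
worker_logger = PrefixAdapter(base, {'prefix': '[worker 1/4 pid=1234] '})
worker_logger.info('Starting object replication pass.')
```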
@@ -291,6 +300,11 @@
         if time.time() >= self._next_rcache_update:
             update = self.aggregate_recon_update()
             dump_recon_cache(update, self.rcache, self.logger)
+        rings_mtime = [os.path.getmtime(self.load_object_ring(
+            policy).serialized_path) for policy in self.policies]
+        if self.rings_mtime == rings_mtime:
+            return True
+        self.rings_mtime = rings_mtime
         return self.get_local_devices() == self.all_local_devices
 
     def get_local_devices(self):
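The mtime check above lets the health check short-circuit when no ring file has been rewritten since the last pass. A self-contained sketch of the same comparison, using a temp file in place of a real `.ring.gz`:

```python
# Sketch of the mtime short-circuit: if no ring file changed on disk since
# the last check, skip the more expensive device-set comparison.
import os
import tempfile

def rings_unchanged(paths, last_mtimes):
    mtimes = [os.path.getmtime(p) for p in paths]
    return mtimes == last_mtimes, mtimes

with tempfile.NamedTemporaryFile(suffix='.ring.gz') as ring:
    unchanged, mtimes = rings_unchanged([ring.name], None)
    assert not unchanged                  # first pass: no baseline yet
    unchanged, _ = rings_unchanged([ring.name], mtimes)
    assert unchanged                      # nothing rewrote the ring
```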
@@ -301,11 +315,9 @@
         This is the device names, e.g. "sdq" or "d1234" or something, not
         the full ring entries.
         """
-        ips = whataremyips(self.bind_ip)
+        ips = whataremyips(self.ring_ip)
         local_devices = set()
-        for policy in POLICIES:
-            if policy.policy_type != REPL_POLICY:
-                continue
+        for policy in self.policies:
             self.load_object_ring(policy)
             for device in policy.object_ring.devs:
                 if device and is_local_device(
@@ -372,7 +384,8 @@
         except Timeout:
             self.logger.error(
                 self._limit_rsync_log(
-                    _("Killing long-running rsync: %s") % str(args)))
+                    "Killing long-running rsync after %ds: %s" % (
+                        self.rsync_timeout, str(args))))
             if proc:
                 proc.kill()
             try:
@@ -399,20 +412,23 @@
                 continue
             if result.startswith('cd+'):
                 continue
+            if result.startswith('<') and not self.log_rsync_transfers:
+                continue
             if not ret_val:
-                self.logger.info(result)
+                self.logger.debug(result)
             else:
                 self.logger.error(result)
         if ret_val:
             self.logger.error(
                 self._limit_rsync_log(
-                    _('Bad rsync return code: %(ret)d <- %(args)s') %
+                    'Bad rsync return code: %(ret)d <- %(args)s' %
                     {'args': str(args), 'ret': ret_val}))
         else:
             log_method = self.logger.info if results else self.logger.debug
             log_method(
-                _("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
-                {'src': args[-2], 'dst': args[-1], 'time': total_time})
+                "Successful rsync of %(src)s to %(dst)s (%(time).03f)",
+                {'src': args[-2][:-3] + '...', 'dst': args[-1],
+                 'time': total_time})
         return ret_val
 
     def rsync(self, node, job, suffixes):
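With `log_rsync_transfers` disabled, per-file transfer lines from rsync's itemized output (they start with `<`) are dropped before logging. A toy filter over invented output lines:

```python
# Toy version of the output filtering above. Itemized-change lines for
# transferred files start with '<'; with log_rsync_transfers disabled they
# are skipped instead of logged at debug. Sample lines are invented.
def interesting_lines(results, log_rsync_transfers):
    for result in results.split('\n'):
        if result == '' or result.startswith('cd+'):
            continue  # directory-only changes are never logged
        if result.startswith('<') and not log_rsync_transfers:
            continue  # per-file transfer lines suppressed
        yield result

sample = 'cd+++++++++ abc/\n<f+++++++++ abc/1234-0.data\nrsync error: foo\n'
print(list(interesting_lines(sample, log_rsync_transfers=False)))
# ['rsync error: foo']
```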
@@ -452,7 +468,24 @@
         data_dir = get_data_dir(job['policy'])
         args.append(join(rsync_module, node['device'],
                          data_dir, job['partition']))
-        return self._rsync(args) == 0, {}
+        success = (self._rsync(args) == 0)
+
+        # TODO: Catch and swallow (or at least minimize) timeouts when doing
+        # an update job; if we don't manage to notify the remote, we should
+        # catch it on the next pass
+        if success or not job['delete']:
+            headers = dict(self.default_headers)
+            headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
+            with Timeout(self.http_timeout):
+                conn = http_connect(
+                    node['replication_ip'], node['replication_port'],
+                    node['device'], job['partition'], 'REPLICATE',
+                    '/' + '-'.join(suffixes), headers=headers)
+                try:
+                    conn.getresponse().read()
+                finally:
+                    conn.close()
+        return success, {}
 
     def ssync(self, node, job, suffixes, remote_check_objs=None):
         return ssync_sender.Sender(
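After a successful rsync the sender now issues the REPLICATE call itself, encoding the pushed suffix directories in the request path so the receiver rehashes exactly those. How that path is assembled (`http_connect` prepends device and partition; the values below are invented):

```python
# How the REPLICATE path above is assembled. Suffixes are three-hex-digit
# directory names; these particular values are made up for illustration.
device, partition = 'sdb1', '139054'
suffixes = ['abc', '1f0', 'fff']
request_path = '/%s/%s/%s' % (device, partition, '-'.join(suffixes))
print(request_path)  # /sdb1/139054/abc-1f0-fff
```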
@@ -471,7 +504,7 @@
             return False
         return True
 
-    def update_deleted(self, job):
+    def revert(self, job):
         """
         High-level method that replicates a single partition that doesn't
         belong on this node.
@@ -480,7 +513,7 @@
         """
 
         def tpool_get_suffixes(path):
-            return [suff for suff in os.listdir(path)
+            return [suff for suff in listdir(path)
                     if len(suff) == 3 and isdir(join(path, suff))]
 
         stats = self.stats_for_dev[job['device']]
@@ -492,79 +525,87 @@
         begin = time.time()
         handoff_partition_deleted = False
         try:
-            responses = []
-            suffixes = tpool.execute(tpool_get_suffixes, job['path'])
-            synced_remote_regions = {}
-            delete_objs = None
-            if suffixes:
-                for node in job['nodes']:
-                    stats.rsync += 1
-                    kwargs = {}
-                    if self.conf.get('sync_method', 'rsync') == 'ssync' \
-                            and node['region'] in synced_remote_regions:
-                        kwargs['remote_check_objs'] = \
-                            synced_remote_regions[node['region']]
-                    # candidates is a dict(hash=>timestamp) of objects
-                    # for deletion
-                    success, candidates = self.sync(
-                        node, job, suffixes, **kwargs)
-                    if success:
-                        with Timeout(self.http_timeout):
-                            conn = http_connect(
-                                node['replication_ip'],
-                                node['replication_port'], node['device'],
-                                job['partition'], 'REPLICATE',
-                                '/' + '-'.join(suffixes), headers=headers)
-                            conn.getresponse().read()
-                        if node['region'] != job['region']:
-                            synced_remote_regions[node['region']] = \
-                                candidates.keys()
-                    else:
-                        failure_devs_info.add((node['replication_ip'],
-                                               node['device']))
-                    responses.append(success)
-                for cand_objs in synced_remote_regions.values():
-                    if delete_objs is None:
-                        delete_objs = cand_objs
-                    else:
-                        delete_objs = delete_objs & cand_objs
-
-            if self.handoff_delete:
-                # delete handoff if we have had handoff_delete successes
-                delete_handoff = len([resp for resp in responses
-                                      if resp]) >= self.handoff_delete
-            else:
-                # delete handoff if all syncs were successful
-                delete_handoff = len(responses) == len(job['nodes']) and \
-                    all(responses)
-            if delete_handoff:
-                stats.remove += 1
-                if (self.conf.get('sync_method', 'rsync') == 'ssync' and
-                        delete_objs is not None):
-                    self.logger.info(_("Removing %s objects"),
-                                     len(delete_objs))
-                    _junk, error_paths = self.delete_handoff_objs(
-                        job, delete_objs)
-                    # if replication works for a hand-off device and it failed,
-                    # the remote devices which are target of the replication
-                    # from the hand-off device will be marked. Because cleanup
-                    # after replication failed means replicator needs to
-                    # replicate again with the same info.
-                    if error_paths:
-                        failure_devs_info.update(
-                            [(failure_dev['replication_ip'],
-                              failure_dev['device'])
-                             for failure_dev in job['nodes']])
-                else:
-                    self.delete_partition(job['path'])
-                    handoff_partition_deleted = True
-            elif not suffixes:
-                self.delete_partition(job['path'])
-                handoff_partition_deleted = True
+            df_mgr = self._df_router[job['policy']]
+            # Only object-server can take this lock if an incoming SSYNC is
+            # running on the same partition. Taking the lock here ensure we
+            # won't enter a race condition where both nodes try to
+            # cross-replicate the same partition and both delete it.
+            with df_mgr.partition_lock(job['device'], job['policy'],
+                                       job['partition'], name='replication',
+                                       timeout=0.2):
+                responses = []
+                suffixes = tpool.execute(tpool_get_suffixes, job['path'])
+                synced_remote_regions = {}
+                delete_objs = None
+                if suffixes:
+                    for node in job['nodes']:
+                        stats.rsync += 1
+                        kwargs = {}
+                        if self.conf.get('sync_method', 'rsync') == 'ssync' \
+                                and node['region'] in synced_remote_regions:
+                            kwargs['remote_check_objs'] = \
+                                synced_remote_regions[node['region']]
+                        # candidates is a dict(hash=>timestamp) of objects
+                        # for deletion
+                        success, candidates = self.sync(
+                            node, job, suffixes, **kwargs)
+                        if not success:
+                            failure_devs_info.add((node['replication_ip'],
+                                                   node['device']))
+                        if success and node['region'] != job['region']:
+                            synced_remote_regions[node['region']] = \
+                                candidates.keys()
+                        responses.append(success)
+                    for cand_objs in synced_remote_regions.values():
+                        if delete_objs is None:
+                            delete_objs = cand_objs
+                        else:
+                            delete_objs = delete_objs & cand_objs
+
+                    if self.handoff_delete:
+                        # delete handoff if we have had handoff_delete successes
+                        successes_count = len([resp for resp in responses if resp])
+                        delete_handoff = successes_count >= min(
+                            self.handoff_delete, len(job['nodes']))
+                    else:
+                        # delete handoff if all syncs were successful
+                        delete_handoff = len(responses) == len(job['nodes']) and \
+                            all(responses)
+                    if delete_handoff:
+                        stats.remove += 1
+                        if (self.conf.get('sync_method', 'rsync') == 'ssync' and
+                                delete_objs is not None):
+                            self.logger.info("Removing %s objects",
+                                             len(delete_objs))
+                            _junk, error_paths = self.delete_handoff_objs(
+                                job, delete_objs)
+                            # if replication works for a hand-off device and it
+                            # failed, the remote devices which are target of the
+                            # replication from the hand-off device will be marked.
+                            # Because cleanup after replication failed means
+                            # replicator needs to replicate again with the same
+                            # info.
+                            if error_paths:
+                                failure_devs_info.update(
+                                    [(failure_dev['replication_ip'],
+                                      failure_dev['device'])
+                                     for failure_dev in job['nodes']])
+                        else:
+                            self.delete_partition(job['path'])
+                            handoff_partition_deleted = True
+                elif not suffixes:
+                    self.delete_partition(job['path'])
+                    handoff_partition_deleted = True
+        except PartitionLockTimeout:
+            self.logger.info("Unable to lock handoff partition %s for "
+                             "replication on device %s policy %d",
+                             job['partition'], job['device'],
+                             job['policy'])
+            self.logger.increment('partition.lock-failure.count')
         except (Exception, Timeout):
-            self.logger.exception(_("Error syncing handoff partition"))
-            stats.add_failure_stats(failure_devs_info)
+            self.logger.exception("Error syncing handoff partition")
         finally:
+            stats.add_failure_stats(failure_devs_info)
             target_devs_info = set([(target_dev['replication_ip'],
                                      target_dev['device'])
                                     for target_dev in job['nodes']])
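The short-timeout partition lock is what turns concurrent cross-replication of the same handoff into a clean skip instead of a mutual delete. A standalone sketch of the lock-with-timeout pattern using a plain flock; Swift's real `partition_lock` lives on the DiskFileManager, so the names below are illustrative only:

```python
# Standalone sketch of a lock-with-timeout: a non-blocking flock retried
# until a deadline, raising so the caller can skip the partition this pass.
import errno
import fcntl
import time
from contextlib import contextmanager

class LockTimeout(Exception):
    pass

@contextmanager
def partition_lock(path, timeout=0.2, interval=0.01):
    with open(path, 'a') as fd:
        deadline = time.monotonic() + timeout
        while True:
            try:
                fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
                break
            except OSError as e:
                if e.errno not in (errno.EACCES, errno.EAGAIN):
                    raise
                if time.monotonic() >= deadline:
                    raise LockTimeout(path)
                time.sleep(interval)
        try:
            yield
        finally:
            fcntl.flock(fd, fcntl.LOCK_UN)

# usage sketch: skip the partition if someone else holds the lock
# with partition_lock('/srv/node/sdb1/objects/139054/.lock'):
#     ...sync or delete the partition...
```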
@@ -575,12 +616,14 @@
             self.logger.timing_since('partition.delete.timing', begin)
 
     def delete_partition(self, path):
-        self.logger.info(_("Removing partition: %s"), path)
+        self.logger.info("Removing partition: %s", path)
         try:
             tpool.execute(shutil.rmtree, path)
         except OSError as e:
-            if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
-                # Don't worry if there was a race to create or delete
+            if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.ENODATA,
+                               EUCLEAN):
+                # Don't worry if there was a race to create or delete,
+                # or some disk corruption that happened after the sync
                 raise
 
     def delete_handoff_objs(self, job, delete_objs):
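`delete_partition` now also tolerates ENODATA and EUCLEAN, which surface when the directory was corrupted after the sync. A sketch of the same tolerant cleanup; note that EUCLEAN (117 on Linux, "Structure needs cleaning") is not exposed by the stdlib `errno` module, which is why Swift exports its own constant:

```python
# Sketch of the tolerant cleanup above. The Linux value 117 for EUCLEAN is
# an assumption made explicit here; Swift ships its own constant instead.
import errno
import shutil

EUCLEAN = getattr(errno, 'EUCLEAN', 117)

def best_effort_rmtree(path):
    try:
        shutil.rmtree(path)
    except OSError as e:
        if e.errno not in (errno.ENOENT, errno.ENOTEMPTY, errno.ENODATA,
                           EUCLEAN):
            raise  # a real error; races and corruption are expected here

best_effort_rmtree('/tmp/some-partition-that-may-not-exist')  # no error
```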
@@ -598,7 +641,7 @@
                 if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
                     error_paths.append(object_path)
                     self.logger.exception(
-                        _("Unexpected error trying to cleanup suffix dir %r"),
+                        "Unexpected error trying to cleanup suffix dir %r",
                         suffix_dir)
         return success_paths, error_paths
 
@@ -636,6 +679,7 @@
             while attempts_left > 0:
                 # If this throws StopIteration it will be caught way below
                 node = next(nodes)
+                node_str = node_to_string(node, replication=True)
                 target_devs_info.add((node['replication_ip'], node['device']))
                 attempts_left -= 1
                 # if we have already synced to this remote region,
@@ -644,27 +688,31 @@
                     continue
                 try:
                     with Timeout(self.http_timeout):
-                        resp = http_connect(
+                        conn = http_connect(
                             node['replication_ip'], node['replication_port'],
                             node['device'], job['partition'], 'REPLICATE',
-                            '', headers=headers).getresponse()
-                        if resp.status == HTTP_INSUFFICIENT_STORAGE:
-                            self.logger.error(
-                                _('%(replication_ip)s/%(device)s '
-                                  'responded as unmounted'), node)
-                            attempts_left += 1
-                            failure_devs_info.add((node['replication_ip'],
-                                                   node['device']))
-                            continue
-                        if resp.status != HTTP_OK:
-                            self.logger.error(_("Invalid response %(resp)s "
-                                                "from %(ip)s"),
-                                              {'resp': resp.status,
-                                               'ip': node['replication_ip']})
-                            failure_devs_info.add((node['replication_ip'],
-                                                   node['device']))
-                            continue
-                        remote_hash = pickle.loads(resp.read())
+                            '', headers=headers)
+                        try:
+                            resp = conn.getresponse()
+                            if resp.status == HTTP_INSUFFICIENT_STORAGE:
+                                self.logger.error('%s responded as unmounted',
+                                                  node_str)
+                                attempts_left += 1
+                                failure_devs_info.add((node['replication_ip'],
+                                                       node['device']))
+                                continue
+                            if resp.status != HTTP_OK:
+                                self.logger.error(
+                                    "Invalid response %(resp)s "
+                                    "from %(remote)s",
+                                    {'resp': resp.status, 'remote': node_str})
+                                failure_devs_info.add((node['replication_ip'],
+                                                       node['device']))
+                                continue
+                            remote_hash = pickle.loads(
+                                resp.read())  # nosec: B301
+                        finally:
+                            conn.close()
                     del resp
                     suffixes = [suffix for suffix in local_hash if
                                 local_hash[suffix] !=
@@ -686,13 +734,6 @@
                         continue
                     stats.rsync += 1
                     success, _junk = self.sync(node, job, suffixes)
-                    with Timeout(self.http_timeout):
-                        conn = http_connect(
-                            node['replication_ip'], node['replication_port'],
-                            node['device'], job['partition'], 'REPLICATE',
-                            '/' + '-'.join(suffixes),
-                            headers=headers)
-                        conn.getresponse().read()
                     if not success:
                         failure_devs_info.add((node['replication_ip'],
                                                node['device']))
@@ -704,8 +745,8 @@
                 except (Exception, Timeout):
                     failure_devs_info.add((node['replication_ip'],
                                            node['device']))
-                    self.logger.exception(_("Error syncing with node: %s"),
-                                          node)
+                    self.logger.exception("Error syncing with node: %s",
+                                          node_str)
             stats.suffix_count += len(local_hash)
         except StopIteration:
             self.logger.error('Ran out of handoffs while replicating '
@@ -713,7 +754,7 @@
                               job['partition'], int(job['policy']))
         except (Exception, Timeout):
             failure_devs_info.update(target_devs_info)
-            self.logger.exception(_("Error syncing partition"))
+            self.logger.exception("Error syncing partition")
         finally:
             stats.add_failure_stats(failure_devs_info)
             stats.success += len(target_devs_info - failure_devs_info)
@@ -731,23 +772,23 @@
         elapsed = (time.time() - self.start) or 0.000001
         rate = replication_count / elapsed
         self.logger.info(
-            _("%(replicated)d/%(total)d (%(percentage).2f%%)"
-              " partitions replicated in %(time).2fs (%(rate).2f/sec, "
-              "%(remaining)s remaining)"),
+            "%(replicated)d/%(total)d (%(percentage).2f%%)"
+            " partitions replicated in %(time).2fs (%(rate).2f/sec, "
+            "%(remaining)s remaining)",
             {'replicated': replication_count, 'total': self.job_count,
              'percentage': replication_count * 100.0 / self.job_count,
              'time': time.time() - self.start, 'rate': rate,
              'remaining': '%d%s' % compute_eta(self.start,
                                                replication_count,
                                                self.job_count)})
-        self.logger.info(_('%(success)s successes, %(failure)s failures'),
-                         dict(success=stats.success,
-                              failure=stats.failure))
+        self.logger.info('%(success)s successes, %(failure)s failures',
+                         dict(success=stats.success,
+                              failure=stats.failure))
 
         if stats.suffix_count:
             self.logger.info(
-                _("%(checked)d suffixes checked - "
-                  "%(hashed).2f%% hashed, %(synced).2f%% synced"),
+                "%(checked)d suffixes checked - "
+                "%(hashed).2f%% hashed, %(synced).2f%% synced",
                 {'checked': stats.suffix_count,
                  'hashed':
                     (stats.suffix_hash * 100.0) / stats.suffix_count,
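The `'%d%s' % compute_eta(...)` format works because the helper returns an amount plus a unit suffix. A back-of-envelope version of that arithmetic, mirroring `swift.common.utils.compute_eta` in spirit rather than line for line:

```python
# Back-of-envelope ETA arithmetic feeding the '%d%s' format above: scale
# elapsed time by the fraction of jobs outstanding, reduce to a humane unit.
import time

def compute_eta(start_time, current_value, final_value):
    elapsed = time.time() - start_time
    completion = (float(current_value) / final_value) or 0.00001
    remaining = (1.0 / completion) * elapsed - elapsed
    for limit, unit in ((3600, 'h'), (60, 'm')):
        if remaining >= limit:
            return remaining / limit, unit
    return remaining, 's'

start = time.time() - 120          # pretend we started two minutes ago
print('%d%s remaining' % compute_eta(start, 25, 100))  # ~6m remaining
```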
@@ -755,15 +796,15 @@
                     (stats.suffix_sync * 100.0) / stats.suffix_count})
             self.partition_times.sort()
             self.logger.info(
-                _("Partition times: max %(max).4fs, "
-                  "min %(min).4fs, med %(med).4fs"),
+                "Partition times: max %(max).4fs, "
+                "min %(min).4fs, med %(med).4fs",
                 {'max': self.partition_times[-1],
                  'min': self.partition_times[0],
                  'med': self.partition_times[
                     len(self.partition_times) // 2]})
         else:
             self.logger.info(
-                _("Nothing replicated for %s seconds."),
+                "Nothing replicated for %s seconds.",
                 (time.time() - self.start))
 
     def heartbeat(self):
@@ -819,7 +860,7 @@
             except Exception:
                 self.logger.exception('ERROR creating %s' % obj_path)
                 continue
-            for partition in os.listdir(obj_path):
+            for partition in listdir(obj_path):
                 if (override_partitions is not None and partition.isdigit()
                         and int(partition) not in override_partitions):
                     continue
@@ -879,8 +920,8 @@
         policies will be returned
         """
         jobs = []
-        ips = whataremyips(self.bind_ip)
-        for policy in POLICIES:
+        ips = whataremyips(self.ring_ip)
+        for policy in self.policies:
             # Skip replication if next_part_power is set. In this case
             # every object is hard-linked twice, but the replicator can't
             # detect them and would create a second copy of the file if not
@@ -890,19 +931,18 @@
                 policy.object_ring, 'next_part_power', None)
             if next_part_power is not None:
                 self.logger.warning(
-                    _("next_part_power set in policy '%s'. Skipping"),
+                    "next_part_power set in policy '%s'. Skipping",
                     policy.name)
                 continue
 
-            if policy.policy_type == REPL_POLICY:
-                if (override_policies is not None and
-                        policy.idx not in override_policies):
-                    continue
-                # ensure rings are loaded for policy
-                self.load_object_ring(policy)
-                jobs += self.build_replication_jobs(
-                    policy, ips, override_devices=override_devices,
-                    override_partitions=override_partitions)
+            if (override_policies is not None and
+                    policy.idx not in override_policies):
+                continue
+            # ensure rings are loaded for policy
+            self.load_object_ring(policy)
+            jobs += self.build_replication_jobs(
+                policy, ips, override_devices=override_devices,
+                override_partitions=override_partitions)
         random.shuffle(jobs)
         if self.handoffs_first:
             # Move the handoff parts to the front of the list
@@ -919,7 +959,6 @@
         self.last_replication_count = 0
         self.replication_cycle = (self.replication_cycle + 1) % 10
         self.partition_times = []
-        self.my_replication_ips = self._get_my_replication_ips()
         self.all_devs_info = set()
         self.handoffs_remaining = 0
@@ -951,14 +990,14 @@
                     # in handoffs first mode, we won't process primary
                     # partitions until rebalance was successful!
                     if self.handoffs_remaining:
-                        self.logger.warning(_(
+                        self.logger.warning(
                             "Handoffs first mode still has handoffs "
                             "remaining. Aborting current "
-                            "replication pass."))
+                            "replication pass.")
                         break
                 if not self.check_ring(job['policy'].object_ring):
-                    self.logger.info(_("Ring change detected. Aborting "
-                                       "current replication pass."))
+                    self.logger.info("Ring change detected. Aborting "
+                                     "current replication pass.")
                     return
 
                 try:
@@ -973,7 +1012,7 @@
                 except OSError:
                     continue
                 if job['delete']:
-                    self.run_pool.spawn(self.update_deleted, job)
+                    self.run_pool.spawn(self.revert, job)
                 else:
                     self.run_pool.spawn(self.update, job)
             current_nodes = None
@@ -988,7 +1027,7 @@
             else:
                 dev_stats.add_failure_stats(self.all_devs_info)
                 self.logger.exception(
-                    _("Exception in top-level replication loop"))
+                    "Exception in top-level replication loop: %s", err)
         finally:
             stats.kill()
             self.stats_line()
@@ -1055,7 +1094,7 @@
 
         rsync_reaper = eventlet.spawn(self._child_process_reaper)
         self._zero_stats()
-        self.logger.info(_("Running object replicator in script mode."))
+        self.logger.info("Running object replicator in script mode.")
 
         override_opts = parse_override_options(once=True, **kwargs)
         devices = override_opts.devices or None
@@ -1071,7 +1110,7 @@
         end_time = time.time()
         total = (end_time - start_time) / 60
         self.logger.info(
-            _("Object replication complete (once). (%.02f minutes)"), total)
+            "Object replication complete (once). (%.02f minutes)", total)
 
         # If we've been manually run on a subset of
         # policies/devices/partitions, then our recon stats are not
@@ -1098,19 +1137,19 @@
         if multiprocess_worker_index is not None:
             self.is_multiprocess_worker = True
             self._emplace_log_prefix(multiprocess_worker_index)
-        self.logger.info(_("Starting object replicator in daemon mode."))
+        self.logger.info("Starting object replicator in daemon mode.")
         eventlet.spawn_n(self._child_process_reaper)
         # Run the replicator continually
         while True:
             self._zero_stats()
-            self.logger.info(_("Starting object replication pass."))
+            self.logger.info("Starting object replication pass.")
             # Run the replicator
             start = time.time()
             self.replicate(override_devices=override_devices)
             end = time.time()
             total = (end - start) / 60
             self.logger.info(
-                _("Object replication complete. (%.02f minutes)"), total)
+                "Object replication complete. (%.02f minutes)", total)
             self.update_recon(total, end, override_devices)
             self.logger.debug('Replication sleeping for %s seconds.',
                               self.interval)
@@ -1120,3 +1159,25 @@
         # This method is called after run_once using multiple workers.
         update = self.aggregate_recon_update()
         dump_recon_cache(update, self.rcache, self.logger)
+
+
+def main():
+    parser = OptionParser("%prog CONFIG [options]")
+    parser.add_option('-d', '--devices',
+                      help='Replicate only given devices. '
+                           'Comma-separated list. '
+                           'Only has effect if --once is used.')
+    parser.add_option('-p', '--partitions',
+                      help='Replicate only given partitions. '
+                           'Comma-separated list. '
+                           'Only has effect if --once is used.')
+    parser.add_option('-i', '--policies',
+                      help='Replicate only given policy indices. '
+                           'Comma-separated list. '
+                           'Only has effect if --once is used.')
+    conf_file, options = parse_options(parser=parser, once=True)
+    run_daemon(ObjectReplicator, conf_file, **options)
+
+
+if __name__ == '__main__':
+    main()
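Together with the `entry_points.txt` additions listed at the top, this new `main()` is why all the `swift-2.23.3.data/scripts/*` wrappers disappear: the console script is now generated from the package. A sketch of how the entry point can be inspected, assuming the standard `console_scripts` wiring and that the entry point value is `swift.obj.replicator:main` (not verified here):

```python
# Sketch: locate the console-script entry point that replaces the old
# data/scripts wrapper. Prints an empty list if swift is not installed.
from importlib.metadata import entry_points

eps = entry_points()
if hasattr(eps, 'select'):            # Python 3.10+
    scripts = eps.select(group='console_scripts')
else:                                 # Python 3.8/3.9: a plain dict
    scripts = eps.get('console_scripts', [])
matches = [ep.value for ep in scripts if ep.name == 'swift-object-replicator']
print(matches)  # e.g. ['swift.obj.replicator:main'] when swift is installed
```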