swift 2.23.3__py3-none-any.whl → 2.35.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (206)
  1. swift/__init__.py +29 -50
  2. swift/account/auditor.py +21 -118
  3. swift/account/backend.py +33 -28
  4. swift/account/reaper.py +37 -28
  5. swift/account/replicator.py +22 -0
  6. swift/account/server.py +60 -26
  7. swift/account/utils.py +28 -11
  8. swift-2.23.3.data/scripts/swift-account-audit → swift/cli/account_audit.py +23 -13
  9. swift-2.23.3.data/scripts/swift-config → swift/cli/config.py +2 -2
  10. swift/cli/container_deleter.py +5 -11
  11. swift-2.23.3.data/scripts/swift-dispersion-populate → swift/cli/dispersion_populate.py +8 -7
  12. swift/cli/dispersion_report.py +10 -9
  13. swift-2.23.3.data/scripts/swift-drive-audit → swift/cli/drive_audit.py +63 -21
  14. swift/cli/form_signature.py +3 -7
  15. swift-2.23.3.data/scripts/swift-get-nodes → swift/cli/get_nodes.py +8 -2
  16. swift/cli/info.py +154 -14
  17. swift/cli/manage_shard_ranges.py +705 -37
  18. swift-2.23.3.data/scripts/swift-oldies → swift/cli/oldies.py +25 -14
  19. swift-2.23.3.data/scripts/swift-orphans → swift/cli/orphans.py +7 -3
  20. swift/cli/recon.py +196 -67
  21. swift-2.23.3.data/scripts/swift-recon-cron → swift/cli/recon_cron.py +17 -20
  22. swift-2.23.3.data/scripts/swift-reconciler-enqueue → swift/cli/reconciler_enqueue.py +2 -3
  23. swift/cli/relinker.py +807 -126
  24. swift/cli/reload.py +135 -0
  25. swift/cli/ringbuilder.py +217 -20
  26. swift/cli/ringcomposer.py +0 -1
  27. swift/cli/shard-info.py +4 -3
  28. swift/common/base_storage_server.py +9 -20
  29. swift/common/bufferedhttp.py +48 -74
  30. swift/common/constraints.py +20 -15
  31. swift/common/container_sync_realms.py +9 -11
  32. swift/common/daemon.py +25 -8
  33. swift/common/db.py +195 -128
  34. swift/common/db_auditor.py +168 -0
  35. swift/common/db_replicator.py +95 -55
  36. swift/common/digest.py +141 -0
  37. swift/common/direct_client.py +144 -33
  38. swift/common/error_limiter.py +93 -0
  39. swift/common/exceptions.py +25 -1
  40. swift/common/header_key_dict.py +2 -9
  41. swift/common/http_protocol.py +373 -0
  42. swift/common/internal_client.py +129 -59
  43. swift/common/linkat.py +3 -4
  44. swift/common/manager.py +284 -67
  45. swift/common/memcached.py +390 -145
  46. swift/common/middleware/__init__.py +4 -0
  47. swift/common/middleware/account_quotas.py +211 -46
  48. swift/common/middleware/acl.py +3 -8
  49. swift/common/middleware/backend_ratelimit.py +230 -0
  50. swift/common/middleware/bulk.py +22 -34
  51. swift/common/middleware/catch_errors.py +1 -3
  52. swift/common/middleware/cname_lookup.py +6 -11
  53. swift/common/middleware/container_quotas.py +1 -1
  54. swift/common/middleware/container_sync.py +39 -17
  55. swift/common/middleware/copy.py +12 -0
  56. swift/common/middleware/crossdomain.py +22 -9
  57. swift/common/middleware/crypto/__init__.py +2 -1
  58. swift/common/middleware/crypto/crypto_utils.py +11 -15
  59. swift/common/middleware/crypto/decrypter.py +28 -11
  60. swift/common/middleware/crypto/encrypter.py +12 -17
  61. swift/common/middleware/crypto/keymaster.py +8 -15
  62. swift/common/middleware/crypto/kms_keymaster.py +2 -1
  63. swift/common/middleware/dlo.py +15 -11
  64. swift/common/middleware/domain_remap.py +5 -4
  65. swift/common/middleware/etag_quoter.py +128 -0
  66. swift/common/middleware/formpost.py +73 -70
  67. swift/common/middleware/gatekeeper.py +8 -1
  68. swift/common/middleware/keystoneauth.py +33 -3
  69. swift/common/middleware/list_endpoints.py +4 -4
  70. swift/common/middleware/listing_formats.py +85 -49
  71. swift/common/middleware/memcache.py +4 -95
  72. swift/common/middleware/name_check.py +3 -2
  73. swift/common/middleware/proxy_logging.py +160 -92
  74. swift/common/middleware/ratelimit.py +17 -10
  75. swift/common/middleware/read_only.py +6 -4
  76. swift/common/middleware/recon.py +59 -22
  77. swift/common/middleware/s3api/acl_handlers.py +25 -3
  78. swift/common/middleware/s3api/acl_utils.py +6 -1
  79. swift/common/middleware/s3api/controllers/__init__.py +6 -0
  80. swift/common/middleware/s3api/controllers/acl.py +3 -2
  81. swift/common/middleware/s3api/controllers/bucket.py +242 -137
  82. swift/common/middleware/s3api/controllers/logging.py +2 -2
  83. swift/common/middleware/s3api/controllers/multi_delete.py +43 -20
  84. swift/common/middleware/s3api/controllers/multi_upload.py +219 -133
  85. swift/common/middleware/s3api/controllers/obj.py +112 -8
  86. swift/common/middleware/s3api/controllers/object_lock.py +44 -0
  87. swift/common/middleware/s3api/controllers/s3_acl.py +2 -2
  88. swift/common/middleware/s3api/controllers/tagging.py +57 -0
  89. swift/common/middleware/s3api/controllers/versioning.py +36 -7
  90. swift/common/middleware/s3api/etree.py +22 -9
  91. swift/common/middleware/s3api/exception.py +0 -4
  92. swift/common/middleware/s3api/s3api.py +113 -41
  93. swift/common/middleware/s3api/s3request.py +384 -218
  94. swift/common/middleware/s3api/s3response.py +126 -23
  95. swift/common/middleware/s3api/s3token.py +16 -17
  96. swift/common/middleware/s3api/schema/delete.rng +1 -1
  97. swift/common/middleware/s3api/subresource.py +7 -10
  98. swift/common/middleware/s3api/utils.py +27 -10
  99. swift/common/middleware/slo.py +665 -358
  100. swift/common/middleware/staticweb.py +64 -37
  101. swift/common/middleware/symlink.py +51 -18
  102. swift/common/middleware/tempauth.py +76 -58
  103. swift/common/middleware/tempurl.py +191 -173
  104. swift/common/middleware/versioned_writes/__init__.py +51 -0
  105. swift/common/middleware/{versioned_writes.py → versioned_writes/legacy.py} +27 -26
  106. swift/common/middleware/versioned_writes/object_versioning.py +1482 -0
  107. swift/common/middleware/x_profile/exceptions.py +1 -4
  108. swift/common/middleware/x_profile/html_viewer.py +18 -19
  109. swift/common/middleware/x_profile/profile_model.py +1 -2
  110. swift/common/middleware/xprofile.py +10 -10
  111. swift-2.23.3.data/scripts/swift-container-server → swift/common/recon.py +13 -8
  112. swift/common/registry.py +147 -0
  113. swift/common/request_helpers.py +324 -57
  114. swift/common/ring/builder.py +67 -25
  115. swift/common/ring/composite_builder.py +1 -1
  116. swift/common/ring/ring.py +177 -51
  117. swift/common/ring/utils.py +1 -1
  118. swift/common/splice.py +10 -6
  119. swift/common/statsd_client.py +205 -0
  120. swift/common/storage_policy.py +49 -44
  121. swift/common/swob.py +86 -102
  122. swift/common/{utils.py → utils/__init__.py} +2163 -2772
  123. swift/common/utils/base.py +131 -0
  124. swift/common/utils/config.py +433 -0
  125. swift/common/utils/ipaddrs.py +256 -0
  126. swift/common/utils/libc.py +345 -0
  127. swift/common/utils/logs.py +859 -0
  128. swift/common/utils/timestamp.py +412 -0
  129. swift/common/wsgi.py +553 -535
  130. swift/container/auditor.py +14 -100
  131. swift/container/backend.py +490 -231
  132. swift/container/reconciler.py +126 -37
  133. swift/container/replicator.py +96 -22
  134. swift/container/server.py +358 -165
  135. swift/container/sharder.py +1540 -684
  136. swift/container/sync.py +94 -88
  137. swift/container/updater.py +53 -32
  138. swift/obj/auditor.py +153 -35
  139. swift/obj/diskfile.py +466 -217
  140. swift/obj/expirer.py +406 -124
  141. swift/obj/mem_diskfile.py +7 -4
  142. swift/obj/mem_server.py +1 -0
  143. swift/obj/reconstructor.py +523 -262
  144. swift/obj/replicator.py +249 -188
  145. swift/obj/server.py +207 -122
  146. swift/obj/ssync_receiver.py +145 -85
  147. swift/obj/ssync_sender.py +113 -54
  148. swift/obj/updater.py +652 -139
  149. swift/obj/watchers/__init__.py +0 -0
  150. swift/obj/watchers/dark_data.py +213 -0
  151. swift/proxy/controllers/account.py +11 -11
  152. swift/proxy/controllers/base.py +848 -604
  153. swift/proxy/controllers/container.py +433 -92
  154. swift/proxy/controllers/info.py +3 -2
  155. swift/proxy/controllers/obj.py +1000 -489
  156. swift/proxy/server.py +185 -112
  157. {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/AUTHORS +58 -11
  158. {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/METADATA +51 -56
  159. swift-2.35.0.dist-info/RECORD +201 -0
  160. {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/WHEEL +1 -1
  161. {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/entry_points.txt +43 -0
  162. swift-2.35.0.dist-info/pbr.json +1 -0
  163. swift/locale/de/LC_MESSAGES/swift.po +0 -1216
  164. swift/locale/en_GB/LC_MESSAGES/swift.po +0 -1207
  165. swift/locale/es/LC_MESSAGES/swift.po +0 -1085
  166. swift/locale/fr/LC_MESSAGES/swift.po +0 -909
  167. swift/locale/it/LC_MESSAGES/swift.po +0 -894
  168. swift/locale/ja/LC_MESSAGES/swift.po +0 -965
  169. swift/locale/ko_KR/LC_MESSAGES/swift.po +0 -964
  170. swift/locale/pt_BR/LC_MESSAGES/swift.po +0 -881
  171. swift/locale/ru/LC_MESSAGES/swift.po +0 -891
  172. swift/locale/tr_TR/LC_MESSAGES/swift.po +0 -832
  173. swift/locale/zh_CN/LC_MESSAGES/swift.po +0 -833
  174. swift/locale/zh_TW/LC_MESSAGES/swift.po +0 -838
  175. swift-2.23.3.data/scripts/swift-account-auditor +0 -23
  176. swift-2.23.3.data/scripts/swift-account-info +0 -51
  177. swift-2.23.3.data/scripts/swift-account-reaper +0 -23
  178. swift-2.23.3.data/scripts/swift-account-replicator +0 -34
  179. swift-2.23.3.data/scripts/swift-account-server +0 -23
  180. swift-2.23.3.data/scripts/swift-container-auditor +0 -23
  181. swift-2.23.3.data/scripts/swift-container-info +0 -55
  182. swift-2.23.3.data/scripts/swift-container-reconciler +0 -21
  183. swift-2.23.3.data/scripts/swift-container-replicator +0 -34
  184. swift-2.23.3.data/scripts/swift-container-sharder +0 -37
  185. swift-2.23.3.data/scripts/swift-container-sync +0 -23
  186. swift-2.23.3.data/scripts/swift-container-updater +0 -23
  187. swift-2.23.3.data/scripts/swift-dispersion-report +0 -24
  188. swift-2.23.3.data/scripts/swift-form-signature +0 -20
  189. swift-2.23.3.data/scripts/swift-init +0 -119
  190. swift-2.23.3.data/scripts/swift-object-auditor +0 -29
  191. swift-2.23.3.data/scripts/swift-object-expirer +0 -33
  192. swift-2.23.3.data/scripts/swift-object-info +0 -60
  193. swift-2.23.3.data/scripts/swift-object-reconstructor +0 -33
  194. swift-2.23.3.data/scripts/swift-object-relinker +0 -41
  195. swift-2.23.3.data/scripts/swift-object-replicator +0 -37
  196. swift-2.23.3.data/scripts/swift-object-server +0 -27
  197. swift-2.23.3.data/scripts/swift-object-updater +0 -23
  198. swift-2.23.3.data/scripts/swift-proxy-server +0 -23
  199. swift-2.23.3.data/scripts/swift-recon +0 -24
  200. swift-2.23.3.data/scripts/swift-ring-builder +0 -24
  201. swift-2.23.3.data/scripts/swift-ring-builder-analyzer +0 -22
  202. swift-2.23.3.data/scripts/swift-ring-composer +0 -22
  203. swift-2.23.3.dist-info/RECORD +0 -220
  204. swift-2.23.3.dist-info/pbr.json +0 -1
  205. {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/LICENSE +0 -0
  206. {swift-2.23.3.dist-info → swift-2.35.0.dist-info}/top_level.txt +0 -0
swift/cli/relinker.py CHANGED
@@ -14,146 +14,827 @@
 # limitations under the License.
 
 
+import argparse
+import datetime
+import errno
+import fcntl
+import json
 import logging
 import os
+import time
+from collections import defaultdict
+
+from eventlet import hubs
+
+from swift.common.exceptions import LockTimeout
 from swift.common.storage_policy import POLICIES
-from swift.common.exceptions import DiskFileDeleted, DiskFileNotExist, \
-    DiskFileQuarantined
-from swift.common.utils import replace_partition_in_path, \
-    audit_location_generator, get_logger
+from swift.common.utils import replace_partition_in_path, config_true_value, \
+    audit_location_generator, get_logger, readconf, drop_privileges, \
+    RateLimitedIterator, distribute_evenly, get_prefixed_logger, \
+    non_negative_float, non_negative_int, config_auto_int_value, \
+    dump_recon_cache, get_partition_from_path, get_hub
+from swift.common.utils.logs import SwiftLogAdapter
 from swift.obj import diskfile
+from swift.common.recon import RECON_RELINKER_FILE, DEFAULT_RECON_CACHE_PATH
 
 
-def relink(swift_dir='/etc/swift',
-           devices='/srv/node',
-           skip_mount_check=False,
-           logger=logging.getLogger()):
-    mount_check = not skip_mount_check
-    run = False
-    relinked = errors = 0
-    for policy in POLICIES:
-        policy.object_ring = None  # Ensure it will be reloaded
-        policy.load_ring(swift_dir)
-        part_power = policy.object_ring.part_power
-        next_part_power = policy.object_ring.next_part_power
-        if not next_part_power or next_part_power == part_power:
-            continue
-        logging.info('Relinking files for policy %s under %s',
-                     policy.name, devices)
-        run = True
-        locations = audit_location_generator(
-            devices,
-            diskfile.get_data_dir(policy),
-            mount_check=mount_check)
-        for fname, _, _ in locations:
-            newfname = replace_partition_in_path(fname, next_part_power)
-            try:
-                diskfile.relink_paths(fname, newfname, check_existing=True)
-                relinked += 1
-            except OSError as exc:
-                errors += 1
-                logger.warning("Relinking %s to %s failed: %s",
-                               fname, newfname, exc)
-
-    if not run:
-        logger.warning("No policy found to increase the partition power.")
-        return 2
-    logging.info('Relinked %d diskfiles (%d errors)', relinked, errors)
-    if errors > 0:
-        return 1
-    return 0
-
-
-def cleanup(swift_dir='/etc/swift',
-            devices='/srv/node',
-            skip_mount_check=False,
-            logger=logging.getLogger()):
-    mount_check = not skip_mount_check
-    conf = {'devices': devices, 'mount_check': mount_check}
-    diskfile_router = diskfile.DiskFileRouter(conf, get_logger(conf))
-    errors = cleaned_up = 0
-    run = False
-    for policy in POLICIES:
-        policy.object_ring = None  # Ensure it will be reloaded
-        policy.load_ring(swift_dir)
-        part_power = policy.object_ring.part_power
-        next_part_power = policy.object_ring.next_part_power
-        if not next_part_power or next_part_power != part_power:
-            continue
-        logging.info('Cleaning up files for policy %s under %s',
-                     policy.name, devices)
-        run = True
-        locations = audit_location_generator(
-            devices,
-            diskfile.get_data_dir(policy),
-            mount_check=mount_check)
-        for fname, device, partition in locations:
-            expected_fname = replace_partition_in_path(fname, part_power)
-            if fname == expected_fname:
-                continue
-            # Make sure there is a valid object file in the expected new
-            # location. Note that this could be newer than the original one
-            # (which happens if there is another PUT after partition power
-            # has been increased, but cleanup did not yet run)
-            loc = diskfile.AuditLocation(
-                os.path.dirname(expected_fname), device, partition, policy)
-            diskfile_mgr = diskfile_router[policy]
-            df = diskfile_mgr.get_diskfile_from_audit_location(loc)
+LOCK_FILE = '.relink.{datadir}.lock'
+STATE_FILE = 'relink.{datadir}.json'
+STATE_TMP_FILE = '.relink.{datadir}.json.tmp'
+STEP_RELINK = 'relink'
+STEP_CLEANUP = 'cleanup'
+EXIT_SUCCESS = 0
+EXIT_NO_APPLICABLE_POLICY = 2
+EXIT_ERROR = 1
+DEFAULT_STATS_INTERVAL = 300.0
+
+
+def recursive_defaultdict():
+    return defaultdict(recursive_defaultdict)
+
+
+def policy(policy_name_or_index):
+    value = POLICIES.get_by_name_or_index(policy_name_or_index)
+    if value is None:
+        raise ValueError
+    return value
+
+
+def _aggregate_stats(base_stats, update_stats):
+    for key, value in update_stats.items():
+        base_stats.setdefault(key, 0)
+        base_stats[key] += value
+
+    return base_stats
+
+
+def _aggregate_recon_stats(base_stats, updated_stats):
+    for k, v in updated_stats.items():
+        if k == 'stats':
+            base_stats['stats'] = _aggregate_stats(base_stats['stats'], v)
+        elif k == "start_time":
+            base_stats[k] = min(base_stats.get(k, v), v)
+        elif k in ("timestamp", "total_time"):
+            base_stats[k] = max(base_stats.get(k, 0), v)
+        elif k in ('parts_done', 'total_parts'):
+            base_stats[k] += v
+
+    return base_stats
+
+
+def _zero_stats():
+    return {
+        'hash_dirs': 0,
+        'files': 0,
+        'linked': 0,
+        'removed': 0,
+        'errors': 0}
+
+
+def _zero_collated_stats():
+    return {
+        'parts_done': 0,
+        'total_parts': 0,
+        'total_time': 0,
+        'stats': _zero_stats()}
+
+
+class Relinker(object):
+    def __init__(self, conf, logger, device_list=None, do_cleanup=False):
+        self.conf = conf
+        self.recon_cache = os.path.join(self.conf['recon_cache_path'],
+                                        RECON_RELINKER_FILE)
+        self.logger = logger
+        self.device_list = device_list or []
+        self.do_cleanup = do_cleanup
+        self.root = self.conf['devices']
+        if len(self.device_list) == 1:
+            self.root = os.path.join(self.root, list(self.device_list)[0])
+        self.part_power = self.next_part_power = None
+        self.diskfile_mgr = None
+        self.dev_lock = None
+        self._last_recon_update = time.time()
+        self.stats_interval = float(conf.get(
+            'stats_interval', DEFAULT_STATS_INTERVAL))
+        self.diskfile_router = diskfile.DiskFileRouter(self.conf, self.logger)
+        self.stats = _zero_stats()
+        self.devices_data = recursive_defaultdict()
+        self.policy_count = 0
+        self.pid = os.getpid()
+        self.linked_into_partitions = set()
+
+    def _aggregate_dev_policy_stats(self):
+        for dev_data in self.devices_data.values():
+            dev_data.update(_zero_collated_stats())
+            for policy_data in dev_data.get('policies', {}).values():
+                _aggregate_recon_stats(dev_data, policy_data)
+
+    def _update_recon(self, device=None, force_dump=False):
+        if not force_dump and self._last_recon_update + self.stats_interval \
+                > time.time():
+            # not time yet!
+            return
+        if device:
+            # dump recon stats for the device
+            num_parts_done = sum(
+                1 for part_done in self.states["state"].values()
+                if part_done)
+            num_total_parts = len(self.states["state"])
+            step = STEP_CLEANUP if self.do_cleanup else STEP_RELINK
+            policy_dev_progress = {'step': step,
+                                   'parts_done': num_parts_done,
+                                   'total_parts': num_total_parts,
+                                   'timestamp': time.time()}
+            self.devices_data[device]['policies'][self.policy.idx].update(
+                policy_dev_progress)
+
+        # aggregate device policy level values into device level
+        self._aggregate_dev_policy_stats()
+
+        # We want to periodically update the worker recon timestamp so we know
+        # it's still running
+        recon_data = self._update_worker_stats(recon_dump=False)
+
+        recon_data.update({'devices': self.devices_data})
+        if device:
+            self.logger.debug("Updating recon for %s", device)
+        else:
+            self.logger.debug("Updating recon")
+        self._last_recon_update = time.time()
+        dump_recon_cache(recon_data, self.recon_cache, self.logger)
+
+    @property
+    def total_errors(self):
+        # first make sure the policy data is aggregated down to the device
+        # level
+        self._aggregate_dev_policy_stats()
+        return sum([sum([
+            dev.get('stats', {}).get('errors', 0),
+            dev.get('stats', {}).get('unmounted', 0),
+            dev.get('stats', {}).get('unlistable_partitions', 0)])
+            for dev in self.devices_data.values()])
+
+    def devices_filter(self, _, devices):
+        if self.device_list:
+            devices = [d for d in devices if d in self.device_list]
+
+        return set(devices)
+
+    def hook_pre_device(self, device_path):
+        lock_file = os.path.join(device_path,
+                                 LOCK_FILE.format(datadir=self.datadir))
+
+        fd = os.open(lock_file, os.O_CREAT | os.O_WRONLY)
+        fcntl.flock(fd, fcntl.LOCK_EX)
+        self.dev_lock = fd
+
+        state_file = os.path.join(device_path,
+                                  STATE_FILE.format(datadir=self.datadir))
+        self.states["state"].clear()
+        try:
+            with open(state_file, 'rt') as f:
+                state_from_disk = json.load(f)
+                if state_from_disk["next_part_power"] != \
+                        self.states["next_part_power"]:
+                    raise ValueError
+                on_disk_part_power = state_from_disk["part_power"]
+                if on_disk_part_power != self.states["part_power"]:
+                    self.states["prev_part_power"] = on_disk_part_power
+                    raise ValueError
+                self.states["state"].update(state_from_disk["state"])
+        except (ValueError, TypeError, KeyError):
+            # Bad state file: remove the file to restart from scratch
+            os.unlink(state_file)
+        except IOError as err:
+            # Ignore file not found error
+            if err.errno != errno.ENOENT:
+                raise
+
+        # initialise the device in recon.
+        device = os.path.basename(device_path)
+        self.devices_data[device]['policies'][self.policy.idx] = {
+            'start_time': time.time(), 'stats': _zero_stats(),
+            'part_power': self.states["part_power"],
+            'next_part_power': self.states["next_part_power"]}
+        self.stats = \
+            self.devices_data[device]['policies'][self.policy.idx]['stats']
+        self._update_recon(device)
+
+    def hook_post_device(self, device_path):
+        os.close(self.dev_lock)
+        self.dev_lock = None
+        device = os.path.basename(device_path)
+        pol_stats = self.devices_data[device]['policies'][self.policy.idx]
+        total_time = time.time() - pol_stats['start_time']
+        pol_stats.update({'total_time': total_time, 'stats': self.stats})
+        self._update_recon(device, force_dump=True)
+
+    def partitions_filter(self, datadir_path, partitions):
+        # Remove all non partitions first (eg: auditor_status_ALL.json)
+        partitions = [p for p in partitions if p.isdigit()]
+
+        relinking = (self.part_power != self.next_part_power)
+        if relinking:
+            # All partitions in the upper half are new partitions and there is
+            # nothing to relink there
+            partitions = [part for part in partitions
+                          if int(part) < 2 ** self.part_power]
+        elif "prev_part_power" in self.states:
+            # All partitions in the upper half are new partitions and there is
+            # nothing to clean up there
+            partitions = [part for part in partitions
+                          if int(part) < 2 ** self.states["prev_part_power"]]
+
+        # Format: { 'part': processed }
+        if self.states["state"]:
+            missing = list(set(partitions) - set(self.states["state"].keys()))
+            if missing:
+                # All missing partitions were created after the first run of
+                # the relinker with this part_power/next_part_power pair. This
+                # is expected when relinking, where new partitions appear that
+                # are appropriate for the target part power. In such cases,
+                # there's nothing to be done. Err on the side of caution
+                # during cleanup, however.
+                for part in missing:
+                    self.states["state"][part] = relinking
+            partitions = [
+                str(part) for part, processed in self.states["state"].items()
+                if not processed]
+        else:
+            self.states["state"].update({
+                str(part): False for part in partitions})
+
+        # Always scan the partitions in reverse order to minimize the amount
+        # of IO (it actually only matters for relink, not for cleanup).
+        #
+        # Initial situation:
+        #  objects/0/000/00000000...00000000/12345.data
+        #  -> relinked to objects/1/000/10000000...00000000/12345.data
+        #
+        # If the relinker then scan partition 1, it will listdir that object
+        # while it's unnecessary. By working in reverse order of partitions,
+        # this is avoided.
+        partitions = sorted(partitions, key=int, reverse=True)
+
+        # do this last so that self.states, and thus the state file, has been
+        # initiated with *all* partitions before partitions are restricted for
+        # this particular run...
+        conf_partitions = self.conf.get('partitions')
+        if conf_partitions:
+            partitions = [p for p in partitions if int(p) in conf_partitions]
+
+        return partitions
+
+    def hook_pre_partition(self, partition_path):
+        self.pre_partition_errors = self.total_errors
+        self.linked_into_partitions = set()
+
+    def hook_post_partition(self, partition_path):
+        datadir_path, partition = os.path.split(
+            os.path.abspath(partition_path))
+        device_path, datadir_name = os.path.split(datadir_path)
+        device = os.path.basename(device_path)
+        state_tmp_file = os.path.join(
+            device_path, STATE_TMP_FILE.format(datadir=datadir_name))
+        state_file = os.path.join(
+            device_path, STATE_FILE.format(datadir=datadir_name))
+
+        # We started with a partition space like
+        #   |0              N|
+        #   |ABCDEFGHIJKLMNOP|
+        #
+        # After relinking, it will be more like
+        #   |0                             2N|
+        #   |AABBCCDDEEFFGGHHIIJJKKLLMMNNOOPP|
+        #
+        # We want to hold off on rehashing until after cleanup, since that is
+        # the point at which we've finished with filesystem manipulations. But
+        # there's a slight complication: we know the upper half has nothing to
+        # clean up, so the cleanup phase only looks at
+        #   |0                             2N|
+        #   |AABBCCDDEEFFGGHH                |
+        #
+        # To ensure that the upper half gets rehashed, too, do it as part of
+        # relinking; as we finish
+        #   |0              N|
+        #   |        IJKLMNOP|
+        # shift to the new partition space and rehash
+        #   |0                             2N|
+        #   |                IIJJKKLLMMNNOOPP|
+        for dirty_partition in self.linked_into_partitions:
+            if self.do_cleanup or \
+                    dirty_partition >= 2 ** self.states['part_power']:
+                self.diskfile_mgr.get_hashes(
+                    device, dirty_partition, [], self.policy)
+
+        if self.do_cleanup:
             try:
-                with df.open():
+                hashes = self.diskfile_mgr.get_hashes(
+                    device, int(partition), [], self.policy)
+            except LockTimeout:
+                hashes = 1  # truthy, but invalid
+            # In any reasonably-large cluster, we'd expect all old
+            # partitions P to be empty after cleanup (i.e., it's unlikely
+            # that there's another partition Q := P//2 that also has data
+            # on this device).
+            #
+            # Try to clean up empty partitions now, so operators can use
+            # existing rebalance-complete metrics to monitor relinking
+            # progress (provided there are few/no handoffs when relinking
+            # starts and little data is written to handoffs during the
+            # increase).
+            if not hashes:
+                try:
+                    with self.diskfile_mgr.replication_lock(
+                            device, self.policy, partition), \
+                            self.diskfile_mgr.partition_lock(
+                                device, self.policy, partition):
+                        # Order here is somewhat important for crash-tolerance
+                        for f in ('hashes.pkl', 'hashes.invalid', '.lock',
+                                  '.lock-replication'):
+                            try:
+                                os.unlink(os.path.join(partition_path, f))
+                            except OSError as e:
+                                if e.errno != errno.ENOENT:
+                                    raise
+                    # Note that as soon as we've deleted the lock files, some
+                    # other process could come along and make new ones -- so
+                    # this may well complain that the directory is not empty
+                    os.rmdir(partition_path)
+                except (OSError, LockTimeout):
+                    # Most likely, some data landed in here or we hit an error
+                    # above. Let the replicator deal with things; it was worth
+                    # a shot.
                     pass
-            except DiskFileQuarantined as exc:
-                logger.warning('ERROR Object %(obj)s failed audit and was'
-                               ' quarantined: %(err)r',
-                               {'obj': loc, 'err': exc})
-                errors += 1
-                continue
-            except DiskFileDeleted:
-                pass
-            except DiskFileNotExist as exc:
-                err = False
-                if policy.policy_type == 'erasure_coding':
-                    # Might be a non-durable fragment - check that there is
-                    # a fragment in the new path. Will be fixed by the
-                    # reconstructor then
-                    if not os.path.isfile(expected_fname):
-                        err = True
+
+        # If there were no errors, mark this partition as done. This is handy
+        # in case the process is interrupted and needs to resume, or there
+        # were errors and the relinker needs to run again.
+        if self.pre_partition_errors == self.total_errors:
+            self.states["state"][partition] = True
+        with open(state_tmp_file, 'wt') as f:
+            json.dump(self.states, f)
+            os.fsync(f.fileno())
+        os.rename(state_tmp_file, state_file)
+        num_parts_done = sum(
+            1 for part in self.states["state"].values()
+            if part)
+        step = STEP_CLEANUP if self.do_cleanup else STEP_RELINK
+        num_total_parts = len(self.states["state"])
+        self.logger.info(
+            "Step: %s Device: %s Policy: %s Partitions: %d/%d",
+            step, device, self.policy.name, num_parts_done, num_total_parts)
+        self._update_recon(device)
+
+    def hashes_filter(self, suff_path, hashes):
+        hashes = list(hashes)
+        for hsh in hashes:
+            fname = os.path.join(suff_path, hsh)
+            if fname == replace_partition_in_path(
+                    self.conf['devices'], fname, self.next_part_power):
+                hashes.remove(hsh)
+        return hashes
+
+    def process_location(self, hash_path, new_hash_path):
+        # Compare the contents of each hash dir with contents of same hash
+        # dir in its new partition to verify that the new location has the
+        # most up to date set of files. The new location may have newer
+        # files if it has been updated since relinked.
+        self.stats['hash_dirs'] += 1
+
+        # Get on disk data for new and old locations, cleaning up any
+        # reclaimable or obsolete files in each. The new location is
+        # cleaned up *before* the old location to prevent false negatives
+        # where the old still has a file that has been cleaned up in the
+        # new; cleaning up the new location first ensures that the old will
+        # always be 'cleaner' than the new.
+        new_df_data = self.diskfile_mgr.cleanup_ondisk_files(new_hash_path)
+        old_df_data = self.diskfile_mgr.cleanup_ondisk_files(hash_path)
+        # Now determine the most up to date set of on disk files would be
+        # given the content of old and new locations...
+        new_files = set(new_df_data['files'])
+        old_files = set(old_df_data['files'])
+        union_files = new_files.union(old_files)
+        union_data = self.diskfile_mgr.get_ondisk_files(
+            union_files, '', verify=False)
+        obsolete_files = set(info['filename']
+                             for info in union_data.get('obsolete', []))
+        # drop 'obsolete' files but retain 'unexpected' files which might
+        # be misplaced diskfiles from another policy
+        required_files = union_files.difference(obsolete_files)
+        required_links = required_files.intersection(old_files)
+
+        missing_links = 0
+        created_links = 0
+        unwanted_files = []
+        for filename in required_links:
+            # Before removing old files, be sure that the corresponding
+            # required new files exist by calling relink_paths again. There
+            # are several possible outcomes:
+            # - The common case is that the new file exists, in which case
+            #   relink_paths checks that the new file has the same inode
+            #   as the old file. An exception is raised if the inode of
+            #   the new file is not the same as the old file.
+            # - The new file may not exist because the relinker failed to
+            #   create the link to the old file and has erroneously moved
+            #   on to cleanup. In this case the relink_paths will create
+            #   the link now or raise an exception if that fails.
+            # - The new file may not exist because some other process,
+            #   such as an object server handling a request, has cleaned
+            #   it up since we called cleanup_ondisk_files(new_hash_path).
+            #   In this case a new link will be created to the old file.
+            #   This is unnecessary but simpler than repeating the
+            #   evaluation of what links are now required and safer than
+            #   assuming that a non-existent file that *was* required is
+            #   no longer required. The new file will eventually be
+            #   cleaned up again.
+            self.stats['files'] += 1
+            old_file = os.path.join(hash_path, filename)
+            new_file = os.path.join(new_hash_path, filename)
+            try:
+                if diskfile.relink_paths(old_file, new_file):
+                    self.logger.debug(
+                        "Relinking%s created link: %s to %s",
+                        ' (cleanup)' if self.do_cleanup else '',
+                        old_file, new_file)
+                    created_links += 1
+                    self.stats['linked'] += 1
+            except OSError as exc:
+                if exc.errno == errno.EEXIST and filename.endswith('.ts'):
+                    # special case for duplicate tombstones, see:
+                    # https://bugs.launchpad.net/swift/+bug/1921718
+                    # https://bugs.launchpad.net/swift/+bug/1934142
+                    self.logger.debug(
+                        "Relinking%s: tolerating different inodes for "
+                        "tombstone with same timestamp: %s to %s",
+                        ' (cleanup)' if self.do_cleanup else '',
+                        old_file, new_file)
                 else:
-                    err = True
-                if err:
-                    logger.warning(
-                        'Error cleaning up %s: %r', fname, exc)
-                    errors += 1
-                    continue
+                    self.logger.warning(
+                        "Error relinking%s: failed to relink %s to %s: %s",
+                        ' (cleanup)' if self.do_cleanup else '',
+                        old_file, new_file, exc)
+                    self.stats['errors'] += 1
+                    missing_links += 1
+        if created_links:
+            self.linked_into_partitions.add(get_partition_from_path(
+                self.conf['devices'], new_hash_path))
+            try:
+                diskfile.invalidate_hash(os.path.dirname(new_hash_path))
+            except (Exception, LockTimeout) as exc:
+                # at this point, the link's created. even if we counted it as
+                # an error, a subsequent run wouldn't find any work to do. so,
+                # don't bother; instead, wait for replication to be re-enabled
+                # so post-replication rehashing or periodic rehashing can
+                # eventually pick up the change
+                self.logger.warning(
+                    'Error invalidating suffix for %s: %r',
+                    new_hash_path, exc)
+
+        if self.do_cleanup and not missing_links:
+            # use the sorted list to help unit testing
+            unwanted_files = old_df_data['files']
+
+        # the new partition hash dir has the most up to date set of on
+        # disk files so it is safe to delete the old location...
+        rehash = False
+        for filename in unwanted_files:
+            old_file = os.path.join(hash_path, filename)
             try:
-                os.remove(fname)
-                cleaned_up += 1
-                logging.debug("Removed %s", fname)
+                os.remove(old_file)
             except OSError as exc:
-                logger.warning('Error cleaning up %s: %r', fname, exc)
-                errors += 1
+                self.logger.warning('Error cleaning up %s: %r', old_file, exc)
+                self.stats['errors'] += 1
+            else:
+                rehash = True
+                self.stats['removed'] += 1
+                self.logger.debug("Removed %s", old_file)
+
+        if rehash:
+            # Even though we're invalidating the suffix, don't update
+            # self.linked_into_partitions -- we only care about them for
+            # relinking into the new part-power space
+            try:
+                diskfile.invalidate_hash(os.path.dirname(hash_path))
+            except (Exception, LockTimeout) as exc:
+                # note: not counted as an error
+                self.logger.warning(
+                    'Error invalidating suffix for %s: %r',
+                    hash_path, exc)
+
+    def place_policy_stat(self, dev, policy, stat, value):
+        stats = self.devices_data[dev]['policies'][policy.idx].setdefault(
+            "stats", _zero_stats())
+        stats[stat] = stats.get(stat, 0) + value
+
+    def process_policy(self, policy):
+        self.logger.info(
+            'Processing files for policy %s under %s (cleanup=%s)',
+            policy.name, self.root, self.do_cleanup)
+        self.part_power = policy.object_ring.part_power
+        self.next_part_power = policy.object_ring.next_part_power
+        self.diskfile_mgr = self.diskfile_router[policy]
+        self.datadir = diskfile.get_data_dir(policy)
+        self.states = {
+            "part_power": self.part_power,
+            "next_part_power": self.next_part_power,
+            "state": {},
+        }
+        audit_stats = {}
+
+        locations = audit_location_generator(
+            self.conf['devices'],
+            self.datadir,
+            mount_check=self.conf['mount_check'],
+            devices_filter=self.devices_filter,
+            hook_pre_device=self.hook_pre_device,
+            hook_post_device=self.hook_post_device,
+            partitions_filter=self.partitions_filter,
+            hook_pre_partition=self.hook_pre_partition,
+            hook_post_partition=self.hook_post_partition,
+            hashes_filter=self.hashes_filter,
+            logger=self.logger,
+            error_counter=audit_stats,
+            yield_hash_dirs=True
+        )
+        if self.conf['files_per_second'] > 0:
+            locations = RateLimitedIterator(
+                locations, self.conf['files_per_second'])
+        for hash_path, device, partition in locations:
+            # note, in cleanup step next_part_power == part_power
+            new_hash_path = replace_partition_in_path(
+                self.conf['devices'], hash_path, self.next_part_power)
+            if new_hash_path == hash_path:
+                continue
+            self.process_location(hash_path, new_hash_path)
+
+        # any unmounted devices don't trigger the pre_device trigger.
+        # so we'll deal with them here.
+        for dev in audit_stats.get('unmounted', []):
+            self.place_policy_stat(dev, policy, 'unmounted', 1)
+
+        # Further unlistable_partitions doesn't trigger the post_device, so
+        # we also need to deal with them here.
+        for datadir in audit_stats.get('unlistable_partitions', []):
+            device_path, _ = os.path.split(datadir)
+            device = os.path.basename(device_path)
+            self.place_policy_stat(device, policy, 'unlistable_partitions', 1)
+
+    def _update_worker_stats(self, recon_dump=True, return_code=None):
+        worker_stats = {'devices': self.device_list,
+                        'timestamp': time.time(),
+                        'return_code': return_code}
+        worker_data = {"workers": {str(self.pid): worker_stats}}
+        if recon_dump:
+            dump_recon_cache(worker_data, self.recon_cache, self.logger)
+        return worker_data
+
+    def run(self):
+        num_policies = 0
+        self._update_worker_stats()
+        for policy in self.conf['policies']:
+            self.policy = policy
+            policy.object_ring = None  # Ensure it will be reloaded
+            policy.load_ring(self.conf['swift_dir'])
+            ring = policy.object_ring
+            if not ring.next_part_power:
+                continue
+            part_power_increased = ring.next_part_power == ring.part_power
+            if self.do_cleanup != part_power_increased:
+                continue
+
+            num_policies += 1
+            self.process_policy(policy)
+
+        # Some stat collation happens during _update_recon and we want to force
+        # this to happen at the end of the run
+        self._update_recon(force_dump=True)
+        if not num_policies:
+            self.logger.warning(
+                "No policy found to increase the partition power.")
+            self._update_worker_stats(return_code=EXIT_NO_APPLICABLE_POLICY)
+            return EXIT_NO_APPLICABLE_POLICY
+
+        if self.total_errors > 0:
+            log_method = self.logger.warning
+            # NB: audit_location_generator logs unmounted disks as warnings,
+            # but we want to treat them as errors
+            status = EXIT_ERROR
+        else:
+            log_method = self.logger.info
+            status = EXIT_SUCCESS
+
+        stats = _zero_stats()
+        for dev_stats in self.devices_data.values():
+            stats = _aggregate_stats(stats, dev_stats.get('stats', {}))
+        hash_dirs = stats.pop('hash_dirs')
+        files = stats.pop('files')
+        linked = stats.pop('linked')
+        removed = stats.pop('removed')
+        action_errors = stats.pop('errors')
+        unmounted = stats.pop('unmounted', 0)
+        if unmounted:
+            self.logger.warning('%d disks were unmounted', unmounted)
+        listdir_errors = stats.pop('unlistable_partitions', 0)
+        if listdir_errors:
+            self.logger.warning(
+                'There were %d errors listing partition directories',
+                listdir_errors)
+        if stats:
+            self.logger.warning(
+                'There were unexpected errors while enumerating disk '
+                'files: %r', stats)
+
+        log_method(
+            '%d hash dirs processed (cleanup=%s) (%d files, %d linked, '
+            '%d removed, %d errors)', hash_dirs, self.do_cleanup, files,
+            linked, removed, action_errors + listdir_errors)
+
+        self._update_worker_stats(return_code=status)
+        return status
+
+
+def _reset_recon(recon_cache, logger):
+    device_progress_recon = {'devices': {}, 'workers': {}}
+    dump_recon_cache(device_progress_recon, recon_cache, logger)
+
+
+def parallel_process(do_cleanup, conf, logger, device_list=None):
+    """
+    Fork Relinker workers based on config and wait for them to finish.
+
+    :param do_cleanup: boolean, if workers should perform cleanup step
+    :param conf: dict, config options
+    :param logger: SwiftLogAdapter instance
+    :kwarg device_list: list of strings, optionally limit to specific devices
+
+    :returns: int, exit code; zero on success
+    """
+
+    # initialise recon dump for collection
+    # Lets start by always deleting last run's stats
+    recon_cache = os.path.join(conf['recon_cache_path'], RECON_RELINKER_FILE)
+    _reset_recon(recon_cache, logger)
+
+    device_list = sorted(set(device_list or os.listdir(conf['devices'])))
+    workers = conf['workers']
+    if workers == 'auto':
+        workers = len(device_list)
+    else:
+        workers = min(workers, len(device_list))
+
+    start = time.time()
+    logger.info('Starting relinker (cleanup=%s) using %d workers: %s' %
+                (do_cleanup, workers,
+                 time.strftime('%X %x %Z', time.gmtime(start))))
+    if workers == 0 or len(device_list) in (0, 1):
+        ret = Relinker(
+            conf, logger, device_list, do_cleanup=do_cleanup).run()
+        logger.info('Finished relinker (cleanup=%s): %s (%s elapsed)' %
+                    (do_cleanup, time.strftime('%X %x %Z', time.gmtime()),
+                     datetime.timedelta(seconds=time.time() - start)))
+        return ret
+
+    children = {}
+    for worker_devs in distribute_evenly(device_list, workers):
+        pid = os.fork()
+        if pid == 0:
+            logger = get_prefixed_logger(logger, '[pid=%s, devs=%s] ' % (
+                os.getpid(), ','.join(worker_devs)))
+            os._exit(Relinker(
+                conf, logger, worker_devs, do_cleanup=do_cleanup).run())
+        else:
+            children[pid] = worker_devs
+
+    final_status = EXIT_SUCCESS
+    final_messages = []
+    while children:
+        pid, status = os.wait()
+        sig = status & 0xff
+        status = status >> 8
+        time_delta = time.time() - start
+        devs = children.pop(pid, ['unknown device'])
+        worker_desc = '(pid=%s, devs=%s)' % (pid, ','.join(devs))
+        if sig != 0:
+            final_status = EXIT_ERROR
+            final_messages.append(
+                'Worker %s exited in %.1fs after receiving signal: %s'
+                % (worker_desc, time_delta, sig))
+            continue
+
+        if status == EXIT_SUCCESS:
+            continue
+
+        if status == EXIT_NO_APPLICABLE_POLICY:
+            if final_status == EXIT_SUCCESS:
+                final_status = status
+            continue
+
+        final_status = EXIT_ERROR
+        if status == EXIT_ERROR:
+            final_messages.append(
+                'Worker %s completed in %.1fs with errors'
+                % (worker_desc, time_delta))
+        else:
+            final_messages.append(
+                'Worker %s exited in %.1fs with unexpected status %s'
+                % (worker_desc, time_delta, status))
+
+    for msg in final_messages:
+        logger.warning(msg)
+    logger.info('Finished relinker (cleanup=%s): %s (%s elapsed)' %
+                (do_cleanup, time.strftime('%X %x %Z', time.gmtime()),
+                 datetime.timedelta(seconds=time.time() - start)))
+    return final_status
 
-    if not run:
-        logger.warning("No policy found to increase the partition power.")
-        return 2
-    logging.info('Cleaned up %d diskfiles (%d errors)', cleaned_up, errors)
-    if errors > 0:
-        return 1
-    return 0
 
+def auto_or_int(value):
+    return config_auto_int_value(value, default='auto')
 
-def main(args):
-    logging.basicConfig(
-        format='%(message)s',
-        level=logging.DEBUG if args.debug else logging.INFO,
-        filename=args.logfile)
 
-    logger = logging.getLogger()
+def main(args=None):
+    parser = argparse.ArgumentParser(
+        description='Relink and cleanup objects to increase partition power')
+    parser.add_argument('action', choices=['relink', 'cleanup'])
+    parser.add_argument('conf_file', nargs='?', help=(
+        'Path to config file with [object-relinker] section'))
+    parser.add_argument('--swift-dir', default=None,
+                        dest='swift_dir', help='Path to swift directory')
+    parser.add_argument(
+        '--policy', default=[], dest='policies',
+        action='append', type=policy,
+        help='Policy to relink; may specify multiple (default: all)')
+    parser.add_argument('--devices', default=None,
+                        dest='devices', help='Path to swift device directory')
+    parser.add_argument('--user', default=None, dest='user',
+                        help='Drop privileges to this user before relinking')
+    parser.add_argument('--device',
+                        default=[], dest='device_list', action='append',
+                        help='Device name to relink (default: all)')
+    parser.add_argument('--partition', '-p', default=[], dest='partitions',
+                        type=non_negative_int, action='append',
+                        help='Partition to relink (default: all)')
+    parser.add_argument('--skip-mount-check', default=False,
+                        help='Don\'t test if disk is mounted',
+                        action="store_true", dest='skip_mount_check')
+    parser.add_argument('--files-per-second', default=None,
+                        type=non_negative_float, dest='files_per_second',
+                        help='Used to limit I/O. Zero implies no limit '
+                             '(default: no limit).')
+    parser.add_argument('--stats-interval', default=None,
+                        type=non_negative_float, dest='stats_interval',
+                        help='Emit stats to recon roughly every N seconds. '
+                             '(default: %d).' % DEFAULT_STATS_INTERVAL)
+    parser.add_argument(
+        '--workers', default=None, type=auto_or_int, help=(
+            'Process devices across N workers '
+            '(default: one worker per device)'))
+    parser.add_argument('--logfile', default=None, dest='logfile',
+                        help='Set log file name. Ignored if using conf_file.')
+    parser.add_argument('--debug', default=False, action='store_true',
+                        help='Enable debug mode')
 
-    if args.action == 'relink':
-        return relink(
-            args.swift_dir, args.devices, args.skip_mount_check, logger)
+    args = parser.parse_args(args)
+    hubs.use_hub(get_hub())
+    if args.conf_file:
+        conf = readconf(args.conf_file, 'object-relinker')
+        if args.debug:
+            conf['log_level'] = 'DEBUG'
+        user = args.user or conf.get('user')
+        if user:
+            drop_privileges(user)
+        logger = get_logger(conf)
+    else:
+        level = 'DEBUG' if args.debug else 'INFO'
+        conf = {'log_level': level}
+        if args.user:
+            # Drop privs before creating log file
+            drop_privileges(args.user)
+            conf['user'] = args.user
+        logging.basicConfig(
+            format='%(message)s',
+            level=getattr(logging, level),
+            filename=args.logfile)
+        logger = SwiftLogAdapter(logging.getLogger(), server='relinker')
 
-    if args.action == 'cleanup':
-        return cleanup(
-            args.swift_dir, args.devices, args.skip_mount_check, logger)
+    conf.update({
+        'swift_dir': args.swift_dir or conf.get('swift_dir', '/etc/swift'),
+        'devices': args.devices or conf.get('devices', '/srv/node'),
+        'mount_check': (config_true_value(conf.get('mount_check', 'true'))
+                        and not args.skip_mount_check),
+        'files_per_second': (
+            args.files_per_second if args.files_per_second is not None
+            else non_negative_float(conf.get('files_per_second', '0'))),
+        'policies': set(args.policies) or POLICIES,
+        'partitions': set(args.partitions),
+        'workers': config_auto_int_value(
+            conf.get('workers') if args.workers is None else args.workers,
+            'auto'),
+        'recon_cache_path': conf.get('recon_cache_path',
+                                     DEFAULT_RECON_CACHE_PATH),
+        'stats_interval': non_negative_float(
+            args.stats_interval or conf.get('stats_interval',
+                                            DEFAULT_STATS_INTERVAL)),
+    })
+    return parallel_process(
+        args.action == 'cleanup', conf, logger, args.device_list)
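
Note (not part of the diff): since the rewritten entry point takes an optional argument list via parser.parse_args(args), it can be driven directly from Python as well as from the console script. A minimal sketch, assuming a cluster with the default /etc/swift rings and /srv/node devices, where the device name sdb1 is a placeholder; the flags are those defined in main() above.

from swift.cli.relinker import main

# Run the relink step over one device with two forked workers.
# main() returns EXIT_SUCCESS (0), EXIT_ERROR (1) or
# EXIT_NO_APPLICABLE_POLICY (2), as defined at the top of the module.
exit_code = main(['relink', '--device', 'sdb1', '--workers', '2'])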