teuthology 1.0.0__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scripts/describe.py +1 -0
- scripts/dispatcher.py +62 -0
- scripts/exporter.py +18 -0
- scripts/lock.py +1 -1
- scripts/node_cleanup.py +58 -0
- scripts/openstack.py +9 -9
- scripts/results.py +12 -11
- scripts/run.py +4 -0
- scripts/schedule.py +4 -0
- scripts/suite.py +61 -16
- scripts/supervisor.py +44 -0
- scripts/update_inventory.py +10 -4
- scripts/wait.py +31 -0
- teuthology/__init__.py +24 -21
- teuthology/beanstalk.py +4 -3
- teuthology/config.py +17 -6
- teuthology/contextutil.py +18 -14
- teuthology/describe_tests.py +25 -18
- teuthology/dispatcher/__init__.py +365 -0
- teuthology/dispatcher/supervisor.py +374 -0
- teuthology/exceptions.py +54 -0
- teuthology/exporter.py +347 -0
- teuthology/kill.py +76 -75
- teuthology/lock/cli.py +16 -7
- teuthology/lock/ops.py +276 -70
- teuthology/lock/query.py +61 -44
- teuthology/ls.py +9 -18
- teuthology/misc.py +152 -137
- teuthology/nuke/__init__.py +12 -351
- teuthology/openstack/__init__.py +4 -3
- teuthology/openstack/openstack-centos-7.0-user-data.txt +1 -1
- teuthology/openstack/openstack-centos-7.1-user-data.txt +1 -1
- teuthology/openstack/openstack-centos-7.2-user-data.txt +1 -1
- teuthology/openstack/openstack-debian-8.0-user-data.txt +1 -1
- teuthology/openstack/openstack-opensuse-42.1-user-data.txt +1 -1
- teuthology/openstack/openstack-teuthology.cron +0 -1
- teuthology/orchestra/cluster.py +51 -9
- teuthology/orchestra/connection.py +23 -16
- teuthology/orchestra/console.py +111 -50
- teuthology/orchestra/daemon/cephadmunit.py +23 -5
- teuthology/orchestra/daemon/state.py +10 -3
- teuthology/orchestra/daemon/systemd.py +10 -8
- teuthology/orchestra/opsys.py +32 -11
- teuthology/orchestra/remote.py +369 -152
- teuthology/orchestra/run.py +21 -12
- teuthology/packaging.py +54 -15
- teuthology/provision/__init__.py +30 -10
- teuthology/provision/cloud/openstack.py +12 -6
- teuthology/provision/cloud/util.py +1 -2
- teuthology/provision/downburst.py +83 -29
- teuthology/provision/fog.py +68 -20
- teuthology/provision/openstack.py +5 -4
- teuthology/provision/pelagos.py +13 -5
- teuthology/repo_utils.py +91 -44
- teuthology/report.py +57 -35
- teuthology/results.py +5 -3
- teuthology/run.py +21 -15
- teuthology/run_tasks.py +114 -40
- teuthology/schedule.py +4 -3
- teuthology/scrape.py +28 -22
- teuthology/suite/__init__.py +75 -46
- teuthology/suite/build_matrix.py +34 -24
- teuthology/suite/fragment-merge.lua +105 -0
- teuthology/suite/matrix.py +31 -2
- teuthology/suite/merge.py +175 -0
- teuthology/suite/placeholder.py +8 -8
- teuthology/suite/run.py +204 -102
- teuthology/suite/util.py +67 -211
- teuthology/task/__init__.py +1 -1
- teuthology/task/ansible.py +101 -31
- teuthology/task/buildpackages.py +2 -2
- teuthology/task/ceph_ansible.py +13 -6
- teuthology/task/cephmetrics.py +2 -1
- teuthology/task/clock.py +33 -14
- teuthology/task/exec.py +18 -0
- teuthology/task/hadoop.py +2 -2
- teuthology/task/install/__init__.py +51 -22
- teuthology/task/install/bin/adjust-ulimits +16 -0
- teuthology/task/install/bin/daemon-helper +114 -0
- teuthology/task/install/bin/stdin-killer +263 -0
- teuthology/task/install/deb.py +24 -4
- teuthology/task/install/redhat.py +36 -32
- teuthology/task/install/rpm.py +41 -14
- teuthology/task/install/util.py +48 -22
- teuthology/task/internal/__init__.py +69 -11
- teuthology/task/internal/edit_sudoers.sh +10 -0
- teuthology/task/internal/lock_machines.py +3 -133
- teuthology/task/internal/redhat.py +48 -28
- teuthology/task/internal/syslog.py +31 -8
- teuthology/task/kernel.py +155 -147
- teuthology/task/lockfile.py +1 -1
- teuthology/task/mpi.py +10 -10
- teuthology/task/pcp.py +1 -1
- teuthology/task/selinux.py +17 -8
- teuthology/task/ssh_keys.py +6 -6
- teuthology/task/tests/__init__.py +137 -77
- teuthology/task/tests/test_fetch_coredumps.py +116 -0
- teuthology/task/tests/test_run.py +4 -4
- teuthology/timer.py +3 -3
- teuthology/util/loggerfile.py +19 -0
- teuthology/util/scanner.py +159 -0
- teuthology/util/sentry.py +52 -0
- teuthology/util/time.py +52 -0
- teuthology-1.2.0.data/scripts/adjust-ulimits +16 -0
- teuthology-1.2.0.data/scripts/daemon-helper +114 -0
- teuthology-1.2.0.data/scripts/stdin-killer +263 -0
- teuthology-1.2.0.dist-info/METADATA +89 -0
- teuthology-1.2.0.dist-info/RECORD +174 -0
- {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/WHEEL +1 -1
- {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/entry_points.txt +5 -2
- scripts/nuke.py +0 -45
- scripts/worker.py +0 -37
- teuthology/nuke/actions.py +0 -456
- teuthology/openstack/test/__init__.py +0 -0
- teuthology/openstack/test/openstack-integration.py +0 -286
- teuthology/openstack/test/test_config.py +0 -35
- teuthology/openstack/test/test_openstack.py +0 -1695
- teuthology/orchestra/test/__init__.py +0 -0
- teuthology/orchestra/test/integration/__init__.py +0 -0
- teuthology/orchestra/test/integration/test_integration.py +0 -94
- teuthology/orchestra/test/test_cluster.py +0 -240
- teuthology/orchestra/test/test_connection.py +0 -106
- teuthology/orchestra/test/test_console.py +0 -217
- teuthology/orchestra/test/test_opsys.py +0 -404
- teuthology/orchestra/test/test_remote.py +0 -185
- teuthology/orchestra/test/test_run.py +0 -286
- teuthology/orchestra/test/test_systemd.py +0 -54
- teuthology/orchestra/test/util.py +0 -12
- teuthology/sentry.py +0 -18
- teuthology/test/__init__.py +0 -0
- teuthology/test/fake_archive.py +0 -107
- teuthology/test/fake_fs.py +0 -92
- teuthology/test/integration/__init__.py +0 -0
- teuthology/test/integration/test_suite.py +0 -86
- teuthology/test/task/__init__.py +0 -205
- teuthology/test/task/test_ansible.py +0 -624
- teuthology/test/task/test_ceph_ansible.py +0 -176
- teuthology/test/task/test_console_log.py +0 -88
- teuthology/test/task/test_install.py +0 -337
- teuthology/test/task/test_internal.py +0 -57
- teuthology/test/task/test_kernel.py +0 -243
- teuthology/test/task/test_pcp.py +0 -379
- teuthology/test/task/test_selinux.py +0 -35
- teuthology/test/test_config.py +0 -189
- teuthology/test/test_contextutil.py +0 -68
- teuthology/test/test_describe_tests.py +0 -316
- teuthology/test/test_email_sleep_before_teardown.py +0 -81
- teuthology/test/test_exit.py +0 -97
- teuthology/test/test_get_distro.py +0 -47
- teuthology/test/test_get_distro_version.py +0 -47
- teuthology/test/test_get_multi_machine_types.py +0 -27
- teuthology/test/test_job_status.py +0 -60
- teuthology/test/test_ls.py +0 -48
- teuthology/test/test_misc.py +0 -368
- teuthology/test/test_nuke.py +0 -232
- teuthology/test/test_packaging.py +0 -763
- teuthology/test/test_parallel.py +0 -28
- teuthology/test/test_repo_utils.py +0 -204
- teuthology/test/test_report.py +0 -77
- teuthology/test/test_results.py +0 -155
- teuthology/test/test_run.py +0 -238
- teuthology/test/test_safepath.py +0 -55
- teuthology/test/test_schedule.py +0 -45
- teuthology/test/test_scrape.py +0 -167
- teuthology/test/test_timer.py +0 -80
- teuthology/test/test_vps_os_vers_parameter_checking.py +0 -84
- teuthology/test/test_worker.py +0 -303
- teuthology/worker.py +0 -339
- teuthology-1.0.0.dist-info/METADATA +0 -76
- teuthology-1.0.0.dist-info/RECORD +0 -210
- {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/LICENSE +0 -0
- {teuthology-1.0.0.dist-info → teuthology-1.2.0.dist-info}/top_level.txt +0 -0
teuthology/nuke/actions.py
DELETED
@@ -1,456 +0,0 @@
|
|
1
|
-
import logging
|
2
|
-
import time
|
3
|
-
|
4
|
-
from teuthology.misc import get_testdir, reconnect
|
5
|
-
from teuthology.orchestra import run
|
6
|
-
from teuthology.orchestra.remote import Remote
|
7
|
-
from teuthology.task import install as install_task
|
8
|
-
|
9
|
-
|
10
|
-
log = logging.getLogger(__name__)
|
11
|
-
|
12
|
-
|
13
|
-
def clear_firewall(ctx):
    """
    Strip all teuthology-created iptables rules from every node.

    Rules added by teuthology carry a comment containing the word
    'teuthology'; any other firewall rules are left untouched.
    """
    log.info("Clearing teuthology firewall rules...")
    # Filter the saved ruleset through grep and re-apply what remains.
    flush_cmd = "iptables-save | grep -v teuthology | iptables-restore"
    ctx.cluster.run(args=["sudo", "sh", "-c", flush_cmd])
    log.info("Cleared teuthology firewall rules.")
|
27
|
-
|
28
|
-
|
29
|
-
def shutdown_daemons(ctx):
    """
    Stop Ceph services, unmount ceph-fuse/rbd-fuse mounts, and kill any
    leftover Ceph-related processes on every node in the cluster.

    Best effort: the service stop runs with check_status=False and the
    kill chain ends in '|| true', so missing services/binaries are ignored.
    """
    log.info('Unmounting ceph-fuse and killing daemons...')
    # Try upstart, sysvinit and systemd stops in turn; only one of the
    # three will exist on any given distro, hence the '||' chaining.
    ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
                          'sudo', 'service', 'ceph', 'stop', run.Raw('||'),
                          'sudo', 'systemctl', 'stop', 'ceph.target'],
                    check_status=False, timeout=180)
    ctx.cluster.run(
        args=[
            # Unmount any ceph-fuse mount points found in /etc/mtab.
            'if', 'grep', '-q', 'ceph-fuse', '/etc/mtab', run.Raw(';'),
            'then',
            'grep', 'ceph-fuse', '/etc/mtab', run.Raw('|'),
            'grep', '-o', " /.* fuse", run.Raw('|'),
            'grep', '-o', "/.* ", run.Raw('|'),
            'xargs', '-n', '1', 'sudo', 'fusermount', '-u', run.Raw(';'),
            'fi',
            run.Raw(';'),
            # Likewise for rbd-fuse mount points.
            'if', 'grep', '-q', 'rbd-fuse', '/etc/mtab', run.Raw(';'),
            'then',
            'grep', 'rbd-fuse', '/etc/mtab', run.Raw('|'),
            'grep', '-o', " /.* fuse", run.Raw('|'),
            'grep', '-o', "/.* ", run.Raw('|'),
            'xargs', '-n', '1', 'sudo', 'fusermount', '-u', run.Raw(';'),
            'fi',
            run.Raw(';'),
            # Kill any remaining daemons and test tools outright.
            'sudo',
            'killall',
            '--quiet',
            'ceph-mon',
            'ceph-osd',
            'ceph-mds',
            'ceph-mgr',
            'ceph-fuse',
            'ceph-disk',
            'radosgw',
            'ceph_test_rados',
            'rados',
            'rbd-fuse',
            'apache2',
            run.Raw('||'),
            'true',  # ignore errors from ceph binaries not being found
        ],
        timeout=120,
    )
    log.info('All daemons killed.')
|
73
|
-
|
74
|
-
|
75
|
-
def kill_hadoop(ctx):
    """Forcefully terminate any Hadoop java processes on all nodes."""
    log.info("Terminating Hadoop services...")
    # Best effort: a non-zero exit (nothing matched) is ignored.
    hadoop_kill = ["pkill", "-f", "-KILL", "java.*hadoop"]
    ctx.cluster.run(args=hadoop_kill, check_status=False, timeout=60)
|
83
|
-
|
84
|
-
|
85
|
-
def kill_valgrind(ctx):
    """
    SIGKILL any leftover valgrind.bin processes on every node.

    Workaround for http://tracker.ceph.com/issues/17084
    """
    kill_cmd = ['sudo', 'pkill', '-f', '-9', 'valgrind.bin']
    ctx.cluster.run(args=kill_cmd, check_status=False, timeout=20)
|
92
|
-
|
93
|
-
|
94
|
-
def remove_osd_mounts(ctx):
    """
    unmount any osd data mounts (scratch disks)
    """
    log.info('Unmount any osd data directories...')
    # Find /var/lib/ceph/osd/* entries in the mount table and lazy-unmount
    # ('-l') each one; the trailing 'true' keeps the exit status zero even
    # when nothing was mounted.
    ctx.cluster.run(
        args=[
            'grep',
            '/var/lib/ceph/osd/',
            '/etc/mtab',
            run.Raw('|'),
            'awk', '{print $2}', run.Raw('|'),
            'xargs', '-r',
            'sudo', 'umount', '-l', run.Raw(';'),
            'true'
        ],
        timeout=120
    )
|
112
|
-
|
113
|
-
|
114
|
-
def remove_osd_tmpfs(ctx):
    """
    unmount tmpfs mounts

    Looks for tmpfs filesystems mounted under /mnt in /etc/mtab and
    unmounts each of them on every node; errors are swallowed by the
    trailing 'true'.
    """
    log.info('Unmount any osd tmpfs dirs...')
    ctx.cluster.run(
        args=[
            # Raw string: '\s' is an egrep regex class, not a Python escape
            # (the plain literal raised an invalid-escape warning on
            # modern Pythons).
            'egrep', r'tmpfs\s+/mnt', '/etc/mtab', run.Raw('|'),
            'awk', '{print $2}', run.Raw('|'),
            'xargs', '-r',
            'sudo', 'umount', run.Raw(';'),
            'true'
        ],
        timeout=120
    )
|
129
|
-
|
130
|
-
|
131
|
-
def stale_kernel_mount(remote):
    # Return True if /sys/kernel/debug/ceph has any entries on this remote,
    # i.e. the kernel client still holds a (possibly stale) ceph mount.
    # 'find ... | read' exits 0 only when find produced at least one line.
    proc = remote.run(
        args=[
            'sudo', 'find',
            '/sys/kernel/debug/ceph',
            '-mindepth', '1',
            '-type', 'd',
            run.Raw('|'),
            'read'
        ],
        check_status=False
    )
    return proc.exitstatus == 0
|
144
|
-
|
145
|
-
|
146
|
-
def reboot(ctx, remotes):
    """
    Reboot each remote and wait for all of them to come back up.

    A remote with a stale kernel-client mount cannot sync or unmount, so
    it gets a forced, no-sync reboot via /proc/sysrq-trigger; otherwise a
    plain 'sudo reboot' is issued.

    :param ctx:     teuthology context, used to reconnect afterwards
    :param remotes: iterable of remotes to reboot
    """
    for remote in remotes:
        if stale_kernel_mount(remote):
            # Fixed: log.warn() is a deprecated alias of log.warning().
            log.warning('Stale kernel mount on %s!', remote.name)
            log.info('force/no-sync rebooting %s', remote.name)
            # -n is ignored in systemd versions through v229, which means this
            # only works on trusty -- on 7.3 (v219) and xenial (v229) reboot -n
            # still calls sync().
            # args = ['sync', run.Raw('&'),
            #         'sleep', '5', run.Raw(';'),
            #         'sudo', 'reboot', '-f', '-n']
            # sysrq keys: s = emergency sync, u = remount read-only,
            # b = reboot immediately.
            args = ['for', 'sysrq', 'in', 's', 'u', 'b', run.Raw(';'),
                    'do', 'echo', run.Raw('$sysrq'), run.Raw('|'),
                    'sudo', 'tee', '/proc/sysrq-trigger', run.Raw(';'),
                    'done']
        else:
            log.info('rebooting %s', remote.name)
            args = ['sudo', 'reboot']
        try:
            remote.run(args=args, wait=False)
        except Exception:
            log.exception('ignoring exception during reboot command')
            # we just ignore these procs because reboot -f doesn't actually
            # send anything back to the ssh client!
    if remotes:
        log.info('waiting for nodes to reboot')
        time.sleep(8)  # if we try and reconnect too quickly, it succeeds!
        reconnect(ctx, 480)  # allow 8 minutes for the reboots
|
174
|
-
|
175
|
-
|
176
|
-
def reset_syslog_dir(ctx):
    """
    Remove teuthology's rsyslog override (/etc/rsyslog.d/80-cephtest.conf)
    from every node and restart rsyslog so logging reverts to its default
    locations.  Collects the per-node proc handles and waits on each.
    """
    log.info('Resetting syslog output locations...')
    nodes = {}  # remote name -> proc handle returned by remote.run()
    for remote in ctx.cluster.remotes.keys():
        proc = remote.run(
            args=[
                'if', 'test', '-e', '/etc/rsyslog.d/80-cephtest.conf',
                run.Raw(';'),
                'then',
                'sudo', 'rm', '-f', '--', '/etc/rsyslog.d/80-cephtest.conf',
                run.Raw('&&'),
                'sudo', 'service', 'rsyslog', 'restart',
                run.Raw(';'),
                'fi',
                run.Raw(';'),
            ],
            timeout=60,
        )
        nodes[remote.name] = proc

    for name, proc in nodes.items():
        log.info('Waiting for %s to restart syslog...', name)
        proc.wait()
|
199
|
-
|
200
|
-
|
201
|
-
def dpkg_configure(ctx):
    """
    On every Debian-based node, finish any interrupted package operations
    by running 'dpkg --configure -a' followed by 'apt-get -f install'.

    Non-deb nodes are skipped; failures are swallowed (trailing ':' plus
    check_status=False).
    """
    for remote in ctx.cluster.remotes.keys():
        if remote.os.package_type != 'deb':
            continue
        log.info(
            'Waiting for dpkg --configure -a and apt-get -f install...')
        remote.run(
            args=[
                'sudo', 'dpkg', '--configure', '-a',
                run.Raw(';'),
                # Non-interactive so broken packages can't hang the node
                # waiting for operator input.
                'sudo', 'DEBIAN_FRONTEND=noninteractive',
                'apt-get', '-y', '--force-yes', '-f', 'install',
                run.Raw('||'),
                ':',
            ],
            timeout=180,
            check_status=False,
        )
|
219
|
-
|
220
|
-
|
221
|
-
def remove_yum_timedhosts(ctx):
    """
    Delete yum's 'timedhosts' cache files on every rpm-based node.

    Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1233329
    """
    log.info("Removing yum timedhosts files...")
    for remote in ctx.cluster.remotes.keys():
        if remote.os.package_type != 'rpm':
            continue
        remote.run(
            # Raw string: '\;' terminates find's -exec on the remote shell
            # and is not a valid Python escape sequence (the plain literal
            # raised an invalid-escape warning on modern Pythons).
            args=r"sudo find /var/cache/yum -name 'timedhosts' -exec rm {} \;",
            check_status=False, timeout=180
        )
|
231
|
-
|
232
|
-
|
233
|
-
def remove_ceph_packages(ctx):
    """
    remove ceph and ceph dependent packages by force
    force is needed since the node's repo might have changed and
    in many cases autocorrect will not work due to missing packages
    due to repo changes
    """
    log.info("Force remove ceph packages")
    ceph_packages_to_remove = ['ceph-common', 'ceph-mon', 'ceph-osd',
                               'libcephfs1', 'libcephfs2',
                               'librados2', 'librgw2', 'librbd1', 'python-rgw',
                               'ceph-selinux', 'python-cephfs', 'ceph-base',
                               'python-rbd', 'python-rados', 'ceph-mds',
                               'ceph-mgr', 'libcephfs-java', 'libcephfs-jni',
                               'ceph-deploy', 'libapache2-mod-fastcgi'
                               ]
    # Idiomatic separator.join(seq) instead of str.join(' ', seq).
    pkgs = ' '.join(ceph_packages_to_remove)
    for remote in ctx.cluster.remotes.keys():
        if remote.os.package_type == 'rpm':
            log.info("Remove any broken repos")
            dist_release = remote.os.name
            # Globs go through run.Raw so the remote shell expands them.
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/yum.repos.d/*ceph*")],
                check_status=False
            )
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/yum.repos.d/*fcgi*")],
                check_status=False,
            )
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/yum.repos.d/*samba*")],
                check_status=False,
            )
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/yum.repos.d/*nfs-ganesha*")],
                check_status=False,
            )
            remote.run(
                args=['sudo', 'rpm', '--rebuilddb']
            )
            if dist_release in ['opensuse', 'sle']:
                remote.sh('sudo zypper clean')
                log.info('Remove any ceph packages')
                # NOTE(review): no package list is passed to zypper/yum
                # remove below, matching the original behavior -- confirm
                # whether 'pkgs' was meant to be appended here.
                remote.sh('sudo zypper remove --non-interactive',
                          check_status=False
                          )
            else:
                remote.sh('sudo yum clean all')
                log.info('Remove any ceph packages')
                remote.sh('sudo yum remove -y', check_status=False)
        else:
            log.info("Remove any broken repos")
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/apt/sources.list.d/*ceph*")],
                check_status=False,
            )
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/apt/sources.list.d/*samba*")],
                check_status=False,
            )
            remote.run(
                args=['sudo', 'rm', run.Raw("/etc/apt/sources.list.d/*nfs-ganesha*")],
                check_status=False,
            )
            log.info("Autoclean")
            remote.run(
                args=['sudo', 'apt-get', 'autoclean'],
                check_status=False,
            )
            log.info('Remove any ceph packages')
            remote.run(
                args=[
                    'sudo', 'dpkg', '--remove', '--force-remove-reinstreq',
                    run.Raw(pkgs)
                ],
                check_status=False
            )
            log.info("Autoclean")
            remote.run(
                args=['sudo', 'apt-get', 'autoclean']
            )
|
314
|
-
|
315
|
-
|
316
|
-
def remove_installed_packages(ctx):
    """
    Remove every package the install task may have installed, plus a few
    extras (salt, calamari, multipath tooling), and drop the package
    sources the install task configured.
    """
    # Repair any half-configured dpkg state first so removals can proceed.
    dpkg_configure(ctx)
    conf = dict(
        project='ceph',
        debuginfo='true',
    )
    packages = install_task.get_package_list(ctx, conf)
    debs = packages['deb'] + \
        ['salt-common', 'salt-minion', 'calamari-server',
         'python-rados', 'multipath-tools']
    rpms = packages['rpm'] + \
        ['salt-common', 'salt-minion', 'calamari-server',
         'multipath-tools', 'device-mapper-multipath']
    install_task.remove_packages(
        ctx,
        conf,
        dict(
            deb=debs,
            rpm=rpms,
        )
    )
    install_task.remove_sources(ctx, conf)
|
338
|
-
|
339
|
-
|
340
|
-
def remove_ceph_data(ctx):
    """Delete leftover Ceph state (/etc/ceph, /var/run/ceph*) on all nodes."""
    log.info("Removing any stale ceph data...")
    # run.Raw keeps the glob unquoted so the remote shell expands it.
    cleanup_cmd = ['sudo', 'rm', '-rf', '/etc/ceph',
                   run.Raw('/var/run/ceph*')]
    ctx.cluster.run(args=cleanup_cmd)
|
348
|
-
|
349
|
-
|
350
|
-
def remove_testing_tree(ctx):
    """
    Wipe the per-run test directory on every node, plus the legacy
    /tmp/cephtest and /home/ubuntu/cephtest locations.
    """
    log.info('Clearing filesystem of test data...')
    cmd = ['sudo', 'rm', '-rf', get_testdir(ctx)]
    # just for old time's sake
    for legacy_dir in ('/tmp/cephtest', '/home/ubuntu/cephtest'):
        cmd += [run.Raw('&&'), 'sudo', 'rm', '-rf', legacy_dir]
    ctx.cluster.run(args=cmd)
|
362
|
-
|
363
|
-
|
364
|
-
def remove_configuration_files(ctx):
    """
    Goes through a list of commonly used configuration files used for testing
    that should not be left behind.

    For example, sometimes ceph-deploy may be configured via
    ``~/.cephdeploy.conf`` to alter how it handles installation by specifying
    a default section in its config with custom locations.
    """
    stale_config_cmd = ['rm', '-f', '/home/ubuntu/.cephdeploy.conf']
    ctx.cluster.run(args=stale_config_cmd, timeout=30)
|
379
|
-
|
380
|
-
|
381
|
-
def undo_multipath(ctx):
    """
    Undo any multipath device mappings created, and
    remove the packages/daemon that manages them so they don't
    come back unless specifically requested by the test.
    """
    # NOTE(review): despite the docstring, this only flushes the device
    # maps ('multipath -F'); no packages or daemons are removed here.
    log.info('Removing any multipath config/pkgs...')
    for remote in ctx.cluster.remotes.keys():
        remote.run(
            args=[
                'sudo', 'multipath', '-F',
            ],
            check_status=False,
            timeout=60
        )
|
396
|
-
|
397
|
-
|
398
|
-
def synch_clocks(remotes):
    """
    Force a one-shot clock synchronization on each remote.

    Stops whichever time daemon is present (ntp/ntpd/chronyd), steps the
    clock with the first available tool (ntpdate-debian / ntp -gq /
    ntpd -gq / chronyc sources), writes system time to the hardware
    clock, then restarts the daemon.  Errors are ignored via the final
    '|| true'.
    """
    log.info('Synchronizing clocks...')
    for remote in remotes:
        remote.run(
            args=[
                'sudo', 'systemctl', 'stop', 'ntp.service', run.Raw('||'),
                'sudo', 'systemctl', 'stop', 'ntpd.service', run.Raw('||'),
                'sudo', 'systemctl', 'stop', 'chronyd.service',
                run.Raw('&&'),
                'sudo', 'ntpdate-debian', run.Raw('||'),
                'sudo', 'ntp', '-gq', run.Raw('||'),
                'sudo', 'ntpd', '-gq', run.Raw('||'),
                'sudo', 'chronyc', 'sources',
                run.Raw('&&'),
                'sudo', 'hwclock', '--systohc', '--utc',
                run.Raw('&&'),
                'sudo', 'systemctl', 'start', 'ntp.service', run.Raw('||'),
                'sudo', 'systemctl', 'start', 'ntpd.service', run.Raw('||'),
                'sudo', 'systemctl', 'start', 'chronyd.service',
                run.Raw('||'),
                'true',  # ignore errors; we may be racing with ntpd startup
            ],
            timeout=60,
        )
|
422
|
-
|
423
|
-
|
424
|
-
def unlock_firmware_repo(ctx):
    """Drop any stale git index lock left in the firmware checkout."""
    log.info('Making sure firmware.git is not locked...')
    index_lock = '/lib/firmware/updates/.git/index.lock'
    ctx.cluster.run(args=['sudo', 'rm', '-f', index_lock])
|
428
|
-
|
429
|
-
|
430
|
-
def check_console(hostname):
    """
    Ensure a node's remote console is responsive, power-cycling or
    powering the node on when it is not.

    :param hostname: hostname of the node to check; nodes without a
                     console configured are silently skipped
    """
    remote = Remote(hostname)
    shortname = remote.shortname
    console = remote.console
    if not console:
        return
    cname = '{host}.{domain}'.format(
        host=shortname,
        domain=console.ipmidomain,
    )
    log.info('checking console status of %s' % cname)
    if console.check_status():
        log.info('console ready on %s' % cname)
        return
    # Console not ready: power-cycle if the node is on, else power it on.
    if console.check_power('on'):
        log.info('attempting to reboot %s' % cname)
        console.power_cycle()
    else:
        log.info('attempting to power on %s' % cname)
        console.power_on()
    timeout = 100
    log.info('checking console status of %s with timeout %s' %
             (cname, timeout))
    if console.check_status(timeout=timeout):
        log.info('console ready on %s' % cname)
    else:
        # Fixed: message previously ended with a dangling ", ".
        log.error("Failed to get console status for %s" % cname)
|
File without changes
|