teuthology 1.1.0-py3-none-any.whl → 1.2.1-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- scripts/describe.py +1 -0
- scripts/dispatcher.py +55 -26
- scripts/exporter.py +18 -0
- scripts/lock.py +1 -1
- scripts/node_cleanup.py +58 -0
- scripts/openstack.py +9 -9
- scripts/results.py +12 -11
- scripts/schedule.py +4 -0
- scripts/suite.py +57 -16
- scripts/supervisor.py +44 -0
- scripts/update_inventory.py +10 -4
- teuthology/__init__.py +24 -26
- teuthology/beanstalk.py +4 -3
- teuthology/config.py +16 -6
- teuthology/contextutil.py +18 -14
- teuthology/describe_tests.py +25 -18
- teuthology/dispatcher/__init__.py +210 -35
- teuthology/dispatcher/supervisor.py +140 -58
- teuthology/exceptions.py +43 -0
- teuthology/exporter.py +347 -0
- teuthology/kill.py +76 -81
- teuthology/lock/cli.py +3 -3
- teuthology/lock/ops.py +135 -61
- teuthology/lock/query.py +61 -44
- teuthology/ls.py +1 -1
- teuthology/misc.py +61 -75
- teuthology/nuke/__init__.py +12 -353
- teuthology/openstack/__init__.py +4 -3
- teuthology/openstack/openstack-centos-7.0-user-data.txt +1 -1
- teuthology/openstack/openstack-centos-7.1-user-data.txt +1 -1
- teuthology/openstack/openstack-centos-7.2-user-data.txt +1 -1
- teuthology/openstack/openstack-debian-8.0-user-data.txt +1 -1
- teuthology/openstack/openstack-opensuse-42.1-user-data.txt +1 -1
- teuthology/openstack/openstack-teuthology.cron +0 -1
- teuthology/orchestra/cluster.py +49 -7
- teuthology/orchestra/connection.py +17 -4
- teuthology/orchestra/console.py +111 -50
- teuthology/orchestra/daemon/cephadmunit.py +15 -2
- teuthology/orchestra/daemon/state.py +8 -1
- teuthology/orchestra/daemon/systemd.py +4 -4
- teuthology/orchestra/opsys.py +30 -11
- teuthology/orchestra/remote.py +405 -338
- teuthology/orchestra/run.py +3 -3
- teuthology/packaging.py +19 -16
- teuthology/provision/__init__.py +30 -10
- teuthology/provision/cloud/openstack.py +12 -6
- teuthology/provision/cloud/util.py +1 -2
- teuthology/provision/downburst.py +4 -3
- teuthology/provision/fog.py +68 -20
- teuthology/provision/openstack.py +5 -4
- teuthology/provision/pelagos.py +1 -1
- teuthology/repo_utils.py +43 -13
- teuthology/report.py +57 -35
- teuthology/results.py +5 -3
- teuthology/run.py +13 -14
- teuthology/run_tasks.py +27 -43
- teuthology/schedule.py +4 -3
- teuthology/scrape.py +28 -22
- teuthology/suite/__init__.py +74 -45
- teuthology/suite/build_matrix.py +34 -24
- teuthology/suite/fragment-merge.lua +105 -0
- teuthology/suite/matrix.py +31 -2
- teuthology/suite/merge.py +175 -0
- teuthology/suite/placeholder.py +6 -9
- teuthology/suite/run.py +175 -100
- teuthology/suite/util.py +64 -218
- teuthology/task/__init__.py +1 -1
- teuthology/task/ansible.py +101 -32
- teuthology/task/buildpackages.py +2 -2
- teuthology/task/ceph_ansible.py +13 -6
- teuthology/task/cephmetrics.py +2 -1
- teuthology/task/clock.py +33 -14
- teuthology/task/exec.py +18 -0
- teuthology/task/hadoop.py +2 -2
- teuthology/task/install/__init__.py +29 -7
- teuthology/task/install/bin/adjust-ulimits +16 -0
- teuthology/task/install/bin/daemon-helper +114 -0
- teuthology/task/install/bin/stdin-killer +263 -0
- teuthology/task/install/deb.py +1 -1
- teuthology/task/install/rpm.py +17 -5
- teuthology/task/install/util.py +3 -3
- teuthology/task/internal/__init__.py +41 -10
- teuthology/task/internal/edit_sudoers.sh +10 -0
- teuthology/task/internal/lock_machines.py +2 -9
- teuthology/task/internal/redhat.py +31 -1
- teuthology/task/internal/syslog.py +31 -8
- teuthology/task/kernel.py +152 -145
- teuthology/task/lockfile.py +1 -1
- teuthology/task/mpi.py +10 -10
- teuthology/task/pcp.py +1 -1
- teuthology/task/selinux.py +16 -8
- teuthology/task/ssh_keys.py +4 -4
- teuthology/timer.py +3 -3
- teuthology/util/loggerfile.py +19 -0
- teuthology/util/scanner.py +159 -0
- teuthology/util/sentry.py +52 -0
- teuthology/util/time.py +52 -0
- teuthology-1.2.1.data/scripts/adjust-ulimits +16 -0
- teuthology-1.2.1.data/scripts/daemon-helper +114 -0
- teuthology-1.2.1.data/scripts/stdin-killer +263 -0
- teuthology-1.2.1.dist-info/METADATA +88 -0
- teuthology-1.2.1.dist-info/RECORD +168 -0
- {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/WHEEL +1 -1
- {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/entry_points.txt +3 -2
- scripts/nuke.py +0 -47
- scripts/worker.py +0 -37
- teuthology/lock/test/__init__.py +0 -0
- teuthology/lock/test/test_lock.py +0 -7
- teuthology/nuke/actions.py +0 -456
- teuthology/openstack/test/__init__.py +0 -0
- teuthology/openstack/test/openstack-integration.py +0 -286
- teuthology/openstack/test/test_config.py +0 -35
- teuthology/openstack/test/test_openstack.py +0 -1695
- teuthology/orchestra/test/__init__.py +0 -0
- teuthology/orchestra/test/integration/__init__.py +0 -0
- teuthology/orchestra/test/integration/test_integration.py +0 -94
- teuthology/orchestra/test/test_cluster.py +0 -240
- teuthology/orchestra/test/test_connection.py +0 -106
- teuthology/orchestra/test/test_console.py +0 -217
- teuthology/orchestra/test/test_opsys.py +0 -404
- teuthology/orchestra/test/test_remote.py +0 -185
- teuthology/orchestra/test/test_run.py +0 -286
- teuthology/orchestra/test/test_systemd.py +0 -54
- teuthology/orchestra/test/util.py +0 -12
- teuthology/task/tests/__init__.py +0 -110
- teuthology/task/tests/test_locking.py +0 -25
- teuthology/task/tests/test_run.py +0 -40
- teuthology/test/__init__.py +0 -0
- teuthology/test/fake_archive.py +0 -107
- teuthology/test/fake_fs.py +0 -92
- teuthology/test/integration/__init__.py +0 -0
- teuthology/test/integration/test_suite.py +0 -86
- teuthology/test/task/__init__.py +0 -205
- teuthology/test/task/test_ansible.py +0 -624
- teuthology/test/task/test_ceph_ansible.py +0 -176
- teuthology/test/task/test_console_log.py +0 -88
- teuthology/test/task/test_install.py +0 -337
- teuthology/test/task/test_internal.py +0 -57
- teuthology/test/task/test_kernel.py +0 -243
- teuthology/test/task/test_pcp.py +0 -379
- teuthology/test/task/test_selinux.py +0 -35
- teuthology/test/test_config.py +0 -189
- teuthology/test/test_contextutil.py +0 -68
- teuthology/test/test_describe_tests.py +0 -316
- teuthology/test/test_email_sleep_before_teardown.py +0 -81
- teuthology/test/test_exit.py +0 -97
- teuthology/test/test_get_distro.py +0 -47
- teuthology/test/test_get_distro_version.py +0 -47
- teuthology/test/test_get_multi_machine_types.py +0 -27
- teuthology/test/test_job_status.py +0 -60
- teuthology/test/test_ls.py +0 -48
- teuthology/test/test_misc.py +0 -391
- teuthology/test/test_nuke.py +0 -290
- teuthology/test/test_packaging.py +0 -763
- teuthology/test/test_parallel.py +0 -28
- teuthology/test/test_repo_utils.py +0 -225
- teuthology/test/test_report.py +0 -77
- teuthology/test/test_results.py +0 -155
- teuthology/test/test_run.py +0 -239
- teuthology/test/test_safepath.py +0 -55
- teuthology/test/test_schedule.py +0 -45
- teuthology/test/test_scrape.py +0 -167
- teuthology/test/test_timer.py +0 -80
- teuthology/test/test_vps_os_vers_parameter_checking.py +0 -84
- teuthology/test/test_worker.py +0 -303
- teuthology/worker.py +0 -354
- teuthology-1.1.0.dist-info/METADATA +0 -76
- teuthology-1.1.0.dist-info/RECORD +0 -213
- {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/LICENSE +0 -0
- {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/top_level.txt +0 -0
teuthology/test/test_worker.py
DELETED
@@ -1,303 +0,0 @@
-import beanstalkc
-import os
-
-from unittest.mock import patch, Mock, MagicMock
-from datetime import datetime, timedelta
-
-from teuthology import worker
-
-from teuthology.contextutil import MaxWhileTries
-
-
-class TestWorker(object):
-    def setup(self):
-        self.ctx = Mock()
-        self.ctx.verbose = True
-        self.ctx.archive_dir = '/archive/dir'
-        self.ctx.log_dir = '/log/dir'
-        self.ctx.tube = 'tube'
-
-    @patch("os.path.exists")
-    def test_restart_file_path_doesnt_exist(self, m_exists):
-        m_exists.return_value = False
-        result = worker.sentinel(worker.restart_file_path)
-        assert not result
-
-    @patch("os.path.getmtime")
-    @patch("os.path.exists")
-    @patch("teuthology.worker.datetime")
-    def test_needs_restart(self, m_datetime, m_exists, m_getmtime):
-        m_exists.return_value = True
-        m_datetime.utcfromtimestamp.return_value = datetime.utcnow() + timedelta(days=1)
-        result = worker.sentinel(worker.restart_file_path)
-        assert result
-
-    @patch("os.path.getmtime")
-    @patch("os.path.exists")
-    @patch("teuthology.worker.datetime")
-    def test_does_not_need_restart(self, m_datetime, m_exists, getmtime):
-        m_exists.return_value = True
-        m_datetime.utcfromtimestamp.return_value = datetime.utcnow() - timedelta(days=1)
-        result = worker.sentinel(worker.restart_file_path)
-        assert not result
-
-    @patch("os.symlink")
-    def test_symlink_success(self, m_symlink):
-        worker.symlink_worker_log("path/to/worker.log", "path/to/archive")
-        m_symlink.assert_called_with("path/to/worker.log", "path/to/archive/worker.log")
-
-    @patch("teuthology.worker.log")
-    @patch("os.symlink")
-    def test_symlink_failure(self, m_symlink, m_log):
-        m_symlink.side_effect = IOError
-        worker.symlink_worker_log("path/to/worker.log", "path/to/archive")
-        # actually logs the exception
-        assert m_log.exception.called
-
-    @patch("teuthology.worker.run_with_watchdog")
-    @patch("teuthology.worker.teuth_config")
-    @patch("subprocess.Popen")
-    @patch("os.environ")
-    @patch("os.mkdir")
-    @patch("yaml.safe_dump")
-    @patch("tempfile.NamedTemporaryFile")
-    def test_run_job_with_watchdog(self, m_tempfile, m_safe_dump, m_mkdir,
-                                   m_environ, m_popen, m_t_config,
-                                   m_run_watchdog):
-        config = {
-            "suite_path": "suite/path",
-            "config": {"foo": "bar"},
-            "verbose": True,
-            "owner": "the_owner",
-            "archive_path": "archive/path",
-            "name": "the_name",
-            "description": "the_description",
-            "job_id": "1",
-        }
-        m_tmp = MagicMock()
-        temp_file = Mock()
-        temp_file.name = "the_name"
-        m_tmp.__enter__.return_value = temp_file
-        m_tempfile.return_value = m_tmp
-        env = dict(PYTHONPATH="python/path")
-        m_environ.copy.return_value = env
-        m_p = Mock()
-        m_p.returncode = 0
-        m_popen.return_value = m_p
-        m_t_config.results_server = True
-        worker.run_job(config, "teuth/bin/path", "archive/dir", verbose=False)
-        m_run_watchdog.assert_called_with(m_p, config)
-        expected_args = [
-            'teuth/bin/path/teuthology',
-            '-v',
-            '--lock',
-            '--block',
-            '--owner', 'the_owner',
-            '--archive', 'archive/path',
-            '--name', 'the_name',
-            '--description',
-            'the_description',
-            '--',
-            "the_name"
-        ]
-        m_popen.assert_called_with(args=expected_args, env=env)
-
-    @patch("time.sleep")
-    @patch("teuthology.worker.symlink_worker_log")
-    @patch("teuthology.worker.teuth_config")
-    @patch("subprocess.Popen")
-    @patch("os.environ")
-    @patch("os.mkdir")
-    @patch("yaml.safe_dump")
-    @patch("tempfile.NamedTemporaryFile")
-    def test_run_job_no_watchdog(self, m_tempfile, m_safe_dump, m_mkdir,
-                                 m_environ, m_popen, m_t_config, m_symlink_log,
-                                 m_sleep):
-        config = {
-            "suite_path": "suite/path",
-            "config": {"foo": "bar"},
-            "verbose": True,
-            "owner": "the_owner",
-            "archive_path": "archive/path",
-            "name": "the_name",
-            "description": "the_description",
-            "worker_log": "worker/log.log",
-            "job_id": "1",
-        }
-        m_tmp = MagicMock()
-        temp_file = Mock()
-        temp_file.name = "the_name"
-        m_tmp.__enter__.return_value = temp_file
-        m_tempfile.return_value = m_tmp
-        env = dict(PYTHONPATH="python/path")
-        m_environ.copy.return_value = env
-        m_p = Mock()
-        m_p.returncode = 1
-        m_popen.return_value = m_p
-        m_t_config.results_server = False
-        worker.run_job(config, "teuth/bin/path", "archive/dir", verbose=False)
-        m_symlink_log.assert_called_with(config["worker_log"], config["archive_path"])
-
-    @patch("teuthology.worker.report.try_push_job_info")
-    @patch("teuthology.worker.symlink_worker_log")
-    @patch("time.sleep")
-    def test_run_with_watchdog_no_reporting(self, m_sleep, m_symlink_log, m_try_push):
-        config = {
-            "name": "the_name",
-            "job_id": "1",
-            "worker_log": "worker_log",
-            "archive_path": "archive/path",
-            "teuthology_branch": "master"
-        }
-        process = Mock()
-        process.poll.return_value = "not None"
-        worker.run_with_watchdog(process, config)
-        m_symlink_log.assert_called_with(config["worker_log"], config["archive_path"])
-        m_try_push.assert_called_with(
-            dict(name=config["name"], job_id=config["job_id"]),
-            dict(status='dead')
-        )
-
-    @patch("subprocess.Popen")
-    @patch("teuthology.worker.symlink_worker_log")
-    @patch("time.sleep")
-    def test_run_with_watchdog_with_reporting(self, m_sleep, m_symlink_log, m_popen):
-        config = {
-            "name": "the_name",
-            "job_id": "1",
-            "worker_log": "worker_log",
-            "archive_path": "archive/path",
-            "teuthology_branch": "jewel"
-        }
-        process = Mock()
-        process.poll.return_value = "not None"
-        m_proc = Mock()
-        m_proc.poll.return_value = "not None"
-        m_popen.return_value = m_proc
-        worker.run_with_watchdog(process, config)
-        m_symlink_log.assert_called_with(config["worker_log"], config["archive_path"])
-
-    @patch("os.path.isdir")
-    @patch("teuthology.worker.fetch_teuthology")
-    @patch("teuthology.worker.fetch_qa_suite")
-    def test_prep_job(self, m_fetch_qa_suite,
-                      m_fetch_teuthology, m_isdir):
-        config = dict(
-            name="the_name",
-            job_id="1",
-        )
-        archive_dir = '/archive/dir'
-        log_file_path = '/worker/log'
-        m_fetch_teuthology.return_value = '/teuth/path'
-        m_fetch_qa_suite.return_value = '/suite/path'
-        m_isdir.return_value = True
-        got_config, teuth_bin_path = worker.prep_job(
-            config,
-            log_file_path,
-            archive_dir,
-        )
-        assert got_config['worker_log'] == log_file_path
-        assert got_config['archive_path'] == os.path.join(
-            archive_dir,
-            config['name'],
-            config['job_id'],
-        )
-        assert got_config['teuthology_branch'] == 'master'
-        assert m_fetch_teuthology.called_once_with_args(branch='master')
-        assert teuth_bin_path == '/teuth/path/virtualenv/bin'
-        assert m_fetch_qa_suite.called_once_with_args(branch='master')
-        assert got_config['suite_path'] == '/suite/path'
-
-    def build_fake_jobs(self, m_connection, m_job, job_bodies):
-        """
-        Given patched copies of:
-            beanstalkc.Connection
-            beanstalkc.Job
-        And a list of basic job bodies, return a list of mocked Job objects
-        """
-        # Make sure instantiating m_job returns a new object each time
-        m_job.side_effect = lambda **kwargs: Mock(spec=beanstalkc.Job)
-        jobs = []
-        job_id = 0
-        for job_body in job_bodies:
-            job_id += 1
-            job = m_job(conn=m_connection, jid=job_id, body=job_body)
-            job.jid = job_id
-            job.body = job_body
-            jobs.append(job)
-        return jobs
-
-    @patch("teuthology.worker.run_job")
-    @patch("teuthology.worker.prep_job")
-    @patch("beanstalkc.Job", autospec=True)
-    @patch("teuthology.worker.fetch_qa_suite")
-    @patch("teuthology.worker.fetch_teuthology")
-    @patch("teuthology.worker.beanstalk.watch_tube")
-    @patch("teuthology.worker.beanstalk.connect")
-    @patch("os.path.isdir", return_value=True)
-    @patch("teuthology.worker.setup_log_file")
-    def test_main_loop(
-        self, m_setup_log_file, m_isdir, m_connect, m_watch_tube,
-        m_fetch_teuthology, m_fetch_qa_suite, m_job, m_prep_job, m_run_job,
-    ):
-        m_connection = Mock()
-        jobs = self.build_fake_jobs(
-            m_connection,
-            m_job,
-            [
-                'foo: bar',
-                'stop_worker: true',
-            ],
-        )
-        m_connection.reserve.side_effect = jobs
-        m_connect.return_value = m_connection
-        m_prep_job.return_value = (dict(), '/bin/path')
-        worker.main(self.ctx)
-        # There should be one reserve call per item in the jobs list
-        expected_reserve_calls = [
-            dict(timeout=60) for i in range(len(jobs))
-        ]
-        got_reserve_calls = [
-            call[1] for call in m_connection.reserve.call_args_list
-        ]
-        assert got_reserve_calls == expected_reserve_calls
-        for job in jobs:
-            job.bury.assert_called_once_with()
-            job.delete.assert_called_once_with()
-
-    @patch("teuthology.worker.report.try_push_job_info")
-    @patch("teuthology.worker.run_job")
-    @patch("beanstalkc.Job", autospec=True)
-    @patch("teuthology.worker.fetch_qa_suite")
-    @patch("teuthology.worker.fetch_teuthology")
-    @patch("teuthology.worker.beanstalk.watch_tube")
-    @patch("teuthology.worker.beanstalk.connect")
-    @patch("os.path.isdir", return_value=True)
-    @patch("teuthology.worker.setup_log_file")
-    def test_main_loop_13925(
-        self, m_setup_log_file, m_isdir, m_connect, m_watch_tube,
-        m_fetch_teuthology, m_fetch_qa_suite, m_job, m_run_job,
-        m_try_push_job_info,
-    ):
-        m_connection = Mock()
-        jobs = self.build_fake_jobs(
-            m_connection,
-            m_job,
-            [
-                'name: name',
-                'name: name\nstop_worker: true',
-            ],
-        )
-        m_connection.reserve.side_effect = jobs
-        m_connect.return_value = m_connection
-        m_fetch_qa_suite.side_effect = [
-            '/suite/path',
-            MaxWhileTries(),
-            MaxWhileTries(),
-        ]
-        worker.main(self.ctx)
-        assert len(m_run_job.call_args_list) == 0
-        assert len(m_try_push_job_info.call_args_list) == len(jobs)
-        for i in range(len(jobs)):
-            push_call = m_try_push_job_info.call_args_list[i]
-            assert push_call[0][1]['status'] == 'dead'
teuthology/worker.py
DELETED
@@ -1,354 +0,0 @@
-import logging
-import os
-import subprocess
-import sys
-import tempfile
-import time
-import yaml
-
-from datetime import datetime
-
-from teuthology import setup_log_file, install_except_hook
-from teuthology import beanstalk
-from teuthology import report
-from teuthology import safepath
-from teuthology.config import config as teuth_config
-from teuthology.config import set_config_attr
-from teuthology.exceptions import BranchNotFoundError, CommitNotFoundError, SkipJob, MaxWhileTries
-from teuthology.kill import kill_job
-from teuthology.repo_utils import fetch_qa_suite, fetch_teuthology, ls_remote, build_git_url
-
-log = logging.getLogger(__name__)
-start_time = datetime.utcnow()
-restart_file_path = '/tmp/teuthology-restart-workers'
-stop_file_path = '/tmp/teuthology-stop-workers'
-
-
-def sentinel(path):
-    if not os.path.exists(path):
-        return False
-    file_mtime = datetime.utcfromtimestamp(os.path.getmtime(path))
-    if file_mtime > start_time:
-        return True
-    else:
-        return False
-
-
-def restart():
-    log.info('Restarting...')
-    args = sys.argv[:]
-    args.insert(0, sys.executable)
-    os.execv(sys.executable, args)
-
-
-def stop():
-    log.info('Stopping...')
-    sys.exit(0)
-
-
-def load_config(ctx=None):
-    teuth_config.load()
-    if ctx is not None:
-        if not os.path.isdir(ctx.archive_dir):
-            sys.exit("{prog}: archive directory must exist: {path}".format(
-                prog=os.path.basename(sys.argv[0]),
-                path=ctx.archive_dir,
-            ))
-        else:
-            teuth_config.archive_base = ctx.archive_dir
-
-
-def main(ctx):
-    loglevel = logging.INFO
-    if ctx.verbose:
-        loglevel = logging.DEBUG
-    log.setLevel(loglevel)
-
-    log_file_path = os.path.join(ctx.log_dir, 'worker.{tube}.{pid}'.format(
-        pid=os.getpid(), tube=ctx.tube,))
-    setup_log_file(log_file_path)
-
-    install_except_hook()
-
-    load_config(ctx=ctx)
-
-    set_config_attr(ctx)
-
-    connection = beanstalk.connect()
-    beanstalk.watch_tube(connection, ctx.tube)
-    result_proc = None
-
-    if teuth_config.teuthology_path is None:
-        fetch_teuthology('master')
-    fetch_qa_suite('master')
-
-    keep_running = True
-    while keep_running:
-        # Check to see if we have a teuthology-results process hanging around
-        # and if so, read its return code so that it can exit.
-        if result_proc is not None and result_proc.poll() is not None:
-            log.debug("teuthology-results exited with code: %s",
-                      result_proc.returncode)
-            result_proc = None
-
-        if sentinel(restart_file_path):
-            restart()
-        elif sentinel(stop_file_path):
-            stop()
-
-        load_config()
-
-        job = connection.reserve(timeout=60)
-        if job is None:
-            continue
-
-        # bury the job so it won't be re-run if it fails
-        job.bury()
-        job_id = job.jid
-        log.info('Reserved job %d', job_id)
-        log.info('Config is: %s', job.body)
-        job_config = yaml.safe_load(job.body)
-        job_config['job_id'] = str(job_id)
-
-        if job_config.get('stop_worker'):
-            keep_running = False
-
-        try:
-            job_config, teuth_bin_path = prep_job(
-                job_config,
-                log_file_path,
-                ctx.archive_dir,
-            )
-            run_job(
-                job_config,
-                teuth_bin_path,
-                ctx.archive_dir,
-                ctx.verbose,
-            )
-        except SkipJob:
-            continue
-
-        # This try/except block is to keep the worker from dying when
-        # beanstalkc throws a SocketError
-        try:
-            job.delete()
-        except Exception:
-            log.exception("Saw exception while trying to delete job")
-
-
-def prep_job(job_config, log_file_path, archive_dir):
-    job_id = job_config['job_id']
-    safe_archive = safepath.munge(job_config['name'])
-    job_config['worker_log'] = log_file_path
-    archive_path_full = os.path.join(
-        archive_dir, safe_archive, str(job_id))
-    job_config['archive_path'] = archive_path_full
-
-    # If the teuthology branch was not specified, default to master and
-    # store that value.
-    teuthology_branch = job_config.get('teuthology_branch', 'master')
-    job_config['teuthology_branch'] = teuthology_branch
-    teuthology_sha1 = job_config.get('teuthology_sha1')
-    if not teuthology_sha1:
-        repo_url = build_git_url('teuthology', 'ceph')
-        teuthology_sha1 = ls_remote(repo_url, teuthology_branch)
-        if not teuthology_sha1:
-            reason = "Teuthology branch {} not found; marking job as dead".format(teuthology_branch)
-            log.error(reason)
-            report.try_push_job_info(
-                job_config,
-                dict(status='dead', failure_reason=reason)
-            )
-            raise SkipJob()
-        log.info('Using teuthology sha1 %s', teuthology_sha1)
-
-    try:
-        if teuth_config.teuthology_path is not None:
-            teuth_path = teuth_config.teuthology_path
-        else:
-            teuth_path = fetch_teuthology(branch=teuthology_branch,
-                                          commit=teuthology_sha1)
-        # For the teuthology tasks, we look for suite_branch, and if we
-        # don't get that, we look for branch, and fall back to 'master'.
-        # last-in-suite jobs don't have suite_branch or branch set.
-        ceph_branch = job_config.get('branch', 'master')
-        suite_branch = job_config.get('suite_branch', ceph_branch)
-        suite_sha1 = job_config.get('suite_sha1')
-        suite_repo = job_config.get('suite_repo')
-        if suite_repo:
-            teuth_config.ceph_qa_suite_git_url = suite_repo
-        job_config['suite_path'] = os.path.normpath(os.path.join(
-            fetch_qa_suite(suite_branch, suite_sha1),
-            job_config.get('suite_relpath', ''),
-        ))
-    except (BranchNotFoundError, CommitNotFoundError) as exc:
-        log.exception("Requested version not found; marking job as dead")
-        report.try_push_job_info(
-            job_config,
-            dict(status='dead', failure_reason=str(exc))
-        )
-        raise SkipJob()
-    except MaxWhileTries as exc:
-        log.exception("Failed to fetch or bootstrap; marking job as dead")
-        report.try_push_job_info(
-            job_config,
-            dict(status='dead', failure_reason=str(exc))
-        )
-        raise SkipJob()
-
-    teuth_bin_path = os.path.join(teuth_path, 'virtualenv', 'bin')
-    if not os.path.isdir(teuth_bin_path):
-        raise RuntimeError("teuthology branch %s at %s not bootstrapped!" %
-                           (teuthology_branch, teuth_bin_path))
-    return job_config, teuth_bin_path
-
-
-def run_job(job_config, teuth_bin_path, archive_dir, verbose):
-    safe_archive = safepath.munge(job_config['name'])
-    if job_config.get('first_in_suite') or job_config.get('last_in_suite'):
-        if teuth_config.results_server:
-            try:
-                report.try_delete_jobs(job_config['name'], job_config['job_id'])
-            except Exception as e:
-                log.warning("Unable to delete job %s, exception occurred: %s",
-                            job_config['job_id'], e)
-        suite_archive_dir = os.path.join(archive_dir, safe_archive)
-        safepath.makedirs('/', suite_archive_dir)
-        args = [
-            os.path.join(teuth_bin_path, 'teuthology-results'),
-            '--archive-dir', suite_archive_dir,
-            '--name', job_config['name'],
-        ]
-        if job_config.get('first_in_suite'):
-            log.info('Generating memo for %s', job_config['name'])
-            if job_config.get('seed'):
-                args.extend(['--seed', job_config['seed']])
-            if job_config.get('subset'):
-                args.extend(['--subset', job_config['subset']])
-        else:
-            log.info('Generating results for %s', job_config['name'])
-            timeout = job_config.get('results_timeout',
-                                     teuth_config.results_timeout)
-            args.extend(['--timeout', str(timeout)])
-            if job_config.get('email'):
-                args.extend(['--email', job_config['email']])
-        # Execute teuthology-results, passing 'preexec_fn=os.setpgrp' to
-        # make sure that it will continue to run if this worker process
-        # dies (e.g. because of a restart)
-        result_proc = subprocess.Popen(args=args, preexec_fn=os.setpgrp)
-        log.info("teuthology-results PID: %s", result_proc.pid)
-        return
-
-    log.info('Creating archive dir %s', job_config['archive_path'])
-    safepath.makedirs('/', job_config['archive_path'])
-    log.info('Running job %s', job_config['job_id'])
-
-    suite_path = job_config['suite_path']
-    arg = [
-        os.path.join(teuth_bin_path, 'teuthology'),
-    ]
-    # The following is for compatibility with older schedulers, from before we
-    # started merging the contents of job_config['config'] into job_config
-    # itself.
-    if 'config' in job_config:
-        inner_config = job_config.pop('config')
-        if not isinstance(inner_config, dict):
-            log.warn("run_job: job_config['config'] isn't a dict, it's a %s",
-                     str(type(inner_config)))
-        else:
-            job_config.update(inner_config)
-
-    if verbose or job_config['verbose']:
-        arg.append('-v')
-
-    arg.extend([
-        '--lock',
-        '--block',
-        '--owner', job_config['owner'],
-        '--archive', job_config['archive_path'],
-        '--name', job_config['name'],
-    ])
-    if job_config['description'] is not None:
-        arg.extend(['--description', job_config['description']])
-    arg.append('--')
-
-    with tempfile.NamedTemporaryFile(prefix='teuthology-worker.',
-                                     suffix='.tmp', mode='w+t') as tmp:
-        yaml.safe_dump(data=job_config, stream=tmp)
-        tmp.flush()
-        arg.append(tmp.name)
-        env = os.environ.copy()
-        python_path = env.get('PYTHONPATH', '')
-        python_path = ':'.join([suite_path, python_path]).strip(':')
-        env['PYTHONPATH'] = python_path
-        log.debug("Running: %s" % ' '.join(arg))
-        p = subprocess.Popen(args=arg, env=env)
-        log.info("Job archive: %s", job_config['archive_path'])
-        log.info("Job PID: %s", str(p.pid))
-
-        if teuth_config.results_server:
-            log.info("Running with watchdog")
-            try:
-                run_with_watchdog(p, job_config)
-            except Exception:
-                log.exception("run_with_watchdog had an unhandled exception")
-                raise
-        else:
-            log.info("Running without watchdog")
-            # This sleep() is to give the child time to start up and create the
-            # archive dir.
-            time.sleep(5)
-            symlink_worker_log(job_config['worker_log'],
-                               job_config['archive_path'])
-            p.wait()
-
-        if p.returncode != 0:
-            log.error('Child exited with code %d', p.returncode)
-        else:
-            log.info('Success!')
-
-
-def run_with_watchdog(process, job_config):
-    job_start_time = datetime.utcnow()
-
-    # Only push the information that's relevant to the watchdog, to save db
-    # load
-    job_info = dict(
-        name=job_config['name'],
-        job_id=job_config['job_id'],
-    )
-
-    # Sleep once outside of the loop to avoid double-posting jobs
-    time.sleep(teuth_config.watchdog_interval)
-    symlink_worker_log(job_config['worker_log'], job_config['archive_path'])
-    while process.poll() is None:
-        # Kill jobs that have been running longer than the global max
-        run_time = datetime.utcnow() - job_start_time
-        total_seconds = run_time.days * 60 * 60 * 24 + run_time.seconds
-        if total_seconds > teuth_config.max_job_time:
-            log.warning("Job ran longer than {max}s. Killing...".format(
-                max=teuth_config.max_job_time))
-            kill_job(job_info['name'], job_info['job_id'],
-                     teuth_config.archive_base, job_config['owner'])
-
-        # calling this without a status just updates the jobs updated time
-        report.try_push_job_info(job_info)
-        time.sleep(teuth_config.watchdog_interval)
-
-    # we no longer support testing theses old branches
-    assert(job_config.get('teuthology_branch') not in ('argonaut', 'bobtail',
-                                                       'cuttlefish', 'dumpling'))
-
-    # Let's make sure that paddles knows the job is finished. We don't know
-    # the status, but if it was a pass or fail it will have already been
-    # reported to paddles. In that case paddles ignores the 'dead' status.
-    # If the job was killed, paddles will use the 'dead' status.
-    report.try_push_job_info(job_info, dict(status='dead'))
-
-
-def symlink_worker_log(worker_log_path, archive_dir):
-    try:
-        log.debug("Worker log: %s", worker_log_path)
-        os.symlink(worker_log_path, os.path.join(archive_dir, 'worker.log'))
-    except Exception:
-        log.exception("Failed to symlink worker log")