teuthology 1.1.0-py3-none-any.whl → 1.2.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (170)
  1. scripts/describe.py +1 -0
  2. scripts/dispatcher.py +55 -26
  3. scripts/exporter.py +18 -0
  4. scripts/lock.py +1 -1
  5. scripts/node_cleanup.py +58 -0
  6. scripts/openstack.py +9 -9
  7. scripts/results.py +12 -11
  8. scripts/schedule.py +4 -0
  9. scripts/suite.py +57 -16
  10. scripts/supervisor.py +44 -0
  11. scripts/update_inventory.py +10 -4
  12. teuthology/__init__.py +24 -26
  13. teuthology/beanstalk.py +4 -3
  14. teuthology/config.py +16 -6
  15. teuthology/contextutil.py +18 -14
  16. teuthology/describe_tests.py +25 -18
  17. teuthology/dispatcher/__init__.py +210 -35
  18. teuthology/dispatcher/supervisor.py +140 -58
  19. teuthology/exceptions.py +43 -0
  20. teuthology/exporter.py +347 -0
  21. teuthology/kill.py +76 -81
  22. teuthology/lock/cli.py +3 -3
  23. teuthology/lock/ops.py +135 -61
  24. teuthology/lock/query.py +61 -44
  25. teuthology/ls.py +1 -1
  26. teuthology/misc.py +61 -75
  27. teuthology/nuke/__init__.py +12 -353
  28. teuthology/openstack/__init__.py +4 -3
  29. teuthology/openstack/openstack-centos-7.0-user-data.txt +1 -1
  30. teuthology/openstack/openstack-centos-7.1-user-data.txt +1 -1
  31. teuthology/openstack/openstack-centos-7.2-user-data.txt +1 -1
  32. teuthology/openstack/openstack-debian-8.0-user-data.txt +1 -1
  33. teuthology/openstack/openstack-opensuse-42.1-user-data.txt +1 -1
  34. teuthology/openstack/openstack-teuthology.cron +0 -1
  35. teuthology/orchestra/cluster.py +49 -7
  36. teuthology/orchestra/connection.py +17 -4
  37. teuthology/orchestra/console.py +111 -50
  38. teuthology/orchestra/daemon/cephadmunit.py +15 -2
  39. teuthology/orchestra/daemon/state.py +8 -1
  40. teuthology/orchestra/daemon/systemd.py +4 -4
  41. teuthology/orchestra/opsys.py +30 -11
  42. teuthology/orchestra/remote.py +405 -338
  43. teuthology/orchestra/run.py +3 -3
  44. teuthology/packaging.py +19 -16
  45. teuthology/provision/__init__.py +30 -10
  46. teuthology/provision/cloud/openstack.py +12 -6
  47. teuthology/provision/cloud/util.py +1 -2
  48. teuthology/provision/downburst.py +4 -3
  49. teuthology/provision/fog.py +68 -20
  50. teuthology/provision/openstack.py +5 -4
  51. teuthology/provision/pelagos.py +1 -1
  52. teuthology/repo_utils.py +43 -13
  53. teuthology/report.py +57 -35
  54. teuthology/results.py +5 -3
  55. teuthology/run.py +13 -14
  56. teuthology/run_tasks.py +27 -43
  57. teuthology/schedule.py +4 -3
  58. teuthology/scrape.py +28 -22
  59. teuthology/suite/__init__.py +74 -45
  60. teuthology/suite/build_matrix.py +34 -24
  61. teuthology/suite/fragment-merge.lua +105 -0
  62. teuthology/suite/matrix.py +31 -2
  63. teuthology/suite/merge.py +175 -0
  64. teuthology/suite/placeholder.py +6 -9
  65. teuthology/suite/run.py +175 -100
  66. teuthology/suite/util.py +64 -218
  67. teuthology/task/__init__.py +1 -1
  68. teuthology/task/ansible.py +101 -32
  69. teuthology/task/buildpackages.py +2 -2
  70. teuthology/task/ceph_ansible.py +13 -6
  71. teuthology/task/cephmetrics.py +2 -1
  72. teuthology/task/clock.py +33 -14
  73. teuthology/task/exec.py +18 -0
  74. teuthology/task/hadoop.py +2 -2
  75. teuthology/task/install/__init__.py +29 -7
  76. teuthology/task/install/bin/adjust-ulimits +16 -0
  77. teuthology/task/install/bin/daemon-helper +114 -0
  78. teuthology/task/install/bin/stdin-killer +263 -0
  79. teuthology/task/install/deb.py +1 -1
  80. teuthology/task/install/rpm.py +17 -5
  81. teuthology/task/install/util.py +3 -3
  82. teuthology/task/internal/__init__.py +41 -10
  83. teuthology/task/internal/edit_sudoers.sh +10 -0
  84. teuthology/task/internal/lock_machines.py +2 -9
  85. teuthology/task/internal/redhat.py +31 -1
  86. teuthology/task/internal/syslog.py +31 -8
  87. teuthology/task/kernel.py +152 -145
  88. teuthology/task/lockfile.py +1 -1
  89. teuthology/task/mpi.py +10 -10
  90. teuthology/task/pcp.py +1 -1
  91. teuthology/task/selinux.py +16 -8
  92. teuthology/task/ssh_keys.py +4 -4
  93. teuthology/timer.py +3 -3
  94. teuthology/util/loggerfile.py +19 -0
  95. teuthology/util/scanner.py +159 -0
  96. teuthology/util/sentry.py +52 -0
  97. teuthology/util/time.py +52 -0
  98. teuthology-1.2.1.data/scripts/adjust-ulimits +16 -0
  99. teuthology-1.2.1.data/scripts/daemon-helper +114 -0
  100. teuthology-1.2.1.data/scripts/stdin-killer +263 -0
  101. teuthology-1.2.1.dist-info/METADATA +88 -0
  102. teuthology-1.2.1.dist-info/RECORD +168 -0
  103. {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/WHEEL +1 -1
  104. {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/entry_points.txt +3 -2
  105. scripts/nuke.py +0 -47
  106. scripts/worker.py +0 -37
  107. teuthology/lock/test/__init__.py +0 -0
  108. teuthology/lock/test/test_lock.py +0 -7
  109. teuthology/nuke/actions.py +0 -456
  110. teuthology/openstack/test/__init__.py +0 -0
  111. teuthology/openstack/test/openstack-integration.py +0 -286
  112. teuthology/openstack/test/test_config.py +0 -35
  113. teuthology/openstack/test/test_openstack.py +0 -1695
  114. teuthology/orchestra/test/__init__.py +0 -0
  115. teuthology/orchestra/test/integration/__init__.py +0 -0
  116. teuthology/orchestra/test/integration/test_integration.py +0 -94
  117. teuthology/orchestra/test/test_cluster.py +0 -240
  118. teuthology/orchestra/test/test_connection.py +0 -106
  119. teuthology/orchestra/test/test_console.py +0 -217
  120. teuthology/orchestra/test/test_opsys.py +0 -404
  121. teuthology/orchestra/test/test_remote.py +0 -185
  122. teuthology/orchestra/test/test_run.py +0 -286
  123. teuthology/orchestra/test/test_systemd.py +0 -54
  124. teuthology/orchestra/test/util.py +0 -12
  125. teuthology/task/tests/__init__.py +0 -110
  126. teuthology/task/tests/test_locking.py +0 -25
  127. teuthology/task/tests/test_run.py +0 -40
  128. teuthology/test/__init__.py +0 -0
  129. teuthology/test/fake_archive.py +0 -107
  130. teuthology/test/fake_fs.py +0 -92
  131. teuthology/test/integration/__init__.py +0 -0
  132. teuthology/test/integration/test_suite.py +0 -86
  133. teuthology/test/task/__init__.py +0 -205
  134. teuthology/test/task/test_ansible.py +0 -624
  135. teuthology/test/task/test_ceph_ansible.py +0 -176
  136. teuthology/test/task/test_console_log.py +0 -88
  137. teuthology/test/task/test_install.py +0 -337
  138. teuthology/test/task/test_internal.py +0 -57
  139. teuthology/test/task/test_kernel.py +0 -243
  140. teuthology/test/task/test_pcp.py +0 -379
  141. teuthology/test/task/test_selinux.py +0 -35
  142. teuthology/test/test_config.py +0 -189
  143. teuthology/test/test_contextutil.py +0 -68
  144. teuthology/test/test_describe_tests.py +0 -316
  145. teuthology/test/test_email_sleep_before_teardown.py +0 -81
  146. teuthology/test/test_exit.py +0 -97
  147. teuthology/test/test_get_distro.py +0 -47
  148. teuthology/test/test_get_distro_version.py +0 -47
  149. teuthology/test/test_get_multi_machine_types.py +0 -27
  150. teuthology/test/test_job_status.py +0 -60
  151. teuthology/test/test_ls.py +0 -48
  152. teuthology/test/test_misc.py +0 -391
  153. teuthology/test/test_nuke.py +0 -290
  154. teuthology/test/test_packaging.py +0 -763
  155. teuthology/test/test_parallel.py +0 -28
  156. teuthology/test/test_repo_utils.py +0 -225
  157. teuthology/test/test_report.py +0 -77
  158. teuthology/test/test_results.py +0 -155
  159. teuthology/test/test_run.py +0 -239
  160. teuthology/test/test_safepath.py +0 -55
  161. teuthology/test/test_schedule.py +0 -45
  162. teuthology/test/test_scrape.py +0 -167
  163. teuthology/test/test_timer.py +0 -80
  164. teuthology/test/test_vps_os_vers_parameter_checking.py +0 -84
  165. teuthology/test/test_worker.py +0 -303
  166. teuthology/worker.py +0 -354
  167. teuthology-1.1.0.dist-info/METADATA +0 -76
  168. teuthology-1.1.0.dist-info/RECORD +0 -213
  169. {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/LICENSE +0 -0
  170. {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/top_level.txt +0 -0
teuthology/orchestra/test/test_run.py
@@ -1,286 +0,0 @@
- from io import BytesIO
-
- import paramiko
- import socket
-
- from mock import MagicMock, patch
- from pytest import raises
-
- from teuthology.orchestra import run
- from teuthology.exceptions import (CommandCrashedError, CommandFailedError,
-                                    ConnectionLostError)
-
- def set_buffer_contents(buf, contents):
-     buf.seek(0)
-     if isinstance(contents, bytes):
-         buf.write(contents)
-     elif isinstance(contents, (list, tuple)):
-         buf.writelines(contents)
-     elif isinstance(contents, str):
-         buf.write(contents.encode())
-     else:
-         raise TypeError(
-             "%s is a %s; should be a byte string, list or tuple" % (
-                 contents, type(contents)
-             )
-         )
-     buf.seek(0)
-
-
- class TestRun(object):
-     def setup(self):
-         self.start_patchers()
-
-     def teardown(self):
-         self.stop_patchers()
-
-     def start_patchers(self):
-         self.m_remote_process = MagicMock(wraps=run.RemoteProcess)
-         self.patcher_remote_proc = patch(
-             'teuthology.orchestra.run.RemoteProcess',
-             self.m_remote_process,
-         )
-         self.m_channel = MagicMock(spec=paramiko.Channel)()
-         """
-         self.m_channelfile = MagicMock(wraps=paramiko.ChannelFile)
-         self.m_stdin_buf = self.m_channelfile(self.m_channel())
-         self.m_stdout_buf = self.m_channelfile(self.m_channel())
-         self.m_stderr_buf = self.m_channelfile(self.m_channel())
-         """
-         class M_ChannelFile(BytesIO):
-             channel = MagicMock(spec=paramiko.Channel)()
-
-         self.m_channelfile = M_ChannelFile
-         self.m_stdin_buf = self.m_channelfile()
-         self.m_stdout_buf = self.m_channelfile()
-         self.m_stderr_buf = self.m_channelfile()
-         self.m_ssh = MagicMock()
-         self.m_ssh.exec_command.return_value = (
-             self.m_stdin_buf,
-             self.m_stdout_buf,
-             self.m_stderr_buf,
-         )
-         self.m_transport = MagicMock()
-         self.m_transport.getpeername.return_value = ('name', 22)
-         self.m_ssh.get_transport.return_value = self.m_transport
-         self.patcher_ssh = patch(
-             'teuthology.orchestra.connection.paramiko.SSHClient',
-             self.m_ssh,
-         )
-         self.patcher_ssh.start()
-         # Tests must start this if they wish to use it
-         # self.patcher_remote_proc.start()
-
-     def stop_patchers(self):
-         # If this patcher wasn't started, it's ok
-         try:
-             self.patcher_remote_proc.stop()
-         except RuntimeError:
-             pass
-         self.patcher_ssh.stop()
-
-     def test_exitstatus(self):
-         self.m_stdout_buf.channel.recv_exit_status.return_value = 0
-         proc = run.run(
-             client=self.m_ssh,
-             args=['foo', 'bar baz'],
-         )
-         assert proc.exitstatus == 0
-
-     def test_run_cwd(self):
-         self.m_stdout_buf.channel.recv_exit_status.return_value = 0
-         run.run(
-             client=self.m_ssh,
-             args=['foo_bar_baz'],
-             cwd='/cwd/test',
-         )
-         self.m_ssh.exec_command.assert_called_with('(cd /cwd/test && exec foo_bar_baz)')
-
-     def test_capture_stdout(self):
-         output = 'foo\nbar'
-         set_buffer_contents(self.m_stdout_buf, output)
-         self.m_stdout_buf.channel.recv_exit_status.return_value = 0
-         stdout = BytesIO()
-         proc = run.run(
-             client=self.m_ssh,
-             args=['foo', 'bar baz'],
-             stdout=stdout,
-         )
-         assert proc.stdout is stdout
-         assert proc.stdout.read().decode() == output
-         assert proc.stdout.getvalue().decode() == output
-
-     def test_capture_stderr_newline(self):
-         output = 'foo\nbar\n'
-         set_buffer_contents(self.m_stderr_buf, output)
-         self.m_stderr_buf.channel.recv_exit_status.return_value = 0
-         stderr = BytesIO()
-         proc = run.run(
-             client=self.m_ssh,
-             args=['foo', 'bar baz'],
-             stderr=stderr,
-         )
-         assert proc.stderr is stderr
-         assert proc.stderr.read().decode() == output
-         assert proc.stderr.getvalue().decode() == output
-
-     def test_status_bad(self):
-         self.m_stdout_buf.channel.recv_exit_status.return_value = 42
-         with raises(CommandFailedError) as exc:
-             run.run(
-                 client=self.m_ssh,
-                 args=['foo'],
-             )
-         assert str(exc.value) == "Command failed on name with status 42: 'foo'"
-
-     def test_status_bad_nocheck(self):
-         self.m_stdout_buf.channel.recv_exit_status.return_value = 42
-         proc = run.run(
-             client=self.m_ssh,
-             args=['foo'],
-             check_status=False,
-         )
-         assert proc.exitstatus == 42
-
-     def test_status_crash(self):
-         self.m_stdout_buf.channel.recv_exit_status.return_value = -1
-         with raises(CommandCrashedError) as exc:
-             run.run(
-                 client=self.m_ssh,
-                 args=['foo'],
-             )
-         assert str(exc.value) == "Command crashed: 'foo'"
-
-     def test_status_crash_nocheck(self):
-         self.m_stdout_buf.channel.recv_exit_status.return_value = -1
-         proc = run.run(
-             client=self.m_ssh,
-             args=['foo'],
-             check_status=False,
-         )
-         assert proc.exitstatus == -1
-
-     def test_status_lost(self):
-         m_transport = MagicMock()
-         m_transport.getpeername.return_value = ('name', 22)
-         m_transport.is_active.return_value = False
-         self.m_stdout_buf.channel.recv_exit_status.return_value = -1
-         self.m_ssh.get_transport.return_value = m_transport
-         with raises(ConnectionLostError) as exc:
-             run.run(
-                 client=self.m_ssh,
-                 args=['foo'],
-             )
-         assert str(exc.value) == "SSH connection to name was lost: 'foo'"
-
-     def test_status_lost_socket(self):
-         m_transport = MagicMock()
-         m_transport.getpeername.side_effect = socket.error
-         self.m_ssh.get_transport.return_value = m_transport
-         with raises(ConnectionLostError) as exc:
-             run.run(
-                 client=self.m_ssh,
-                 args=['foo'],
-             )
-         assert str(exc.value) == "SSH connection was lost: 'foo'"
-
-     def test_status_lost_nocheck(self):
-         m_transport = MagicMock()
-         m_transport.getpeername.return_value = ('name', 22)
-         m_transport.is_active.return_value = False
-         self.m_stdout_buf.channel.recv_exit_status.return_value = -1
-         self.m_ssh.get_transport.return_value = m_transport
-         proc = run.run(
-             client=self.m_ssh,
-             args=['foo'],
-             check_status=False,
-         )
-         assert proc.exitstatus == -1
-
-     def test_status_bad_nowait(self):
-         self.m_stdout_buf.channel.recv_exit_status.return_value = 42
-         proc = run.run(
-             client=self.m_ssh,
-             args=['foo'],
-             wait=False,
-         )
-         with raises(CommandFailedError) as exc:
-             proc.wait()
-         assert proc.returncode == 42
-         assert str(exc.value) == "Command failed on name with status 42: 'foo'"
-
-     def test_stdin_pipe(self):
-         self.m_stdout_buf.channel.recv_exit_status.return_value = 0
-         proc = run.run(
-             client=self.m_ssh,
-             args=['foo'],
-             stdin=run.PIPE,
-             wait=False
-         )
-         assert proc.poll() == 0
-         code = proc.wait()
-         assert code == 0
-         assert proc.exitstatus == 0
-
-     def test_stdout_pipe(self):
-         self.m_stdout_buf.channel.recv_exit_status.return_value = 0
-         lines = [b'one\n', b'two', b'']
-         set_buffer_contents(self.m_stdout_buf, lines)
-         proc = run.run(
-             client=self.m_ssh,
-             args=['foo'],
-             stdout=run.PIPE,
-             wait=False
-         )
-         assert proc.poll() == 0
-         assert proc.stdout.readline() == lines[0]
-         assert proc.stdout.readline() == lines[1]
-         assert proc.stdout.readline() == lines[2]
-         code = proc.wait()
-         assert code == 0
-         assert proc.exitstatus == 0
-
-     def test_stderr_pipe(self):
-         self.m_stdout_buf.channel.recv_exit_status.return_value = 0
-         lines = [b'one\n', b'two', b'']
-         set_buffer_contents(self.m_stderr_buf, lines)
-         proc = run.run(
-             client=self.m_ssh,
-             args=['foo'],
-             stderr=run.PIPE,
-             wait=False
-         )
-         assert proc.poll() == 0
-         assert proc.stderr.readline() == lines[0]
-         assert proc.stderr.readline() == lines[1]
-         assert proc.stderr.readline() == lines[2]
-         code = proc.wait()
-         assert code == 0
-         assert proc.exitstatus == 0
-
-     def test_copy_and_close(self):
-         run.copy_and_close(None, MagicMock())
-         run.copy_and_close('', MagicMock())
-         run.copy_and_close(b'', MagicMock())
-
-
- class TestQuote(object):
-     def test_quote_simple(self):
-         got = run.quote(['a b', ' c', 'd e '])
-         assert got == "'a b' ' c' 'd e '"
-
-     def test_quote_and_quote(self):
-         got = run.quote(['echo', 'this && is embedded', '&&',
-                          'that was standalone'])
-         assert got == "echo 'this && is embedded' '&&' 'that was standalone'"
-
-     def test_quote_and_raw(self):
-         got = run.quote(['true', run.Raw('&&'), 'echo', 'yay'])
-         assert got == "true && echo yay"
-
-
- class TestRaw(object):
-     def test_eq(self):
-         str_ = "I am a raw something or other"
-         raw = run.Raw(str_)
-         assert raw == run.Raw(str_)
teuthology/orchestra/test/test_systemd.py
@@ -1,54 +0,0 @@
- import argparse
- import os
-
- from logging import debug
- from teuthology import misc
- from teuthology.orchestra import cluster
- from teuthology.orchestra.run import quote
- from teuthology.orchestra.daemon.group import DaemonGroup
- import subprocess
-
-
- class FakeRemote(object):
-     pass
-
-
- def test_pid():
-     ctx = argparse.Namespace()
-     ctx.daemons = DaemonGroup(use_systemd=True)
-     remote = FakeRemote()
-
-     ps_ef_output_path = os.path.join(
-         os.path.dirname(__file__),
-         "files/daemon-systemdstate-pid-ps-ef.output"
-     )
-
-     # patching ps -ef command output using a file
-     def sh(args):
-         args[0:2] = ["cat", ps_ef_output_path]
-         debug(args)
-         return subprocess.getoutput(quote(args))
-
-     remote.sh = sh
-     remote.init_system = 'systemd'
-     remote.shortname = 'host1'
-
-     ctx.cluster = cluster.Cluster(
-         remotes=[
-             (remote, ['rgw.0', 'mon.a', 'mgr.a', 'mds.a', 'osd.0'])
-         ],
-     )
-
-     for remote, roles in ctx.cluster.remotes.items():
-         for role in roles:
-             _, rol, id_ = misc.split_role(role)
-             if any(rol.startswith(x) for x in ['mon', 'mgr', 'mds']):
-                 ctx.daemons.register_daemon(remote, rol, remote.shortname)
-             else:
-                 ctx.daemons.register_daemon(remote, rol, id_)
-
-     for _, daemons in ctx.daemons.daemons.items():
-         for daemon in daemons.values():
-             pid = daemon.pid
-             debug(pid)
-             assert pid
teuthology/orchestra/test/util.py
@@ -1,12 +0,0 @@
- def assert_raises(excClass, callableObj, *args, **kwargs):
-     """
-     Like unittest.TestCase.assertRaises, but returns the exception.
-     """
-     try:
-         callableObj(*args, **kwargs)
-     except excClass as e:
-         return e
-     else:
-         if hasattr(excClass,'__name__'): excName = excClass.__name__
-         else: excName = str(excClass)
-         raise AssertionError("%s not raised" % excName)
teuthology/task/tests/__init__.py
@@ -1,110 +0,0 @@
- """
- This task is used to integration test teuthology. Including this
- task in your yaml config will execute pytest which finds any tests in
- the current directory. Each test that is discovered will be passed the
- teuthology ctx and config args that each teuthology task usually gets.
- This allows the tests to operate against the cluster.
-
- An example::
-
-     tasks
-     - tests:
-
- """
- import logging
- import pytest
-
-
- log = logging.getLogger(__name__)
-
-
- @pytest.fixture
- def ctx():
-     return {}
-
-
- @pytest.fixture
- def config():
-     return []
-
-
- class TeuthologyContextPlugin(object):
-     def __init__(self, ctx, config):
-         self.ctx = ctx
-         self.config = config
-         self.failures = list()
-
-     # this is pytest hook for generating tests with custom parameters
-     def pytest_generate_tests(self, metafunc):
-         # pass the teuthology ctx and config to each test method
-         metafunc.parametrize(["ctx", "config"], [(self.ctx, self.config),])
-
-     @pytest.mark.trylast
-     def pytest_configure(self, config):
-         # removes the default pytest TerminalReporter
-         # this fixes failures with scheduled jobs; when run by a worker
-         # there is no terminal to report to and pytest dies
-         standard_reporter = config.pluginmanager.getplugin('terminalreporter')
-         config.pluginmanager.unregister(standard_reporter)
-         log.info("removing pytest terminal reporter")
-
-     # log the outcome of each test
-     def pytest_runtest_makereport(self, __multicall__, item, call):
-         report = __multicall__.execute()
-
-         # after the test has been called, get it's report and log it
-         if call.when == 'call':
-             # item.location[0] is a slash delimeted path to the test file
-             # being ran. We only want the portion after teuthology.task.tests
-             test_path = item.location[0].replace("/", ".").split(".")
-             test_path = ".".join(test_path[4:-1])
-             # removes the string '[ctx0, config0]' after the test name
-             test_name = item.location[2].split("[")[0]
-             name = "{path}:{name}".format(path=test_path, name=test_name)
-             if report.passed:
-                 log.info("{name} Passed".format(name=name))
-             elif report.skipped:
-                 log.info("{name} {info}".format(
-                     name=name,
-                     info=call.excinfo.exconly()
-                 ))
-             else:
-                 # TODO: figure out a way to log the traceback
-                 log.error("{name} Failed:\n {info}".format(
-                     name=name,
-                     info=call.excinfo.exconly()
-                 ))
-                 failure = "{name}: {err}".format(
-                     name=name,
-                     err=call.excinfo.exconly().replace("\n", "")
-                 )
-                 self.failures.append(failure)
-                 self.ctx.summary['failure_reason'] = self.failures
-
-         return report
-
-
- def task(ctx, config):
-     """
-     Use pytest to recurse through this directory, finding any tests
-     and then executing them with the teuthology ctx and config args.
-     Your tests must follow standard pytest conventions to be discovered.
-     """
-     try:
-         status = pytest.main(
-             args=[
-                 '-q',
-                 '--pyargs', __name__
-             ],
-             plugins=[TeuthologyContextPlugin(ctx, config)]
-         )
-     except Exception:
-         log.exception("Saw failure running pytest")
-         ctx.summary["status"] = "dead"
-     else:
-         if status == 0:
-             log.info("OK. All tests passed!")
-             ctx.summary["status"] = "pass"
-         else:
-             log.error("FAIL. Saw test failures...")
-             ctx.summary["status"] = "fail"
teuthology/task/tests/test_locking.py
@@ -1,25 +0,0 @@
- import pytest
-
-
- class TestLocking(object):
-
-     def test_correct_os_type(self, ctx, config):
-         os_type = ctx.config.get("os_type")
-         if os_type is None:
-             pytest.skip('os_type was not defined')
-         for remote in ctx.cluster.remotes.keys():
-             assert remote.os.name == os_type
-
-     def test_correct_os_version(self, ctx, config):
-         os_version = ctx.config.get("os_version")
-         if os_version is None:
-             pytest.skip('os_version was not defined')
-         if ctx.config.get("os_type") == "debian":
-             pytest.skip('known issue with debian versions; see: issue #10878')
-         for remote in ctx.cluster.remotes.keys():
-             assert remote.inventory_info['os_version'] == os_version
-
-     def test_correct_machine_type(self, ctx, config):
-         machine_type = ctx.machine_type
-         for remote in ctx.cluster.remotes.keys():
-             assert remote.machine_type in machine_type
teuthology/task/tests/test_run.py
@@ -1,40 +0,0 @@
- import logging
- import pytest
-
- from StringIO import StringIO
-
- from teuthology.exceptions import CommandFailedError
-
- log = logging.getLogger(__name__)
-
-
- class TestRun(object):
-     """
-     Tests to see if we can make remote procedure calls to the current cluster
-     """
-
-     def test_command_failed_label(self, ctx, config):
-         result = ""
-         try:
-             ctx.cluster.run(
-                 args=["python", "-c", "assert False"],
-                 label="working as expected, nothing to see here"
-             )
-         except CommandFailedError as e:
-             result = str(e)
-
-         assert "working as expected" in result
-
-     def test_command_failed_no_label(self, ctx, config):
-         with pytest.raises(CommandFailedError):
-             ctx.cluster.run(
-                 args=["python", "-c", "assert False"],
-             )
-
-     def test_command_success(self, ctx, config):
-         result = StringIO()
-         ctx.cluster.run(
-             args=["python", "-c", "print('hi')"],
-             stdout=result
-         )
-         assert result.getvalue().strip() == "hi"
teuthology/test/__init__.py
File without changes
teuthology/test/fake_archive.py
@@ -1,107 +0,0 @@
- import os
- import shutil
- import yaml
- import random
-
-
- class FakeArchive(object):
-     def __init__(self, archive_base="./test_archive"):
-         self.archive_base = archive_base
-
-     def get_random_metadata(self, run_name, job_id=None, hung=False):
-         """
-         Generate a random info dict for a fake job. If 'hung' is not True, also
-         generate a summary dict.
-
-         :param run_name: Run name e.g. 'test_foo'
-         :param job_id: Job ID e.g. '12345'
-         :param hung: Simulate a hung job e.g. don't return a summary.yaml
-         :return: A dict with keys 'job_id', 'info' and possibly
-                  'summary', with corresponding values
-         """
-         rand = random.Random()
-
-         description = 'description for job with id %s' % job_id
-         owner = 'job@owner'
-         duration = rand.randint(1, 36000)
-         pid = rand.randint(1000, 99999)
-         job_id = rand.randint(1, 99999)
-
-         info = {
-             'description': description,
-             'job_id': job_id,
-             'run_name': run_name,
-             'owner': owner,
-             'pid': pid,
-         }
-
-         metadata = {
-             'info': info,
-             'job_id': job_id,
-         }
-
-         if not hung:
-             success = True if rand.randint(0, 1) != 1 else False
-
-             summary = {
-                 'description': description,
-                 'duration': duration,
-                 'owner': owner,
-                 'success': success,
-             }
-
-             if not success:
-                 summary['failure_reason'] = 'Failure reason!'
-             metadata['summary'] = summary
-
-         return metadata
-
-     def setup(self):
-         if os.path.exists(self.archive_base):
-             shutil.rmtree(self.archive_base)
-         os.mkdir(self.archive_base)
-
-     def teardown(self):
-         shutil.rmtree(self.archive_base)
-
-     def populate_archive(self, run_name, jobs):
-         run_archive_dir = os.path.join(self.archive_base, run_name)
-         os.mkdir(run_archive_dir)
-         for job in jobs:
-             archive_dir = os.path.join(run_archive_dir, str(job['job_id']))
-             os.mkdir(archive_dir)
-
-             with open(os.path.join(archive_dir, 'info.yaml'), 'w') as yfile:
-                 yaml.safe_dump(job['info'], yfile)
-
-             if 'summary' in job:
-                 summary_path = os.path.join(archive_dir, 'summary.yaml')
-                 with open(summary_path, 'w') as yfile:
-                     yaml.safe_dump(job['summary'], yfile)
-
-     def create_fake_run(self, run_name, job_count, yaml_path, num_hung=0):
-         """
-         Creates a fake run using run_name. Uses the YAML specified for each
-         job's config.yaml
-
-         Returns a list of job_ids
-         """
-         assert os.path.exists(yaml_path)
-         assert job_count > 0
-         jobs = []
-         made_hung = 0
-         for i in range(job_count):
-             if made_hung < num_hung:
-                 jobs.append(self.get_random_metadata(run_name, hung=True))
-                 made_hung += 1
-             else:
-                 jobs.append(self.get_random_metadata(run_name, hung=False))
-                 #job_config = yaml.safe_load(yaml_path)
-         self.populate_archive(run_name, jobs)
-         for job in jobs:
-             job_id = job['job_id']
-             job_yaml_path = os.path.join(self.archive_base, run_name,
-                                          str(job_id), 'config.yaml')
-             shutil.copyfile(yaml_path, job_yaml_path)
-         return jobs
-