teuthology 1.1.0__py3-none-any.whl → 1.2.1__py3-none-any.whl

Files changed (170)
  1. scripts/describe.py +1 -0
  2. scripts/dispatcher.py +55 -26
  3. scripts/exporter.py +18 -0
  4. scripts/lock.py +1 -1
  5. scripts/node_cleanup.py +58 -0
  6. scripts/openstack.py +9 -9
  7. scripts/results.py +12 -11
  8. scripts/schedule.py +4 -0
  9. scripts/suite.py +57 -16
  10. scripts/supervisor.py +44 -0
  11. scripts/update_inventory.py +10 -4
  12. teuthology/__init__.py +24 -26
  13. teuthology/beanstalk.py +4 -3
  14. teuthology/config.py +16 -6
  15. teuthology/contextutil.py +18 -14
  16. teuthology/describe_tests.py +25 -18
  17. teuthology/dispatcher/__init__.py +210 -35
  18. teuthology/dispatcher/supervisor.py +140 -58
  19. teuthology/exceptions.py +43 -0
  20. teuthology/exporter.py +347 -0
  21. teuthology/kill.py +76 -81
  22. teuthology/lock/cli.py +3 -3
  23. teuthology/lock/ops.py +135 -61
  24. teuthology/lock/query.py +61 -44
  25. teuthology/ls.py +1 -1
  26. teuthology/misc.py +61 -75
  27. teuthology/nuke/__init__.py +12 -353
  28. teuthology/openstack/__init__.py +4 -3
  29. teuthology/openstack/openstack-centos-7.0-user-data.txt +1 -1
  30. teuthology/openstack/openstack-centos-7.1-user-data.txt +1 -1
  31. teuthology/openstack/openstack-centos-7.2-user-data.txt +1 -1
  32. teuthology/openstack/openstack-debian-8.0-user-data.txt +1 -1
  33. teuthology/openstack/openstack-opensuse-42.1-user-data.txt +1 -1
  34. teuthology/openstack/openstack-teuthology.cron +0 -1
  35. teuthology/orchestra/cluster.py +49 -7
  36. teuthology/orchestra/connection.py +17 -4
  37. teuthology/orchestra/console.py +111 -50
  38. teuthology/orchestra/daemon/cephadmunit.py +15 -2
  39. teuthology/orchestra/daemon/state.py +8 -1
  40. teuthology/orchestra/daemon/systemd.py +4 -4
  41. teuthology/orchestra/opsys.py +30 -11
  42. teuthology/orchestra/remote.py +405 -338
  43. teuthology/orchestra/run.py +3 -3
  44. teuthology/packaging.py +19 -16
  45. teuthology/provision/__init__.py +30 -10
  46. teuthology/provision/cloud/openstack.py +12 -6
  47. teuthology/provision/cloud/util.py +1 -2
  48. teuthology/provision/downburst.py +4 -3
  49. teuthology/provision/fog.py +68 -20
  50. teuthology/provision/openstack.py +5 -4
  51. teuthology/provision/pelagos.py +1 -1
  52. teuthology/repo_utils.py +43 -13
  53. teuthology/report.py +57 -35
  54. teuthology/results.py +5 -3
  55. teuthology/run.py +13 -14
  56. teuthology/run_tasks.py +27 -43
  57. teuthology/schedule.py +4 -3
  58. teuthology/scrape.py +28 -22
  59. teuthology/suite/__init__.py +74 -45
  60. teuthology/suite/build_matrix.py +34 -24
  61. teuthology/suite/fragment-merge.lua +105 -0
  62. teuthology/suite/matrix.py +31 -2
  63. teuthology/suite/merge.py +175 -0
  64. teuthology/suite/placeholder.py +6 -9
  65. teuthology/suite/run.py +175 -100
  66. teuthology/suite/util.py +64 -218
  67. teuthology/task/__init__.py +1 -1
  68. teuthology/task/ansible.py +101 -32
  69. teuthology/task/buildpackages.py +2 -2
  70. teuthology/task/ceph_ansible.py +13 -6
  71. teuthology/task/cephmetrics.py +2 -1
  72. teuthology/task/clock.py +33 -14
  73. teuthology/task/exec.py +18 -0
  74. teuthology/task/hadoop.py +2 -2
  75. teuthology/task/install/__init__.py +29 -7
  76. teuthology/task/install/bin/adjust-ulimits +16 -0
  77. teuthology/task/install/bin/daemon-helper +114 -0
  78. teuthology/task/install/bin/stdin-killer +263 -0
  79. teuthology/task/install/deb.py +1 -1
  80. teuthology/task/install/rpm.py +17 -5
  81. teuthology/task/install/util.py +3 -3
  82. teuthology/task/internal/__init__.py +41 -10
  83. teuthology/task/internal/edit_sudoers.sh +10 -0
  84. teuthology/task/internal/lock_machines.py +2 -9
  85. teuthology/task/internal/redhat.py +31 -1
  86. teuthology/task/internal/syslog.py +31 -8
  87. teuthology/task/kernel.py +152 -145
  88. teuthology/task/lockfile.py +1 -1
  89. teuthology/task/mpi.py +10 -10
  90. teuthology/task/pcp.py +1 -1
  91. teuthology/task/selinux.py +16 -8
  92. teuthology/task/ssh_keys.py +4 -4
  93. teuthology/timer.py +3 -3
  94. teuthology/util/loggerfile.py +19 -0
  95. teuthology/util/scanner.py +159 -0
  96. teuthology/util/sentry.py +52 -0
  97. teuthology/util/time.py +52 -0
  98. teuthology-1.2.1.data/scripts/adjust-ulimits +16 -0
  99. teuthology-1.2.1.data/scripts/daemon-helper +114 -0
  100. teuthology-1.2.1.data/scripts/stdin-killer +263 -0
  101. teuthology-1.2.1.dist-info/METADATA +88 -0
  102. teuthology-1.2.1.dist-info/RECORD +168 -0
  103. {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/WHEEL +1 -1
  104. {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/entry_points.txt +3 -2
  105. scripts/nuke.py +0 -47
  106. scripts/worker.py +0 -37
  107. teuthology/lock/test/__init__.py +0 -0
  108. teuthology/lock/test/test_lock.py +0 -7
  109. teuthology/nuke/actions.py +0 -456
  110. teuthology/openstack/test/__init__.py +0 -0
  111. teuthology/openstack/test/openstack-integration.py +0 -286
  112. teuthology/openstack/test/test_config.py +0 -35
  113. teuthology/openstack/test/test_openstack.py +0 -1695
  114. teuthology/orchestra/test/__init__.py +0 -0
  115. teuthology/orchestra/test/integration/__init__.py +0 -0
  116. teuthology/orchestra/test/integration/test_integration.py +0 -94
  117. teuthology/orchestra/test/test_cluster.py +0 -240
  118. teuthology/orchestra/test/test_connection.py +0 -106
  119. teuthology/orchestra/test/test_console.py +0 -217
  120. teuthology/orchestra/test/test_opsys.py +0 -404
  121. teuthology/orchestra/test/test_remote.py +0 -185
  122. teuthology/orchestra/test/test_run.py +0 -286
  123. teuthology/orchestra/test/test_systemd.py +0 -54
  124. teuthology/orchestra/test/util.py +0 -12
  125. teuthology/task/tests/__init__.py +0 -110
  126. teuthology/task/tests/test_locking.py +0 -25
  127. teuthology/task/tests/test_run.py +0 -40
  128. teuthology/test/__init__.py +0 -0
  129. teuthology/test/fake_archive.py +0 -107
  130. teuthology/test/fake_fs.py +0 -92
  131. teuthology/test/integration/__init__.py +0 -0
  132. teuthology/test/integration/test_suite.py +0 -86
  133. teuthology/test/task/__init__.py +0 -205
  134. teuthology/test/task/test_ansible.py +0 -624
  135. teuthology/test/task/test_ceph_ansible.py +0 -176
  136. teuthology/test/task/test_console_log.py +0 -88
  137. teuthology/test/task/test_install.py +0 -337
  138. teuthology/test/task/test_internal.py +0 -57
  139. teuthology/test/task/test_kernel.py +0 -243
  140. teuthology/test/task/test_pcp.py +0 -379
  141. teuthology/test/task/test_selinux.py +0 -35
  142. teuthology/test/test_config.py +0 -189
  143. teuthology/test/test_contextutil.py +0 -68
  144. teuthology/test/test_describe_tests.py +0 -316
  145. teuthology/test/test_email_sleep_before_teardown.py +0 -81
  146. teuthology/test/test_exit.py +0 -97
  147. teuthology/test/test_get_distro.py +0 -47
  148. teuthology/test/test_get_distro_version.py +0 -47
  149. teuthology/test/test_get_multi_machine_types.py +0 -27
  150. teuthology/test/test_job_status.py +0 -60
  151. teuthology/test/test_ls.py +0 -48
  152. teuthology/test/test_misc.py +0 -391
  153. teuthology/test/test_nuke.py +0 -290
  154. teuthology/test/test_packaging.py +0 -763
  155. teuthology/test/test_parallel.py +0 -28
  156. teuthology/test/test_repo_utils.py +0 -225
  157. teuthology/test/test_report.py +0 -77
  158. teuthology/test/test_results.py +0 -155
  159. teuthology/test/test_run.py +0 -239
  160. teuthology/test/test_safepath.py +0 -55
  161. teuthology/test/test_schedule.py +0 -45
  162. teuthology/test/test_scrape.py +0 -167
  163. teuthology/test/test_timer.py +0 -80
  164. teuthology/test/test_vps_os_vers_parameter_checking.py +0 -84
  165. teuthology/test/test_worker.py +0 -303
  166. teuthology/worker.py +0 -354
  167. teuthology-1.1.0.dist-info/METADATA +0 -76
  168. teuthology-1.1.0.dist-info/RECORD +0 -213
  169. {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/LICENSE +0 -0
  170. {teuthology-1.1.0.dist-info → teuthology-1.2.1.dist-info}/top_level.txt +0 -0
teuthology/dispatcher/supervisor.py CHANGED
@@ -1,65 +1,79 @@
+import datetime
 import logging
 import os
 import subprocess
 import time
 import yaml
+import requests

-from datetime import datetime
+from urllib.parse import urljoin

-import teuthology
-from teuthology import report
-from teuthology import safepath
+from teuthology import exporter, dispatcher, kill, report, safepath
 from teuthology.config import config as teuth_config
-from teuthology.exceptions import SkipJob
+from teuthology.exceptions import SkipJob, MaxWhileTries
 from teuthology import setup_log_file, install_except_hook
-from teuthology.lock.ops import reimage_machines
 from teuthology.misc import get_user, archive_logs, compress_logs
 from teuthology.config import FakeNamespace
-from teuthology.job_status import get_status
-from teuthology.nuke import nuke
-from teuthology.kill import kill_job
-from teuthology.task.internal import add_remotes
+from teuthology.lock import ops as lock_ops
+from teuthology.task import internal
 from teuthology.misc import decanonicalize_hostname as shortname
 from teuthology.lock import query
+from teuthology.util import sentry

 log = logging.getLogger(__name__)


 def main(args):
-
-    verbose = args["--verbose"]
-    archive_dir = args["--archive-dir"]
-    teuth_bin_path = args["--bin-path"]
-    config_file_path = args["--job-config"]
-
-    with open(config_file_path, 'r') as config_file:
+    with open(args.job_config, 'r') as config_file:
         job_config = yaml.safe_load(config_file)

     loglevel = logging.INFO
-    if verbose:
+    if args.verbose:
         loglevel = logging.DEBUG
+    logging.getLogger().setLevel(loglevel)
     log.setLevel(loglevel)

     log_file_path = os.path.join(job_config['archive_path'],
                                  f"supervisor.{job_config['job_id']}.log")
     setup_log_file(log_file_path)
     install_except_hook()
+    try:
+        dispatcher.check_job_expiration(job_config)
+    except SkipJob:
+        return 0

     # reimage target machines before running the job
     if 'targets' in job_config:
-        reimage(job_config)
-        with open(config_file_path, 'w') as f:
+        node_count = len(job_config["targets"])
+        # If a job (e.g. from the nop suite) doesn't need nodes, avoid
+        # submitting a zero here.
+        if node_count:
+            with exporter.NodeReimagingTime().time(
+                machine_type=job_config["machine_type"],
+                node_count=node_count,
+            ):
+                reimage(job_config)
+        else:
+            reimage(job_config)
+        with open(args.job_config, 'w') as f:
             yaml.safe_dump(job_config, f, default_flow_style=False)

-    try:
-        run_job(
+    suite = job_config.get("suite")
+    if suite:
+        with exporter.JobTime().time(suite=suite):
+            return run_job(
+                job_config,
+                args.bin_path,
+                args.archive_dir,
+                args.verbose
+            )
+    else:
+        return run_job(
             job_config,
-            teuth_bin_path,
-            archive_dir,
-            verbose
+            args.bin_path,
+            args.archive_dir,
+            args.verbose
         )
-    except SkipJob:
-        return


 def run_job(job_config, teuth_bin_path, archive_dir, verbose):
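The rewritten main() above wraps reimaging and the job run in timer context managers from the new teuthology/exporter.py (added in this release but not reproduced in this hunk). As a rough sketch of what such a labeled timer could look like, assuming a prometheus_client-style backend; the metric name, labels, and class body below are illustrative, not the module's actual code:

```python
# Hypothetical sketch only; teuthology/exporter.py is not reproduced here.
import time
from contextlib import contextmanager

from prometheus_client import Summary  # assumed backing library


class NodeReimagingTime:
    # Illustrative metric; the real exporter may use different names/labels.
    metric = Summary(
        "teuthology_reimaging_duration_seconds",
        "Time spent reimaging test nodes",
        ["machine_type", "node_count"],
    )

    @contextmanager
    def time(self, machine_type, node_count):
        start = time.perf_counter()
        try:
            yield
        finally:
            self.metric.labels(
                machine_type=machine_type,
                node_count=str(node_count),
            ).observe(time.perf_counter() - start)
```

Used as in the hunk above: `with exporter.NodeReimagingTime().time(machine_type=job_config["machine_type"], node_count=node_count): reimage(job_config)`.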
@@ -77,6 +91,8 @@ def run_job(job_config, teuth_bin_path, archive_dir, verbose):
             args.extend(['--seed', job_config['seed']])
         if job_config.get('subset'):
             args.extend(['--subset', job_config['subset']])
+        if job_config.get('no_nested_subset'):
+            args.extend(['--no-nested-subset'])
     else:
         log.info('Generating results for %s', job_config['name'])
         timeout = job_config.get('results_timeout',
@@ -107,7 +123,7 @@ def run_job(job_config, teuth_bin_path, archive_dir, verbose):
     if 'config' in job_config:
         inner_config = job_config.pop('config')
         if not isinstance(inner_config, dict):
-            log.warn("run_job: job_config['config'] isn't a dict, it's a %s",
+            log.warning("run_job: job_config['config'] isn't a dict, it's a %s",
                      str(type(inner_config)))
         else:
             job_config.update(inner_config)
@@ -126,7 +142,11 @@ def run_job(job_config, teuth_bin_path, archive_dir, verbose):
     arg.extend(['--', job_archive])

     log.debug("Running: %s" % ' '.join(arg))
-    p = subprocess.Popen(args=arg)
+    p = subprocess.Popen(
+        args=arg,
+        stdout=subprocess.DEVNULL,
+        stderr=subprocess.DEVNULL,
+    )
     log.info("Job archive: %s", job_config['archive_path'])
     log.info("Job PID: %s", str(p.pid))

@@ -150,6 +170,49 @@ def run_job(job_config, teuth_bin_path, archive_dir, verbose):
         log.info('Success!')
     if 'targets' in job_config:
         unlock_targets(job_config)
+    return p.returncode
+
+def failure_is_reimage(failure_reason):
+    if not failure_reason:
+        return False
+    reimage_failure = "Error reimaging machines:"
+    if reimage_failure in failure_reason:
+        return True
+    else:
+        return False
+
+
+def check_for_reimage_failures_and_mark_down(targets, count=10):
+    # Grab paddles history of jobs in the machine
+    # and count the number of reimaging errors
+    # if it fails N times then mark the machine down
+    base_url = teuth_config.results_server
+    for k, _ in targets.items():
+        machine = k.split('@')[-1]
+        url = urljoin(
+            base_url,
+            '/nodes/{0}/jobs/?count={1}'.format(machine, count)
+        )
+        resp = requests.get(url)
+        jobs = resp.json()
+        if len(jobs) < count:
+            continue
+        reimage_failures = list(filter(
+            lambda j: failure_is_reimage(j['failure_reason']),
+            jobs
+        ))
+        if len(reimage_failures) < count:
+            continue
+        # Mark machine down
+        machine_name = shortname(k)
+        lock_ops.update_lock(
+            machine_name,
+            description='reimage failed {0} times'.format(count),
+            status='down',
+        )
+        log.error(
+            'Reimage failed {0} times ... marking machine down'.format(count)
+        )


 def reimage(job_config):
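The two new helpers above let the supervisor quarantine flaky hardware: paddles (the results server) is asked for the node's last `count` jobs, and only if every one of them failed with "Error reimaging machines:" is the node locked down. A purely illustrative sketch of the data shape this relies on; the hostname and job records below are invented:

```python
# Invented sample data; paddles is assumed to return a JSON list of job
# records that each carry a "failure_reason" field.
targets = {"ubuntu@smithi001.front.sepia.ceph.com": "ssh-ed25519 AAAA..."}

jobs = [  # e.g. GET <results_server>/nodes/smithi001.front.sepia.ceph.com/jobs/?count=10
    {"job_id": "7411001", "failure_reason": "Error reimaging machines: fog request timed out"},
    {"job_id": "7411000", "failure_reason": None},
]

reimage_failures = [
    j for j in jobs
    if j["failure_reason"] and "Error reimaging machines:" in j["failure_reason"]
]
# check_for_reimage_failures_and_mark_down(targets) only calls
# lock_ops.update_lock(..., status='down') when all `count` recent jobs are
# reimage failures; the sample above would leave the node up.
```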
@@ -160,12 +223,21 @@ def reimage(job_config):
     report.try_push_job_info(ctx.config, dict(status='waiting'))
     targets = job_config['targets']
     try:
-        reimaged = reimage_machines(ctx, targets, job_config['machine_type'])
+        reimaged = lock_ops.reimage_machines(ctx, targets, job_config['machine_type'])
     except Exception as e:
         log.exception('Reimaging error. Nuking machines...')
         # Reimage failures should map to the 'dead' status instead of 'fail'
-        report.try_push_job_info(ctx.config, dict(status='dead', failure_reason='Error reimaging machines: ' + str(e)))
-        nuke(ctx, True)
+        report.try_push_job_info(
+            ctx.config,
+            dict(status='dead', failure_reason='Error reimaging machines: ' + str(e))
+        )
+        # There isn't an actual task called "reimage", but it doesn't seem
+        # necessary to create a whole new Sentry tag for this.
+        ctx.summary = {
+            'sentry_event': sentry.report_error(job_config, e, task_name="reimage")
+        }
+        # Machine that fails to reimage after 10 times will be marked down
+        check_for_reimage_failures_and_mark_down(targets)
         raise
     ctx.config['targets'] = reimaged
     # change the status to running after the reimaging process
@@ -175,29 +247,30 @@ def reimage(job_config):
 def unlock_targets(job_config):
     serializer = report.ResultsSerializer(teuth_config.archive_base)
     job_info = serializer.job_info(job_config['name'], job_config['job_id'])
-    machine_status = query.get_statuses(job_info['targets'].keys())
-    # only unlock/nuke targets if locked in the first place
-    locked = [shortname(_['name'])
-              for _ in machine_status if _['locked']]
+    machine_statuses = query.get_statuses(job_info['targets'].keys())
+    # only unlock targets if locked and description matches
+    locked = []
+    for status in machine_statuses:
+        name = shortname(status['name'])
+        description = status['description']
+        if not status['locked']:
+            continue
+        if description != job_info['archive_path']:
+            log.warning(
+                "Was going to unlock %s but it was locked by another job: %s",
+                name, description
+            )
+            continue
+        locked.append(name)
     if not locked:
         return
-    job_status = get_status(job_info)
-    if job_status == 'pass' or \
-            (job_config.get('unlock_on_failure', False) and not job_config.get('nuke-on-error', False)):
+    if job_config.get("unlock_on_failure", True):
         log.info('Unlocking machines...')
-        fake_ctx = create_fake_context(job_config)
-        for machine in locked:
-            teuthology.lock.ops.unlock_one(fake_ctx,
-                                           machine, job_info['owner'],
-                                           job_info['archive_path'])
-    if job_status != 'pass' and job_config.get('nuke-on-error', False):
-        log.info('Nuking machines...')
-        fake_ctx = create_fake_context(job_config)
-        nuke(fake_ctx, True)
+        lock_ops.unlock_safe(locked, job_info["owner"], job_info["name"], job_info["job_id"])


 def run_with_watchdog(process, job_config):
-    job_start_time = datetime.utcnow()
+    job_start_time = datetime.datetime.now(datetime.timezone.utc)

     # Only push the information that's relevant to the watchdog, to save db
     # load
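This hunk also moves run_with_watchdog() from the naive datetime.utcnow() to timezone-aware timestamps (utcnow() is deprecated as of Python 3.12). The arithmetic is unchanged, a timedelta either way, but aware values carry an explicit UTC offset:

```python
import datetime

start = datetime.datetime.now(datetime.timezone.utc)
# ... watchdog loop body would run here ...
elapsed = datetime.datetime.now(datetime.timezone.utc) - start
total_seconds = elapsed.days * 60 * 60 * 24 + elapsed.seconds  # same formula as the loop below
print(start.isoformat())  # e.g. 2024-01-01T12:00:00+00:00 -- the offset is explicit
```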
@@ -211,7 +284,7 @@ def run_with_watchdog(process, job_config):
     hit_max_timeout = False
     while process.poll() is None:
         # Kill jobs that have been running longer than the global max
-        run_time = datetime.utcnow() - job_start_time
+        run_time = datetime.datetime.now(datetime.timezone.utc) - job_start_time
         total_seconds = run_time.days * 60 * 60 * 24 + run_time.seconds
         if total_seconds > teuth_config.max_job_time:
             hit_max_timeout = True
@@ -220,9 +293,11 @@ def run_with_watchdog(process, job_config):
             try:
                 # kill processes but do not unlock yet so we can save
                 # the logs, coredumps, etc.
-                kill_job(job_info['name'], job_info['job_id'],
-                         teuth_config.archive_base, job_config['owner'],
-                         save_logs=True)
+                kill.kill_job(
+                    job_info['name'], job_info['job_id'],
+                    teuth_config.archive_base, job_config['owner'],
+                    skip_unlock=True
+                )
             except Exception:
                 log.exception('Failed to kill job')

@@ -234,13 +309,18 @@ def run_with_watchdog(process, job_config):

             try:
                 # this time remove everything and unlock the machines
-                kill_job(job_info['name'], job_info['job_id'],
-                         teuth_config.archive_base, job_config['owner'])
+                kill.kill_job(
+                    job_info['name'], job_info['job_id'],
+                    teuth_config.archive_base, job_config['owner']
+                )
             except Exception:
                 log.exception('Failed to kill job and unlock machines')

         # calling this without a status just updates the jobs updated time
-        report.try_push_job_info(job_info)
+        try:
+            report.try_push_job_info(job_info)
+        except MaxWhileTries:
+            log.exception("Failed to report job status; ignoring")
         time.sleep(teuth_config.watchdog_interval)

     # we no longer support testing theses old branches
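In the hunk above, the watchdog now tolerates MaxWhileTries from report.try_push_job_info(). MaxWhileTries is what teuthology.contextutil.safe_while() raises once its retries run out; the report module presumably retries its HTTP calls that way, so the watchdog logs the failure and keeps running instead of dying when the results server is unreachable. A minimal illustration of that retry helper (the sleep/tries values are arbitrary):

```python
from teuthology.contextutil import safe_while
from teuthology.exceptions import MaxWhileTries

try:
    with safe_while(sleep=1, tries=3) as proceed:
        while proceed():
            pass  # an HTTP POST to the results server would go here
except MaxWhileTries:
    print("gave up after 3 tries")  # this is what the watchdog now catches and logs
```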
@@ -254,7 +334,8 @@ def run_with_watchdog(process, job_config):
     extra_info = dict(status='dead')
     if hit_max_timeout:
         extra_info['failure_reason'] = 'hit max job timeout'
-    report.try_push_job_info(job_info, extra_info)
+    if not (job_config.get('first_in_suite') or job_config.get('last_in_suite')):
+        report.try_push_job_info(job_info, extra_info)


 def create_fake_context(job_config, block=False):
@@ -270,6 +351,7 @@ def create_fake_context(job_config, block=False):
         'os_type': job_config.get('os_type', 'ubuntu'),
         'os_version': os_version,
         'name': job_config['name'],
+        'job_id': job_config['job_id'],
     }

     return FakeNamespace(ctx_args)
@@ -281,7 +363,7 @@ def transfer_archives(run_name, job_id, archive_base, job_config):

     if 'archive' in job_info:
         ctx = create_fake_context(job_config)
-        add_remotes(ctx, job_config)
+        internal.add_remotes(ctx, job_config)

         for log_type, log_path in job_info['archive'].items():
             if log_type == 'init':
teuthology/exceptions.py CHANGED
@@ -12,6 +12,18 @@ class BranchNotFoundError(ValueError):
             branch=self.branch, repo_str=repo_str)


+class BranchMismatchError(ValueError):
+    def __init__(self, branch, repo, reason=None):
+        self.branch = branch
+        self.repo = repo
+        self.reason = reason
+
+    def __str__(self):
+        msg = f"Cannot use branch {self.branch} with repo {self.repo}"
+        if self.reason:
+            msg = f"{msg} because {self.reason}"
+        return msg
+
 class CommitNotFoundError(ValueError):
     def __init__(self, commit, repo=None):
         self.commit = commit
@@ -93,6 +105,13 @@ class AnsibleFailedError(Exception):
             failures=self.failures,
         )

+    def fingerprint(self):
+        """
+        Sentry will use this to group events by their failure reasons, rather
+        than lumping all AnsibleFailedErrors together
+        """
+        return self.failures
+

 class CommandCrashedError(Exception):

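The new fingerprint() hook above gives the error reporter a way to override Sentry's default grouping. A hedged sketch of how a reporter might consume it with sentry_sdk; the real wiring lives in the new teuthology/util/sentry.py, which is not shown here and may differ:

```python
# Hypothetical consumer of the fingerprint() hook.
import sentry_sdk


def report_error(exc):
    with sentry_sdk.push_scope() as scope:
        if hasattr(exc, "fingerprint"):
            # AnsibleFailedError.fingerprint() returns its failures, so events
            # group by failure reason instead of all landing in one issue.
            fp = exc.fingerprint()
            scope.fingerprint = fp if isinstance(fp, list) else [str(fp)]
        return sentry_sdk.capture_exception(exc)
```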
@@ -193,3 +212,27 @@ class NoRemoteError(Exception):

     def __str__(self):
         return self.message
+
+
+class UnitTestError(Exception):
+    """
+    Exception thrown on unit test failure
+    """
+    def __init__(self, exitstatus=None, node=None, label=None, message=None):
+        self.exitstatus = exitstatus
+        self.node = node
+        self.label = label
+        self.message = message
+
+    def __str__(self):
+        prefix = "Unit test failed"
+        if self.label:
+            prefix += " ({label})".format(label=self.label)
+        if self.node:
+            prefix += " on {node}".format(node=self.node)
+        if self.exitstatus:
+            prefix += " with status {status}".format(status=self.exitstatus)
+        return "{prefix}: '{message}'".format(
+            prefix=prefix,
+            message=self.message,
+        )
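Given the __str__() above, a raise site would render like this (the values are invented for illustration):

```python
from teuthology.exceptions import UnitTestError

err = UnitTestError(
    exitstatus=1,
    node="smithi001",
    label="ceph unit tests",
    message="3 of 120 tests failed",
)
print(err)
# Unit test failed (ceph unit tests) on smithi001 with status 1: '3 of 120 tests failed'
```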