cocotb 1.9.2__cp36-cp36m-win_amd64.whl → 2.0.0b1__cp36-cp36m-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (151)
  1. cocotb/{ANSI.py → _ANSI.py} +5 -25
  2. cocotb/__init__.py +76 -332
  3. cocotb/_base_triggers.py +513 -0
  4. cocotb/_bridge.py +187 -0
  5. cocotb/_decorators.py +515 -0
  6. cocotb/_deprecation.py +3 -3
  7. cocotb/_exceptions.py +7 -0
  8. cocotb/_extended_awaitables.py +419 -0
  9. cocotb/_gpi_triggers.py +382 -0
  10. cocotb/_init.py +295 -0
  11. cocotb/_outcomes.py +54 -0
  12. cocotb/_profiling.py +46 -0
  13. cocotb/_py_compat.py +100 -29
  14. cocotb/_scheduler.py +454 -0
  15. cocotb/_test.py +245 -0
  16. cocotb/_test_factory.py +309 -0
  17. cocotb/_test_functions.py +42 -0
  18. cocotb/_typing.py +7 -0
  19. cocotb/_utils.py +296 -0
  20. cocotb/_version.py +3 -7
  21. cocotb/_xunit_reporter.py +66 -0
  22. cocotb/clock.py +271 -108
  23. cocotb/handle.py +1342 -795
  24. cocotb/libs/cocotb.dll +0 -0
  25. cocotb/libs/cocotb.exp +0 -0
  26. cocotb/libs/cocotb.lib +0 -0
  27. cocotb/libs/cocotbfli_modelsim.dll +0 -0
  28. cocotb/libs/cocotbfli_modelsim.exp +0 -0
  29. cocotb/libs/cocotbfli_modelsim.lib +0 -0
  30. cocotb/libs/cocotbutils.dll +0 -0
  31. cocotb/libs/cocotbutils.exp +0 -0
  32. cocotb/libs/cocotbutils.lib +0 -0
  33. cocotb/libs/cocotbvhpi_aldec.dll +0 -0
  34. cocotb/libs/cocotbvhpi_aldec.exp +0 -0
  35. cocotb/libs/cocotbvhpi_aldec.lib +0 -0
  36. cocotb/libs/cocotbvhpi_modelsim.dll +0 -0
  37. cocotb/libs/cocotbvhpi_modelsim.exp +0 -0
  38. cocotb/libs/cocotbvhpi_modelsim.lib +0 -0
  39. cocotb/libs/cocotbvpi_aldec.dll +0 -0
  40. cocotb/libs/cocotbvpi_aldec.exp +0 -0
  41. cocotb/libs/cocotbvpi_aldec.lib +0 -0
  42. cocotb/libs/cocotbvpi_ghdl.dll +0 -0
  43. cocotb/libs/cocotbvpi_ghdl.exp +0 -0
  44. cocotb/libs/cocotbvpi_ghdl.lib +0 -0
  45. cocotb/libs/cocotbvpi_icarus.exp +0 -0
  46. cocotb/libs/cocotbvpi_icarus.lib +0 -0
  47. cocotb/libs/cocotbvpi_icarus.vpl +0 -0
  48. cocotb/libs/cocotbvpi_modelsim.dll +0 -0
  49. cocotb/libs/cocotbvpi_modelsim.exp +0 -0
  50. cocotb/libs/cocotbvpi_modelsim.lib +0 -0
  51. cocotb/libs/embed.dll +0 -0
  52. cocotb/libs/embed.exp +0 -0
  53. cocotb/libs/embed.lib +0 -0
  54. cocotb/libs/gpi.dll +0 -0
  55. cocotb/libs/gpi.exp +0 -0
  56. cocotb/libs/gpi.lib +0 -0
  57. cocotb/libs/gpilog.dll +0 -0
  58. cocotb/libs/gpilog.exp +0 -0
  59. cocotb/libs/gpilog.lib +0 -0
  60. cocotb/libs/pygpilog.dll +0 -0
  61. cocotb/libs/pygpilog.exp +0 -0
  62. cocotb/libs/pygpilog.lib +0 -0
  63. cocotb/{log.py → logging.py} +105 -110
  64. cocotb/queue.py +103 -57
  65. cocotb/regression.py +667 -712
  66. cocotb/result.py +17 -188
  67. cocotb/share/def/aldec.exp +0 -0
  68. cocotb/share/def/aldec.lib +0 -0
  69. cocotb/share/def/ghdl.exp +0 -0
  70. cocotb/share/def/ghdl.lib +0 -0
  71. cocotb/share/def/icarus.exp +0 -0
  72. cocotb/share/def/icarus.lib +0 -0
  73. cocotb/share/def/modelsim.def +1 -0
  74. cocotb/share/def/modelsim.exp +0 -0
  75. cocotb/share/def/modelsim.lib +0 -0
  76. cocotb/share/include/cocotb_utils.h +6 -29
  77. cocotb/share/include/embed.h +5 -28
  78. cocotb/share/include/gpi.h +137 -92
  79. cocotb/share/include/gpi_logging.h +221 -142
  80. cocotb/share/include/py_gpi_logging.h +7 -4
  81. cocotb/share/include/vpi_user_ext.h +4 -26
  82. cocotb/share/lib/verilator/verilator.cpp +59 -54
  83. cocotb/simulator.cp36-win_amd64.exp +0 -0
  84. cocotb/simulator.cp36-win_amd64.lib +0 -0
  85. cocotb/simulator.cp36-win_amd64.pyd +0 -0
  86. cocotb/task.py +434 -212
  87. cocotb/triggers.py +55 -1092
  88. cocotb/types/__init__.py +25 -47
  89. cocotb/types/_abstract_array.py +151 -0
  90. cocotb/types/_array.py +264 -0
  91. cocotb/types/_logic.py +296 -0
  92. cocotb/types/_logic_array.py +834 -0
  93. cocotb/types/{range.py → _range.py} +36 -44
  94. cocotb/types/_resolve.py +76 -0
  95. cocotb/utils.py +119 -587
  96. cocotb-2.0.0b1.dist-info/METADATA +48 -0
  97. cocotb-2.0.0b1.dist-info/RECORD +139 -0
  98. cocotb-2.0.0b1.dist-info/entry_points.txt +3 -0
  99. {cocotb-1.9.2.dist-info → cocotb-2.0.0b1.dist-info}/top_level.txt +1 -0
  100. cocotb_tools/_coverage.py +33 -0
  101. cocotb_tools/_vendor/__init__.py +3 -0
  102. cocotb_tools/check_results.py +65 -0
  103. cocotb_tools/combine_results.py +152 -0
  104. cocotb_tools/config.py +241 -0
  105. {cocotb → cocotb_tools}/ipython_support.py +29 -22
  106. cocotb_tools/makefiles/Makefile.deprecations +27 -0
  107. {cocotb/share → cocotb_tools}/makefiles/Makefile.inc +82 -54
  108. {cocotb/share → cocotb_tools}/makefiles/Makefile.sim +8 -33
  109. {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.activehdl +9 -16
  110. cocotb_tools/makefiles/simulators/Makefile.cvc +61 -0
  111. cocotb_tools/makefiles/simulators/Makefile.dsim +39 -0
  112. {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.ghdl +13 -42
  113. cocotb_tools/makefiles/simulators/Makefile.icarus +80 -0
  114. cocotb_tools/makefiles/simulators/Makefile.ius +93 -0
  115. cocotb_tools/makefiles/simulators/Makefile.modelsim +9 -0
  116. cocotb_tools/makefiles/simulators/Makefile.nvc +60 -0
  117. cocotb_tools/makefiles/simulators/Makefile.questa +29 -0
  118. cocotb/share/makefiles/simulators/Makefile.questa → cocotb_tools/makefiles/simulators/Makefile.questa-compat +26 -54
  119. cocotb_tools/makefiles/simulators/Makefile.questa-qisqrun +149 -0
  120. {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.riviera +17 -56
  121. cocotb_tools/makefiles/simulators/Makefile.vcs +65 -0
  122. {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.verilator +15 -22
  123. {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.xcelium +20 -52
  124. {cocotb → cocotb_tools}/runner.py +794 -361
  125. cocotb/_sim_versions.py → cocotb_tools/sim_versions.py +16 -21
  126. pygpi/entry.py +34 -17
  127. cocotb/binary.py +0 -858
  128. cocotb/config.py +0 -289
  129. cocotb/decorators.py +0 -332
  130. cocotb/memdebug.py +0 -35
  131. cocotb/outcomes.py +0 -56
  132. cocotb/scheduler.py +0 -1099
  133. cocotb/share/makefiles/Makefile.deprecations +0 -12
  134. cocotb/share/makefiles/simulators/Makefile.cvc +0 -94
  135. cocotb/share/makefiles/simulators/Makefile.icarus +0 -111
  136. cocotb/share/makefiles/simulators/Makefile.ius +0 -125
  137. cocotb/share/makefiles/simulators/Makefile.modelsim +0 -32
  138. cocotb/share/makefiles/simulators/Makefile.nvc +0 -64
  139. cocotb/share/makefiles/simulators/Makefile.vcs +0 -98
  140. cocotb/types/array.py +0 -309
  141. cocotb/types/logic.py +0 -292
  142. cocotb/types/logic_array.py +0 -298
  143. cocotb/wavedrom.py +0 -199
  144. cocotb/xunit_reporter.py +0 -80
  145. cocotb-1.9.2.dist-info/METADATA +0 -170
  146. cocotb-1.9.2.dist-info/RECORD +0 -121
  147. cocotb-1.9.2.dist-info/entry_points.txt +0 -3
  148. {cocotb-1.9.2.dist-info → cocotb-2.0.0b1.dist-info}/LICENSE +0 -0
  149. {cocotb-1.9.2.dist-info → cocotb-2.0.0b1.dist-info}/WHEEL +0 -0
  150. {cocotb/_vendor → cocotb_tools}/__init__.py +0 -0
  151. {cocotb → cocotb_tools}/_vendor/distutils_version.py +0 -0
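
The file list above summarizes the 2.0 restructure: build- and run-time tooling now lives in the new cocotb_tools top-level package (the makefiles, runner.py, config.py, sim_versions.py, combine_results.py, check_results.py), many internals become private modules (_scheduler.py, _decorators.py, types/_logic.py, and so on), and binary.py, wavedrom.py, and memdebug.py are dropped. As rough orientation for downstream users, the sketch below shows how a testbench project might adapt its imports. It is a hedged example, not documentation: get_runner and the keyword arguments are assumptions carried over from the 1.x experimental runner API and should be verified against the cocotb 2.0 docs.

    # Hedged sketch: adapting to the cocotb -> cocotb_tools split (item 124 above).
    # All names below are assumptions based on the 1.x runner API.
    try:
        from cocotb_tools.runner import get_runner  # 2.0.0b1 layout
    except ImportError:
        from cocotb.runner import get_runner  # 1.x layout

    runner = get_runner("icarus")  # simulator name, as in 1.x
    runner.build(verilog_sources=["dut.sv"], hdl_toplevel="dut")
    runner.test(hdl_toplevel="dut", test_module="test_dut")
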
cocotb/regression.py CHANGED
@@ -1,257 +1,253 @@
+ # Copyright cocotb contributors
  # Copyright (c) 2013, 2018 Potential Ventures Ltd
  # Copyright (c) 2013 SolarFlare Communications Inc
- # All rights reserved.
- #
- # Redistribution and use in source and binary forms, with or without
- # modification, are permitted provided that the following conditions are met:
- #     * Redistributions of source code must retain the above copyright
- #       notice, this list of conditions and the following disclaimer.
- #     * Redistributions in binary form must reproduce the above copyright
- #       notice, this list of conditions and the following disclaimer in the
- #       documentation and/or other materials provided with the distribution.
- #     * Neither the name of Potential Ventures Ltd,
- #       SolarFlare Communications Inc nor the
- #       names of its contributors may be used to endorse or promote products
- #       derived from this software without specific prior written permission.
- #
- # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
- # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ # Licensed under the Revised BSD License, see LICENSE for details.
+ # SPDX-License-Identifier: BSD-3-Clause

  """All things relating to regression capabilities."""

+ import functools
  import hashlib
  import inspect
- import math
+ import logging
  import os
- import pdb
  import random
- import sys
+ import re
  import time
- import traceback
- from itertools import product
- from typing import Any, Iterable, Optional, Tuple, Type
+ import warnings
+ from enum import auto
+ from importlib import import_module
+ from typing import (
+     Callable,
+     Coroutine,
+     List,
+     Union,
+ )

  import cocotb
- import cocotb.ANSI as ANSI
- from cocotb import simulator
- from cocotb._deprecation import deprecated
- from cocotb.decorators import test as Test
- from cocotb.handle import SimHandle
- from cocotb.log import SimLog
- from cocotb.outcomes import Error, Outcome
- from cocotb.result import SimFailure, TestSuccess
+ import cocotb._gpi_triggers
+ import cocotb.handle
+ from cocotb import _ANSI, simulator
+ from cocotb._base_triggers import Trigger
+ from cocotb._decorators import Parameterized, Test
+ from cocotb._extended_awaitables import SimTimeoutError, with_timeout
+ from cocotb._gpi_triggers import GPITrigger, Timer
+ from cocotb._outcomes import Error, Outcome
+ from cocotb._test import RunningTest
+ from cocotb._test_factory import TestFactory
+ from cocotb._test_functions import Failed
+ from cocotb._utils import (
+     DocEnum,
+     remove_traceback_frames,
+     safe_divide,
+     want_color_output,
+ )
+ from cocotb._xunit_reporter import XUnitReporter
  from cocotb.task import Task
- from cocotb.utils import get_sim_time, remove_traceback_frames, want_color_output
- from cocotb.xunit_reporter import XUnitReporter
-
- _pdb_on_exception = "COCOTB_PDB_ON_EXCEPTION" in os.environ
-
- # Optional support for coverage collection of testbench files
- coverage = None
- if "COVERAGE" in os.environ:
-     try:
-         import coverage
-     except ImportError as e:
-         msg = (
-             "Coverage collection requested but coverage module not available"
-             "\n"
-             "Import error was: %s\n" % repr(e)
-         )
-         sys.stderr.write(msg)
-
-
- def _my_import(name: str) -> Any:
-     mod = __import__(name)
-     components = name.split(".")
-     for comp in components[1:]:
-         mod = getattr(mod, comp)
-     return mod
-
-
- _logger = SimLog(__name__)
-
- _Failed: Type[BaseException]
- try:
-     import pytest
- except ModuleNotFoundError:
-     _Failed = AssertionError
- else:
-     try:
-         with pytest.raises(Exception):
-             pass
-     except BaseException as _raises_e:
-         _Failed = type(_raises_e)
+ from cocotb.utils import get_sim_time
+
+ __all__ = (
+     "Parameterized",
+     "RegressionManager",
+     "RegressionMode",
+     "SimFailure",
+     "Test",
+     "TestFactory",
+ )
+
+ # Set __module__ on re-exports
+ Parameterized.__module__ = __name__
+ Test.__module__ = __name__
+ TestFactory.__module__ = __name__
+
+
+ class SimFailure(BaseException):
+     """A Test failure due to simulator failure."""
+
+
+ _logger = logging.getLogger(__name__)
+
+
+ def _format_doc(docstring: Union[str, None]) -> str:
+     if docstring is None:
+         return ""
      else:
-         assert "pytest.raises doesn't raise an exception when it fails"
+         brief = docstring.split("\n")[0]
+         return f"\n {brief}"
+
+
+ class RegressionMode(DocEnum):
+     """The mode of the :class:`RegressionManager`."""
+
+     REGRESSION = (
+         auto(),
+         """Tests are run if included. Skipped tests are skipped, expected failures and errors are respected.""",
+     )
+
+     TESTCASE = (
+         auto(),
+         """Like :attr:`REGRESSION`, but skipped tests are *not* skipped if included.""",
+     )
+
+
+ class _TestResults:
+     # TODO Replace with dataclass in Python 3.7+
+
+     def __init__(
+         self,
+         test_fullname: str,
+         passed: Union[None, bool],
+         wall_time_s: float,
+         sim_time_ns: float,
+     ) -> None:
+         self.test_fullname = test_fullname
+         self.passed = passed
+         self.wall_time_s = wall_time_s
+         self.sim_time_ns = sim_time_ns
+
+     @property
+     def ratio(self) -> float:
+         return safe_divide(self.sim_time_ns, self.wall_time_s)


  class RegressionManager:
-     """Encapsulates all regression capability into a single place"""
+     """Object which manages tests.

-     def __init__(self, dut: SimHandle, tests: Iterable[Test]):
-         """
-         Args:
-             dut (SimHandle): The root handle to pass into test functions.
-             tests (Iterable[Test]): tests to run
-         """
-         self._dut = dut
-         self._test = None
-         self._test_task = None
-         self._test_start_time = None
-         self._test_start_sim_time = None
-         self._cov = None
+     This object uses the builder pattern to build up a regression.
+     Tests are added using :meth:`register_test` or :meth:`discover_tests`.
+     Inclusion filters for tests can be added using :meth:`add_filters`.
+     The "mode" of the regression can be controlled using :meth:`set_mode`.
+     These methods can be called in any order any number of times before :meth:`start_regression` is called,
+     and should not be called again after that.
+
+     Once all the tests, filters, and regression behavior configuration is done,
+     the user starts the regression with :meth:`start_regression`.
+     This method must be called exactly once.
+
+     Until the regression is started, :attr:`total_tests`, :attr:`count`, :attr:`passed`,
+     :attr:`skipped`, and :attr:`failures` hold placeholder values.
+     """
+
+     _timer1 = Timer(1)
+
+     def __init__(self) -> None:
+         self._test: Test
+         self._running_test: RunningTest
          self.log = _logger
-         self.start_time = time.time()
-         self.test_results = []
+         self._regression_start_time: float
+         self._test_results: List[_TestResults] = []
+         self.total_tests = 0
+         """Total number of tests that will be run or skipped."""
          self.count = 0
+         """The current test count."""
          self.passed = 0
+         """The current number of passed tests."""
          self.skipped = 0
+         """The current number of skipped tests."""
          self.failures = 0
+         """The current number of failed tests."""
          self._tearing_down = False
+         self._test_queue: List[Test] = []
+         self._filters: List[re.Pattern[str]] = []
+         self._mode = RegressionMode.REGRESSION
+         self._included: List[bool]
+         self._sim_failure: Union[Error[None], None] = None

          # Setup XUnit
          ###################

          results_filename = os.getenv("COCOTB_RESULTS_FILE", "results.xml")
-         suite_name = os.getenv("RESULT_TESTSUITE", "all")
-         package_name = os.getenv("RESULT_TESTPACKAGE", "all")
+         suite_name = os.getenv("COCOTB_RESULT_TESTSUITE", "all")
+         package_name = os.getenv("COCOTB_RESULT_TESTPACKAGE", "all")

          self.xunit = XUnitReporter(filename=results_filename)
-
          self.xunit.add_testsuite(name=suite_name, package=package_name)
-
          self.xunit.add_property(name="random_seed", value=str(cocotb.RANDOM_SEED))

-         # Setup Coverage
-         ####################
-
-         if coverage is not None:
-             self.log.info("Enabling coverage collection of Python code")
-             config_filepath = os.getenv("COVERAGE_RCFILE")
-             if config_filepath is None:
-                 # Exclude cocotb itself from coverage collection.
-                 cocotb_package_dir = os.path.dirname(__file__)
-                 self._cov = coverage.coverage(
-                     branch=True, omit=[f"{cocotb_package_dir}/*"]
-                 )
-             else:
-                 # Allow the config file to handle all configuration
-                 self._cov = coverage.coverage()
-             self._cov.start()
-
-         # Test Discovery
-         ####################
-         self._queue = []
-         for test in tests:
-             self.log.info(f"Found test {test.__module__}.{test.__qualname__}")
-             self._queue.append(test)
-         self.ntests = len(self._queue)
-
-         if not self._queue:
-             self.log.warning("No tests were discovered")
+     def discover_tests(self, *modules: str) -> None:
+         """Discover tests in files automatically.

-         self._queue.sort(key=lambda test: (test.stage, test._id))
-
-     @classmethod
-     def from_discovery(cls, dut: SimHandle):
-         """
-         Obtains the test list by discovery.
-
-         See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.
+         Should be called before :meth:`start_regression` is called.

          Args:
-             dut (SimHandle): The root handle to pass into test functions.
+             modules: Each argument given is the name of a module where tests are found.
          """
-         tests = cls._discover_tests()
-         return cls(dut, tests)
+         for module_name in modules:
+             mod = import_module(module_name)
+
+             found_test: bool = False
+             for obj_name, obj in vars(mod).items():
+                 if isinstance(obj, Test):
+                     found_test = True
+                     self.register_test(obj)
+                 elif isinstance(obj, Parameterized):
+                     found_test = True
+                     generated_tests: bool = False
+                     for test in obj.generate_tests():
+                         generated_tests = True
+                         self.register_test(test)
+                     if not generated_tests:
+                         warnings.warn(
+                             f"Parametrize object generated no tests: {module_name}.{obj_name}",
+                             stacklevel=2,
+                         )

-     @classmethod
-     def _discover_tests(cls) -> Iterable[Test]:
-         """
-         Discovers tests in files automatically.
+             if not found_test:
+                 warnings.warn(
+                     f"No tests were discovered in module: {module_name}", stacklevel=2
+                 )

-         See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.
-         """
-         module_str = os.getenv("MODULE")
-         test_str = os.getenv("TESTCASE")
+         # error if no tests were discovered
+         if not self._test_queue:
+             modules_str = ", ".join(repr(m) for m in modules)
+             raise RuntimeError(f"No tests were discovered in any module: {modules_str}")

-         if module_str is None:
-             raise ValueError(
-                 "Environment variable MODULE, which defines the module(s) to execute, is not defined."
-             )
+     def add_filters(self, *filters: str) -> None:
+         """Add regular expressions to filter-in registered tests.

-         modules = [s.strip() for s in module_str.split(",") if s.strip()]
+         Only those tests which match at least one of the given filters are included;
+         the rest are excluded.

-         cls._setup_pytest_assertion_rewriting(modules)
+         Should be called before :meth:`start_regression` is called.

-         tests = None
-         if test_str:
-             tests = [s.strip() for s in test_str.split(",") if s.strip()]
+         Args:
+             filters: Each argument given is a regex pattern for test names.
+                 A match *includes* the test.
+         """
+         for filter in filters:
+             compiled_filter = re.compile(filter)
+             self._filters.append(compiled_filter)

-         for module_name in modules:
-             try:
-                 _logger.debug("Python Path: " + ",".join(sys.path))
-                 _logger.debug("PWD: " + os.getcwd())
-                 module = _my_import(module_name)
-             except Exception as E:
-                 _logger.critical("Failed to import module %s: %s", module_name, E)
-                 _logger.info('MODULE variable was "%s"', ".".join(modules))
-                 _logger.info(traceback.format_exc())
-                 raise
-
-             if tests is not None:
-                 not_found_tests = []
-                 # Specific functions specified, don't auto-discover
-                 for test_name in tests:
-                     try:
-                         test = getattr(module, test_name)
-                     except AttributeError:
-                         not_found_tests.append(test_name)
-                         continue
-
-                     if not isinstance(test, Test):
-                         _logger.error(
-                             "Requested %s from module %s isn't a cocotb.test decorated coroutine",
-                             test_name,
-                             module_name,
-                         )
-                         raise ImportError(
-                             "Failed to find requested test %s" % test_name
-                         )
+     def set_mode(self, mode: RegressionMode) -> None:
+         """Set the regression mode.

-                     # If we request a test manually, it should be run even if skip=True is set.
-                     test.skip = False
+         See :class:`RegressionMode` for more details on how each mode affects :class:`RegressionManager` behavior.
+         Should be called before :meth:`start_regression` is called.

-                     yield test
+         Args:
+             mode: The regression mode to set.
+         """
+         self._mode = mode

-                 # Use the non-matching test names in the next module search
-                 tests = not_found_tests
+     def register_test(self, test: Test) -> None:
+         """Register a test with the :class:`RegressionManager`.

-             else:
-                 # auto-discover
-                 for thing in vars(module).values():
-                     if isinstance(thing, Test):
-                         yield thing
-
-         # If any test were not found in any module, raise an error
-         if tests:
-             _logger.error(
-                 "Requested test(s) %s wasn't found in module(s) %s", tests, modules
-             )
-             raise AttributeError("Test(s) %s doesn't exist in %s" % (tests, modules))
+         Should be called before :meth:`start_regression` is called.
+
+         Args:
+             test: The test object to register.
+         """
+         self.log.debug("Registered test %r", test.fullname)
+         self._test_queue.append(test)

      @classmethod
-     def _setup_pytest_assertion_rewriting(cls, test_modules: Iterable[str]) -> None:
+     def setup_pytest_assertion_rewriting(cls) -> None:
+         """Configure pytest to rewrite assertions for better failure messages.
+
+         Must be called before all modules containing tests are imported.
+         """
          try:
-             import pytest
+             import pytest  # noqa: PLC0415
          except ImportError:
              _logger.info(
                  "pytest not found, install it to enable better AssertionError messages"
@@ -260,202 +256,414 @@ class RegressionManager:
          try:
              # Install the assertion rewriting hook, which must be done before we
              # import the test modules.
-             from _pytest.assertion import install_importhook
-             from _pytest.config import Config
+             from _pytest.assertion import install_importhook  # noqa: PLC0415
+             from _pytest.config import Config  # noqa: PLC0415
+
+             python_files = os.getenv("COCOTB_REWRITE_ASSERTION_FILES", "*.py").strip()
+             if not python_files:
+                 # Even running the hook causes exceptions in some cases, so if the user
+                 # selects nothing, don't install the hook at all.
+                 return

              pytest_conf = Config.fromdictargs(
-                 {}, ["--capture=no", "-o", "python_files=*.py"]
+                 {}, ["--capture=no", "-o", f"python_files={python_files}"]
              )
              install_importhook(pytest_conf)
          except Exception:
              _logger.exception(
-                 "Configuring the assertion rewrite hook using pytest {} failed. "
-                 "Please file a bug report!".format(pytest.__version__)
+                 "Configuring the assertion rewrite hook using pytest %s failed. "
+                 "Please file a bug report!",
+                 pytest.__version__,
+             )
+
+     def start_regression(self) -> None:
+         """Start the regression."""
+
+         # sort tests into stages
+         self._test_queue.sort(key=lambda test: test.stage)
+
+         # mark tests for running
+         if self._filters:
+             self._included = [False] * len(self._test_queue)
+             for i, test in enumerate(self._test_queue):
+                 for filter in self._filters:
+                     if filter.search(test.fullname):
+                         self._included[i] = True
+         else:
+             self._included = [True] * len(self._test_queue)
+
+         # compute counts
+         self.count = 1
+         self.total_tests = sum(self._included)
+         if self.total_tests == 0:
+             self.log.warning(
+                 "No tests left after filtering with: %s",
+                 ", ".join(f.pattern for f in self._filters),
              )

-     @deprecated("This method is now private.")
-     def tear_down(self) -> None:
-         self._tear_down()
+         # start write scheduler
+         cocotb.handle._start_write_scheduler()
+
+         # start test loop
+         self._regression_start_time = time.time()
+         self._first_test = True
+         self._execute()
+
+     def _execute(self) -> None:
+         """Run the main regression loop.
+
+         Used by :meth:`start_regression` and :meth:`_test_complete` to continue to the main test running loop,
+         and by :meth:`_fail_regression` to shutdown the regression when a simulation failure occurs.
+         """
+
+         while self._test_queue:
+             self._test = self._test_queue.pop(0)
+             included = self._included.pop(0)
+
+             # if the test is not included, record and continue
+             if not included:
+                 self._record_test_excluded()
+                 continue
+
+             # if the test is skipped, record and continue
+             if self._test.skip and self._mode != RegressionMode.TESTCASE:
+                 self._record_test_skipped()
+                 continue
+
+             # if the test should be run, but the simulator has failed, record and continue
+             if self._sim_failure is not None:
+                 self._score_test(
+                     self._sim_failure,
+                     0,
+                     0,
+                 )
+                 continue
+
+             # initialize the test, if it fails, record and continue
+             try:
+                 self._running_test = self._init_test()
+             except Exception:
+                 self._record_test_init_failed()
+                 continue
+
+             self._log_test_start()
+
+             if self._first_test:
+                 self._first_test = False
+                 return self._schedule_next_test()
+             else:
+                 return self._timer1._prime(self._schedule_next_test)
+
+         return self._tear_down()
+
+     def _init_test(self) -> RunningTest:
+         # wrap test function in timeout
+         func: Callable[..., Coroutine[Trigger, None, None]]
+         timeout = self._test.timeout_time
+         if timeout is not None:
+             f = self._test.func
+
+             @functools.wraps(f)
+             async def func(*args: object, **kwargs: object) -> None:
+                 running_co = Task(f(*args, **kwargs))
+
+                 try:
+                     await with_timeout(running_co, timeout, self._test.timeout_unit)
+                 except SimTimeoutError:
+                     running_co.cancel()
+                     raise
+         else:
+             func = self._test.func
+
+         main_task = Task(func(cocotb.top), name=f"Test {self._test.name}")
+         return RunningTest(self._test_complete, main_task)
+
+     def _schedule_next_test(self, trigger: Union[GPITrigger, None] = None) -> None:
+         if trigger is not None:
+             # TODO move to Trigger object
+             cocotb._gpi_triggers._current_gpi_trigger = trigger
+             trigger._cleanup()
+
+         # seed random number generator based on test module, name, and COCOTB_RANDOM_SEED
+         hasher = hashlib.sha1()
+         hasher.update(self._test.fullname.encode())
+         seed = cocotb.RANDOM_SEED + int(hasher.hexdigest(), 16)
+         random.seed(seed)
+
+         self._start_sim_time = get_sim_time("ns")
+         self._start_time = time.time()
+
+         self._running_test.start()

      def _tear_down(self) -> None:
+         """Called by :meth:`_execute` when there are no more tests to run to finalize the regression."""
          # prevent re-entering the tear down procedure
          if not self._tearing_down:
              self._tearing_down = True
          else:
              return

-         # fail remaining tests
-         while True:
-             test = self._next_test()
-             if test is None:
-                 break
-             self._record_result(
-                 test=test, outcome=Error(SimFailure), wall_time_s=0, sim_time_ns=0
-             )
+         assert not self._test_queue
+
+         # stop the write scheduler
+         cocotb.handle._stop_write_scheduler()

          # Write out final log messages
          self._log_test_summary()

          # Generate output reports
          self.xunit.write()
-         if self._cov:
-             self._cov.stop()
-             self.log.info("Writing coverage data")
-             self._cov.save()
-             self._cov.html_report()
+
+         # TODO refactor initialization and finalization into their own module
+         # to prevent circular imports requiring local imports
+         from cocotb._init import _shutdown_testbench  # noqa: PLC0415
+
+         _shutdown_testbench()

          # Setup simulator finalization
          simulator.stop_simulator()
-         cocotb._stop_library_coverage()

-     @deprecated("This method is now private.")
-     def next_test(self) -> Optional[Test]:
-         return self._next_test()
+     def _test_complete(self) -> None:
+         """Callback given to the test to be called when the test finished."""

-     def _next_test(self) -> Optional[Test]:
-         """Get the next test to run"""
-         if not self._queue:
-             return None
-         self.count += 1
-         return self._queue.pop(0)
-
-     @deprecated("This method is now private.")
-     def handle_result(self, test: Task) -> None:
-         self._handle_result(test)
+         # compute wall time
+         wall_time = time.time() - self._start_time
+         sim_time_ns = get_sim_time("ns") - self._start_sim_time

-     def _handle_result(self, test: Task) -> None:
-         """Handle a test completing.
+         # Judge and record pass/fail.
+         self._score_test(
+             self._running_test.result(),
+             wall_time,
+             sim_time_ns,
+         )

-         Dump result to XML and schedule the next test (if any). Entered by the scheduler.
+         # Run next test.
+         return self._execute()

-         Args:
-             test: The test that completed
-         """
-         assert test is self._test_task
-
-         real_time = time.time() - self._test_start_time
-         sim_time_ns = get_sim_time("ns") - self._test_start_sim_time
+     def _score_test(
+         self,
+         outcome: Outcome[None],
+         wall_time_s: float,
+         sim_time_ns: float,
+     ) -> None:
+         test = self._test

-         self._record_result(
-             test=self._test,
-             outcome=self._test_task._outcome,
-             wall_time_s=real_time,
-             sim_time_ns=sim_time_ns,
-         )
+         # score test
+         passed: bool
+         msg: Union[str, None]
+         exc: Union[BaseException, None]
+         try:
+             outcome.get()
+         except BaseException as e:
+             passed, msg = False, None
+             exc = remove_traceback_frames(e, ["_score_test", "get"])
+         else:
+             passed, msg, exc = True, None, None
+
+         if passed:
+             if test.expect_error:
+                 self._record_test_failed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=exc,
+                     msg="passed but we expected an error",
+                 )
+                 passed = False
+
+             elif test.expect_fail:
+                 self._record_test_failed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=exc,
+                     msg="passed but we expected a failure",
+                 )
+                 passed = False

-         self._execute()
+             else:
+                 self._record_test_passed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=None,
+                     msg=msg,
+                 )

-     def _init_test(self, test: Test) -> Optional[Task]:
-         """Initialize a test.
+         elif test.expect_fail:
+             if isinstance(exc, (AssertionError, Failed)):
+                 self._record_test_passed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=None,
+                     msg="failed as expected",
+                 )

-         Record outcome if the initialization fails.
-         Record skip if the test is skipped.
-         Save the initialized test if it successfully initializes.
-         """
+             else:
+                 self._record_test_failed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=exc,
+                     msg="expected failure, but errored with unexpected type",
+                 )
+                 passed = False

-         if test.skip:
-             hilight_start = ANSI.COLOR_SKIPPED if want_color_output() else ""
-             hilight_end = ANSI.COLOR_DEFAULT if want_color_output() else ""
-             # Want this to stand out a little bit
-             self.log.info(
-                 "{start}skipping{end} {name} ({i}/{total})".format(
-                     start=hilight_start,
-                     i=self.count,
-                     total=self.ntests,
-                     end=hilight_end,
-                     name=test.__qualname__,
+         elif test.expect_error:
+             if isinstance(exc, test.expect_error):
+                 self._record_test_passed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=None,
+                     msg="errored as expected",
                  )
-             )
-             self._record_result(test, None, 0, 0)
-             return None

-         test_init_outcome = cocotb.outcomes.capture(test, self._dut)
+             else:
+                 self._record_test_failed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=exc,
+                     msg="errored with unexpected type",
+                 )
+                 passed = False

-         if isinstance(test_init_outcome, cocotb.outcomes.Error):
-             self.log.error(
-                 "Failed to initialize test %s" % test.__qualname__,
-                 exc_info=test_init_outcome.error,
+         else:
+             self._record_test_failed(
+                 wall_time_s=wall_time_s,
+                 sim_time_ns=sim_time_ns,
+                 result=exc,
+                 msg=msg,
              )
-             self._record_result(test, test_init_outcome, 0, 0)
-             return None

-         running_test = test_init_outcome.get()
+     def _get_lineno(self, test: Test) -> int:
+         try:
+             return inspect.getsourcelines(test.func)[1]
+         except OSError:
+             return 1

-         # seed random number generator based on test module, name, and RANDOM_SEED
-         hasher = hashlib.sha1()
-         hasher.update(test.__qualname__.encode())
-         hasher.update(test.__module__.encode())
-         seed = cocotb.RANDOM_SEED + int(hasher.hexdigest(), 16)
-         random.seed(seed)
+     def _log_test_start(self) -> None:
+         """Called by :meth:`_execute` to log that a test is starting."""
+         hilight_start = _ANSI.COLOR_TEST if want_color_output() else ""
+         hilight_end = _ANSI.COLOR_DEFAULT if want_color_output() else ""
+         self.log.info(
+             "%srunning%s %s (%d/%d)%s",
+             hilight_start,
+             hilight_end,
+             self._test.fullname,
+             self.count,
+             self.total_tests,
+             _format_doc(self._test.doc),
+         )

-         return running_test
+     def _record_test_excluded(self) -> None:
+         """Called by :meth:`_execute` when a test is excluded by filters."""

-     def _score_test(self, test: Test, outcome: Outcome) -> Tuple[bool, bool]:
-         """
-         Given a test and the test's outcome, determine if the test met expectations and log pertinent information
-         """
+         # write out xunit results
+         lineno = self._get_lineno(self._test)
+         self.xunit.add_testcase(
+             name=self._test.name,
+             classname=self._test.module,
+             file=inspect.getfile(self._test.func),
+             lineno=repr(lineno),
+             time=repr(0),
+             sim_time_ns=repr(0),
+             ratio_time=repr(0),
+         )
+         self.xunit.add_skipped()

-         # scoring outcomes
-         result_pass = True
-         sim_failed = False
+         # do not log anything, nor save details for the summary

-         try:
-             outcome.get()
-         except (KeyboardInterrupt, SystemExit):
-             raise
-         except BaseException as e:
-             result = remove_traceback_frames(e, ["_score_test", "get"])
-         else:
-             result = TestSuccess()
-
-         if (
-             isinstance(result, TestSuccess)
-             and not test.expect_fail
-             and not test.expect_error
-         ):
-             self._log_test_passed(test, None, None)
-
-         elif isinstance(result, TestSuccess) and test.expect_error:
-             self._log_test_failed(test, None, "passed but we expected an error")
-             result_pass = False
-
-         elif isinstance(result, TestSuccess):
-             self._log_test_failed(test, None, "passed but we expected a failure")
-             result_pass = False
-
-         elif isinstance(result, SimFailure):
-             if isinstance(result, test.expect_error):
-                 self._log_test_passed(test, result, "errored as expected")
-             else:
-                 self.log.error("Test error has lead to simulator shutting us down")
-                 result_pass = False
-             # whether we expected it or not, the simulation has failed unrecoverably
-             sim_failed = True
+     def _record_test_skipped(self) -> None:
+         """Called by :meth:`_execute` when a test is skipped."""

-         elif isinstance(result, (AssertionError, _Failed)) and test.expect_fail:
-             self._log_test_passed(test, result, "failed as expected")
+         # log test results
+         hilight_start = _ANSI.COLOR_SKIPPED if want_color_output() else ""
+         hilight_end = _ANSI.COLOR_DEFAULT if want_color_output() else ""
+         self.log.info(
+             "%sskipping%s %s (%d/%d)%s",
+             hilight_start,
+             hilight_end,
+             self._test.fullname,
+             self.count,
+             self.total_tests,
+             _format_doc(self._test.doc),
+         )

-         elif test.expect_error:
-             if isinstance(result, test.expect_error):
-                 self._log_test_passed(test, result, "errored as expected")
-             else:
-                 self._log_test_failed(test, result, "errored with unexpected type ")
-                 result_pass = False
+         # write out xunit results
+         lineno = self._get_lineno(self._test)
+         self.xunit.add_testcase(
+             name=self._test.name,
+             classname=self._test.module,
+             file=inspect.getfile(self._test.func),
+             lineno=repr(lineno),
+             time=repr(0),
+             sim_time_ns=repr(0),
+             ratio_time=repr(0),
+         )
+         self.xunit.add_skipped()
+
+         # save details for summary
+         self._test_results.append(
+             _TestResults(
+                 test_fullname=self._test.fullname,
+                 passed=None,
+                 sim_time_ns=0,
+                 wall_time_s=0,
+             )
+         )

-         else:
-             self._log_test_failed(test, result, None)
-             result_pass = False
+         # update running passed/failed/skipped counts
+         self.skipped += 1
+         self.count += 1

-         if _pdb_on_exception:
-             pdb.post_mortem(result.__traceback__)
+     def _record_test_init_failed(self) -> None:
+         """Called by :meth:`_execute` when a test initialization fails."""
+
+         # log test results
+         hilight_start = _ANSI.COLOR_FAILED if want_color_output() else ""
+         hilight_end = _ANSI.COLOR_DEFAULT if want_color_output() else ""
+         self.log.exception(
+             "%sFailed to initialize%s %s! (%d/%d)%s",
+             hilight_start,
+             hilight_end,
+             self._test.fullname,
+             self.count,
+             self.total_tests,
+             _format_doc(self._test.doc),
+         )

-         return result_pass, sim_failed
+         # write out xunit results
+         lineno = self._get_lineno(self._test)
+         self.xunit.add_testcase(
+             name=self._test.name,
+             classname=self._test.module,
+             file=inspect.getfile(self._test.func),
+             lineno=repr(lineno),
+             time=repr(0),
+             sim_time_ns=repr(0),
+             ratio_time=repr(0),
+         )
+         self.xunit.add_failure(msg="Test initialization failed")
+
+         # save details for summary
+         self._test_results.append(
+             _TestResults(
+                 test_fullname=self._test.fullname,
+                 passed=False,
+                 sim_time_ns=0,
+                 wall_time_s=0,
+             )
+         )

-     def _log_test_passed(
-         self, test: Test, result: Optional[Exception] = None, msg: Optional[str] = None
+         # update running passed/failed/skipped counts
+         self.failures += 1
+         self.count += 1
+
+     def _record_test_passed(
+         self,
+         wall_time_s: float,
+         sim_time_ns: float,
+         result: Union[Exception, None],
+         msg: Union[str, None],
      ) -> None:
-         start_hilight = ANSI.COLOR_PASSED if want_color_output() else ""
-         stop_hilight = ANSI.COLOR_DEFAULT if want_color_output() else ""
+         start_hilight = _ANSI.COLOR_PASSED if want_color_output() else ""
+         stop_hilight = _ANSI.COLOR_DEFAULT if want_color_output() else ""
          if msg is None:
              rest = ""
          else:
@@ -465,119 +673,98 @@ class RegressionManager:
          else:
              result_was = f" (result was {type(result).__qualname__})"
          self.log.info(
-             f"{test.__qualname__} {start_hilight}passed{stop_hilight}{rest}{result_was}"
+             "%s %spassed%s%s%s",
+             self._test.fullname,
+             start_hilight,
+             stop_hilight,
+             rest,
+             result_was,
          )

-     def _log_test_failed(
-         self, test: Test, result: Optional[Exception] = None, msg: Optional[str] = None
-     ) -> None:
-         start_hilight = ANSI.COLOR_FAILED if want_color_output() else ""
-         stop_hilight = ANSI.COLOR_DEFAULT if want_color_output() else ""
-         if msg is None:
-             rest = ""
-         else:
-             rest = f": {msg}"
-         self.log.info(
-             f"{test.__qualname__} {start_hilight}failed{stop_hilight}{rest}",
-             exc_info=result,
+         # write out xunit results
+         ratio_time = safe_divide(sim_time_ns, wall_time_s)
+         lineno = self._get_lineno(self._test)
+         self.xunit.add_testcase(
+             name=self._test.name,
+             classname=self._test.module,
+             file=inspect.getfile(self._test.func),
+             lineno=repr(lineno),
+             time=repr(wall_time_s),
+             sim_time_ns=repr(sim_time_ns),
+             ratio_time=repr(ratio_time),
          )

-     def _record_result(
+         # update running passed/failed/skipped counts
+         self.passed += 1
+         self.count += 1
+
+         # save details for summary
+         self._test_results.append(
+             _TestResults(
+                 test_fullname=self._test.fullname,
+                 passed=True,
+                 sim_time_ns=sim_time_ns,
+                 wall_time_s=wall_time_s,
+             )
+         )
+
+     def _record_test_failed(
          self,
-         test: Test,
-         outcome: Optional[Outcome],
          wall_time_s: float,
          sim_time_ns: float,
+         result: Union[BaseException, None],
+         msg: Union[str, None],
      ) -> None:
+         start_hilight = _ANSI.COLOR_FAILED if want_color_output() else ""
+         stop_hilight = _ANSI.COLOR_DEFAULT if want_color_output() else ""
+         if msg is None:
+             rest = ""
+         else:
+             rest = f": {msg}"
+         self.log.warning(
+             "%s%s %sfailed%s%s",
+             stop_hilight,
+             self._test.fullname,
+             start_hilight,
+             stop_hilight,
+             rest,
+         )

-         ratio_time = self._safe_divide(sim_time_ns, wall_time_s)
-         try:
-             lineno = inspect.getsourcelines(test._func)[1]
-         except OSError:
-             lineno = 1
-
+         # write out xunit results
+         ratio_time = safe_divide(sim_time_ns, wall_time_s)
+         lineno = self._get_lineno(self._test)
          self.xunit.add_testcase(
-             name=test.__qualname__,
-             classname=test.__module__,
-             file=inspect.getfile(test._func),
+             name=self._test.name,
+             classname=self._test.module,
+             file=inspect.getfile(self._test.func),
              lineno=repr(lineno),
              time=repr(wall_time_s),
              sim_time_ns=repr(sim_time_ns),
              ratio_time=repr(ratio_time),
          )
+         self.xunit.add_failure(error_type=type(result).__name__, error_msg=str(result))

-         if outcome is None:  # skipped
-             test_pass, sim_failed = None, False
-             self.xunit.add_skipped()
-             self.skipped += 1
-
-         else:
-             test_pass, sim_failed = self._score_test(test, outcome)
-             if not test_pass:
-                 self.xunit.add_failure(
-                     message=f"Test failed with RANDOM_SEED={cocotb.RANDOM_SEED}"
-                 )
-                 self.failures += 1
-             else:
-                 self.passed += 1
-
-         self.test_results.append(
-             {
-                 "test": ".".join([test.__module__, test.__qualname__]),
-                 "pass": test_pass,
-                 "sim": sim_time_ns,
-                 "real": wall_time_s,
-                 "ratio": ratio_time,
-             }
-         )
-
-         if sim_failed:
-             self._tear_down()
-             return
-
-     @deprecated("This method is now private.")
-     def execute(self) -> None:
-         self._execute()
+         # update running passed/failed/skipped counts
+         self.failures += 1
+         self.count += 1

-     def _execute(self) -> None:
-         while True:
-             self._test = self._next_test()
-             if self._test is None:
-                 return self._tear_down()
-
-             self._test_task = self._init_test(self._test)
-             if self._test_task is not None:
-                 return self._start_test()
-
-     def _start_test(self) -> None:
-         # Want this to stand out a little bit
-         start = ""
-         end = ""
-         if want_color_output():
-             start = ANSI.COLOR_TEST
-             end = ANSI.COLOR_DEFAULT
-         self.log.info(
-             "{start}running{end} {name} ({i}/{total}){description}".format(
-                 start=start,
-                 i=self.count,
-                 total=self.ntests,
-                 end=end,
-                 name=self._test.__qualname__,
-                 description=_trim(self._test.__doc__),
+         # save details for summary
+         self._test_results.append(
+             _TestResults(
+                 test_fullname=self._test.fullname,
+                 passed=False,
+                 sim_time_ns=sim_time_ns,
+                 wall_time_s=wall_time_s,
              )
          )

-         self._test_start_time = time.time()
-         self._test_start_sim_time = get_sim_time("ns")
-         cocotb.scheduler._add_test(self._test_task)
-
      def _log_test_summary(self) -> None:
-
-         real_time = time.time() - self.start_time
+         """Called by :meth:`_tear_down` to log the test summary."""
+         real_time = time.time() - self._regression_start_time
          sim_time_ns = get_sim_time("ns")
-         ratio_time = self._safe_divide(sim_time_ns, real_time)
+         ratio_time = safe_divide(sim_time_ns, real_time)

-         if len(self.test_results) == 0:
+         if len(self._test_results) == 0:
              return

          TEST_FIELD = "TEST"
@@ -585,30 +772,30 @@ class RegressionManager:
          SIM_FIELD = "SIM TIME (ns)"
          REAL_FIELD = "REAL TIME (s)"
          RATIO_FIELD = "RATIO (ns/s)"
-         TOTAL_NAME = f"TESTS={self.ntests} PASS={self.passed} FAIL={self.failures} SKIP={self.skipped}"
+         TOTAL_NAME = f"TESTS={self.total_tests} PASS={self.passed} FAIL={self.failures} SKIP={self.skipped}"

          TEST_FIELD_LEN = max(
              len(TEST_FIELD),
              len(TOTAL_NAME),
-             len(max([x["test"] for x in self.test_results], key=len)),
+             len(max([x.test_fullname for x in self._test_results], key=len)),
          )
          RESULT_FIELD_LEN = len(RESULT_FIELD)
          SIM_FIELD_LEN = len(SIM_FIELD)
          REAL_FIELD_LEN = len(REAL_FIELD)
          RATIO_FIELD_LEN = len(RATIO_FIELD)

-         header_dict = dict(
-             a=TEST_FIELD,
-             b=RESULT_FIELD,
-             c=SIM_FIELD,
-             d=REAL_FIELD,
-             e=RATIO_FIELD,
-             a_len=TEST_FIELD_LEN,
-             b_len=RESULT_FIELD_LEN,
-             c_len=SIM_FIELD_LEN,
-             d_len=REAL_FIELD_LEN,
-             e_len=RATIO_FIELD_LEN,
-         )
+         header_dict = {
+             "a": TEST_FIELD,
+             "b": RESULT_FIELD,
+             "c": SIM_FIELD,
+             "d": REAL_FIELD,
+             "e": RATIO_FIELD,
+             "a_len": TEST_FIELD_LEN,
+             "b_len": RESULT_FIELD_LEN,
+             "c_len": SIM_FIELD_LEN,
+             "d_len": REAL_FIELD_LEN,
+             "e_len": RATIO_FIELD_LEN,
+         }

          LINE_LEN = (
              3
@@ -634,43 +821,43 @@ class RegressionManager:
          summary += LINE_SEP

          test_line = "** {a:<{a_len}} {start}{b:^{b_len}}{end} {c:>{c_len}.2f} {d:>{d_len}.2f} {e:>{e_len}} **\n"
-         for result in self.test_results:
+         for result in self._test_results:
              hilite = ""
              lolite = ""

-             if result["pass"] is None:
+             if result.passed is None:
                  ratio = "-.--"
                  pass_fail_str = "SKIP"
                  if want_color_output():
-                     hilite = ANSI.COLOR_SKIPPED
-                     lolite = ANSI.COLOR_DEFAULT
-             elif result["pass"]:
-                 ratio = format(result["ratio"], "0.2f")
+                     hilite = _ANSI.COLOR_SKIPPED
+                     lolite = _ANSI.COLOR_DEFAULT
+             elif result.passed:
+                 ratio = format(result.ratio, "0.2f")
                  pass_fail_str = "PASS"
                  if want_color_output():
-                     hilite = ANSI.COLOR_PASSED
-                     lolite = ANSI.COLOR_DEFAULT
+                     hilite = _ANSI.COLOR_PASSED
+                     lolite = _ANSI.COLOR_DEFAULT
              else:
-                 ratio = format(result["ratio"], "0.2f")
+                 ratio = format(result.ratio, "0.2f")
                  pass_fail_str = "FAIL"
                  if want_color_output():
-                     hilite = ANSI.COLOR_FAILED
-                     lolite = ANSI.COLOR_DEFAULT
-
-             test_dict = dict(
-                 a=result["test"],
-                 b=pass_fail_str,
-                 c=result["sim"],
-                 d=result["real"],
-                 e=ratio,
-                 a_len=TEST_FIELD_LEN,
-                 b_len=RESULT_FIELD_LEN,
-                 c_len=SIM_FIELD_LEN - 1,
-                 d_len=REAL_FIELD_LEN - 1,
-                 e_len=RATIO_FIELD_LEN - 1,
-                 start=hilite,
-                 end=lolite,
-             )
+                     hilite = _ANSI.COLOR_FAILED
+                     lolite = _ANSI.COLOR_DEFAULT
+
+             test_dict = {
+                 "a": result.test_fullname,
+                 "b": pass_fail_str,
+                 "c": result.sim_time_ns,
+                 "d": result.wall_time_s,
+                 "e": ratio,
+                 "a_len": TEST_FIELD_LEN,
+                 "b_len": RESULT_FIELD_LEN,
+                 "c_len": SIM_FIELD_LEN - 1,
+                 "d_len": REAL_FIELD_LEN - 1,
+                 "e_len": RATIO_FIELD_LEN - 1,
+                 "start": hilite,
+                 "end": lolite,
+             }

              summary += test_line.format(**test_dict)

@@ -695,239 +882,7 @@ class RegressionManager:

          self.log.info(summary)

-     @staticmethod
-     def _safe_divide(a: float, b: float) -> float:
-         try:
-             return a / b
-         except ZeroDivisionError:
-             if a == 0:
-                 return float("nan")
-             else:
-                 return float("inf")
-
-
- def _create_test(function, name, documentation, mod, *args, **kwargs):
-     """Factory function to create tests, avoids late binding.
-
-     Creates a test dynamically. The test will call the supplied
-     function with the supplied arguments.
-
-     Args:
-         function (function): The test function to run.
-         name (str): The name of the test.
-         documentation (str): The docstring for the test.
-         mod (module): The module this function belongs to.
-         *args: Remaining args to pass to test function.
-         **kwargs: Passed to the test function.
-
-     Returns:
-         Decorated test function
-     """
-
-     async def _my_test(dut):
-         await function(dut, *args, **kwargs)
-
-     _my_test.__name__ = name
-     _my_test.__qualname__ = name
-     _my_test.__doc__ = documentation
-     _my_test.__module__ = mod.__name__
-
-     return cocotb.test()(_my_test)
-
-
- class TestFactory:
-     """Factory to automatically generate tests.
-
-     Args:
-         test_function: A Callable that returns the test Coroutine.
-             Must take *dut* as the first argument.
-         *args: Remaining arguments are passed directly to the test function.
-             Note that these arguments are not varied. An argument that
-             varies with each test must be a keyword argument to the
-             test function.
-         **kwargs: Remaining keyword arguments are passed directly to the test function.
-             Note that these arguments are not varied. An argument that
-             varies with each test must be a keyword argument to the
-             test function.
-
-     Assuming we have a common test function that will run a test. This test
-     function will take keyword arguments (for example generators for each of
-     the input interfaces) and generate tests that call the supplied function.
-
-     This Factory allows us to generate sets of tests based on the different
-     permutations of the possible arguments to the test function.
-
-     For example, if we have a module that takes backpressure, has two configurable
-     features where enabling ``feature_b`` requires ``feature_a`` to be active, and
-     need to test against data generation routines ``gen_a`` and ``gen_b``:
-
-     >>> tf = TestFactory(test_function=run_test)
-     >>> tf.add_option(name='data_in', optionlist=[gen_a, gen_b])
-     >>> tf.add_option('backpressure', [None, random_backpressure])
-     >>> tf.add_option(('feature_a', 'feature_b'), [(False, False), (True, False), (True, True)])
-     >>> tf.generate_tests()
-
-     We would get the following tests:
-
-     * ``gen_a`` with no backpressure and both features disabled
-     * ``gen_a`` with no backpressure and only ``feature_a`` enabled
-     * ``gen_a`` with no backpressure and both features enabled
-     * ``gen_a`` with ``random_backpressure`` and both features disabled
-     * ``gen_a`` with ``random_backpressure`` and only ``feature_a`` enabled
-     * ``gen_a`` with ``random_backpressure`` and both features enabled
-     * ``gen_b`` with no backpressure and both features disabled
-     * ``gen_b`` with no backpressure and only ``feature_a`` enabled
-     * ``gen_b`` with no backpressure and both features enabled
-     * ``gen_b`` with ``random_backpressure`` and both features disabled
-     * ``gen_b`` with ``random_backpressure`` and only ``feature_a`` enabled
-     * ``gen_b`` with ``random_backpressure`` and both features enabled
-
-     The tests are appended to the calling module for auto-discovery.
-
-     Tests are simply named ``test_function_N``. The docstring for the test (hence
-     the test description) includes the name and description of each generator.
-
-     .. versionchanged:: 1.5
-         Groups of options are now supported
-     """
-
-     # Prevent warnings from collection of TestFactories by unit testing frameworks.
-     __test__ = False
-
-     def __init__(self, test_function, *args, **kwargs):
-         self.test_function = test_function
-         self.name = self.test_function.__qualname__
-
-         self.args = args
-         self.kwargs_constant = kwargs
-         self.kwargs = {}
-         self.log = _logger
-
-     def add_option(self, name, optionlist):
-         """Add a named option to the test.
-
-         Args:
-             name (str or iterable of str): An option name, or an iterable of
-                 several option names. Passed to test as keyword arguments.
-
-             optionlist (list): A list of possible options for this test knob.
-                 If N names were specified, this must be a list of N-tuples or
-                 lists, where each element specifies a value for its respective
-                 option.
-
-         .. versionchanged:: 1.5
-             Groups of options are now supported
-         """
-         if not isinstance(name, str):
-             name = tuple(name)
-             for opt in optionlist:
-                 if len(name) != len(opt):
-                     raise ValueError(
-                         "Mismatch between number of options and number of option values in group"
-                     )
-         self.kwargs[name] = optionlist
-
-     def generate_tests(self, prefix="", postfix=""):
-         """
-         Generate an exhaustive set of tests using the cartesian product of the
-         possible keyword arguments.
-
-         The generated tests are appended to the namespace of the calling
-         module.
-
-         Args:
-             prefix (str): Text string to append to start of ``test_function`` name
-                 when naming generated test cases. This allows reuse of
-                 a single ``test_function`` with multiple
-                 :class:`TestFactories <.TestFactory>` without name clashes.
-             postfix (str): Text string to append to end of ``test_function`` name
-                 when naming generated test cases. This allows reuse of
-                 a single ``test_function`` with multiple
-                 :class:`TestFactories <.TestFactory>` without name clashes.
-         """
-
-         frm = inspect.stack()[1]
-         mod = inspect.getmodule(frm[0])
-
-         d = self.kwargs
-
-         for index, testoptions in enumerate(
-             dict(zip(d, v)) for v in product(*d.values())
-         ):
-
-             name = "%s%s%s_%03d" % (prefix, self.name, postfix, index + 1)
-             doc = "Automatically generated test\n\n"
-
-             # preprocess testoptions to split tuples
-             testoptions_split = {}
-             for optname, optvalue in testoptions.items():
-                 if isinstance(optname, str):
-                     testoptions_split[optname] = optvalue
-                 else:
-                     # previously checked in add_option; ensure nothing has changed
-                     assert len(optname) == len(optvalue)
-                     for n, v in zip(optname, optvalue):
-                         testoptions_split[n] = v
-
-             for optname, optvalue in testoptions_split.items():
-                 if callable(optvalue):
-                     if not optvalue.__doc__:
-                         desc = "No docstring supplied"
-                     else:
-                         desc = optvalue.__doc__.split("\n")[0]
-                     doc += "\t{}: {} ({})\n".format(
-                         optname, optvalue.__qualname__, desc
-                     )
-                 else:
-                     doc += "\t{}: {}\n".format(optname, repr(optvalue))
-
-             self.log.debug(
-                 'Adding generated test "%s" to module "%s"' % (name, mod.__name__)
-             )
-             kwargs = {}
-             kwargs.update(self.kwargs_constant)
-             kwargs.update(testoptions_split)
-             if hasattr(mod, name):
-                 self.log.error(
-                     "Overwriting %s in module %s. "
-                     "This causes a previously defined testcase "
-                     "not to be run. Consider setting/changing "
-                     "name_postfix" % (name, mod)
-                 )
-             setattr(
-                 mod,
-                 name,
-                 _create_test(self.test_function, name, doc, mod, *self.args, **kwargs),
-             )
-
-
- def _trim(docstring: Optional[str]) -> str:
-     """Normalizes test docstrings
-
-     Based on https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation.
-     """
-     if docstring is None or docstring == "":
-         return ""
-     # Convert tabs to spaces (following the normal Python rules)
-     # and split into a list of lines:
-     lines = docstring.expandtabs().splitlines()
-     # Determine minimum indentation (first line doesn't count):
-     indent = math.inf
-     for line in lines[1:]:
-         stripped = line.lstrip()
-         if stripped:
-             indent = min(indent, len(line) - len(stripped))
-     # Remove indentation (first line is special):
-     trimmed = [lines[0].strip()]
-     if indent < math.inf:
-         for line in lines[1:]:
-             trimmed.append(line[indent:].rstrip())
-     # Strip off trailing and leading blank lines:
-     while trimmed and not trimmed[-1]:
-         trimmed.pop()
-     while trimmed and not trimmed[0]:
-         trimmed.pop(0)
-     # Add one newline back
-     trimmed.insert(0, "")
-     # Return a single string:
-     return "\n ".join(trimmed)
+     def _fail_simulation(self, msg: str) -> None:
+         self._sim_failure = Error(SimFailure(msg))
+         self._running_test.abort(self._sim_failure)
+         cocotb._scheduler_inst._event_loop()
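
For orientation, the new RegressionManager replaces the 1.x flow (test discovery driven by the MODULE/TESTCASE environment variables plus from_discovery) with an explicit builder-style API. The sketch below strings the new calls together in the order the class docstring requires; it is illustrative only, since the manager appears to be driven by cocotb's own startup code (cocotb/_init.py is new in this release) inside a running simulator, not by user testbenches.

    from cocotb.regression import RegressionManager, RegressionMode

    # Per its docstring, must run before the test modules are imported.
    RegressionManager.setup_pytest_assertion_rewriting()

    manager = RegressionManager()

    # Builder steps; any order, any number of times, before start_regression():
    manager.discover_tests("test_dut")             # imports the module, registers its Tests
    manager.add_filters(r"test_read.*", r"smoke")  # regexes; a match *includes* a test
    manager.set_mode(RegressionMode.TESTCASE)      # also run matching tests marked skip

    # Exactly once, after all configuration:
    manager.start_regression()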
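
One behavioral detail worth noting from the diff: per-test RNG seeding changed. 1.9.2 hashed the test's __qualname__ and __module__ separately, while 2.0.0b1 hashes the single fullname in _schedule_next_test. A minimal sketch of the scheme follows; the helper name is hypothetical, not a cocotb API.

    import hashlib
    import random

    def derive_test_seed(base_seed: int, test_fullname: str) -> int:
        """Hypothetical helper mirroring the per-test seed derivation in _schedule_next_test."""
        hasher = hashlib.sha1()
        hasher.update(test_fullname.encode())
        return base_seed + int(hasher.hexdigest(), 16)

    # The same COCOTB_RANDOM_SEED and test name always produce the same seed,
    # so a failing test's random stimulus can be replayed in isolation.
    random.seed(derive_test_seed(12345, "test_dut.test_read"))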