cocotb 1.9.1-cp310-cp310-win_amd64.whl → 2.0.0-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (163)
  1. cocotb/_ANSI.py +65 -0
  2. cocotb/__init__.py +81 -310
  3. cocotb/_base_triggers.py +515 -0
  4. cocotb/_bridge.py +186 -0
  5. cocotb/_decorators.py +515 -0
  6. cocotb/_deprecation.py +3 -3
  7. cocotb/_exceptions.py +7 -0
  8. cocotb/_extended_awaitables.py +419 -0
  9. cocotb/_gpi_triggers.py +385 -0
  10. cocotb/_init.py +301 -0
  11. cocotb/_outcomes.py +54 -0
  12. cocotb/_profiling.py +46 -0
  13. cocotb/_py_compat.py +114 -29
  14. cocotb/_scheduler.py +448 -0
  15. cocotb/_test.py +248 -0
  16. cocotb/_test_factory.py +312 -0
  17. cocotb/_test_functions.py +42 -0
  18. cocotb/_typing.py +7 -0
  19. cocotb/_utils.py +274 -0
  20. cocotb/_version.py +3 -7
  21. cocotb/_xunit_reporter.py +66 -0
  22. cocotb/clock.py +353 -108
  23. cocotb/debug.py +24 -0
  24. cocotb/handle.py +1370 -793
  25. cocotb/libs/cocotb.dll +0 -0
  26. cocotb/libs/cocotb.exp +0 -0
  27. cocotb/libs/cocotb.lib +0 -0
  28. cocotb/libs/cocotbfli_modelsim.dll +0 -0
  29. cocotb/libs/cocotbfli_modelsim.exp +0 -0
  30. cocotb/libs/cocotbfli_modelsim.lib +0 -0
  31. cocotb/libs/cocotbutils.dll +0 -0
  32. cocotb/libs/cocotbutils.exp +0 -0
  33. cocotb/libs/cocotbutils.lib +0 -0
  34. cocotb/libs/cocotbvhpi_aldec.dll +0 -0
  35. cocotb/libs/cocotbvhpi_aldec.exp +0 -0
  36. cocotb/libs/cocotbvhpi_aldec.lib +0 -0
  37. cocotb/libs/cocotbvhpi_modelsim.dll +0 -0
  38. cocotb/libs/cocotbvhpi_modelsim.exp +0 -0
  39. cocotb/libs/cocotbvhpi_modelsim.lib +0 -0
  40. cocotb/libs/cocotbvpi_aldec.dll +0 -0
  41. cocotb/libs/cocotbvpi_aldec.exp +0 -0
  42. cocotb/libs/cocotbvpi_aldec.lib +0 -0
  43. cocotb/libs/cocotbvpi_ghdl.dll +0 -0
  44. cocotb/libs/cocotbvpi_ghdl.exp +0 -0
  45. cocotb/libs/cocotbvpi_ghdl.lib +0 -0
  46. cocotb/libs/cocotbvpi_icarus.exp +0 -0
  47. cocotb/libs/cocotbvpi_icarus.lib +0 -0
  48. cocotb/libs/cocotbvpi_icarus.vpl +0 -0
  49. cocotb/libs/cocotbvpi_modelsim.dll +0 -0
  50. cocotb/libs/cocotbvpi_modelsim.exp +0 -0
  51. cocotb/libs/cocotbvpi_modelsim.lib +0 -0
  52. cocotb/libs/embed.dll +0 -0
  53. cocotb/libs/embed.exp +0 -0
  54. cocotb/libs/embed.lib +0 -0
  55. cocotb/libs/gpi.dll +0 -0
  56. cocotb/libs/gpi.exp +0 -0
  57. cocotb/libs/gpi.lib +0 -0
  58. cocotb/libs/gpilog.dll +0 -0
  59. cocotb/libs/gpilog.exp +0 -0
  60. cocotb/libs/gpilog.lib +0 -0
  61. cocotb/libs/pygpilog.dll +0 -0
  62. cocotb/libs/pygpilog.exp +0 -0
  63. cocotb/libs/pygpilog.lib +0 -0
  64. cocotb/logging.py +424 -0
  65. cocotb/queue.py +103 -57
  66. cocotb/regression.py +680 -721
  67. cocotb/result.py +17 -188
  68. cocotb/share/def/aldec.exp +0 -0
  69. cocotb/share/def/aldec.lib +0 -0
  70. cocotb/share/def/ghdl.exp +0 -0
  71. cocotb/share/def/ghdl.lib +0 -0
  72. cocotb/share/def/icarus.exp +0 -0
  73. cocotb/share/def/icarus.lib +0 -0
  74. cocotb/share/def/modelsim.def +1 -0
  75. cocotb/share/def/modelsim.exp +0 -0
  76. cocotb/share/def/modelsim.lib +0 -0
  77. cocotb/share/include/cocotb_utils.h +9 -32
  78. cocotb/share/include/embed.h +7 -30
  79. cocotb/share/include/gpi.h +331 -137
  80. cocotb/share/include/gpi_logging.h +221 -142
  81. cocotb/share/include/py_gpi_logging.h +8 -5
  82. cocotb/share/include/vpi_user_ext.h +4 -26
  83. cocotb/share/lib/verilator/verilator.cpp +80 -67
  84. cocotb/simtime.py +230 -0
  85. cocotb/simulator.cp310-win_amd64.exp +0 -0
  86. cocotb/simulator.cp310-win_amd64.lib +0 -0
  87. cocotb/simulator.cp310-win_amd64.pyd +0 -0
  88. cocotb/simulator.pyi +107 -0
  89. cocotb/task.py +478 -213
  90. cocotb/triggers.py +55 -1092
  91. cocotb/types/__init__.py +28 -47
  92. cocotb/types/_abstract_array.py +151 -0
  93. cocotb/types/_array.py +295 -0
  94. cocotb/types/_indexing.py +17 -0
  95. cocotb/types/_logic.py +333 -0
  96. cocotb/types/_logic_array.py +868 -0
  97. cocotb/types/{range.py → _range.py} +47 -48
  98. cocotb/types/_resolve.py +76 -0
  99. cocotb/utils.py +58 -646
  100. cocotb-2.0.0.dist-info/METADATA +60 -0
  101. cocotb-2.0.0.dist-info/RECORD +146 -0
  102. {cocotb-1.9.1.dist-info → cocotb-2.0.0.dist-info}/WHEEL +1 -1
  103. cocotb-2.0.0.dist-info/entry_points.txt +2 -0
  104. {cocotb-1.9.1.dist-info → cocotb-2.0.0.dist-info/licenses}/LICENSE +1 -0
  105. {cocotb-1.9.1.dist-info → cocotb-2.0.0.dist-info}/top_level.txt +1 -0
  106. cocotb_tools/__init__.py +0 -0
  107. cocotb_tools/_coverage.py +33 -0
  108. cocotb_tools/_vendor/__init__.py +3 -0
  109. cocotb_tools/check_results.py +65 -0
  110. cocotb_tools/combine_results.py +152 -0
  111. cocotb_tools/config.py +241 -0
  112. {cocotb → cocotb_tools}/ipython_support.py +29 -22
  113. cocotb_tools/makefiles/Makefile.deprecations +27 -0
  114. {cocotb/share → cocotb_tools}/makefiles/Makefile.inc +77 -55
  115. {cocotb/share → cocotb_tools}/makefiles/Makefile.sim +16 -33
  116. {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.activehdl +18 -18
  117. cocotb_tools/makefiles/simulators/Makefile.cvc +61 -0
  118. cocotb_tools/makefiles/simulators/Makefile.dsim +39 -0
  119. {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.ghdl +13 -42
  120. cocotb_tools/makefiles/simulators/Makefile.icarus +80 -0
  121. cocotb_tools/makefiles/simulators/Makefile.ius +93 -0
  122. cocotb_tools/makefiles/simulators/Makefile.modelsim +9 -0
  123. cocotb_tools/makefiles/simulators/Makefile.nvc +60 -0
  124. cocotb_tools/makefiles/simulators/Makefile.questa +29 -0
  125. cocotb_tools/makefiles/simulators/Makefile.questa-compat +143 -0
  126. cocotb_tools/makefiles/simulators/Makefile.questa-qisqrun +149 -0
  127. cocotb_tools/makefiles/simulators/Makefile.riviera +144 -0
  128. cocotb_tools/makefiles/simulators/Makefile.vcs +65 -0
  129. {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.verilator +15 -22
  130. {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.xcelium +20 -52
  131. cocotb_tools/py.typed +0 -0
  132. cocotb_tools/runner.py +1868 -0
  133. cocotb/_sim_versions.py → cocotb_tools/sim_versions.py +16 -21
  134. pygpi/entry.py +34 -18
  135. pygpi/py.typed +0 -0
  136. cocotb/ANSI.py +0 -92
  137. cocotb/binary.py +0 -858
  138. cocotb/config.py +0 -289
  139. cocotb/decorators.py +0 -332
  140. cocotb/log.py +0 -303
  141. cocotb/memdebug.py +0 -35
  142. cocotb/outcomes.py +0 -56
  143. cocotb/runner.py +0 -1400
  144. cocotb/scheduler.py +0 -1099
  145. cocotb/share/makefiles/Makefile.deprecations +0 -12
  146. cocotb/share/makefiles/simulators/Makefile.cvc +0 -94
  147. cocotb/share/makefiles/simulators/Makefile.icarus +0 -111
  148. cocotb/share/makefiles/simulators/Makefile.ius +0 -125
  149. cocotb/share/makefiles/simulators/Makefile.modelsim +0 -32
  150. cocotb/share/makefiles/simulators/Makefile.nvc +0 -64
  151. cocotb/share/makefiles/simulators/Makefile.questa +0 -168
  152. cocotb/share/makefiles/simulators/Makefile.riviera +0 -177
  153. cocotb/share/makefiles/simulators/Makefile.vcs +0 -98
  154. cocotb/types/array.py +0 -309
  155. cocotb/types/logic.py +0 -292
  156. cocotb/types/logic_array.py +0 -298
  157. cocotb/wavedrom.py +0 -199
  158. cocotb/xunit_reporter.py +0 -80
  159. cocotb-1.9.1.dist-info/METADATA +0 -166
  160. cocotb-1.9.1.dist-info/RECORD +0 -121
  161. cocotb-1.9.1.dist-info/entry_points.txt +0 -2
  162. cocotb/{_vendor/__init__.py → py.typed} +0 -0
  163. {cocotb → cocotb_tools}/_vendor/distutils_version.py +0 -0
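
Note on the file moves above: the host-side tooling that lived inside the cocotb package in 1.9.1 (cocotb/runner.py, cocotb/config.py, and the makefiles under cocotb/share/makefiles/) now ships in the separate cocotb_tools package. A minimal sketch of the resulting import change, assuming the 2.0 runner keeps the get_runner entry point that the 1.x cocotb.runner module provided (the simulator name and file names below are illustrative; confirm the exact API against the cocotb 2.0 release notes):

    # cocotb 1.9.1 (cocotb/runner.py, removed in this release):
    # from cocotb.runner import get_runner

    # cocotb 2.0.0 (cocotb_tools/runner.py in the list above):
    from cocotb_tools.runner import get_runner

    runner = get_runner("icarus")  # pick an installed simulator
    runner.build(sources=["dut.sv"], hdl_toplevel="dut")
    runner.test(hdl_toplevel="dut", test_module="test_dut")
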
cocotb/regression.py CHANGED
@@ -1,258 +1,267 @@
+# Copyright cocotb contributors
 # Copyright (c) 2013, 2018 Potential Ventures Ltd
 # Copyright (c) 2013 SolarFlare Communications Inc
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright
-#       notice, this list of conditions and the following disclaimer in the
-#       documentation and/or other materials provided with the distribution.
-#     * Neither the name of Potential Ventures Ltd,
-#       SolarFlare Communications Inc nor the
-#       names of its contributors may be used to endorse or promote products
-#       derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Licensed under the Revised BSD License, see LICENSE for details.
+# SPDX-License-Identifier: BSD-3-Clause
 
 """All things relating to regression capabilities."""
 
+import functools
 import hashlib
 import inspect
-import math
+import logging
 import os
-import pdb
 import random
-import sys
+import re
 import time
-import traceback
-from itertools import product
-from typing import Any, Iterable, Optional, Tuple, Type
+import warnings
+from enum import auto
+from importlib import import_module
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+    Coroutine,
+    List,
+    Union,
+)
 
 import cocotb
-import cocotb.ANSI as ANSI
+import cocotb._gpi_triggers
+import cocotb.handle
+from cocotb import logging as cocotb_logging
 from cocotb import simulator
-from cocotb._deprecation import deprecated
-from cocotb.decorators import test as Test
-from cocotb.handle import SimHandle
-from cocotb.log import SimLog
-from cocotb.outcomes import Error, Outcome
-from cocotb.result import SimFailure, TestSuccess
+from cocotb._decorators import Parameterized, Test
+from cocotb._extended_awaitables import with_timeout
+from cocotb._gpi_triggers import GPITrigger, Timer
+from cocotb._outcomes import Error, Outcome
+from cocotb._test import RunningTest
+from cocotb._test_factory import TestFactory
+from cocotb._test_functions import Failed
+from cocotb._utils import (
+    DocEnum,
+    remove_traceback_frames,
+    safe_divide,
+)
+from cocotb._xunit_reporter import XUnitReporter
+from cocotb.logging import ANSI
+from cocotb.simtime import get_sim_time
 from cocotb.task import Task
-from cocotb.utils import get_sim_time, remove_traceback_frames, want_color_output
-from cocotb.xunit_reporter import XUnitReporter
-
-_pdb_on_exception = "COCOTB_PDB_ON_EXCEPTION" in os.environ
-
-# Optional support for coverage collection of testbench files
-coverage = None
-if "COVERAGE" in os.environ:
-    try:
-        import coverage
-    except ImportError as e:
-        msg = (
-            "Coverage collection requested but coverage module not available"
-            "\n"
-            "Import error was: %s\n" % repr(e)
-        )
-        sys.stderr.write(msg)
-
-
-def _my_import(name: str) -> Any:
-    mod = __import__(name)
-    components = name.split(".")
-    for comp in components[1:]:
-        mod = getattr(mod, comp)
-    return mod
-
-
-_logger = SimLog(__name__)
-
-_Failed: Type[BaseException]
-try:
-    import pytest
-except ModuleNotFoundError:
-    _Failed = AssertionError
-else:
-    try:
-        with pytest.raises(Exception):
-            pass
-    except BaseException as _raises_e:
-        _Failed = type(_raises_e)
+
+if TYPE_CHECKING:
+    from cocotb._base_triggers import Trigger
+
+__all__ = (
+    "Parameterized",
+    "RegressionManager",
+    "RegressionMode",
+    "SimFailure",
+    "Test",
+    "TestFactory",
+)
+
+# Set __module__ on re-exports
+Parameterized.__module__ = __name__
+Test.__module__ = __name__
+TestFactory.__module__ = __name__
+
+
+class SimFailure(BaseException):
+    """A Test failure due to simulator failure.
+
+    .. caution::
+        Not to be raised or caught within a test.
+        Only used for marking expected failure with ``expect_error`` in :func:`cocotb.test`.
+    """
+
+
+_logger = logging.getLogger(__name__)
+
+
+def _format_doc(docstring: Union[str, None]) -> str:
+    if docstring is None:
+        return ""
     else:
-        assert "pytest.raises doesn't raise an exception when it fails"
+        brief = docstring.split("\n")[0]
+        return f"\n {brief}"
+
+
+class RegressionMode(DocEnum):
+    """The mode of the :class:`RegressionManager`."""
+
+    REGRESSION = (
+        auto(),
+        """Tests are run if included. Skipped tests are skipped, expected failures and errors are respected.""",
+    )
+
+    TESTCASE = (
+        auto(),
+        """Like :attr:`REGRESSION`, but skipped tests are *not* skipped if included.""",
+    )
+
+
+class _TestResults:
+    # TODO Replace with dataclass in Python 3.7+
+
+    def __init__(
+        self,
+        test_fullname: str,
+        passed: Union[None, bool],
+        wall_time_s: float,
+        sim_time_ns: float,
+    ) -> None:
+        self.test_fullname = test_fullname
+        self.passed = passed
+        self.wall_time_s = wall_time_s
+        self.sim_time_ns = sim_time_ns
+
+    @property
+    def ratio(self) -> float:
+        return safe_divide(self.sim_time_ns, self.wall_time_s)
 
 
 class RegressionManager:
-    """Encapsulates all regression capability into a single place"""
+    """Object which manages tests.
 
-    def __init__(self, dut: SimHandle, tests: Iterable[Test]):
-        """
-        Args:
-            dut (SimHandle): The root handle to pass into test functions.
-            tests (Iterable[Test]): tests to run
-        """
-        self._dut = dut
-        self._test = None
-        self._test_task = None
-        self._test_start_time = None
-        self._test_start_sim_time = None
-        self._cov = None
+    This object uses the builder pattern to build up a regression.
+    Tests are added using :meth:`register_test` or :meth:`discover_tests`.
+    Inclusion filters for tests can be added using :meth:`add_filters`.
+    The "mode" of the regression can be controlled using :meth:`set_mode`.
+    These methods can be called in any order any number of times before :meth:`start_regression` is called,
+    and should not be called again after that.
+
+    Once all the tests, filters, and regression behavior configuration is done,
+    the user starts the regression with :meth:`start_regression`.
+    This method must be called exactly once.
+
+    Until the regression is started, :attr:`total_tests`, :attr:`count`, :attr:`passed`,
+    :attr:`skipped`, and :attr:`failures` hold placeholder values.
+    """
+
+    COLOR_TEST = ANSI.BLUE_FG
+    COLOR_PASSED = ANSI.GREEN_FG
+    COLOR_SKIPPED = ANSI.YELLOW_FG
+    COLOR_FAILED = ANSI.RED_FG
+
+    _timer1 = Timer(1)
+
+    def __init__(self) -> None:
+        self._test: Test
+        self._running_test: RunningTest
         self.log = _logger
-        self.start_time = time.time()
-        self.test_results = []
+        self._regression_start_time: float
+        self._test_results: List[_TestResults] = []
+        self.total_tests = 0
+        """Total number of tests that will be run or skipped."""
         self.count = 0
+        """The current test count."""
         self.passed = 0
+        """The current number of passed tests."""
         self.skipped = 0
+        """The current number of skipped tests."""
        self.failures = 0
+        """The current number of failed tests."""
         self._tearing_down = False
+        self._test_queue: List[Test] = []
+        self._filters: List[re.Pattern[str]] = []
+        self._mode = RegressionMode.REGRESSION
+        self._included: List[bool]
+        self._sim_failure: Union[Error[None], None] = None
 
         # Setup XUnit
         ###################
 
         results_filename = os.getenv("COCOTB_RESULTS_FILE", "results.xml")
-        suite_name = os.getenv("RESULT_TESTSUITE", "all")
-        package_name = os.getenv("RESULT_TESTPACKAGE", "all")
+        suite_name = os.getenv("COCOTB_RESULT_TESTSUITE", "all")
+        package_name = os.getenv("COCOTB_RESULT_TESTPACKAGE", "all")
 
         self.xunit = XUnitReporter(filename=results_filename)
-
         self.xunit.add_testsuite(name=suite_name, package=package_name)
-
         self.xunit.add_property(name="random_seed", value=str(cocotb.RANDOM_SEED))
 
-        # Setup Coverage
-        ####################
-
-        if coverage is not None:
-            self.log.info("Enabling coverage collection of Python code")
-            config_filepath = os.getenv("COVERAGE_RCFILE")
-            if config_filepath is None:
-                # Exclude cocotb itself from coverage collection.
-                cocotb_package_dir = os.path.dirname(__file__)
-                self._cov = coverage.coverage(
-                    branch=True, omit=[f"{cocotb_package_dir}/*"]
-                )
-            else:
-                # Allow the config file to handle all configuration
-                self._cov = coverage.coverage()
-            self._cov.start()
-
-        # Test Discovery
-        ####################
-        self._queue = []
-        for test in tests:
-            self.log.info(f"Found test {test.__module__}.{test.__qualname__}")
-            self._queue.append(test)
-        self.ntests = len(self._queue)
+    def discover_tests(self, *modules: str) -> None:
+        """Discover tests in files automatically.
 
-        if not self._queue:
-            self.log.warning("No tests were discovered")
-
-        self._queue.sort(key=lambda test: (test.stage, test._id))
-
-    @classmethod
-    def from_discovery(cls, dut: SimHandle):
-        """
-        Obtains the test list by discovery.
-
-        See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.
+        Should be called before :meth:`start_regression` is called.
 
         Args:
-            dut (SimHandle): The root handle to pass into test functions.
+            modules: Each argument given is the name of a module where tests are found.
         """
-        tests = cls._discover_tests()
-        return cls(dut, tests)
+        for module_name in modules:
+            mod = import_module(module_name)
+
+            found_test: bool = False
+            for obj_name, obj in vars(mod).items():
+                if isinstance(obj, Test):
+                    found_test = True
+                    self.register_test(obj)
+                elif isinstance(obj, Parameterized):
+                    found_test = True
+                    generated_tests: bool = False
+                    for test in obj.generate_tests():
+                        generated_tests = True
+                        self.register_test(test)
+                    if not generated_tests:
+                        warnings.warn(
+                            f"Parametrize object generated no tests: {module_name}.{obj_name}",
+                            stacklevel=2,
+                        )
 
-    @classmethod
-    def _discover_tests(cls) -> Iterable[Test]:
-        """
-        Discovers tests in files automatically.
+            if not found_test:
+                warnings.warn(
+                    f"No tests were discovered in module: {module_name}", stacklevel=2
+                )
 
-        See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.
-        """
-        module_str = os.getenv("MODULE")
-        test_str = os.getenv("TESTCASE")
+        # error if no tests were discovered
+        if not self._test_queue:
+            modules_str = ", ".join(repr(m) for m in modules)
+            raise RuntimeError(f"No tests were discovered in any module: {modules_str}")
 
-        if module_str is None:
-            raise ValueError(
-                "Environment variable MODULE, which defines the module(s) to execute, is not defined."
-            )
+    def add_filters(self, *filters: str) -> None:
+        """Add regular expressions to filter-in registered tests.
 
-        modules = [s.strip() for s in module_str.split(",") if s.strip()]
+        Only those tests which match at least one of the given filters are included;
+        the rest are excluded.
 
-        cls._setup_pytest_assertion_rewriting(modules)
+        Should be called before :meth:`start_regression` is called.
 
-        tests = None
-        if test_str:
-            tests = [s.strip() for s in test_str.split(",") if s.strip()]
+        Args:
+            filters: Each argument given is a regex pattern for test names.
+                A match *includes* the test.
+        """
+        for filter in filters:
+            compiled_filter = re.compile(filter)
+            self._filters.append(compiled_filter)
 
-        for module_name in modules:
-            try:
-                _logger.debug("Python Path: " + ",".join(sys.path))
-                _logger.debug("PWD: " + os.getcwd())
-                module = _my_import(module_name)
-            except Exception as E:
-                _logger.critical("Failed to import module %s: %s", module_name, E)
-                _logger.info('MODULE variable was "%s"', ".".join(modules))
-                _logger.info("Traceback: ")
-                _logger.info(traceback.format_exc())
-                raise
-
-            if tests is not None:
-                not_found_tests = []
-                # Specific functions specified, don't auto-discover
-                for test_name in tests:
-                    try:
-                        test = getattr(module, test_name)
-                    except AttributeError:
-                        not_found_tests.append(test_name)
-                        continue
-
-                    if not isinstance(test, Test):
-                        _logger.error(
-                            "Requested %s from module %s isn't a cocotb.test decorated coroutine",
-                            test_name,
-                            module_name,
-                        )
-                        raise ImportError(
-                            "Failed to find requested test %s" % test_name
-                        )
+    def set_mode(self, mode: RegressionMode) -> None:
+        """Set the regression mode.
 
-                    # If we request a test manually, it should be run even if skip=True is set.
-                    test.skip = False
+        See :class:`RegressionMode` for more details on how each mode affects :class:`RegressionManager` behavior.
+        Should be called before :meth:`start_regression` is called.
 
-                    yield test
+        Args:
+            mode: The regression mode to set.
+        """
+        self._mode = mode
 
-                # Use the non-matching test names in the next module search
-                tests = not_found_tests
+    def register_test(self, test: Test) -> None:
+        """Register a test with the :class:`RegressionManager`.
 
-            else:
-                # auto-discover
-                for thing in vars(module).values():
-                    if isinstance(thing, Test):
-                        yield thing
-
-        # If any test were not found in any module, raise an error
-        if tests:
-            _logger.error(
-                "Requested test(s) %s wasn't found in module(s) %s", tests, modules
-            )
-            raise AttributeError("Test(s) %s doesn't exist in %s" % (tests, modules))
+        Should be called before :meth:`start_regression` is called.
+
+        Args:
+            test: The test object to register.
+        """
+        self.log.debug("Registered test %r", test.fullname)
+        self._test_queue.append(test)
 
     @classmethod
-    def _setup_pytest_assertion_rewriting(cls, test_modules: Iterable[str]) -> None:
+    def setup_pytest_assertion_rewriting(cls) -> None:
+        """Configure pytest to rewrite assertions for better failure messages.
+
+        Must be called before all modules containing tests are imported.
+        """
         try:
-            import pytest
+            import pytest  # noqa: PLC0415
         except ImportError:
             _logger.info(
                 "pytest not found, install it to enable better AssertionError messages"
@@ -261,205 +270,408 @@ class RegressionManager:
         try:
             # Install the assertion rewriting hook, which must be done before we
             # import the test modules.
-            from _pytest.assertion import install_importhook
-            from _pytest.config import Config
+            from _pytest.assertion import install_importhook  # noqa: PLC0415
+            from _pytest.config import Config  # noqa: PLC0415
+
+            python_files = os.getenv("COCOTB_REWRITE_ASSERTION_FILES", "*.py").strip()
+            if not python_files:
+                # Even running the hook causes exceptions in some cases, so if the user
+                # selects nothing, don't install the hook at all.
+                return
 
             pytest_conf = Config.fromdictargs(
-                {}, ["--capture=no", "-o", "python_files=*.py"]
+                {}, ["--capture=no", "-o", f"python_files={python_files}"]
             )
             install_importhook(pytest_conf)
         except Exception:
             _logger.exception(
-                "Configuring the assertion rewrite hook using pytest {} failed. "
-                "Please file a bug report!".format(pytest.__version__)
+                "Configuring the assertion rewrite hook using pytest %s failed. "
+                "Please file a bug report!",
+                pytest.__version__,
+            )
+
+    def start_regression(self) -> None:
+        """Start the regression."""
+
+        # sort tests into stages
+        self._test_queue.sort(key=lambda test: test.stage)
+
+        # mark tests for running
+        if self._filters:
+            self._included = [False] * len(self._test_queue)
+            for i, test in enumerate(self._test_queue):
+                for filter in self._filters:
+                    if filter.search(test.fullname):
+                        self._included[i] = True
+        else:
+            self._included = [True] * len(self._test_queue)
+
+        # compute counts
+        self.count = 1
+        self.total_tests = sum(self._included)
+        if self.total_tests == 0:
+            self.log.warning(
+                "No tests left after filtering with: %s",
+                ", ".join(f.pattern for f in self._filters),
             )
 
-    @deprecated("This method is now private.")
-    def tear_down(self) -> None:
-        self._tear_down()
+        # start write scheduler
+        cocotb.handle._start_write_scheduler()
+
+        # start test loop
+        self._regression_start_time = time.time()
+        self._first_test = True
+        self._execute()
+
+    def _execute(self) -> None:
+        """Run the main regression loop.
+
+        Used by :meth:`start_regression` and :meth:`_test_complete` to continue to the main test running loop,
+        and by :meth:`_fail_regression` to shutdown the regression when a simulation failure occurs.
+        """
+
+        while self._test_queue:
+            self._test = self._test_queue.pop(0)
+            included = self._included.pop(0)
+
+            # if the test is not included, record and continue
+            if not included:
+                self._record_test_excluded()
+                continue
+
+            # if the test is skipped, record and continue
+            if self._test.skip and self._mode != RegressionMode.TESTCASE:
+                self._record_test_skipped()
+                continue
+
+            # if the test should be run, but the simulator has failed, record and continue
+            if self._sim_failure is not None:
+                self._score_test(
+                    self._sim_failure,
+                    0,
+                    0,
+                )
+                continue
+
+            # initialize the test, if it fails, record and continue
+            try:
+                self._running_test = self._init_test()
+            except Exception:
+                self._record_test_init_failed()
+                continue
+
+            self._log_test_start()
+
+            if self._first_test:
+                self._first_test = False
+                return self._schedule_next_test()
+            else:
+                return self._timer1._prime(self._schedule_next_test)
+
+        return self._tear_down()
+
+    def _init_test(self) -> RunningTest:
+        # wrap test function in timeout
+        func: Callable[..., Coroutine[Trigger, None, None]]
+        timeout = self._test.timeout_time
+        if timeout is not None:
+            f = self._test.func
+
+            @functools.wraps(f)
+            async def func(*args: object, **kwargs: object) -> None:
+                await with_timeout(f(*args, **kwargs), timeout, self._test.timeout_unit)
+        else:
+            func = self._test.func
+
+        main_task = Task(func(cocotb.top), name=f"Test {self._test.name}")
+        return RunningTest(self._test_complete, main_task)
+
+    def _schedule_next_test(self, trigger: Union[GPITrigger, None] = None) -> None:
+        if trigger is not None:
+            # TODO move to Trigger object
+            cocotb._gpi_triggers._current_gpi_trigger = trigger
+            trigger._cleanup()
+
+        # seed random number generator based on test module, name, and COCOTB_RANDOM_SEED
+        hasher = hashlib.sha1()
+        hasher.update(self._test.fullname.encode())
+        seed = cocotb.RANDOM_SEED + int(hasher.hexdigest(), 16)
+        random.seed(seed)
+
+        self._start_sim_time = get_sim_time("ns")
+        self._start_time = time.time()
+
+        self._running_test.start()
 
     def _tear_down(self) -> None:
+        """Called by :meth:`_execute` when there are no more tests to run to finalize the regression."""
         # prevent re-entering the tear down procedure
         if not self._tearing_down:
             self._tearing_down = True
         else:
             return
 
-        # fail remaining tests
-        while True:
-            test = self._next_test()
-            if test is None:
-                break
-            self._record_result(
-                test=test, outcome=Error(SimFailure), wall_time_s=0, sim_time_ns=0
-            )
+        assert not self._test_queue
+
+        # stop the write scheduler
+        cocotb.handle._stop_write_scheduler()
 
         # Write out final log messages
         self._log_test_summary()
 
         # Generate output reports
         self.xunit.write()
-        if self._cov:
-            self._cov.stop()
-            self.log.info("Writing coverage data")
-            self._cov.save()
-            self._cov.html_report()
-        if cocotb._library_coverage is not None:
-            # TODO: move this once we have normal shutdown behavior to _sim_event
-            cocotb._library_coverage.stop()
-            cocotb._library_coverage.save()
 
-        # Setup simulator finalization
-        simulator.stop_simulator()
+        # TODO refactor initialization and finalization into their own module
+        # to prevent circular imports requiring local imports
+        from cocotb._init import _shutdown_testbench  # noqa: PLC0415
 
-    @deprecated("This method is now private.")
-    def next_test(self) -> Optional[Test]:
-        return self._next_test()
+        _shutdown_testbench()
 
-    def _next_test(self) -> Optional[Test]:
-        """Get the next test to run"""
-        if not self._queue:
-            return None
-        self.count += 1
-        return self._queue.pop(0)
+        # Setup simulator finalization
+        simulator.stop_simulator()
 
-    @deprecated("This method is now private.")
-    def handle_result(self, test: Task) -> None:
-        self._handle_result(test)
+    def _test_complete(self) -> None:
+        """Callback given to the test to be called when the test finished."""
 
-    def _handle_result(self, test: Task) -> None:
-        """Handle a test completing.
+        # compute wall time
+        wall_time = time.time() - self._start_time
+        sim_time_ns = get_sim_time("ns") - self._start_sim_time
 
-        Dump result to XML and schedule the next test (if any). Entered by the scheduler.
+        # Judge and record pass/fail.
+        self._score_test(
+            self._running_test.result(),
+            wall_time,
+            sim_time_ns,
+        )
 
-        Args:
-            test: The test that completed
-        """
-        assert test is self._test_task
+        # Run next test.
+        return self._execute()
 
-        real_time = time.time() - self._test_start_time
-        sim_time_ns = get_sim_time("ns") - self._test_start_sim_time
+    def _score_test(
+        self,
+        outcome: Outcome[None],
+        wall_time_s: float,
+        sim_time_ns: float,
+    ) -> None:
+        test = self._test
 
-        self._record_result(
-            test=self._test,
-            outcome=self._test_task._outcome,
-            wall_time_s=real_time,
-            sim_time_ns=sim_time_ns,
-        )
+        # score test
+        passed: bool
+        msg: Union[str, None]
+        exc: Union[BaseException, None]
+        try:
+            outcome.get()
+        except BaseException as e:
+            passed, msg = False, None
+            exc = remove_traceback_frames(e, ["_score_test", "get"])
+        else:
+            passed, msg, exc = True, None, None
+
+        if passed:
+            if test.expect_error:
+                self._record_test_failed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=exc,
+                    msg="passed but we expected an error",
+                )
+                passed = False
+
+            elif test.expect_fail:
+                self._record_test_failed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=exc,
+                    msg="passed but we expected a failure",
+                )
+                passed = False
 
-        self._execute()
+            else:
+                self._record_test_passed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=None,
+                    msg=msg,
+                )
 
-    def _init_test(self, test: Test) -> Optional[Task]:
-        """Initialize a test.
+        elif test.expect_fail:
+            if isinstance(exc, (AssertionError, Failed)):
+                self._record_test_passed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=None,
+                    msg="failed as expected",
+                )
 
-        Record outcome if the initialization fails.
-        Record skip if the test is skipped.
-        Save the initialized test if it successfully initializes.
-        """
+            else:
+                self._record_test_failed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=exc,
+                    msg="expected failure, but errored with unexpected type",
+                )
+                passed = False
 
-        if test.skip:
-            hilight_start = ANSI.COLOR_SKIPPED if want_color_output() else ""
-            hilight_end = ANSI.COLOR_DEFAULT if want_color_output() else ""
-            # Want this to stand out a little bit
-            self.log.info(
-                "{start}skipping{end} {name} ({i}/{total})".format(
-                    start=hilight_start,
-                    i=self.count,
-                    total=self.ntests,
-                    end=hilight_end,
-                    name=test.__qualname__,
+        elif test.expect_error:
+            if isinstance(exc, test.expect_error):
+                self._record_test_passed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=None,
+                    msg="errored as expected",
                 )
-            )
-            self._record_result(test, None, 0, 0)
-            return None
 
-        test_init_outcome = cocotb.outcomes.capture(test, self._dut)
+            else:
+                self._record_test_failed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=exc,
+                    msg="errored with unexpected type",
+                )
+                passed = False
 
-        if isinstance(test_init_outcome, cocotb.outcomes.Error):
-            self.log.error(
-                "Failed to initialize test %s" % test.__qualname__,
-                exc_info=test_init_outcome.error,
+        else:
+            self._record_test_failed(
+                wall_time_s=wall_time_s,
+                sim_time_ns=sim_time_ns,
+                result=exc,
+                msg=msg,
             )
-            self._record_result(test, test_init_outcome, 0, 0)
-            return None
 
-        running_test = test_init_outcome.get()
+    def _get_lineno(self, test: Test) -> int:
+        try:
+            return inspect.getsourcelines(test.func)[1]
+        except OSError:
+            return 1
 
-        # seed random number generator based on test module, name, and RANDOM_SEED
-        hasher = hashlib.sha1()
-        hasher.update(test.__qualname__.encode())
-        hasher.update(test.__module__.encode())
-        seed = cocotb.RANDOM_SEED + int(hasher.hexdigest(), 16)
-        random.seed(seed)
+    def _log_test_start(self) -> None:
+        """Called by :meth:`_execute` to log that a test is starting."""
+        hilight_start = "" if cocotb_logging.strip_ansi else self.COLOR_TEST
+        hilight_end = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
+        self.log.info(
+            "%srunning%s %s (%d/%d)%s",
+            hilight_start,
+            hilight_end,
+            self._test.fullname,
+            self.count,
+            self.total_tests,
+            _format_doc(self._test.doc),
+        )
 
-        return running_test
+    def _record_test_excluded(self) -> None:
+        """Called by :meth:`_execute` when a test is excluded by filters."""
 
-    def _score_test(self, test: Test, outcome: Outcome) -> Tuple[bool, bool]:
-        """
-        Given a test and the test's outcome, determine if the test met expectations and log pertinent information
-        """
+        # write out xunit results
+        lineno = self._get_lineno(self._test)
+        self.xunit.add_testcase(
+            name=self._test.name,
+            classname=self._test.module,
+            file=inspect.getfile(self._test.func),
+            lineno=repr(lineno),
+            time=repr(0),
+            sim_time_ns=repr(0),
+            ratio_time=repr(0),
+        )
+        self.xunit.add_skipped()
 
-        # scoring outcomes
-        result_pass = True
-        sim_failed = False
+        # do not log anything, nor save details for the summary
 
-        try:
-            outcome.get()
-        except (KeyboardInterrupt, SystemExit):
-            raise
-        except BaseException as e:
-            result = remove_traceback_frames(e, ["_score_test", "get"])
-        else:
-            result = TestSuccess()
-
-        if (
-            isinstance(result, TestSuccess)
-            and not test.expect_fail
-            and not test.expect_error
-        ):
-            self._log_test_passed(test, None, None)
-
-        elif isinstance(result, TestSuccess) and test.expect_error:
-            self._log_test_failed(test, None, "passed but we expected an error")
-            result_pass = False
-
-        elif isinstance(result, TestSuccess):
-            self._log_test_failed(test, None, "passed but we expected a failure")
-            result_pass = False
-
-        elif isinstance(result, SimFailure):
-            if isinstance(result, test.expect_error):
-                self._log_test_passed(test, result, "errored as expected")
-            else:
-                self.log.error("Test error has lead to simulator shutting us down")
-                result_pass = False
-            # whether we expected it or not, the simulation has failed unrecoverably
-            sim_failed = True
+    def _record_test_skipped(self) -> None:
+        """Called by :meth:`_execute` when a test is skipped."""
 
-        elif isinstance(result, (AssertionError, _Failed)) and test.expect_fail:
-            self._log_test_passed(test, result, "failed as expected")
+        # log test results
+        hilight_start = "" if cocotb_logging.strip_ansi else self.COLOR_SKIPPED
+        hilight_end = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
+        self.log.info(
+            "%sskipping%s %s (%d/%d)%s",
+            hilight_start,
+            hilight_end,
+            self._test.fullname,
+            self.count,
+            self.total_tests,
+            _format_doc(self._test.doc),
+        )
 
-        elif test.expect_error:
-            if isinstance(result, test.expect_error):
-                self._log_test_passed(test, result, "errored as expected")
-            else:
-                self._log_test_failed(test, result, "errored with unexpected type ")
-                result_pass = False
+        # write out xunit results
+        lineno = self._get_lineno(self._test)
+        self.xunit.add_testcase(
+            name=self._test.name,
+            classname=self._test.module,
+            file=inspect.getfile(self._test.func),
+            lineno=repr(lineno),
+            time=repr(0),
+            sim_time_ns=repr(0),
+            ratio_time=repr(0),
+        )
+        self.xunit.add_skipped()
+
+        # save details for summary
+        self._test_results.append(
+            _TestResults(
+                test_fullname=self._test.fullname,
+                passed=None,
+                sim_time_ns=0,
+                wall_time_s=0,
+            )
+        )
 
-        else:
-            self._log_test_failed(test, result, None)
-            result_pass = False
+        # update running passed/failed/skipped counts
+        self.skipped += 1
+        self.count += 1
+
+    def _record_test_init_failed(self) -> None:
+        """Called by :meth:`_execute` when a test initialization fails."""
+
+        # log test results
+        hilight_start = "" if cocotb_logging.strip_ansi else self.COLOR_FAILED
+        hilight_end = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
+        self.log.exception(
+            "%sFailed to initialize%s %s! (%d/%d)%s",
+            hilight_start,
+            hilight_end,
+            self._test.fullname,
+            self.count,
+            self.total_tests,
+            _format_doc(self._test.doc),
+        )
 
-        if _pdb_on_exception:
-            pdb.post_mortem(result.__traceback__)
+        # write out xunit results
+        lineno = self._get_lineno(self._test)
+        self.xunit.add_testcase(
+            name=self._test.name,
+            classname=self._test.module,
+            file=inspect.getfile(self._test.func),
+            lineno=repr(lineno),
+            time=repr(0),
+            sim_time_ns=repr(0),
+            ratio_time=repr(0),
+        )
+        self.xunit.add_failure(msg="Test initialization failed")
+
+        # save details for summary
+        self._test_results.append(
+            _TestResults(
+                test_fullname=self._test.fullname,
+                passed=False,
+                sim_time_ns=0,
+                wall_time_s=0,
+            )
+        )
 
-        return result_pass, sim_failed
+        # update running passed/failed/skipped counts
+        self.failures += 1
+        self.count += 1
 
-    def _log_test_passed(
-        self, test: Test, result: Optional[Exception] = None, msg: Optional[str] = None
+    def _record_test_passed(
+        self,
+        wall_time_s: float,
+        sim_time_ns: float,
+        result: Union[Exception, None],
+        msg: Union[str, None],
     ) -> None:
-        start_hilight = ANSI.COLOR_PASSED if want_color_output() else ""
-        stop_hilight = ANSI.COLOR_DEFAULT if want_color_output() else ""
+        start_hilight = "" if cocotb_logging.strip_ansi else self.COLOR_PASSED
+        stop_hilight = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
         if msg is None:
             rest = ""
         else:
@@ -469,119 +681,98 @@ class RegressionManager:
         else:
             result_was = f" (result was {type(result).__qualname__})"
         self.log.info(
-            f"{test.__qualname__} {start_hilight}passed{stop_hilight}{rest}{result_was}"
+            "%s %spassed%s%s%s",
+            self._test.fullname,
+            start_hilight,
+            stop_hilight,
+            rest,
+            result_was,
         )
 
-    def _log_test_failed(
-        self, test: Test, result: Optional[Exception] = None, msg: Optional[str] = None
-    ) -> None:
-        start_hilight = ANSI.COLOR_FAILED if want_color_output() else ""
-        stop_hilight = ANSI.COLOR_DEFAULT if want_color_output() else ""
-        if msg is None:
-            rest = ""
-        else:
-            rest = f": {msg}"
-        self.log.info(
-            f"{test.__qualname__} {start_hilight}failed{stop_hilight}{rest}",
-            exc_info=result,
+        # write out xunit results
+        ratio_time = safe_divide(sim_time_ns, wall_time_s)
+        lineno = self._get_lineno(self._test)
+        self.xunit.add_testcase(
+            name=self._test.name,
+            classname=self._test.module,
+            file=inspect.getfile(self._test.func),
+            lineno=repr(lineno),
+            time=repr(wall_time_s),
+            sim_time_ns=repr(sim_time_ns),
+            ratio_time=repr(ratio_time),
         )
 
-    def _record_result(
+        # update running passed/failed/skipped counts
+        self.passed += 1
+        self.count += 1
+
+        # save details for summary
+        self._test_results.append(
+            _TestResults(
+                test_fullname=self._test.fullname,
+                passed=True,
+                sim_time_ns=sim_time_ns,
+                wall_time_s=wall_time_s,
+            )
+        )
+
+    def _record_test_failed(
         self,
-        test: Test,
-        outcome: Optional[Outcome],
         wall_time_s: float,
         sim_time_ns: float,
+        result: Union[BaseException, None],
+        msg: Union[str, None],
     ) -> None:
+        start_hilight = "" if cocotb_logging.strip_ansi else self.COLOR_FAILED
+        stop_hilight = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
+        if msg is None:
+            rest = ""
+        else:
+            rest = f": {msg}"
+        self.log.warning(
+            "%s%s %sfailed%s%s",
+            stop_hilight,
+            self._test.fullname,
+            start_hilight,
+            stop_hilight,
+            rest,
+        )
 
-        ratio_time = self._safe_divide(sim_time_ns, wall_time_s)
-        try:
-            lineno = inspect.getsourcelines(test._func)[1]
-        except OSError:
-            lineno = 1
-
+        # write out xunit results
+        ratio_time = safe_divide(sim_time_ns, wall_time_s)
+        lineno = self._get_lineno(self._test)
         self.xunit.add_testcase(
-            name=test.__qualname__,
-            classname=test.__module__,
-            file=inspect.getfile(test._func),
+            name=self._test.name,
+            classname=self._test.module,
+            file=inspect.getfile(self._test.func),
             lineno=repr(lineno),
             time=repr(wall_time_s),
             sim_time_ns=repr(sim_time_ns),
             ratio_time=repr(ratio_time),
         )
+        self.xunit.add_failure(error_type=type(result).__name__, error_msg=str(result))
 
-        if outcome is None:  # skipped
-            test_pass, sim_failed = None, False
-            self.xunit.add_skipped()
-            self.skipped += 1
-
-        else:
-            test_pass, sim_failed = self._score_test(test, outcome)
-            if not test_pass:
-                self.xunit.add_failure(
-                    message=f"Test failed with RANDOM_SEED={cocotb.RANDOM_SEED}"
-                )
-                self.failures += 1
-            else:
-                self.passed += 1
-
-        self.test_results.append(
-            {
-                "test": ".".join([test.__module__, test.__qualname__]),
-                "pass": test_pass,
-                "sim": sim_time_ns,
-                "real": wall_time_s,
-                "ratio": ratio_time,
-            }
-        )
-
-        if sim_failed:
-            self._tear_down()
-            return
-
-    @deprecated("This method is now private.")
-    def execute(self) -> None:
-        self._execute()
+        # update running passed/failed/skipped counts
+        self.failures += 1
+        self.count += 1
 
-    def _execute(self) -> None:
-        while True:
-            self._test = self._next_test()
-            if self._test is None:
-                return self._tear_down()
-
-            self._test_task = self._init_test(self._test)
-            if self._test_task is not None:
-                return self._start_test()
-
-    def _start_test(self) -> None:
-        # Want this to stand out a little bit
-        start = ""
-        end = ""
-        if want_color_output():
-            start = ANSI.COLOR_TEST
-            end = ANSI.COLOR_DEFAULT
-        self.log.info(
-            "{start}running{end} {name} ({i}/{total}){description}".format(
-                start=start,
-                i=self.count,
-                total=self.ntests,
-                end=end,
-                name=self._test.__qualname__,
-                description=_trim(self._test.__doc__),
+        # save details for summary
+        self._test_results.append(
+            _TestResults(
+                test_fullname=self._test.fullname,
+                passed=False,
+                sim_time_ns=sim_time_ns,
+                wall_time_s=wall_time_s,
             )
         )
 
-        self._test_start_time = time.time()
-        self._test_start_sim_time = get_sim_time("ns")
-        cocotb.scheduler._add_test(self._test_task)
-
     def _log_test_summary(self) -> None:
-
-        real_time = time.time() - self.start_time
+        """Called by :meth:`_tear_down` to log the test summary."""
+        real_time = time.time() - self._regression_start_time
         sim_time_ns = get_sim_time("ns")
-        ratio_time = self._safe_divide(sim_time_ns, real_time)
+        ratio_time = safe_divide(sim_time_ns, real_time)
 
-        if len(self.test_results) == 0:
+        if len(self._test_results) == 0:
             return
 
         TEST_FIELD = "TEST"
@@ -589,30 +780,30 @@ class RegressionManager:
         SIM_FIELD = "SIM TIME (ns)"
         REAL_FIELD = "REAL TIME (s)"
         RATIO_FIELD = "RATIO (ns/s)"
-        TOTAL_NAME = f"TESTS={self.ntests} PASS={self.passed} FAIL={self.failures} SKIP={self.skipped}"
+        TOTAL_NAME = f"TESTS={self.total_tests} PASS={self.passed} FAIL={self.failures} SKIP={self.skipped}"
 
         TEST_FIELD_LEN = max(
             len(TEST_FIELD),
             len(TOTAL_NAME),
-            len(max([x["test"] for x in self.test_results], key=len)),
+            len(max([x.test_fullname for x in self._test_results], key=len)),
         )
         RESULT_FIELD_LEN = len(RESULT_FIELD)
         SIM_FIELD_LEN = len(SIM_FIELD)
         REAL_FIELD_LEN = len(REAL_FIELD)
         RATIO_FIELD_LEN = len(RATIO_FIELD)
 
-        header_dict = dict(
-            a=TEST_FIELD,
-            b=RESULT_FIELD,
-            c=SIM_FIELD,
-            d=REAL_FIELD,
-            e=RATIO_FIELD,
-            a_len=TEST_FIELD_LEN,
-            b_len=RESULT_FIELD_LEN,
-            c_len=SIM_FIELD_LEN,
-            d_len=REAL_FIELD_LEN,
-            e_len=RATIO_FIELD_LEN,
-        )
+        header_dict = {
+            "a": TEST_FIELD,
+            "b": RESULT_FIELD,
+            "c": SIM_FIELD,
+            "d": REAL_FIELD,
+            "e": RATIO_FIELD,
+            "a_len": TEST_FIELD_LEN,
+            "b_len": RESULT_FIELD_LEN,
+            "c_len": SIM_FIELD_LEN,
+            "d_len": REAL_FIELD_LEN,
+            "e_len": RATIO_FIELD_LEN,
+        }
 
         LINE_LEN = (
             3
@@ -638,43 +829,43 @@ class RegressionManager:
         summary += LINE_SEP
 
         test_line = "** {a:<{a_len}} {start}{b:^{b_len}}{end} {c:>{c_len}.2f} {d:>{d_len}.2f} {e:>{e_len}} **\n"
-        for result in self.test_results:
-            hilite = ""
-            lolite = ""
-
-            if result["pass"] is None:
+        hilite: str
+        lolite: str
+        for result in self._test_results:
+            if result.passed is None:
                 ratio = "-.--"
                 pass_fail_str = "SKIP"
-                if want_color_output():
-                    hilite = ANSI.COLOR_SKIPPED
-                    lolite = ANSI.COLOR_DEFAULT
-            elif result["pass"]:
-                ratio = format(result["ratio"], "0.2f")
+                hilite = self.COLOR_SKIPPED
+                lolite = ANSI.DEFAULT
+            elif result.passed:
+                ratio = format(result.ratio, "0.2f")
                 pass_fail_str = "PASS"
-                if want_color_output():
-                    hilite = ANSI.COLOR_PASSED
-                    lolite = ANSI.COLOR_DEFAULT
+                hilite = self.COLOR_PASSED
+                lolite = ANSI.DEFAULT
             else:
-                ratio = format(result["ratio"], "0.2f")
+                ratio = format(result.ratio, "0.2f")
                 pass_fail_str = "FAIL"
-                if want_color_output():
-                    hilite = ANSI.COLOR_FAILED
-                    lolite = ANSI.COLOR_DEFAULT
-
-            test_dict = dict(
-                a=result["test"],
-                b=pass_fail_str,
-                c=result["sim"],
-                d=result["real"],
-                e=ratio,
-                a_len=TEST_FIELD_LEN,
-                b_len=RESULT_FIELD_LEN,
-                c_len=SIM_FIELD_LEN - 1,
-                d_len=REAL_FIELD_LEN - 1,
-                e_len=RATIO_FIELD_LEN - 1,
-                start=hilite,
-                end=lolite,
-            )
+                hilite = self.COLOR_FAILED
+                lolite = ANSI.DEFAULT
+
+            if cocotb_logging.strip_ansi:
+                hilite = ""
+                lolite = ""
+
+            test_dict = {
+                "a": result.test_fullname,
+                "b": pass_fail_str,
+                "c": result.sim_time_ns,
+                "d": result.wall_time_s,
+                "e": ratio,
+                "a_len": TEST_FIELD_LEN,
+                "b_len": RESULT_FIELD_LEN,
+                "c_len": SIM_FIELD_LEN - 1,
+                "d_len": REAL_FIELD_LEN - 1,
+                "e_len": RATIO_FIELD_LEN - 1,
+                "start": hilite,
+                "end": lolite,
+            }
 
             summary += test_line.format(**test_dict)
 
@@ -699,239 +890,7 @@ class RegressionManager:
 
         self.log.info(summary)
 
-    @staticmethod
-    def _safe_divide(a: float, b: float) -> float:
-        try:
-            return a / b
-        except ZeroDivisionError:
-            if a == 0:
-                return float("nan")
-            else:
-                return float("inf")
-
-
-def _create_test(function, name, documentation, mod, *args, **kwargs):
-    """Factory function to create tests, avoids late binding.
-
-    Creates a test dynamically. The test will call the supplied
-    function with the supplied arguments.
-
-    Args:
-        function (function): The test function to run.
-        name (str): The name of the test.
-        documentation (str): The docstring for the test.
-        mod (module): The module this function belongs to.
-        *args: Remaining args to pass to test function.
-        **kwargs: Passed to the test function.
-
-    Returns:
-        Decorated test function
-    """
-
-    async def _my_test(dut):
-        await function(dut, *args, **kwargs)
-
-    _my_test.__name__ = name
-    _my_test.__qualname__ = name
-    _my_test.__doc__ = documentation
-    _my_test.__module__ = mod.__name__
-
-    return cocotb.test()(_my_test)
-
-
-class TestFactory:
-    """Factory to automatically generate tests.
-
-    Args:
-        test_function: A Callable that returns the test Coroutine.
-            Must take *dut* as the first argument.
-        *args: Remaining arguments are passed directly to the test function.
-            Note that these arguments are not varied. An argument that
-            varies with each test must be a keyword argument to the
-            test function.
-        **kwargs: Remaining keyword arguments are passed directly to the test function.
-            Note that these arguments are not varied. An argument that
-            varies with each test must be a keyword argument to the
-            test function.
-
-    Assuming we have a common test function that will run a test. This test
-    function will take keyword arguments (for example generators for each of
-    the input interfaces) and generate tests that call the supplied function.
-
-    This Factory allows us to generate sets of tests based on the different
-    permutations of the possible arguments to the test function.
-
-    For example, if we have a module that takes backpressure, has two configurable
-    features where enabling ``feature_b`` requires ``feature_a`` to be active, and
-    need to test against data generation routines ``gen_a`` and ``gen_b``:
-
-    >>> tf = TestFactory(test_function=run_test)
-    >>> tf.add_option(name='data_in', optionlist=[gen_a, gen_b])
-    >>> tf.add_option('backpressure', [None, random_backpressure])
-    >>> tf.add_option(('feature_a', 'feature_b'), [(False, False), (True, False), (True, True)])
-    >>> tf.generate_tests()
-
-    We would get the following tests:
-
-    * ``gen_a`` with no backpressure and both features disabled
-    * ``gen_a`` with no backpressure and only ``feature_a`` enabled
-    * ``gen_a`` with no backpressure and both features enabled
-    * ``gen_a`` with ``random_backpressure`` and both features disabled
-    * ``gen_a`` with ``random_backpressure`` and only ``feature_a`` enabled
-    * ``gen_a`` with ``random_backpressure`` and both features enabled
-    * ``gen_b`` with no backpressure and both features disabled
-    * ``gen_b`` with no backpressure and only ``feature_a`` enabled
-    * ``gen_b`` with no backpressure and both features enabled
-    * ``gen_b`` with ``random_backpressure`` and both features disabled
-    * ``gen_b`` with ``random_backpressure`` and only ``feature_a`` enabled
-    * ``gen_b`` with ``random_backpressure`` and both features enabled
-
-    The tests are appended to the calling module for auto-discovery.
-
-    Tests are simply named ``test_function_N``. The docstring for the test (hence
-    the test description) includes the name and description of each generator.
-
-    .. versionchanged:: 1.5
-        Groups of options are now supported
-    """
-
-    # Prevent warnings from collection of TestFactories by unit testing frameworks.
-    __test__ = False
-
-    def __init__(self, test_function, *args, **kwargs):
-        self.test_function = test_function
-        self.name = self.test_function.__qualname__
-
-        self.args = args
-        self.kwargs_constant = kwargs
-        self.kwargs = {}
-        self.log = _logger
-
-    def add_option(self, name, optionlist):
-        """Add a named option to the test.
-
-        Args:
-            name (str or iterable of str): An option name, or an iterable of
-                several option names. Passed to test as keyword arguments.
-
-            optionlist (list): A list of possible options for this test knob.
-                If N names were specified, this must be a list of N-tuples or
-                lists, where each element specifies a value for its respective
-                option.
-
-        .. versionchanged:: 1.5
-            Groups of options are now supported
-        """
-        if not isinstance(name, str):
-            name = tuple(name)
-            for opt in optionlist:
-                if len(name) != len(opt):
-                    raise ValueError(
-                        "Mismatch between number of options and number of option values in group"
-                    )
-        self.kwargs[name] = optionlist
-
-    def generate_tests(self, prefix="", postfix=""):
-        """
-        Generate an exhaustive set of tests using the cartesian product of the
-        possible keyword arguments.
-
-        The generated tests are appended to the namespace of the calling
-        module.
-
-        Args:
-            prefix (str): Text string to append to start of ``test_function`` name
-                when naming generated test cases. This allows reuse of
-                a single ``test_function`` with multiple
-                :class:`TestFactories <.TestFactory>` without name clashes.
-            postfix (str): Text string to append to end of ``test_function`` name
-                when naming generated test cases. This allows reuse of
-                a single ``test_function`` with multiple
-                :class:`TestFactories <.TestFactory>` without name clashes.
-        """
-
-        frm = inspect.stack()[1]
-        mod = inspect.getmodule(frm[0])
-
-        d = self.kwargs
-
-        for index, testoptions in enumerate(
-            dict(zip(d, v)) for v in product(*d.values())
-        ):
-
-            name = "%s%s%s_%03d" % (prefix, self.name, postfix, index + 1)
-            doc = "Automatically generated test\n\n"
-
-            # preprocess testoptions to split tuples
-            testoptions_split = {}
-            for optname, optvalue in testoptions.items():
-                if isinstance(optname, str):
-                    testoptions_split[optname] = optvalue
-                else:
-                    # previously checked in add_option; ensure nothing has changed
-                    assert len(optname) == len(optvalue)
-                    for n, v in zip(optname, optvalue):
-                        testoptions_split[n] = v
-
-            for optname, optvalue in testoptions_split.items():
-                if callable(optvalue):
-                    if not optvalue.__doc__:
-                        desc = "No docstring supplied"
-                    else:
-                        desc = optvalue.__doc__.split("\n")[0]
-                    doc += "\t{}: {} ({})\n".format(
-                        optname, optvalue.__qualname__, desc
-                    )
-                else:
-                    doc += "\t{}: {}\n".format(optname, repr(optvalue))
-
-            self.log.debug(
-                'Adding generated test "%s" to module "%s"' % (name, mod.__name__)
-            )
-            kwargs = {}
-            kwargs.update(self.kwargs_constant)
-            kwargs.update(testoptions_split)
-            if hasattr(mod, name):
-                self.log.error(
-                    "Overwriting %s in module %s. "
-                    "This causes a previously defined testcase "
-                    "not to be run. Consider setting/changing "
-                    "name_postfix" % (name, mod)
-                )
-            setattr(
-                mod,
-                name,
-                _create_test(self.test_function, name, doc, mod, *self.args, **kwargs),
-            )
-
-
-def _trim(docstring: Optional[str]) -> str:
-    """Normalizes test docstrings
-
-    Based on https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation.
-    """
-    if docstring is None or docstring == "":
-        return ""
-    # Convert tabs to spaces (following the normal Python rules)
-    # and split into a list of lines:
-    lines = docstring.expandtabs().splitlines()
-    # Determine minimum indentation (first line doesn't count):
-    indent = math.inf
-    for line in lines[1:]:
-        stripped = line.lstrip()
-        if stripped:
-            indent = min(indent, len(line) - len(stripped))
-    # Remove indentation (first line is special):
-    trimmed = [lines[0].strip()]
-    if indent < math.inf:
-        for line in lines[1:]:
-            trimmed.append(line[indent:].rstrip())
-    # Strip off trailing and leading blank lines:
-    while trimmed and not trimmed[-1]:
-        trimmed.pop()
-    while trimmed and not trimmed[0]:
-        trimmed.pop(0)
-    # Add one newline back
-    trimmed.insert(0, "")
-    # Return a single string:
-    return "\n ".join(trimmed)
+    def _fail_simulation(self, msg: str) -> None:
+        self._sim_failure = Error(SimFailure(msg))
+        self._running_test.abort(self._sim_failure)
+        cocotb._scheduler_inst._event_loop()
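
The rewritten regression.py above replaces the 1.x environment-variable-driven discovery (MODULE/TESTCASE, handled inside RegressionManager itself) with a builder-style API: tests are registered or discovered, inclusion filters and a mode are applied, and the regression is started exactly once. A minimal sketch of that flow, using only names visible in the added lines above (RegressionManager is normally driven by cocotb's own startup code in cocotb._init rather than by user testbenches, and start_regression assumes a running simulator; the module and pattern names below are illustrative):

    from cocotb.regression import RegressionManager, RegressionMode

    manager = RegressionManager()
    manager.discover_tests("test_dut")          # imports the module and registers its Test objects
    manager.add_filters(r"test_fifo_.*")        # regexes over test full names; a match includes the test
    manager.set_mode(RegressionMode.TESTCASE)   # also run matching tests marked skip
    manager.start_regression()                  # must be called exactly once

Note also the environment-variable renames visible in the diff: RESULT_TESTSUITE and RESULT_TESTPACKAGE gain a COCOTB_ prefix, and pytest assertion rewriting is now scoped by COCOTB_REWRITE_ASSERTION_FILES instead of being hardcoded to python_files=*.py.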