cocotb-1.9.1-cp39-cp39-win32.whl → cocotb-2.0.0b1-cp39-cp39-win32.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of cocotb might be problematic.

Files changed (157)
  1. cocotb/{ANSI.py → _ANSI.py} +5 -25
  2. cocotb/__init__.py +76 -315
  3. cocotb/_base_triggers.py +513 -0
  4. cocotb/_bridge.py +187 -0
  5. cocotb/_decorators.py +515 -0
  6. cocotb/_deprecation.py +3 -3
  7. cocotb/_exceptions.py +7 -0
  8. cocotb/_extended_awaitables.py +419 -0
  9. cocotb/_gpi_triggers.py +382 -0
  10. cocotb/_init.py +295 -0
  11. cocotb/_outcomes.py +54 -0
  12. cocotb/_profiling.py +46 -0
  13. cocotb/_py_compat.py +100 -29
  14. cocotb/_scheduler.py +454 -0
  15. cocotb/_test.py +245 -0
  16. cocotb/_test_factory.py +309 -0
  17. cocotb/_test_functions.py +42 -0
  18. cocotb/_typing.py +7 -0
  19. cocotb/_utils.py +296 -0
  20. cocotb/_version.py +3 -7
  21. cocotb/_xunit_reporter.py +66 -0
  22. cocotb/clock.py +271 -108
  23. cocotb/handle.py +1342 -795
  24. cocotb/libs/cocotb.dll +0 -0
  25. cocotb/libs/cocotb.exp +0 -0
  26. cocotb/libs/cocotb.lib +0 -0
  27. cocotb/libs/cocotbfli_modelsim.dll +0 -0
  28. cocotb/libs/cocotbfli_modelsim.exp +0 -0
  29. cocotb/libs/cocotbfli_modelsim.lib +0 -0
  30. cocotb/libs/cocotbutils.dll +0 -0
  31. cocotb/libs/cocotbutils.exp +0 -0
  32. cocotb/libs/cocotbutils.lib +0 -0
  33. cocotb/libs/cocotbvhpi_aldec.dll +0 -0
  34. cocotb/libs/cocotbvhpi_aldec.exp +0 -0
  35. cocotb/libs/cocotbvhpi_aldec.lib +0 -0
  36. cocotb/libs/cocotbvhpi_modelsim.dll +0 -0
  37. cocotb/libs/cocotbvhpi_modelsim.exp +0 -0
  38. cocotb/libs/cocotbvhpi_modelsim.lib +0 -0
  39. cocotb/libs/cocotbvpi_aldec.dll +0 -0
  40. cocotb/libs/cocotbvpi_aldec.exp +0 -0
  41. cocotb/libs/cocotbvpi_aldec.lib +0 -0
  42. cocotb/libs/cocotbvpi_ghdl.dll +0 -0
  43. cocotb/libs/cocotbvpi_ghdl.exp +0 -0
  44. cocotb/libs/cocotbvpi_ghdl.lib +0 -0
  45. cocotb/libs/cocotbvpi_icarus.exp +0 -0
  46. cocotb/libs/cocotbvpi_icarus.lib +0 -0
  47. cocotb/libs/cocotbvpi_icarus.vpl +0 -0
  48. cocotb/libs/cocotbvpi_modelsim.dll +0 -0
  49. cocotb/libs/cocotbvpi_modelsim.exp +0 -0
  50. cocotb/libs/cocotbvpi_modelsim.lib +0 -0
  51. cocotb/libs/embed.dll +0 -0
  52. cocotb/libs/embed.exp +0 -0
  53. cocotb/libs/embed.lib +0 -0
  54. cocotb/libs/gpi.dll +0 -0
  55. cocotb/libs/gpi.exp +0 -0
  56. cocotb/libs/gpi.lib +0 -0
  57. cocotb/libs/gpilog.dll +0 -0
  58. cocotb/libs/gpilog.exp +0 -0
  59. cocotb/libs/gpilog.lib +0 -0
  60. cocotb/libs/pygpilog.dll +0 -0
  61. cocotb/libs/pygpilog.exp +0 -0
  62. cocotb/libs/pygpilog.lib +0 -0
  63. cocotb/{log.py → logging.py} +105 -110
  64. cocotb/queue.py +103 -57
  65. cocotb/regression.py +667 -716
  66. cocotb/result.py +17 -188
  67. cocotb/share/def/aldec.exp +0 -0
  68. cocotb/share/def/aldec.lib +0 -0
  69. cocotb/share/def/ghdl.exp +0 -0
  70. cocotb/share/def/ghdl.lib +0 -0
  71. cocotb/share/def/icarus.exp +0 -0
  72. cocotb/share/def/icarus.lib +0 -0
  73. cocotb/share/def/modelsim.def +1 -0
  74. cocotb/share/def/modelsim.exp +0 -0
  75. cocotb/share/def/modelsim.lib +0 -0
  76. cocotb/share/include/cocotb_utils.h +6 -29
  77. cocotb/share/include/embed.h +5 -28
  78. cocotb/share/include/gpi.h +137 -92
  79. cocotb/share/include/gpi_logging.h +221 -142
  80. cocotb/share/include/py_gpi_logging.h +7 -4
  81. cocotb/share/include/vpi_user_ext.h +4 -26
  82. cocotb/share/lib/verilator/verilator.cpp +59 -54
  83. cocotb/simulator.cp39-win32.exp +0 -0
  84. cocotb/simulator.cp39-win32.lib +0 -0
  85. cocotb/simulator.cp39-win32.pyd +0 -0
  86. cocotb/simulator.pyi +107 -0
  87. cocotb/task.py +434 -212
  88. cocotb/triggers.py +55 -1092
  89. cocotb/types/__init__.py +25 -47
  90. cocotb/types/_abstract_array.py +151 -0
  91. cocotb/types/_array.py +264 -0
  92. cocotb/types/_logic.py +296 -0
  93. cocotb/types/_logic_array.py +834 -0
  94. cocotb/types/{range.py → _range.py} +36 -44
  95. cocotb/types/_resolve.py +76 -0
  96. cocotb/utils.py +119 -587
  97. cocotb-2.0.0b1.dist-info/METADATA +60 -0
  98. cocotb-2.0.0b1.dist-info/RECORD +143 -0
  99. {cocotb-1.9.1.dist-info → cocotb-2.0.0b1.dist-info}/WHEEL +1 -1
  100. cocotb-2.0.0b1.dist-info/entry_points.txt +2 -0
  101. {cocotb-1.9.1.dist-info → cocotb-2.0.0b1.dist-info}/top_level.txt +1 -0
  102. cocotb_tools/__init__.py +0 -0
  103. cocotb_tools/_coverage.py +33 -0
  104. cocotb_tools/_vendor/__init__.py +3 -0
  105. cocotb_tools/check_results.py +65 -0
  106. cocotb_tools/combine_results.py +152 -0
  107. cocotb_tools/config.py +241 -0
  108. {cocotb → cocotb_tools}/ipython_support.py +29 -22
  109. cocotb_tools/makefiles/Makefile.deprecations +27 -0
  110. {cocotb/share → cocotb_tools}/makefiles/Makefile.inc +82 -54
  111. {cocotb/share → cocotb_tools}/makefiles/Makefile.sim +8 -33
  112. {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.activehdl +18 -18
  113. cocotb_tools/makefiles/simulators/Makefile.cvc +61 -0
  114. cocotb_tools/makefiles/simulators/Makefile.dsim +39 -0
  115. {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.ghdl +13 -42
  116. cocotb_tools/makefiles/simulators/Makefile.icarus +80 -0
  117. cocotb_tools/makefiles/simulators/Makefile.ius +93 -0
  118. cocotb_tools/makefiles/simulators/Makefile.modelsim +9 -0
  119. cocotb_tools/makefiles/simulators/Makefile.nvc +60 -0
  120. cocotb_tools/makefiles/simulators/Makefile.questa +29 -0
  121. cocotb_tools/makefiles/simulators/Makefile.questa-compat +143 -0
  122. cocotb_tools/makefiles/simulators/Makefile.questa-qisqrun +149 -0
  123. cocotb_tools/makefiles/simulators/Makefile.riviera +144 -0
  124. cocotb_tools/makefiles/simulators/Makefile.vcs +65 -0
  125. {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.verilator +15 -22
  126. {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.xcelium +20 -52
  127. cocotb_tools/py.typed +0 -0
  128. {cocotb → cocotb_tools}/runner.py +794 -361
  129. cocotb/_sim_versions.py → cocotb_tools/sim_versions.py +16 -21
  130. pygpi/entry.py +34 -17
  131. pygpi/py.typed +0 -0
  132. cocotb/binary.py +0 -858
  133. cocotb/config.py +0 -289
  134. cocotb/decorators.py +0 -332
  135. cocotb/memdebug.py +0 -35
  136. cocotb/outcomes.py +0 -56
  137. cocotb/scheduler.py +0 -1099
  138. cocotb/share/makefiles/Makefile.deprecations +0 -12
  139. cocotb/share/makefiles/simulators/Makefile.cvc +0 -94
  140. cocotb/share/makefiles/simulators/Makefile.icarus +0 -111
  141. cocotb/share/makefiles/simulators/Makefile.ius +0 -125
  142. cocotb/share/makefiles/simulators/Makefile.modelsim +0 -32
  143. cocotb/share/makefiles/simulators/Makefile.nvc +0 -64
  144. cocotb/share/makefiles/simulators/Makefile.questa +0 -168
  145. cocotb/share/makefiles/simulators/Makefile.riviera +0 -177
  146. cocotb/share/makefiles/simulators/Makefile.vcs +0 -98
  147. cocotb/types/array.py +0 -309
  148. cocotb/types/logic.py +0 -292
  149. cocotb/types/logic_array.py +0 -298
  150. cocotb/wavedrom.py +0 -199
  151. cocotb/xunit_reporter.py +0 -80
  152. cocotb-1.9.1.dist-info/METADATA +0 -166
  153. cocotb-1.9.1.dist-info/RECORD +0 -121
  154. cocotb-1.9.1.dist-info/entry_points.txt +0 -2
  155. cocotb/{_vendor/__init__.py → py.typed} +0 -0
  156. {cocotb-1.9.1.dist-info → cocotb-2.0.0b1.dist-info/licenses}/LICENSE +0 -0
  157. {cocotb → cocotb_tools}/_vendor/distutils_version.py +0 -0
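
The file moves above translate directly into import-path changes for downstream testbenches. A minimal before/after sketch, assuming the renames map one-to-one onto import paths (runner.py is entry 128 and _test_factory.py entry 16 above; the surrounding project is hypothetical):

    # cocotb 1.9.1
    from cocotb.runner import get_runner        # runner.py lived inside the cocotb package
    from cocotb.regression import TestFactory   # defined in cocotb/regression.py

    # cocotb 2.0.0b1
    from cocotb_tools.runner import get_runner  # runner.py moved to the new cocotb_tools package
    from cocotb.regression import TestFactory   # now re-exported from cocotb/_test_factory.py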
cocotb/regression.py CHANGED
@@ -1,258 +1,253 @@
+ # Copyright cocotb contributors
  # Copyright (c) 2013, 2018 Potential Ventures Ltd
  # Copyright (c) 2013 SolarFlare Communications Inc
- # All rights reserved.
- #
- # Redistribution and use in source and binary forms, with or without
- # modification, are permitted provided that the following conditions are met:
- #     * Redistributions of source code must retain the above copyright
- #       notice, this list of conditions and the following disclaimer.
- #     * Redistributions in binary form must reproduce the above copyright
- #       notice, this list of conditions and the following disclaimer in the
- #       documentation and/or other materials provided with the distribution.
- #     * Neither the name of Potential Ventures Ltd,
- #       SolarFlare Communications Inc nor the
- #       names of its contributors may be used to endorse or promote products
- #       derived from this software without specific prior written permission.
- #
- # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
- # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
- # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ # Licensed under the Revised BSD License, see LICENSE for details.
+ # SPDX-License-Identifier: BSD-3-Clause

  """All things relating to regression capabilities."""

+ import functools
  import hashlib
  import inspect
- import math
+ import logging
  import os
- import pdb
  import random
- import sys
+ import re
  import time
- import traceback
- from itertools import product
- from typing import Any, Iterable, Optional, Tuple, Type
+ import warnings
+ from enum import auto
+ from importlib import import_module
+ from typing import (
+     Callable,
+     Coroutine,
+     List,
+     Union,
+ )

  import cocotb
- import cocotb.ANSI as ANSI
- from cocotb import simulator
- from cocotb._deprecation import deprecated
- from cocotb.decorators import test as Test
- from cocotb.handle import SimHandle
- from cocotb.log import SimLog
- from cocotb.outcomes import Error, Outcome
- from cocotb.result import SimFailure, TestSuccess
+ import cocotb._gpi_triggers
+ import cocotb.handle
+ from cocotb import _ANSI, simulator
+ from cocotb._base_triggers import Trigger
+ from cocotb._decorators import Parameterized, Test
+ from cocotb._extended_awaitables import SimTimeoutError, with_timeout
+ from cocotb._gpi_triggers import GPITrigger, Timer
+ from cocotb._outcomes import Error, Outcome
+ from cocotb._test import RunningTest
+ from cocotb._test_factory import TestFactory
+ from cocotb._test_functions import Failed
+ from cocotb._utils import (
+     DocEnum,
+     remove_traceback_frames,
+     safe_divide,
+     want_color_output,
+ )
+ from cocotb._xunit_reporter import XUnitReporter
  from cocotb.task import Task
- from cocotb.utils import get_sim_time, remove_traceback_frames, want_color_output
- from cocotb.xunit_reporter import XUnitReporter
-
- _pdb_on_exception = "COCOTB_PDB_ON_EXCEPTION" in os.environ
-
- # Optional support for coverage collection of testbench files
- coverage = None
- if "COVERAGE" in os.environ:
-     try:
-         import coverage
-     except ImportError as e:
-         msg = (
-             "Coverage collection requested but coverage module not available"
-             "\n"
-             "Import error was: %s\n" % repr(e)
-         )
-         sys.stderr.write(msg)
-
-
- def _my_import(name: str) -> Any:
-     mod = __import__(name)
-     components = name.split(".")
-     for comp in components[1:]:
-         mod = getattr(mod, comp)
-     return mod
-
-
- _logger = SimLog(__name__)
-
- _Failed: Type[BaseException]
- try:
-     import pytest
- except ModuleNotFoundError:
-     _Failed = AssertionError
- else:
-     try:
-         with pytest.raises(Exception):
-             pass
-     except BaseException as _raises_e:
-         _Failed = type(_raises_e)
+ from cocotb.utils import get_sim_time
+
+ __all__ = (
+     "Parameterized",
+     "RegressionManager",
+     "RegressionMode",
+     "SimFailure",
+     "Test",
+     "TestFactory",
+ )
+
+ # Set __module__ on re-exports
+ Parameterized.__module__ = __name__
+ Test.__module__ = __name__
+ TestFactory.__module__ = __name__
+
+
+ class SimFailure(BaseException):
+     """A Test failure due to simulator failure."""
+
+
+ _logger = logging.getLogger(__name__)
+
+
+ def _format_doc(docstring: Union[str, None]) -> str:
+     if docstring is None:
+         return ""
      else:
-         assert "pytest.raises doesn't raise an exception when it fails"
+         brief = docstring.split("\n")[0]
+         return f"\n {brief}"
+
+
+ class RegressionMode(DocEnum):
+     """The mode of the :class:`RegressionManager`."""
+
+     REGRESSION = (
+         auto(),
+         """Tests are run if included. Skipped tests are skipped, expected failures and errors are respected.""",
+     )
+
+     TESTCASE = (
+         auto(),
+         """Like :attr:`REGRESSION`, but skipped tests are *not* skipped if included.""",
+     )
+
+
+ class _TestResults:
+     # TODO Replace with dataclass in Python 3.7+
+
+     def __init__(
+         self,
+         test_fullname: str,
+         passed: Union[None, bool],
+         wall_time_s: float,
+         sim_time_ns: float,
+     ) -> None:
+         self.test_fullname = test_fullname
+         self.passed = passed
+         self.wall_time_s = wall_time_s
+         self.sim_time_ns = sim_time_ns
+
+     @property
+     def ratio(self) -> float:
+         return safe_divide(self.sim_time_ns, self.wall_time_s)


  class RegressionManager:
-     """Encapsulates all regression capability into a single place"""
+     """Object which manages tests.

-     def __init__(self, dut: SimHandle, tests: Iterable[Test]):
-         """
-         Args:
-             dut (SimHandle): The root handle to pass into test functions.
-             tests (Iterable[Test]): tests to run
-         """
-         self._dut = dut
-         self._test = None
-         self._test_task = None
-         self._test_start_time = None
-         self._test_start_sim_time = None
-         self._cov = None
+     This object uses the builder pattern to build up a regression.
+     Tests are added using :meth:`register_test` or :meth:`discover_tests`.
+     Inclusion filters for tests can be added using :meth:`add_filters`.
+     The "mode" of the regression can be controlled using :meth:`set_mode`.
+     These methods can be called in any order any number of times before :meth:`start_regression` is called,
+     and should not be called again after that.
+
+     Once all the tests, filters, and regression behavior configuration is done,
+     the user starts the regression with :meth:`start_regression`.
+     This method must be called exactly once.
+
+     Until the regression is started, :attr:`total_tests`, :attr:`count`, :attr:`passed`,
+     :attr:`skipped`, and :attr:`failures` hold placeholder values.
+     """
+
+     _timer1 = Timer(1)
+
+     def __init__(self) -> None:
+         self._test: Test
+         self._running_test: RunningTest
          self.log = _logger
-         self.start_time = time.time()
-         self.test_results = []
+         self._regression_start_time: float
+         self._test_results: List[_TestResults] = []
+         self.total_tests = 0
+         """Total number of tests that will be run or skipped."""
          self.count = 0
+         """The current test count."""
          self.passed = 0
+         """The current number of passed tests."""
          self.skipped = 0
+         """The current number of skipped tests."""
          self.failures = 0
+         """The current number of failed tests."""
          self._tearing_down = False
+         self._test_queue: List[Test] = []
+         self._filters: List[re.Pattern[str]] = []
+         self._mode = RegressionMode.REGRESSION
+         self._included: List[bool]
+         self._sim_failure: Union[Error[None], None] = None

          # Setup XUnit
          ###################

          results_filename = os.getenv("COCOTB_RESULTS_FILE", "results.xml")
-         suite_name = os.getenv("RESULT_TESTSUITE", "all")
-         package_name = os.getenv("RESULT_TESTPACKAGE", "all")
+         suite_name = os.getenv("COCOTB_RESULT_TESTSUITE", "all")
+         package_name = os.getenv("COCOTB_RESULT_TESTPACKAGE", "all")

          self.xunit = XUnitReporter(filename=results_filename)
-
          self.xunit.add_testsuite(name=suite_name, package=package_name)
-
          self.xunit.add_property(name="random_seed", value=str(cocotb.RANDOM_SEED))

-         # Setup Coverage
-         ####################
-
-         if coverage is not None:
-             self.log.info("Enabling coverage collection of Python code")
-             config_filepath = os.getenv("COVERAGE_RCFILE")
-             if config_filepath is None:
-                 # Exclude cocotb itself from coverage collection.
-                 cocotb_package_dir = os.path.dirname(__file__)
-                 self._cov = coverage.coverage(
-                     branch=True, omit=[f"{cocotb_package_dir}/*"]
-                 )
-             else:
-                 # Allow the config file to handle all configuration
-                 self._cov = coverage.coverage()
-             self._cov.start()
-
-         # Test Discovery
-         ####################
-         self._queue = []
-         for test in tests:
-             self.log.info(f"Found test {test.__module__}.{test.__qualname__}")
-             self._queue.append(test)
-         self.ntests = len(self._queue)
-
-         if not self._queue:
-             self.log.warning("No tests were discovered")
+     def discover_tests(self, *modules: str) -> None:
+         """Discover tests in files automatically.

-         self._queue.sort(key=lambda test: (test.stage, test._id))
-
-     @classmethod
-     def from_discovery(cls, dut: SimHandle):
-         """
-         Obtains the test list by discovery.
-
-         See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.
+         Should be called before :meth:`start_regression` is called.

          Args:
-             dut (SimHandle): The root handle to pass into test functions.
+             modules: Each argument given is the name of a module where tests are found.
          """
-         tests = cls._discover_tests()
-         return cls(dut, tests)
+         for module_name in modules:
+             mod = import_module(module_name)
+
+             found_test: bool = False
+             for obj_name, obj in vars(mod).items():
+                 if isinstance(obj, Test):
+                     found_test = True
+                     self.register_test(obj)
+                 elif isinstance(obj, Parameterized):
+                     found_test = True
+                     generated_tests: bool = False
+                     for test in obj.generate_tests():
+                         generated_tests = True
+                         self.register_test(test)
+                     if not generated_tests:
+                         warnings.warn(
+                             f"Parametrize object generated no tests: {module_name}.{obj_name}",
+                             stacklevel=2,
+                         )

-     @classmethod
-     def _discover_tests(cls) -> Iterable[Test]:
-         """
-         Discovers tests in files automatically.
+             if not found_test:
+                 warnings.warn(
+                     f"No tests were discovered in module: {module_name}", stacklevel=2
+                 )

-         See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.
-         """
-         module_str = os.getenv("MODULE")
-         test_str = os.getenv("TESTCASE")
+         # error if no tests were discovered
+         if not self._test_queue:
+             modules_str = ", ".join(repr(m) for m in modules)
+             raise RuntimeError(f"No tests were discovered in any module: {modules_str}")

-         if module_str is None:
-             raise ValueError(
-                 "Environment variable MODULE, which defines the module(s) to execute, is not defined."
-             )
+     def add_filters(self, *filters: str) -> None:
+         """Add regular expressions to filter-in registered tests.

-         modules = [s.strip() for s in module_str.split(",") if s.strip()]
+         Only those tests which match at least one of the given filters are included;
+         the rest are excluded.

-         cls._setup_pytest_assertion_rewriting(modules)
+         Should be called before :meth:`start_regression` is called.

-         tests = None
-         if test_str:
-             tests = [s.strip() for s in test_str.split(",") if s.strip()]
+         Args:
+             filters: Each argument given is a regex pattern for test names.
+                 A match *includes* the test.
+         """
+         for filter in filters:
+             compiled_filter = re.compile(filter)
+             self._filters.append(compiled_filter)

-         for module_name in modules:
-             try:
-                 _logger.debug("Python Path: " + ",".join(sys.path))
-                 _logger.debug("PWD: " + os.getcwd())
-                 module = _my_import(module_name)
-             except Exception as E:
-                 _logger.critical("Failed to import module %s: %s", module_name, E)
-                 _logger.info('MODULE variable was "%s"', ".".join(modules))
-                 _logger.info("Traceback: ")
-                 _logger.info(traceback.format_exc())
-                 raise
-
-             if tests is not None:
-                 not_found_tests = []
-                 # Specific functions specified, don't auto-discover
-                 for test_name in tests:
-                     try:
-                         test = getattr(module, test_name)
-                     except AttributeError:
-                         not_found_tests.append(test_name)
-                         continue
-
-                     if not isinstance(test, Test):
-                         _logger.error(
-                             "Requested %s from module %s isn't a cocotb.test decorated coroutine",
-                             test_name,
-                             module_name,
-                         )
-                         raise ImportError(
-                             "Failed to find requested test %s" % test_name
-                         )
+     def set_mode(self, mode: RegressionMode) -> None:
+         """Set the regression mode.

-                     # If we request a test manually, it should be run even if skip=True is set.
-                     test.skip = False
+         See :class:`RegressionMode` for more details on how each mode affects :class:`RegressionManager` behavior.
+         Should be called before :meth:`start_regression` is called.

-                     yield test
+         Args:
+             mode: The regression mode to set.
+         """
+         self._mode = mode

-                 # Use the non-matching test names in the next module search
-                 tests = not_found_tests
+     def register_test(self, test: Test) -> None:
+         """Register a test with the :class:`RegressionManager`.

-             else:
-                 # auto-discover
-                 for thing in vars(module).values():
-                     if isinstance(thing, Test):
-                         yield thing
-
-         # If any test were not found in any module, raise an error
-         if tests:
-             _logger.error(
-                 "Requested test(s) %s wasn't found in module(s) %s", tests, modules
-             )
-             raise AttributeError("Test(s) %s doesn't exist in %s" % (tests, modules))
+         Should be called before :meth:`start_regression` is called.
+
+         Args:
+             test: The test object to register.
+         """
+         self.log.debug("Registered test %r", test.fullname)
+         self._test_queue.append(test)

      @classmethod
-     def _setup_pytest_assertion_rewriting(cls, test_modules: Iterable[str]) -> None:
+     def setup_pytest_assertion_rewriting(cls) -> None:
+         """Configure pytest to rewrite assertions for better failure messages.
+
+         Must be called before all modules containing tests are imported.
+         """
          try:
-             import pytest
+             import pytest  # noqa: PLC0415
          except ImportError:
              _logger.info(
                  "pytest not found, install it to enable better AssertionError messages"
@@ -261,205 +256,414 @@ class RegressionManager:
          try:
              # Install the assertion rewriting hook, which must be done before we
              # import the test modules.
-             from _pytest.assertion import install_importhook
-             from _pytest.config import Config
+             from _pytest.assertion import install_importhook  # noqa: PLC0415
+             from _pytest.config import Config  # noqa: PLC0415
+
+             python_files = os.getenv("COCOTB_REWRITE_ASSERTION_FILES", "*.py").strip()
+             if not python_files:
+                 # Even running the hook causes exceptions in some cases, so if the user
+                 # selects nothing, don't install the hook at all.
+                 return

              pytest_conf = Config.fromdictargs(
-                 {}, ["--capture=no", "-o", "python_files=*.py"]
+                 {}, ["--capture=no", "-o", f"python_files={python_files}"]
              )
              install_importhook(pytest_conf)
          except Exception:
              _logger.exception(
-                 "Configuring the assertion rewrite hook using pytest {} failed. "
-                 "Please file a bug report!".format(pytest.__version__)
+                 "Configuring the assertion rewrite hook using pytest %s failed. "
+                 "Please file a bug report!",
+                 pytest.__version__,
+             )
+
+     def start_regression(self) -> None:
+         """Start the regression."""
+
+         # sort tests into stages
+         self._test_queue.sort(key=lambda test: test.stage)
+
+         # mark tests for running
+         if self._filters:
+             self._included = [False] * len(self._test_queue)
+             for i, test in enumerate(self._test_queue):
+                 for filter in self._filters:
+                     if filter.search(test.fullname):
+                         self._included[i] = True
+         else:
+             self._included = [True] * len(self._test_queue)
+
+         # compute counts
+         self.count = 1
+         self.total_tests = sum(self._included)
+         if self.total_tests == 0:
+             self.log.warning(
+                 "No tests left after filtering with: %s",
+                 ", ".join(f.pattern for f in self._filters),
              )

-     @deprecated("This method is now private.")
-     def tear_down(self) -> None:
-         self._tear_down()
+         # start write scheduler
+         cocotb.handle._start_write_scheduler()
+
+         # start test loop
+         self._regression_start_time = time.time()
+         self._first_test = True
+         self._execute()
+
+     def _execute(self) -> None:
+         """Run the main regression loop.
+
+         Used by :meth:`start_regression` and :meth:`_test_complete` to continue to the main test running loop,
+         and by :meth:`_fail_regression` to shutdown the regression when a simulation failure occurs.
+         """
+
+         while self._test_queue:
+             self._test = self._test_queue.pop(0)
+             included = self._included.pop(0)
+
+             # if the test is not included, record and continue
+             if not included:
+                 self._record_test_excluded()
+                 continue
+
+             # if the test is skipped, record and continue
+             if self._test.skip and self._mode != RegressionMode.TESTCASE:
+                 self._record_test_skipped()
+                 continue
+
+             # if the test should be run, but the simulator has failed, record and continue
+             if self._sim_failure is not None:
+                 self._score_test(
+                     self._sim_failure,
+                     0,
+                     0,
+                 )
+                 continue
+
+             # initialize the test, if it fails, record and continue
+             try:
+                 self._running_test = self._init_test()
+             except Exception:
+                 self._record_test_init_failed()
+                 continue
+
+             self._log_test_start()
+
+             if self._first_test:
+                 self._first_test = False
+                 return self._schedule_next_test()
+             else:
+                 return self._timer1._prime(self._schedule_next_test)
+
+         return self._tear_down()
+
+     def _init_test(self) -> RunningTest:
+         # wrap test function in timeout
+         func: Callable[..., Coroutine[Trigger, None, None]]
+         timeout = self._test.timeout_time
+         if timeout is not None:
+             f = self._test.func
+
+             @functools.wraps(f)
+             async def func(*args: object, **kwargs: object) -> None:
+                 running_co = Task(f(*args, **kwargs))
+
+                 try:
+                     await with_timeout(running_co, timeout, self._test.timeout_unit)
+                 except SimTimeoutError:
+                     running_co.cancel()
+                     raise
+         else:
+             func = self._test.func
+
+         main_task = Task(func(cocotb.top), name=f"Test {self._test.name}")
+         return RunningTest(self._test_complete, main_task)
+
+     def _schedule_next_test(self, trigger: Union[GPITrigger, None] = None) -> None:
+         if trigger is not None:
+             # TODO move to Trigger object
+             cocotb._gpi_triggers._current_gpi_trigger = trigger
+             trigger._cleanup()
+
+         # seed random number generator based on test module, name, and COCOTB_RANDOM_SEED
+         hasher = hashlib.sha1()
+         hasher.update(self._test.fullname.encode())
+         seed = cocotb.RANDOM_SEED + int(hasher.hexdigest(), 16)
+         random.seed(seed)
+
+         self._start_sim_time = get_sim_time("ns")
+         self._start_time = time.time()
+
+         self._running_test.start()

      def _tear_down(self) -> None:
+         """Called by :meth:`_execute` when there are no more tests to run to finalize the regression."""
          # prevent re-entering the tear down procedure
          if not self._tearing_down:
              self._tearing_down = True
          else:
              return

-         # fail remaining tests
-         while True:
-             test = self._next_test()
-             if test is None:
-                 break
-             self._record_result(
-                 test=test, outcome=Error(SimFailure), wall_time_s=0, sim_time_ns=0
-             )
+         assert not self._test_queue
+
+         # stop the write scheduler
+         cocotb.handle._stop_write_scheduler()

          # Write out final log messages
          self._log_test_summary()

          # Generate output reports
          self.xunit.write()
-         if self._cov:
-             self._cov.stop()
-             self.log.info("Writing coverage data")
-             self._cov.save()
-             self._cov.html_report()
-         if cocotb._library_coverage is not None:
-             # TODO: move this once we have normal shutdown behavior to _sim_event
-             cocotb._library_coverage.stop()
-             cocotb._library_coverage.save()
+
+         # TODO refactor initialization and finalization into their own module
+         # to prevent circular imports requiring local imports
+         from cocotb._init import _shutdown_testbench  # noqa: PLC0415
+
+         _shutdown_testbench()

          # Setup simulator finalization
          simulator.stop_simulator()

-     @deprecated("This method is now private.")
-     def next_test(self) -> Optional[Test]:
-         return self._next_test()
+     def _test_complete(self) -> None:
+         """Callback given to the test to be called when the test finished."""

-     def _next_test(self) -> Optional[Test]:
-         """Get the next test to run"""
-         if not self._queue:
-             return None
-         self.count += 1
-         return self._queue.pop(0)
-
-     @deprecated("This method is now private.")
-     def handle_result(self, test: Task) -> None:
-         self._handle_result(test)
+         # compute wall time
+         wall_time = time.time() - self._start_time
+         sim_time_ns = get_sim_time("ns") - self._start_sim_time

-     def _handle_result(self, test: Task) -> None:
-         """Handle a test completing.
+         # Judge and record pass/fail.
+         self._score_test(
+             self._running_test.result(),
+             wall_time,
+             sim_time_ns,
+         )

-         Dump result to XML and schedule the next test (if any). Entered by the scheduler.
+         # Run next test.
+         return self._execute()

-         Args:
-             test: The test that completed
-         """
-         assert test is self._test_task
-
-         real_time = time.time() - self._test_start_time
-         sim_time_ns = get_sim_time("ns") - self._test_start_sim_time
+     def _score_test(
+         self,
+         outcome: Outcome[None],
+         wall_time_s: float,
+         sim_time_ns: float,
+     ) -> None:
+         test = self._test

-         self._record_result(
-             test=self._test,
-             outcome=self._test_task._outcome,
-             wall_time_s=real_time,
-             sim_time_ns=sim_time_ns,
-         )
+         # score test
+         passed: bool
+         msg: Union[str, None]
+         exc: Union[BaseException, None]
+         try:
+             outcome.get()
+         except BaseException as e:
+             passed, msg = False, None
+             exc = remove_traceback_frames(e, ["_score_test", "get"])
+         else:
+             passed, msg, exc = True, None, None
+
+         if passed:
+             if test.expect_error:
+                 self._record_test_failed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=exc,
+                     msg="passed but we expected an error",
+                 )
+                 passed = False
+
+             elif test.expect_fail:
+                 self._record_test_failed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=exc,
+                     msg="passed but we expected a failure",
+                 )
+                 passed = False

-         self._execute()
+             else:
+                 self._record_test_passed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=None,
+                     msg=msg,
+                 )

-     def _init_test(self, test: Test) -> Optional[Task]:
-         """Initialize a test.
+         elif test.expect_fail:
+             if isinstance(exc, (AssertionError, Failed)):
+                 self._record_test_passed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=None,
+                     msg="failed as expected",
+                 )

-         Record outcome if the initialization fails.
-         Record skip if the test is skipped.
-         Save the initialized test if it successfully initializes.
-         """
+             else:
+                 self._record_test_failed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=exc,
+                     msg="expected failure, but errored with unexpected type",
+                 )
+                 passed = False

-         if test.skip:
-             hilight_start = ANSI.COLOR_SKIPPED if want_color_output() else ""
-             hilight_end = ANSI.COLOR_DEFAULT if want_color_output() else ""
-             # Want this to stand out a little bit
-             self.log.info(
-                 "{start}skipping{end} {name} ({i}/{total})".format(
-                     start=hilight_start,
-                     i=self.count,
-                     total=self.ntests,
-                     end=hilight_end,
-                     name=test.__qualname__,
+         elif test.expect_error:
+             if isinstance(exc, test.expect_error):
+                 self._record_test_passed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=None,
+                     msg="errored as expected",
                  )
-             )
-             self._record_result(test, None, 0, 0)
-             return None

-         test_init_outcome = cocotb.outcomes.capture(test, self._dut)
+             else:
+                 self._record_test_failed(
+                     wall_time_s=wall_time_s,
+                     sim_time_ns=sim_time_ns,
+                     result=exc,
+                     msg="errored with unexpected type",
+                 )
+                 passed = False

-         if isinstance(test_init_outcome, cocotb.outcomes.Error):
-             self.log.error(
-                 "Failed to initialize test %s" % test.__qualname__,
-                 exc_info=test_init_outcome.error,
+         else:
+             self._record_test_failed(
+                 wall_time_s=wall_time_s,
+                 sim_time_ns=sim_time_ns,
+                 result=exc,
+                 msg=msg,
              )
-             self._record_result(test, test_init_outcome, 0, 0)
-             return None

-         running_test = test_init_outcome.get()
+     def _get_lineno(self, test: Test) -> int:
+         try:
+             return inspect.getsourcelines(test.func)[1]
+         except OSError:
+             return 1

-         # seed random number generator based on test module, name, and RANDOM_SEED
-         hasher = hashlib.sha1()
-         hasher.update(test.__qualname__.encode())
-         hasher.update(test.__module__.encode())
-         seed = cocotb.RANDOM_SEED + int(hasher.hexdigest(), 16)
-         random.seed(seed)
+     def _log_test_start(self) -> None:
+         """Called by :meth:`_execute` to log that a test is starting."""
+         hilight_start = _ANSI.COLOR_TEST if want_color_output() else ""
+         hilight_end = _ANSI.COLOR_DEFAULT if want_color_output() else ""
+         self.log.info(
+             "%srunning%s %s (%d/%d)%s",
+             hilight_start,
+             hilight_end,
+             self._test.fullname,
+             self.count,
+             self.total_tests,
+             _format_doc(self._test.doc),
+         )

-         return running_test
+     def _record_test_excluded(self) -> None:
+         """Called by :meth:`_execute` when a test is excluded by filters."""

-     def _score_test(self, test: Test, outcome: Outcome) -> Tuple[bool, bool]:
-         """
-         Given a test and the test's outcome, determine if the test met expectations and log pertinent information
-         """
+         # write out xunit results
+         lineno = self._get_lineno(self._test)
+         self.xunit.add_testcase(
+             name=self._test.name,
+             classname=self._test.module,
+             file=inspect.getfile(self._test.func),
+             lineno=repr(lineno),
+             time=repr(0),
+             sim_time_ns=repr(0),
+             ratio_time=repr(0),
+         )
+         self.xunit.add_skipped()

-         # scoring outcomes
-         result_pass = True
-         sim_failed = False
+         # do not log anything, nor save details for the summary

-         try:
-             outcome.get()
-         except (KeyboardInterrupt, SystemExit):
-             raise
-         except BaseException as e:
-             result = remove_traceback_frames(e, ["_score_test", "get"])
-         else:
-             result = TestSuccess()
-
-         if (
-             isinstance(result, TestSuccess)
-             and not test.expect_fail
-             and not test.expect_error
-         ):
-             self._log_test_passed(test, None, None)
-
-         elif isinstance(result, TestSuccess) and test.expect_error:
-             self._log_test_failed(test, None, "passed but we expected an error")
-             result_pass = False
-
-         elif isinstance(result, TestSuccess):
-             self._log_test_failed(test, None, "passed but we expected a failure")
-             result_pass = False
-
-         elif isinstance(result, SimFailure):
-             if isinstance(result, test.expect_error):
-                 self._log_test_passed(test, result, "errored as expected")
-             else:
-                 self.log.error("Test error has lead to simulator shutting us down")
-                 result_pass = False
-             # whether we expected it or not, the simulation has failed unrecoverably
-             sim_failed = True
+     def _record_test_skipped(self) -> None:
+         """Called by :meth:`_execute` when a test is skipped."""

-         elif isinstance(result, (AssertionError, _Failed)) and test.expect_fail:
-             self._log_test_passed(test, result, "failed as expected")
+         # log test results
+         hilight_start = _ANSI.COLOR_SKIPPED if want_color_output() else ""
+         hilight_end = _ANSI.COLOR_DEFAULT if want_color_output() else ""
+         self.log.info(
+             "%sskipping%s %s (%d/%d)%s",
+             hilight_start,
+             hilight_end,
+             self._test.fullname,
+             self.count,
+             self.total_tests,
+             _format_doc(self._test.doc),
+         )

-         elif test.expect_error:
-             if isinstance(result, test.expect_error):
-                 self._log_test_passed(test, result, "errored as expected")
-             else:
-                 self._log_test_failed(test, result, "errored with unexpected type ")
-                 result_pass = False
+         # write out xunit results
+         lineno = self._get_lineno(self._test)
+         self.xunit.add_testcase(
+             name=self._test.name,
+             classname=self._test.module,
+             file=inspect.getfile(self._test.func),
+             lineno=repr(lineno),
+             time=repr(0),
+             sim_time_ns=repr(0),
+             ratio_time=repr(0),
+         )
+         self.xunit.add_skipped()
+
+         # save details for summary
+         self._test_results.append(
+             _TestResults(
+                 test_fullname=self._test.fullname,
+                 passed=None,
+                 sim_time_ns=0,
+                 wall_time_s=0,
+             )
+         )

-         else:
-             self._log_test_failed(test, result, None)
-             result_pass = False
+         # update running passed/failed/skipped counts
+         self.skipped += 1
+         self.count += 1

-         if _pdb_on_exception:
-             pdb.post_mortem(result.__traceback__)
+     def _record_test_init_failed(self) -> None:
+         """Called by :meth:`_execute` when a test initialization fails."""
+
+         # log test results
+         hilight_start = _ANSI.COLOR_FAILED if want_color_output() else ""
+         hilight_end = _ANSI.COLOR_DEFAULT if want_color_output() else ""
+         self.log.exception(
+             "%sFailed to initialize%s %s! (%d/%d)%s",
+             hilight_start,
+             hilight_end,
+             self._test.fullname,
+             self.count,
+             self.total_tests,
+             _format_doc(self._test.doc),
+         )

-         return result_pass, sim_failed
+         # write out xunit results
+         lineno = self._get_lineno(self._test)
+         self.xunit.add_testcase(
+             name=self._test.name,
+             classname=self._test.module,
+             file=inspect.getfile(self._test.func),
+             lineno=repr(lineno),
+             time=repr(0),
+             sim_time_ns=repr(0),
+             ratio_time=repr(0),
+         )
+         self.xunit.add_failure(msg="Test initialization failed")
+
+         # save details for summary
+         self._test_results.append(
+             _TestResults(
+                 test_fullname=self._test.fullname,
+                 passed=False,
+                 sim_time_ns=0,
+                 wall_time_s=0,
+             )
+         )

-     def _log_test_passed(
-         self, test: Test, result: Optional[Exception] = None, msg: Optional[str] = None
+         # update running passed/failed/skipped counts
+         self.failures += 1
+         self.count += 1
+
+     def _record_test_passed(
+         self,
+         wall_time_s: float,
+         sim_time_ns: float,
+         result: Union[Exception, None],
+         msg: Union[str, None],
      ) -> None:
-         start_hilight = ANSI.COLOR_PASSED if want_color_output() else ""
-         stop_hilight = ANSI.COLOR_DEFAULT if want_color_output() else ""
+         start_hilight = _ANSI.COLOR_PASSED if want_color_output() else ""
+         stop_hilight = _ANSI.COLOR_DEFAULT if want_color_output() else ""
          if msg is None:
              rest = ""
          else:
@@ -469,119 +673,98 @@ class RegressionManager:
          else:
              result_was = f" (result was {type(result).__qualname__})"
          self.log.info(
-             f"{test.__qualname__} {start_hilight}passed{stop_hilight}{rest}{result_was}"
+             "%s %spassed%s%s%s",
+             self._test.fullname,
+             start_hilight,
+             stop_hilight,
+             rest,
+             result_was,
          )

-     def _log_test_failed(
-         self, test: Test, result: Optional[Exception] = None, msg: Optional[str] = None
-     ) -> None:
-         start_hilight = ANSI.COLOR_FAILED if want_color_output() else ""
-         stop_hilight = ANSI.COLOR_DEFAULT if want_color_output() else ""
-         if msg is None:
-             rest = ""
-         else:
-             rest = f": {msg}"
-         self.log.info(
-             f"{test.__qualname__} {start_hilight}failed{stop_hilight}{rest}",
-             exc_info=result,
+         # write out xunit results
+         ratio_time = safe_divide(sim_time_ns, wall_time_s)
+         lineno = self._get_lineno(self._test)
+         self.xunit.add_testcase(
+             name=self._test.name,
+             classname=self._test.module,
+             file=inspect.getfile(self._test.func),
+             lineno=repr(lineno),
+             time=repr(wall_time_s),
+             sim_time_ns=repr(sim_time_ns),
+             ratio_time=repr(ratio_time),
          )

-     def _record_result(
+         # update running passed/failed/skipped counts
+         self.passed += 1
+         self.count += 1
+
+         # save details for summary
+         self._test_results.append(
+             _TestResults(
+                 test_fullname=self._test.fullname,
+                 passed=True,
+                 sim_time_ns=sim_time_ns,
+                 wall_time_s=wall_time_s,
+             )
+         )
+
+     def _record_test_failed(
          self,
-         test: Test,
-         outcome: Optional[Outcome],
          wall_time_s: float,
          sim_time_ns: float,
+         result: Union[BaseException, None],
+         msg: Union[str, None],
      ) -> None:
+         start_hilight = _ANSI.COLOR_FAILED if want_color_output() else ""
+         stop_hilight = _ANSI.COLOR_DEFAULT if want_color_output() else ""
+         if msg is None:
+             rest = ""
+         else:
+             rest = f": {msg}"
+         self.log.warning(
+             "%s%s %sfailed%s%s",
+             stop_hilight,
+             self._test.fullname,
+             start_hilight,
+             stop_hilight,
+             rest,
+         )

-         ratio_time = self._safe_divide(sim_time_ns, wall_time_s)
-         try:
-             lineno = inspect.getsourcelines(test._func)[1]
-         except OSError:
-             lineno = 1
-
+         # write out xunit results
+         ratio_time = safe_divide(sim_time_ns, wall_time_s)
+         lineno = self._get_lineno(self._test)
          self.xunit.add_testcase(
-             name=test.__qualname__,
-             classname=test.__module__,
-             file=inspect.getfile(test._func),
+             name=self._test.name,
+             classname=self._test.module,
+             file=inspect.getfile(self._test.func),
              lineno=repr(lineno),
              time=repr(wall_time_s),
              sim_time_ns=repr(sim_time_ns),
              ratio_time=repr(ratio_time),
          )
+         self.xunit.add_failure(error_type=type(result).__name__, error_msg=str(result))

-         if outcome is None:  # skipped
-             test_pass, sim_failed = None, False
-             self.xunit.add_skipped()
-             self.skipped += 1
-
-         else:
-             test_pass, sim_failed = self._score_test(test, outcome)
-             if not test_pass:
-                 self.xunit.add_failure(
-                     message=f"Test failed with RANDOM_SEED={cocotb.RANDOM_SEED}"
-                 )
-                 self.failures += 1
-             else:
-                 self.passed += 1
-
-         self.test_results.append(
-             {
-                 "test": ".".join([test.__module__, test.__qualname__]),
-                 "pass": test_pass,
-                 "sim": sim_time_ns,
-                 "real": wall_time_s,
-                 "ratio": ratio_time,
-             }
-         )
-
-         if sim_failed:
-             self._tear_down()
-             return
-
-     @deprecated("This method is now private.")
-     def execute(self) -> None:
-         self._execute()
+         # update running passed/failed/skipped counts
+         self.failures += 1
+         self.count += 1

-     def _execute(self) -> None:
-         while True:
-             self._test = self._next_test()
-             if self._test is None:
-                 return self._tear_down()
-
-             self._test_task = self._init_test(self._test)
-             if self._test_task is not None:
-                 return self._start_test()
-
-     def _start_test(self) -> None:
-         # Want this to stand out a little bit
-         start = ""
-         end = ""
-         if want_color_output():
-             start = ANSI.COLOR_TEST
-             end = ANSI.COLOR_DEFAULT
-         self.log.info(
-             "{start}running{end} {name} ({i}/{total}){description}".format(
-                 start=start,
-                 i=self.count,
-                 total=self.ntests,
-                 end=end,
-                 name=self._test.__qualname__,
-                 description=_trim(self._test.__doc__),
+         # save details for summary
+         self._test_results.append(
+             _TestResults(
+                 test_fullname=self._test.fullname,
+                 passed=False,
+                 sim_time_ns=sim_time_ns,
+                 wall_time_s=wall_time_s,
              )
          )

-         self._test_start_time = time.time()
-         self._test_start_sim_time = get_sim_time("ns")
-         cocotb.scheduler._add_test(self._test_task)
-
      def _log_test_summary(self) -> None:
-
-         real_time = time.time() - self.start_time
+         """Called by :meth:`_tear_down` to log the test summary."""
+         real_time = time.time() - self._regression_start_time
          sim_time_ns = get_sim_time("ns")
-         ratio_time = self._safe_divide(sim_time_ns, real_time)
+         ratio_time = safe_divide(sim_time_ns, real_time)

-         if len(self.test_results) == 0:
+         if len(self._test_results) == 0:
              return

          TEST_FIELD = "TEST"
@@ -589,30 +772,30 @@ class RegressionManager:
          SIM_FIELD = "SIM TIME (ns)"
          REAL_FIELD = "REAL TIME (s)"
          RATIO_FIELD = "RATIO (ns/s)"
-         TOTAL_NAME = f"TESTS={self.ntests} PASS={self.passed} FAIL={self.failures} SKIP={self.skipped}"
+         TOTAL_NAME = f"TESTS={self.total_tests} PASS={self.passed} FAIL={self.failures} SKIP={self.skipped}"

          TEST_FIELD_LEN = max(
              len(TEST_FIELD),
              len(TOTAL_NAME),
-             len(max([x["test"] for x in self.test_results], key=len)),
+             len(max([x.test_fullname for x in self._test_results], key=len)),
          )
          RESULT_FIELD_LEN = len(RESULT_FIELD)
          SIM_FIELD_LEN = len(SIM_FIELD)
          REAL_FIELD_LEN = len(REAL_FIELD)
          RATIO_FIELD_LEN = len(RATIO_FIELD)

-         header_dict = dict(
-             a=TEST_FIELD,
-             b=RESULT_FIELD,
-             c=SIM_FIELD,
-             d=REAL_FIELD,
-             e=RATIO_FIELD,
-             a_len=TEST_FIELD_LEN,
-             b_len=RESULT_FIELD_LEN,
-             c_len=SIM_FIELD_LEN,
-             d_len=REAL_FIELD_LEN,
-             e_len=RATIO_FIELD_LEN,
-         )
+         header_dict = {
+             "a": TEST_FIELD,
+             "b": RESULT_FIELD,
+             "c": SIM_FIELD,
+             "d": REAL_FIELD,
+             "e": RATIO_FIELD,
+             "a_len": TEST_FIELD_LEN,
+             "b_len": RESULT_FIELD_LEN,
+             "c_len": SIM_FIELD_LEN,
+             "d_len": REAL_FIELD_LEN,
+             "e_len": RATIO_FIELD_LEN,
+         }

          LINE_LEN = (
              3
@@ -638,43 +821,43 @@ class RegressionManager:
          summary += LINE_SEP

          test_line = "** {a:<{a_len}} {start}{b:^{b_len}}{end} {c:>{c_len}.2f} {d:>{d_len}.2f} {e:>{e_len}} **\n"
-         for result in self.test_results:
+         for result in self._test_results:
              hilite = ""
              lolite = ""

-             if result["pass"] is None:
+             if result.passed is None:
                  ratio = "-.--"
                  pass_fail_str = "SKIP"
                  if want_color_output():
-                     hilite = ANSI.COLOR_SKIPPED
-                     lolite = ANSI.COLOR_DEFAULT
-             elif result["pass"]:
-                 ratio = format(result["ratio"], "0.2f")
+                     hilite = _ANSI.COLOR_SKIPPED
+                     lolite = _ANSI.COLOR_DEFAULT
+             elif result.passed:
+                 ratio = format(result.ratio, "0.2f")
                  pass_fail_str = "PASS"
                  if want_color_output():
-                     hilite = ANSI.COLOR_PASSED
-                     lolite = ANSI.COLOR_DEFAULT
+                     hilite = _ANSI.COLOR_PASSED
+                     lolite = _ANSI.COLOR_DEFAULT
              else:
-                 ratio = format(result["ratio"], "0.2f")
+                 ratio = format(result.ratio, "0.2f")
                  pass_fail_str = "FAIL"
                  if want_color_output():
-                     hilite = ANSI.COLOR_FAILED
-                     lolite = ANSI.COLOR_DEFAULT
-
-             test_dict = dict(
-                 a=result["test"],
-                 b=pass_fail_str,
-                 c=result["sim"],
-                 d=result["real"],
-                 e=ratio,
-                 a_len=TEST_FIELD_LEN,
-                 b_len=RESULT_FIELD_LEN,
-                 c_len=SIM_FIELD_LEN - 1,
-                 d_len=REAL_FIELD_LEN - 1,
-                 e_len=RATIO_FIELD_LEN - 1,
-                 start=hilite,
-                 end=lolite,
-             )
+                     hilite = _ANSI.COLOR_FAILED
+                     lolite = _ANSI.COLOR_DEFAULT
+
+             test_dict = {
+                 "a": result.test_fullname,
+                 "b": pass_fail_str,
+                 "c": result.sim_time_ns,
+                 "d": result.wall_time_s,
+                 "e": ratio,
+                 "a_len": TEST_FIELD_LEN,
+                 "b_len": RESULT_FIELD_LEN,
+                 "c_len": SIM_FIELD_LEN - 1,
+                 "d_len": REAL_FIELD_LEN - 1,
+                 "e_len": RATIO_FIELD_LEN - 1,
+                 "start": hilite,
+                 "end": lolite,
+             }

              summary += test_line.format(**test_dict)

@@ -699,239 +882,7 @@ class RegressionManager:

          self.log.info(summary)

-     @staticmethod
-     def _safe_divide(a: float, b: float) -> float:
-         try:
-             return a / b
-         except ZeroDivisionError:
-             if a == 0:
-                 return float("nan")
-             else:
-                 return float("inf")
-
-
- def _create_test(function, name, documentation, mod, *args, **kwargs):
-     """Factory function to create tests, avoids late binding.
-
-     Creates a test dynamically. The test will call the supplied
-     function with the supplied arguments.
-
-     Args:
-         function (function): The test function to run.
-         name (str): The name of the test.
-         documentation (str): The docstring for the test.
-         mod (module): The module this function belongs to.
-         *args: Remaining args to pass to test function.
-         **kwargs: Passed to the test function.
-
-     Returns:
-         Decorated test function
-     """
-
-     async def _my_test(dut):
-         await function(dut, *args, **kwargs)
-
-     _my_test.__name__ = name
-     _my_test.__qualname__ = name
-     _my_test.__doc__ = documentation
-     _my_test.__module__ = mod.__name__
-
-     return cocotb.test()(_my_test)
-
-
- class TestFactory:
-     """Factory to automatically generate tests.
-
-     Args:
-         test_function: A Callable that returns the test Coroutine.
-             Must take *dut* as the first argument.
-         *args: Remaining arguments are passed directly to the test function.
-             Note that these arguments are not varied. An argument that
-             varies with each test must be a keyword argument to the
-             test function.
-         **kwargs: Remaining keyword arguments are passed directly to the test function.
-             Note that these arguments are not varied. An argument that
-             varies with each test must be a keyword argument to the
-             test function.
-
-     Assuming we have a common test function that will run a test. This test
-     function will take keyword arguments (for example generators for each of
-     the input interfaces) and generate tests that call the supplied function.
-
-     This Factory allows us to generate sets of tests based on the different
-     permutations of the possible arguments to the test function.
-
-     For example, if we have a module that takes backpressure, has two configurable
-     features where enabling ``feature_b`` requires ``feature_a`` to be active, and
-     need to test against data generation routines ``gen_a`` and ``gen_b``:
-
-     >>> tf = TestFactory(test_function=run_test)
-     >>> tf.add_option(name='data_in', optionlist=[gen_a, gen_b])
-     >>> tf.add_option('backpressure', [None, random_backpressure])
-     >>> tf.add_option(('feature_a', 'feature_b'), [(False, False), (True, False), (True, True)])
-     >>> tf.generate_tests()
-
-     We would get the following tests:
-
-     * ``gen_a`` with no backpressure and both features disabled
-     * ``gen_a`` with no backpressure and only ``feature_a`` enabled
-     * ``gen_a`` with no backpressure and both features enabled
-     * ``gen_a`` with ``random_backpressure`` and both features disabled
-     * ``gen_a`` with ``random_backpressure`` and only ``feature_a`` enabled
-     * ``gen_a`` with ``random_backpressure`` and both features enabled
-     * ``gen_b`` with no backpressure and both features disabled
-     * ``gen_b`` with no backpressure and only ``feature_a`` enabled
-     * ``gen_b`` with no backpressure and both features enabled
-     * ``gen_b`` with ``random_backpressure`` and both features disabled
-     * ``gen_b`` with ``random_backpressure`` and only ``feature_a`` enabled
-     * ``gen_b`` with ``random_backpressure`` and both features enabled
-
-     The tests are appended to the calling module for auto-discovery.
-
-     Tests are simply named ``test_function_N``. The docstring for the test (hence
-     the test description) includes the name and description of each generator.
-
-     .. versionchanged:: 1.5
-         Groups of options are now supported
-     """
-
-     # Prevent warnings from collection of TestFactories by unit testing frameworks.
-     __test__ = False
-
-     def __init__(self, test_function, *args, **kwargs):
-         self.test_function = test_function
-         self.name = self.test_function.__qualname__
-
-         self.args = args
-         self.kwargs_constant = kwargs
-         self.kwargs = {}
-         self.log = _logger
-
-     def add_option(self, name, optionlist):
-         """Add a named option to the test.
-
-         Args:
-             name (str or iterable of str): An option name, or an iterable of
-                 several option names. Passed to test as keyword arguments.
-
-             optionlist (list): A list of possible options for this test knob.
-                 If N names were specified, this must be a list of N-tuples or
-                 lists, where each element specifies a value for its respective
-                 option.
-
-         .. versionchanged:: 1.5
-             Groups of options are now supported
-         """
-         if not isinstance(name, str):
-             name = tuple(name)
-             for opt in optionlist:
-                 if len(name) != len(opt):
-                     raise ValueError(
-                         "Mismatch between number of options and number of option values in group"
-                     )
-         self.kwargs[name] = optionlist
-
-     def generate_tests(self, prefix="", postfix=""):
-         """
-         Generate an exhaustive set of tests using the cartesian product of the
-         possible keyword arguments.
-
-         The generated tests are appended to the namespace of the calling
-         module.
-
-         Args:
-             prefix (str): Text string to append to start of ``test_function`` name
-                 when naming generated test cases. This allows reuse of
-                 a single ``test_function`` with multiple
-                 :class:`TestFactories <.TestFactory>` without name clashes.
-             postfix (str): Text string to append to end of ``test_function`` name
-                 when naming generated test cases. This allows reuse of
-                 a single ``test_function`` with multiple
-                 :class:`TestFactories <.TestFactory>` without name clashes.
-         """
-
-         frm = inspect.stack()[1]
-         mod = inspect.getmodule(frm[0])
-
-         d = self.kwargs
-
-         for index, testoptions in enumerate(
-             dict(zip(d, v)) for v in product(*d.values())
-         ):
-
-             name = "%s%s%s_%03d" % (prefix, self.name, postfix, index + 1)
-             doc = "Automatically generated test\n\n"
-
-             # preprocess testoptions to split tuples
-             testoptions_split = {}
-             for optname, optvalue in testoptions.items():
-                 if isinstance(optname, str):
-                     testoptions_split[optname] = optvalue
-                 else:
-                     # previously checked in add_option; ensure nothing has changed
-                     assert len(optname) == len(optvalue)
-                     for n, v in zip(optname, optvalue):
-                         testoptions_split[n] = v
-
-             for optname, optvalue in testoptions_split.items():
-                 if callable(optvalue):
-                     if not optvalue.__doc__:
-                         desc = "No docstring supplied"
-                     else:
-                         desc = optvalue.__doc__.split("\n")[0]
-                     doc += "\t{}: {} ({})\n".format(
-                         optname, optvalue.__qualname__, desc
-                     )
-                 else:
-                     doc += "\t{}: {}\n".format(optname, repr(optvalue))
-
-             self.log.debug(
-                 'Adding generated test "%s" to module "%s"' % (name, mod.__name__)
-             )
-             kwargs = {}
-             kwargs.update(self.kwargs_constant)
-             kwargs.update(testoptions_split)
-             if hasattr(mod, name):
-                 self.log.error(
-                     "Overwriting %s in module %s. "
-                     "This causes a previously defined testcase "
-                     "not to be run. Consider setting/changing "
-                     "name_postfix" % (name, mod)
-                 )
-             setattr(
-                 mod,
-                 name,
-                 _create_test(self.test_function, name, doc, mod, *self.args, **kwargs),
-             )
-
-
- def _trim(docstring: Optional[str]) -> str:
-     """Normalizes test docstrings
-
-     Based on https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation.
-     """
-     if docstring is None or docstring == "":
-         return ""
-     # Convert tabs to spaces (following the normal Python rules)
-     # and split into a list of lines:
-     lines = docstring.expandtabs().splitlines()
-     # Determine minimum indentation (first line doesn't count):
-     indent = math.inf
-     for line in lines[1:]:
-         stripped = line.lstrip()
-         if stripped:
-             indent = min(indent, len(line) - len(stripped))
-     # Remove indentation (first line is special):
-     trimmed = [lines[0].strip()]
-     if indent < math.inf:
-         for line in lines[1:]:
-             trimmed.append(line[indent:].rstrip())
-     # Strip off trailing and leading blank lines:
-     while trimmed and not trimmed[-1]:
-         trimmed.pop()
-     while trimmed and not trimmed[0]:
-         trimmed.pop(0)
-     # Add one newline back
-     trimmed.insert(0, "")
-     # Return a single string:
-     return "\n ".join(trimmed)
+     def _fail_simulation(self, msg: str) -> None:
+         self._sim_failure = Error(SimFailure(msg))
+         self._running_test.abort(self._sim_failure)
+         cocotb._scheduler_inst._event_loop()
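
The rewrite above replaces the 1.x flow (environment-variable driven discovery via MODULE/TESTCASE plus scheduler callbacks) with a builder-style RegressionManager. A minimal sketch of how the new API fits together, pieced from the methods shown in this diff; cocotb's own init code normally drives this itself, and the module name my_testmodule is hypothetical:

    from cocotb.regression import RegressionManager, RegressionMode

    manager = RegressionManager()
    # Must run before test modules are imported, so pytest can rewrite their assertions.
    RegressionManager.setup_pytest_assertion_rewriting()
    # Build up the regression in any order, any number of times:
    manager.discover_tests("my_testmodule")    # imports the module, registers its Test objects
    manager.add_filters(r"smoke_.*")           # regex on the test's full name; a match includes it
    manager.set_mode(RegressionMode.TESTCASE)  # also run tests marked skip if they match a filter
    # Start exactly once: runs the tests, writes results.xml, then stops the simulator.
    manager.start_regression()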