cocotb 2.0.0rc2__cp39-cp39-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of cocotb has been flagged as potentially problematic; consult the registry's advisory page for details.
- cocotb/_ANSI.py +65 -0
- cocotb/__init__.py +125 -0
- cocotb/_base_triggers.py +515 -0
- cocotb/_bridge.py +186 -0
- cocotb/_decorators.py +515 -0
- cocotb/_deprecation.py +36 -0
- cocotb/_exceptions.py +7 -0
- cocotb/_extended_awaitables.py +419 -0
- cocotb/_gpi_triggers.py +385 -0
- cocotb/_init.py +301 -0
- cocotb/_outcomes.py +54 -0
- cocotb/_profiling.py +46 -0
- cocotb/_py_compat.py +148 -0
- cocotb/_scheduler.py +448 -0
- cocotb/_test.py +248 -0
- cocotb/_test_factory.py +312 -0
- cocotb/_test_functions.py +42 -0
- cocotb/_typing.py +7 -0
- cocotb/_utils.py +274 -0
- cocotb/_version.py +4 -0
- cocotb/_xunit_reporter.py +66 -0
- cocotb/clock.py +419 -0
- cocotb/debug.py +24 -0
- cocotb/handle.py +1752 -0
- cocotb/libs/libcocotb.so +0 -0
- cocotb/libs/libcocotbfli_modelsim.so +0 -0
- cocotb/libs/libcocotbutils.so +0 -0
- cocotb/libs/libcocotbvhpi_aldec.so +0 -0
- cocotb/libs/libcocotbvhpi_ius.so +0 -0
- cocotb/libs/libcocotbvhpi_modelsim.so +0 -0
- cocotb/libs/libcocotbvhpi_nvc.so +0 -0
- cocotb/libs/libcocotbvpi_aldec.so +0 -0
- cocotb/libs/libcocotbvpi_dsim.so +0 -0
- cocotb/libs/libcocotbvpi_ghdl.so +0 -0
- cocotb/libs/libcocotbvpi_icarus.vpl +0 -0
- cocotb/libs/libcocotbvpi_ius.so +0 -0
- cocotb/libs/libcocotbvpi_modelsim.so +0 -0
- cocotb/libs/libcocotbvpi_vcs.so +0 -0
- cocotb/libs/libcocotbvpi_verilator.so +0 -0
- cocotb/libs/libembed.so +0 -0
- cocotb/libs/libgpi.so +0 -0
- cocotb/libs/libgpilog.so +0 -0
- cocotb/libs/libpygpilog.so +0 -0
- cocotb/logging.py +424 -0
- cocotb/py.typed +0 -0
- cocotb/queue.py +225 -0
- cocotb/regression.py +896 -0
- cocotb/result.py +38 -0
- cocotb/share/def/.gitignore +2 -0
- cocotb/share/def/README.md +4 -0
- cocotb/share/def/aldec.def +61 -0
- cocotb/share/def/ghdl.def +43 -0
- cocotb/share/def/icarus.def +43 -0
- cocotb/share/def/modelsim.def +138 -0
- cocotb/share/include/cocotb_utils.h +70 -0
- cocotb/share/include/embed.h +33 -0
- cocotb/share/include/exports.h +20 -0
- cocotb/share/include/gpi.h +459 -0
- cocotb/share/include/gpi_logging.h +291 -0
- cocotb/share/include/py_gpi_logging.h +33 -0
- cocotb/share/include/vhpi_user_ext.h +26 -0
- cocotb/share/include/vpi_user_ext.h +33 -0
- cocotb/share/lib/verilator/verilator.cpp +209 -0
- cocotb/simtime.py +230 -0
- cocotb/simulator.cpython-39-darwin.so +0 -0
- cocotb/simulator.pyi +107 -0
- cocotb/task.py +590 -0
- cocotb/triggers.py +67 -0
- cocotb/types/__init__.py +31 -0
- cocotb/types/_abstract_array.py +151 -0
- cocotb/types/_array.py +295 -0
- cocotb/types/_indexing.py +17 -0
- cocotb/types/_logic.py +333 -0
- cocotb/types/_logic_array.py +868 -0
- cocotb/types/_range.py +197 -0
- cocotb/types/_resolve.py +76 -0
- cocotb/utils.py +110 -0
- cocotb-2.0.0rc2.dist-info/METADATA +60 -0
- cocotb-2.0.0rc2.dist-info/RECORD +115 -0
- cocotb-2.0.0rc2.dist-info/WHEEL +5 -0
- cocotb-2.0.0rc2.dist-info/entry_points.txt +2 -0
- cocotb-2.0.0rc2.dist-info/licenses/LICENSE +29 -0
- cocotb-2.0.0rc2.dist-info/top_level.txt +23 -0
- cocotb_tools/__init__.py +0 -0
- cocotb_tools/_coverage.py +33 -0
- cocotb_tools/_vendor/__init__.py +3 -0
- cocotb_tools/_vendor/distutils_version.py +346 -0
- cocotb_tools/check_results.py +65 -0
- cocotb_tools/combine_results.py +152 -0
- cocotb_tools/config.py +241 -0
- cocotb_tools/ipython_support.py +99 -0
- cocotb_tools/makefiles/Makefile.deprecations +27 -0
- cocotb_tools/makefiles/Makefile.inc +198 -0
- cocotb_tools/makefiles/Makefile.sim +96 -0
- cocotb_tools/makefiles/simulators/Makefile.activehdl +72 -0
- cocotb_tools/makefiles/simulators/Makefile.cvc +61 -0
- cocotb_tools/makefiles/simulators/Makefile.dsim +39 -0
- cocotb_tools/makefiles/simulators/Makefile.ghdl +84 -0
- cocotb_tools/makefiles/simulators/Makefile.icarus +80 -0
- cocotb_tools/makefiles/simulators/Makefile.ius +93 -0
- cocotb_tools/makefiles/simulators/Makefile.modelsim +9 -0
- cocotb_tools/makefiles/simulators/Makefile.nvc +60 -0
- cocotb_tools/makefiles/simulators/Makefile.questa +29 -0
- cocotb_tools/makefiles/simulators/Makefile.questa-compat +143 -0
- cocotb_tools/makefiles/simulators/Makefile.questa-qisqrun +149 -0
- cocotb_tools/makefiles/simulators/Makefile.riviera +144 -0
- cocotb_tools/makefiles/simulators/Makefile.vcs +65 -0
- cocotb_tools/makefiles/simulators/Makefile.verilator +79 -0
- cocotb_tools/makefiles/simulators/Makefile.xcelium +104 -0
- cocotb_tools/py.typed +0 -0
- cocotb_tools/runner.py +1868 -0
- cocotb_tools/sim_versions.py +140 -0
- pygpi/__init__.py +0 -0
- pygpi/entry.py +42 -0
- pygpi/py.typed +0 -0
cocotb/regression.py
ADDED
|
@@ -0,0 +1,896 @@
|
|
|
1
|
+
# Copyright cocotb contributors
|
|
2
|
+
# Copyright (c) 2013, 2018 Potential Ventures Ltd
|
|
3
|
+
# Copyright (c) 2013 SolarFlare Communications Inc
|
|
4
|
+
# Licensed under the Revised BSD License, see LICENSE for details.
|
|
5
|
+
# SPDX-License-Identifier: BSD-3-Clause
|
|
6
|
+
|
|
7
|
+
"""All things relating to regression capabilities."""
|
|
8
|
+
|
|
9
|
+
import functools
|
|
10
|
+
import hashlib
|
|
11
|
+
import inspect
|
|
12
|
+
import logging
|
|
13
|
+
import os
|
|
14
|
+
import random
|
|
15
|
+
import re
|
|
16
|
+
import time
|
|
17
|
+
import warnings
|
|
18
|
+
from enum import auto
|
|
19
|
+
from importlib import import_module
|
|
20
|
+
from typing import (
|
|
21
|
+
TYPE_CHECKING,
|
|
22
|
+
Callable,
|
|
23
|
+
Coroutine,
|
|
24
|
+
List,
|
|
25
|
+
Union,
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
import cocotb
|
|
29
|
+
import cocotb._gpi_triggers
|
|
30
|
+
import cocotb.handle
|
|
31
|
+
from cocotb import logging as cocotb_logging
|
|
32
|
+
from cocotb import simulator
|
|
33
|
+
from cocotb._decorators import Parameterized, Test
|
|
34
|
+
from cocotb._extended_awaitables import with_timeout
|
|
35
|
+
from cocotb._gpi_triggers import GPITrigger, Timer
|
|
36
|
+
from cocotb._outcomes import Error, Outcome
|
|
37
|
+
from cocotb._test import RunningTest
|
|
38
|
+
from cocotb._test_factory import TestFactory
|
|
39
|
+
from cocotb._test_functions import Failed
|
|
40
|
+
from cocotb._utils import (
|
|
41
|
+
DocEnum,
|
|
42
|
+
remove_traceback_frames,
|
|
43
|
+
safe_divide,
|
|
44
|
+
)
|
|
45
|
+
from cocotb._xunit_reporter import XUnitReporter
|
|
46
|
+
from cocotb.logging import ANSI
|
|
47
|
+
from cocotb.simtime import get_sim_time
|
|
48
|
+
from cocotb.task import Task
|
|
49
|
+
|
|
50
|
+
if TYPE_CHECKING:
|
|
51
|
+
from cocotb._base_triggers import Trigger
|
|
52
|
+
|
|
53
|
+
__all__ = (
|
|
54
|
+
"Parameterized",
|
|
55
|
+
"RegressionManager",
|
|
56
|
+
"RegressionMode",
|
|
57
|
+
"SimFailure",
|
|
58
|
+
"Test",
|
|
59
|
+
"TestFactory",
|
|
60
|
+
)
|
|
61
|
+
|
|
62
|
+
# Set __module__ on re-exports
|
|
63
|
+
Parameterized.__module__ = __name__
|
|
64
|
+
Test.__module__ = __name__
|
|
65
|
+
TestFactory.__module__ = __name__
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
# Derives from BaseException (not Exception) so ordinary ``except Exception``
# handlers inside user code cannot accidentally swallow it.
class SimFailure(BaseException):
    """A Test failure due to simulator failure.

    .. caution::
        Not to be raised or caught within a test.
        Only used for marking expected failure with ``expect_error`` in :func:`cocotb.test`.
    """
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
# Module-level logger shared by the RegressionManager and helpers below.
_logger = logging.getLogger(__name__)
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def _format_doc(docstring: Union[str, None]) -> str:
|
|
81
|
+
if docstring is None:
|
|
82
|
+
return ""
|
|
83
|
+
else:
|
|
84
|
+
brief = docstring.split("\n")[0]
|
|
85
|
+
return f"\n {brief}"
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
class RegressionMode(DocEnum):
    """The mode of the :class:`RegressionManager`."""

    # Each member is (value, per-member docstring) — DocEnum attaches the
    # second tuple element as the member's __doc__.
    REGRESSION = (
        auto(),
        """Tests are run if included. Skipped tests are skipped, expected failures and errors are respected.""",
    )

    TESTCASE = (
        auto(),
        """Like :attr:`REGRESSION`, but skipped tests are *not* skipped if included.""",
    )
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
class _TestResults:
|
|
103
|
+
# TODO Replace with dataclass in Python 3.7+
|
|
104
|
+
|
|
105
|
+
def __init__(
|
|
106
|
+
self,
|
|
107
|
+
test_fullname: str,
|
|
108
|
+
passed: Union[None, bool],
|
|
109
|
+
wall_time_s: float,
|
|
110
|
+
sim_time_ns: float,
|
|
111
|
+
) -> None:
|
|
112
|
+
self.test_fullname = test_fullname
|
|
113
|
+
self.passed = passed
|
|
114
|
+
self.wall_time_s = wall_time_s
|
|
115
|
+
self.sim_time_ns = sim_time_ns
|
|
116
|
+
|
|
117
|
+
@property
|
|
118
|
+
def ratio(self) -> float:
|
|
119
|
+
return safe_divide(self.sim_time_ns, self.wall_time_s)
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
class RegressionManager:
    """Object which manages tests.

    This object uses the builder pattern to build up a regression.
    Tests are added using :meth:`register_test` or :meth:`discover_tests`.
    Inclusion filters for tests can be added using :meth:`add_filters`.
    The "mode" of the regression can be controlled using :meth:`set_mode`.
    These methods can be called in any order any number of times before :meth:`start_regression` is called,
    and should not be called again after that.

    Once all the tests, filters, and regression behavior configuration is done,
    the user starts the regression with :meth:`start_regression`.
    This method must be called exactly once.

    Until the regression is started, :attr:`total_tests`, :attr:`count`, :attr:`passed`,
    :attr:`skipped`, and :attr:`failures` hold placeholder values.
    """

    # ANSI colors used when highlighting test status in log output.
    COLOR_TEST = ANSI.BLUE_FG
    COLOR_PASSED = ANSI.GREEN_FG
    COLOR_SKIPPED = ANSI.YELLOW_FG
    COLOR_FAILED = ANSI.RED_FG

    # Shared one-time-step Timer, primed in _execute() to let simulator time
    # advance between consecutive tests (after the first test).
    _timer1 = Timer(1)
|
|
146
|
+
|
|
147
|
+
def __init__(self) -> None:
    # Set while a test is active; annotated-only here (assigned in _execute).
    self._test: Test
    self._running_test: RunningTest
    self.log = _logger
    # Wall-clock timestamp set by start_regression().
    self._regression_start_time: float
    self._test_results: List[_TestResults] = []
    self.total_tests = 0
    """Total number of tests that will be run or skipped."""
    self.count = 0
    """The current test count."""
    self.passed = 0
    """The current number of passed tests."""
    self.skipped = 0
    """The current number of skipped tests."""
    self.failures = 0
    """The current number of failed tests."""
    # Guards _tear_down() against re-entry.
    self._tearing_down = False
    self._test_queue: List[Test] = []
    self._filters: List[re.Pattern[str]] = []
    self._mode = RegressionMode.REGRESSION
    # Parallel to _test_queue once start_regression() computes it.
    self._included: List[bool]
    # Set via _fail_regression machinery when the simulator itself fails;
    # subsequent tests are scored with this outcome instead of running.
    self._sim_failure: Union[Error[None], None] = None

    # Setup XUnit
    ###################

    # Output locations/names are environment-configurable.
    results_filename = os.getenv("COCOTB_RESULTS_FILE", "results.xml")
    suite_name = os.getenv("COCOTB_RESULT_TESTSUITE", "all")
    package_name = os.getenv("COCOTB_RESULT_TESTPACKAGE", "all")

    self.xunit = XUnitReporter(filename=results_filename)
    self.xunit.add_testsuite(name=suite_name, package=package_name)
    self.xunit.add_property(name="random_seed", value=str(cocotb.RANDOM_SEED))
|
|
180
|
+
|
|
181
|
+
def discover_tests(self, *modules: str) -> None:
    """Discover tests in files automatically.

    Should be called before :meth:`start_regression` is called.

    Args:
        modules: Each argument given is the name of a module where tests are found.

    Raises:
        RuntimeError: If no tests are found in any of the given modules.
    """
    for module_name in modules:
        mod = import_module(module_name)

        found_test: bool = False
        # Scan module globals for Test objects and Parameterized factories.
        for obj_name, obj in vars(mod).items():
            if isinstance(obj, Test):
                found_test = True
                self.register_test(obj)
            elif isinstance(obj, Parameterized):
                found_test = True
                generated_tests: bool = False
                for test in obj.generate_tests():
                    generated_tests = True
                    self.register_test(test)
                if not generated_tests:
                    # A Parameterized with an empty parameter space is almost
                    # certainly a user mistake, so warn about it.
                    warnings.warn(
                        f"Parametrize object generated no tests: {module_name}.{obj_name}",
                        stacklevel=2,
                    )

        if not found_test:
            warnings.warn(
                f"No tests were discovered in module: {module_name}", stacklevel=2
            )

    # error if no tests were discovered
    if not self._test_queue:
        modules_str = ", ".join(repr(m) for m in modules)
        raise RuntimeError(f"No tests were discovered in any module: {modules_str}")
|
|
218
|
+
|
|
219
|
+
def add_filters(self, *filters: str) -> None:
|
|
220
|
+
"""Add regular expressions to filter-in registered tests.
|
|
221
|
+
|
|
222
|
+
Only those tests which match at least one of the given filters are included;
|
|
223
|
+
the rest are excluded.
|
|
224
|
+
|
|
225
|
+
Should be called before :meth:`start_regression` is called.
|
|
226
|
+
|
|
227
|
+
Args:
|
|
228
|
+
filters: Each argument given is a regex pattern for test names.
|
|
229
|
+
A match *includes* the test.
|
|
230
|
+
"""
|
|
231
|
+
for filter in filters:
|
|
232
|
+
compiled_filter = re.compile(filter)
|
|
233
|
+
self._filters.append(compiled_filter)
|
|
234
|
+
|
|
235
|
+
def set_mode(self, mode: RegressionMode) -> None:
    """Set the regression mode.

    See :class:`RegressionMode` for more details on how each mode affects :class:`RegressionManager` behavior.
    Should be called before :meth:`start_regression` is called.

    Args:
        mode: The regression mode to set.
    """
    self._mode = mode
|
|
245
|
+
|
|
246
|
+
def register_test(self, test: Test) -> None:
    """Register a test with the :class:`RegressionManager`.

    Should be called before :meth:`start_regression` is called.

    Args:
        test: The test object to register.
    """
    self.log.debug("Registered test %r", test.fullname)
    self._test_queue.append(test)
|
|
256
|
+
|
|
257
|
+
@classmethod
def setup_pytest_assertion_rewriting(cls) -> None:
    """Configure pytest to rewrite assertions for better failure messages.

    Must be called before all modules containing tests are imported.
    Best-effort: logs and continues if pytest is absent or hook setup fails.
    """
    try:
        import pytest  # noqa: PLC0415
    except ImportError:
        _logger.info(
            "pytest not found, install it to enable better AssertionError messages"
        )
        return
    try:
        # Install the assertion rewriting hook, which must be done before we
        # import the test modules.
        from _pytest.assertion import install_importhook  # noqa: PLC0415
        from _pytest.config import Config  # noqa: PLC0415

        python_files = os.getenv("COCOTB_REWRITE_ASSERTION_FILES", "*.py").strip()
        if not python_files:
            # Even running the hook causes exceptions in some cases, so if the user
            # selects nothing, don't install the hook at all.
            return

        # NOTE(review): relies on pytest-internal (_pytest) APIs — may break
        # across pytest versions, hence the broad except below.
        pytest_conf = Config.fromdictargs(
            {}, ["--capture=no", "-o", f"python_files={python_files}"]
        )
        install_importhook(pytest_conf)
    except Exception:
        _logger.exception(
            "Configuring the assertion rewrite hook using pytest %s failed. "
            "Please file a bug report!",
            pytest.__version__,
        )
|
|
292
|
+
|
|
293
|
+
def start_regression(self) -> None:
    """Start the regression."""

    # sort tests into stages (stable sort preserves registration order
    # within a stage)
    self._test_queue.sort(key=lambda test: test.stage)

    # mark tests for running: a test is included if it matches any filter,
    # or unconditionally when no filters were added
    if self._filters:
        self._included = [False] * len(self._test_queue)
        for i, test in enumerate(self._test_queue):
            for filter in self._filters:
                if filter.search(test.fullname):
                    self._included[i] = True
    else:
        self._included = [True] * len(self._test_queue)

    # compute counts (count is 1-based: "test N of total")
    self.count = 1
    self.total_tests = sum(self._included)
    if self.total_tests == 0:
        self.log.warning(
            "No tests left after filtering with: %s",
            ", ".join(f.pattern for f in self._filters),
        )

    # start write scheduler
    cocotb.handle._start_write_scheduler()

    # start test loop
    self._regression_start_time = time.time()
    self._first_test = True
    self._execute()
|
|
325
|
+
|
|
326
|
+
def _execute(self) -> None:
    """Run the main regression loop.

    Used by :meth:`start_regression` and :meth:`_test_complete` to continue to the main test running loop,
    and by :meth:`_fail_regression` to shutdown the regression when a simulation failure occurs.
    """

    # Pops one test at a time; returns early once a test has been scheduled,
    # re-entering via the _test_complete callback when that test finishes.
    while self._test_queue:
        self._test = self._test_queue.pop(0)
        included = self._included.pop(0)

        # if the test is not included, record and continue
        if not included:
            self._record_test_excluded()
            continue

        # if the test is skipped, record and continue
        if self._test.skip and self._mode != RegressionMode.TESTCASE:
            self._record_test_skipped()
            continue

        # if the test should be run, but the simulator has failed, record and continue
        if self._sim_failure is not None:
            self._score_test(
                self._sim_failure,
                0,
                0,
            )
            continue

        # initialize the test, if it fails, record and continue
        try:
            self._running_test = self._init_test()
        except Exception:
            self._record_test_init_failed()
            continue

        self._log_test_start()

        if self._first_test:
            # First test starts immediately; later tests wait one time step
            # via _timer1 so simulator time can advance between tests.
            self._first_test = False
            return self._schedule_next_test()
        else:
            return self._timer1._prime(self._schedule_next_test)

    # Queue exhausted: finalize the regression.
    return self._tear_down()
|
|
372
|
+
|
|
373
|
+
def _init_test(self) -> RunningTest:
    """Build a RunningTest for the current test, wrapping it in a timeout if configured."""
    # wrap test function in timeout
    func: Callable[..., Coroutine[Trigger, None, None]]
    timeout = self._test.timeout_time
    if timeout is not None:
        f = self._test.func

        @functools.wraps(f)
        async def func(*args: object, **kwargs: object) -> None:
            await with_timeout(f(*args, **kwargs), timeout, self._test.timeout_unit)
    else:
        func = self._test.func

    # The test coroutine receives the toplevel handle as its sole argument.
    main_task = Task(func(cocotb.top), name=f"Test {self._test.name}")
    return RunningTest(self._test_complete, main_task)
|
|
388
|
+
|
|
389
|
+
def _schedule_next_test(self, trigger: Union[GPITrigger, None] = None) -> None:
    """Seed RNG, record start times, and start the prepared test.

    Args:
        trigger: The fired GPI trigger when called via ``_timer1``; ``None``
            when called directly for the first test.
    """
    if trigger is not None:
        # TODO move to Trigger object
        cocotb._gpi_triggers._current_gpi_trigger = trigger
        trigger._cleanup()

    # seed random number generator based on test module, name, and COCOTB_RANDOM_SEED
    # (sha1 here is a stable hash for reproducibility, not a security use)
    hasher = hashlib.sha1()
    hasher.update(self._test.fullname.encode())
    seed = cocotb.RANDOM_SEED + int(hasher.hexdigest(), 16)
    random.seed(seed)

    # Baselines used by _test_complete to compute per-test durations.
    self._start_sim_time = get_sim_time("ns")
    self._start_time = time.time()

    self._running_test.start()
|
|
405
|
+
|
|
406
|
+
def _tear_down(self) -> None:
    """Called by :meth:`_execute` when there are no more tests to run to finalize the regression."""
    # prevent re-entering the tear down procedure
    if not self._tearing_down:
        self._tearing_down = True
    else:
        return

    assert not self._test_queue

    # stop the write scheduler
    cocotb.handle._stop_write_scheduler()

    # Write out final log messages
    self._log_test_summary()

    # Generate output reports
    self.xunit.write()

    # TODO refactor initialization and finalization into their own module
    # to prevent circular imports requiring local imports
    from cocotb._init import _shutdown_testbench  # noqa: PLC0415

    _shutdown_testbench()

    # Setup simulator finalization
    simulator.stop_simulator()
|
|
433
|
+
|
|
434
|
+
def _test_complete(self) -> None:
    """Callback given to the test to be called when the test finished."""

    # Durations relative to the baselines set in _schedule_next_test.
    elapsed_wall = time.time() - self._start_time
    elapsed_sim_ns = get_sim_time("ns") - self._start_sim_time

    # Judge and record pass/fail, then hand control back to the test loop.
    self._score_test(self._running_test.result(), elapsed_wall, elapsed_sim_ns)
    return self._execute()
|
|
450
|
+
|
|
451
|
+
def _score_test(
    self,
    outcome: Outcome[None],
    wall_time_s: float,
    sim_time_ns: float,
) -> None:
    """Judge the test outcome against expect_fail/expect_error and record it.

    Args:
        outcome: The test's outcome; ``get()`` re-raises its exception, if any.
        wall_time_s: Wall-clock duration of the test in seconds.
        sim_time_ns: Simulated time consumed by the test in nanoseconds.
    """
    test = self._test

    # score test
    passed: bool
    msg: Union[str, None]
    exc: Union[BaseException, None]
    try:
        outcome.get()
    except BaseException as e:
        passed, msg = False, None
        # Trim internal machinery frames from the user-facing traceback.
        exc = remove_traceback_frames(e, ["_score_test", "get"])
    else:
        passed, msg, exc = True, None, None

    if passed:
        # Passing when a failure/error was expected is itself a failure.
        if test.expect_error:
            self._record_test_failed(
                wall_time_s=wall_time_s,
                sim_time_ns=sim_time_ns,
                result=exc,
                msg="passed but we expected an error",
            )
            passed = False

        elif test.expect_fail:
            self._record_test_failed(
                wall_time_s=wall_time_s,
                sim_time_ns=sim_time_ns,
                result=exc,
                msg="passed but we expected a failure",
            )
            passed = False

        else:
            self._record_test_passed(
                wall_time_s=wall_time_s,
                sim_time_ns=sim_time_ns,
                result=None,
                msg=msg,
            )

    elif test.expect_fail:
        # expect_fail only matches assertion-style failures, not arbitrary errors.
        if isinstance(exc, (AssertionError, Failed)):
            self._record_test_passed(
                wall_time_s=wall_time_s,
                sim_time_ns=sim_time_ns,
                result=None,
                msg="failed as expected",
            )

        else:
            self._record_test_failed(
                wall_time_s=wall_time_s,
                sim_time_ns=sim_time_ns,
                result=exc,
                msg="expected failure, but errored with unexpected type",
            )
            passed = False

    elif test.expect_error:
        # expect_error holds exception type(s) acceptable as a "pass".
        if isinstance(exc, test.expect_error):
            self._record_test_passed(
                wall_time_s=wall_time_s,
                sim_time_ns=sim_time_ns,
                result=None,
                msg="errored as expected",
            )

        else:
            self._record_test_failed(
                wall_time_s=wall_time_s,
                sim_time_ns=sim_time_ns,
                result=exc,
                msg="errored with unexpected type",
            )
            passed = False

    else:
        # NOTE(review): the trailing ``passed = False`` assignments above are
        # not read after this point in the visible code — presumably kept for
        # symmetry/future use; confirm before removing.
        self._record_test_failed(
            wall_time_s=wall_time_s,
            sim_time_ns=sim_time_ns,
            result=exc,
            msg=msg,
        )
|
|
541
|
+
|
|
542
|
+
def _get_lineno(self, test: Test) -> int:
|
|
543
|
+
try:
|
|
544
|
+
return inspect.getsourcelines(test.func)[1]
|
|
545
|
+
except OSError:
|
|
546
|
+
return 1
|
|
547
|
+
|
|
548
|
+
def _log_test_start(self) -> None:
    """Called by :meth:`_execute` to log that a test is starting."""
    if cocotb_logging.strip_ansi:
        color_on = color_off = ""
    else:
        color_on = self.COLOR_TEST
        color_off = ANSI.DEFAULT
    self.log.info(
        "%srunning%s %s (%d/%d)%s",
        color_on,
        color_off,
        self._test.fullname,
        self.count,
        self.total_tests,
        _format_doc(self._test.doc),
    )
|
|
561
|
+
|
|
562
|
+
def _record_test_excluded(self) -> None:
    """Called by :meth:`_execute` when a test is excluded by filters."""

    # write out xunit results (recorded as "skipped" in the report)
    lineno = self._get_lineno(self._test)
    self.xunit.add_testcase(
        name=self._test.name,
        classname=self._test.module,
        file=inspect.getfile(self._test.func),
        lineno=repr(lineno),
        time=repr(0),
        sim_time_ns=repr(0),
        ratio_time=repr(0),
    )
    self.xunit.add_skipped()

    # do not log anything, nor save details for the summary
|
|
579
|
+
|
|
580
|
+
def _record_test_skipped(self) -> None:
    """Called by :meth:`_execute` when a test is skipped."""

    # log test results
    hilight_start = "" if cocotb_logging.strip_ansi else self.COLOR_SKIPPED
    hilight_end = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
    self.log.info(
        "%sskipping%s %s (%d/%d)%s",
        hilight_start,
        hilight_end,
        self._test.fullname,
        self.count,
        self.total_tests,
        _format_doc(self._test.doc),
    )

    # write out xunit results
    lineno = self._get_lineno(self._test)
    self.xunit.add_testcase(
        name=self._test.name,
        classname=self._test.module,
        file=inspect.getfile(self._test.func),
        lineno=repr(lineno),
        time=repr(0),
        sim_time_ns=repr(0),
        ratio_time=repr(0),
    )
    self.xunit.add_skipped()

    # save details for summary (passed=None marks "skipped")
    self._test_results.append(
        _TestResults(
            test_fullname=self._test.fullname,
            passed=None,
            sim_time_ns=0,
            wall_time_s=0,
        )
    )

    # update running passed/failed/skipped counts
    self.skipped += 1
    self.count += 1
|
|
622
|
+
|
|
623
|
+
def _record_test_init_failed(self) -> None:
    """Called by :meth:`_execute` when a test initialization fails."""

    # log test results (log.exception attaches the active traceback)
    hilight_start = "" if cocotb_logging.strip_ansi else self.COLOR_FAILED
    hilight_end = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
    self.log.exception(
        "%sFailed to initialize%s %s! (%d/%d)%s",
        hilight_start,
        hilight_end,
        self._test.fullname,
        self.count,
        self.total_tests,
        _format_doc(self._test.doc),
    )

    # write out xunit results
    lineno = self._get_lineno(self._test)
    self.xunit.add_testcase(
        name=self._test.name,
        classname=self._test.module,
        file=inspect.getfile(self._test.func),
        lineno=repr(lineno),
        time=repr(0),
        sim_time_ns=repr(0),
        ratio_time=repr(0),
    )
    self.xunit.add_failure(msg="Test initialization failed")

    # save details for summary
    self._test_results.append(
        _TestResults(
            test_fullname=self._test.fullname,
            passed=False,
            sim_time_ns=0,
            wall_time_s=0,
        )
    )

    # update running passed/failed/skipped counts
    self.failures += 1
    self.count += 1
|
|
665
|
+
|
|
666
|
+
def _record_test_passed(
    self,
    wall_time_s: float,
    sim_time_ns: float,
    result: Union[Exception, None],
    msg: Union[str, None],
) -> None:
    """Log, report, and tally a passing test.

    Args:
        wall_time_s: Wall-clock duration in seconds.
        sim_time_ns: Simulated time consumed in nanoseconds.
        result: The exception that was expected and occurred, if any.
        msg: Optional extra message appended to the log line.
    """
    start_hilight = "" if cocotb_logging.strip_ansi else self.COLOR_PASSED
    stop_hilight = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
    if msg is None:
        rest = ""
    else:
        rest = f": {msg}"
    if result is None:
        result_was = ""
    else:
        result_was = f" (result was {type(result).__qualname__})"
    self.log.info(
        "%s %spassed%s%s%s",
        self._test.fullname,
        start_hilight,
        stop_hilight,
        rest,
        result_was,
    )

    # write out xunit results
    ratio_time = safe_divide(sim_time_ns, wall_time_s)
    lineno = self._get_lineno(self._test)
    self.xunit.add_testcase(
        name=self._test.name,
        classname=self._test.module,
        file=inspect.getfile(self._test.func),
        lineno=repr(lineno),
        time=repr(wall_time_s),
        sim_time_ns=repr(sim_time_ns),
        ratio_time=repr(ratio_time),
    )

    # update running passed/failed/skipped counts
    self.passed += 1
    self.count += 1

    # save details for summary
    self._test_results.append(
        _TestResults(
            test_fullname=self._test.fullname,
            passed=True,
            sim_time_ns=sim_time_ns,
            wall_time_s=wall_time_s,
        )
    )
|
|
718
|
+
|
|
719
|
+
def _record_test_failed(
    self,
    wall_time_s: float,
    sim_time_ns: float,
    result: Union[BaseException, None],
    msg: Union[str, None],
) -> None:
    """Log, report, and tally a failing test.

    Args:
        wall_time_s: Wall-clock duration in seconds.
        sim_time_ns: Simulated time consumed in nanoseconds.
        result: The exception that caused the failure, if any.
        msg: Optional extra message appended to the log line.
    """
    start_hilight = "" if cocotb_logging.strip_ansi else self.COLOR_FAILED
    stop_hilight = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
    if msg is None:
        rest = ""
    else:
        rest = f": {msg}"
    self.log.warning(
        "%s%s %sfailed%s%s",
        stop_hilight,
        self._test.fullname,
        start_hilight,
        stop_hilight,
        rest,
    )

    # write out xunit results
    # NOTE(review): when result is None the report records error_type
    # 'NoneType' and message 'None' — confirm whether that is intended.
    ratio_time = safe_divide(sim_time_ns, wall_time_s)
    lineno = self._get_lineno(self._test)
    self.xunit.add_testcase(
        name=self._test.name,
        classname=self._test.module,
        file=inspect.getfile(self._test.func),
        lineno=repr(lineno),
        time=repr(wall_time_s),
        sim_time_ns=repr(sim_time_ns),
        ratio_time=repr(ratio_time),
    )
    self.xunit.add_failure(error_type=type(result).__name__, error_msg=str(result))

    # update running passed/failed/skipped counts
    self.failures += 1
    self.count += 1

    # save details for summary
    self._test_results.append(
        _TestResults(
            test_fullname=self._test.fullname,
            passed=False,
            sim_time_ns=sim_time_ns,
            wall_time_s=wall_time_s,
        )
    )
|
|
768
|
+
|
|
769
|
+
def _log_test_summary(self) -> None:
    """Called by :meth:`_tear_down` to log the test summary.

    Emits a fixed-width table with one row per recorded test
    (name, status, simulated time, wall-clock time, sim/real ratio)
    followed by a totals row for the whole regression.
    """
    # Nothing to summarize if no test result was ever recorded.
    # (Check first so we don't query times on the empty path.)
    if not self._test_results:
        return

    real_time = time.time() - self._regression_start_time
    sim_time_ns = get_sim_time("ns")
    ratio_time = safe_divide(sim_time_ns, real_time)

    # Column headers and the label used in the totals row.
    TEST_FIELD = "TEST"
    RESULT_FIELD = "STATUS"
    SIM_FIELD = "SIM TIME (ns)"
    REAL_FIELD = "REAL TIME (s)"
    RATIO_FIELD = "RATIO (ns/s)"
    TOTAL_NAME = f"TESTS={self.total_tests} PASS={self.passed} FAIL={self.failures} SKIP={self.skipped}"

    # The TEST column must fit the header, the totals label, and the
    # longest test name; other columns are as wide as their headers.
    TEST_FIELD_LEN = max(
        len(TEST_FIELD),
        len(TOTAL_NAME),
        max(len(x.test_fullname) for x in self._test_results),
    )
    RESULT_FIELD_LEN = len(RESULT_FIELD)
    SIM_FIELD_LEN = len(SIM_FIELD)
    REAL_FIELD_LEN = len(REAL_FIELD)
    RATIO_FIELD_LEN = len(RATIO_FIELD)

    header_dict = {
        "a": TEST_FIELD,
        "b": RESULT_FIELD,
        "c": SIM_FIELD,
        "d": REAL_FIELD,
        "e": RATIO_FIELD,
        "a_len": TEST_FIELD_LEN,
        "b_len": RESULT_FIELD_LEN,
        "c_len": SIM_FIELD_LEN,
        "d_len": REAL_FIELD_LEN,
        "e_len": RATIO_FIELD_LEN,
    }

    # "** " + fields separated by two spaces + " **".
    LINE_LEN = (
        3
        + TEST_FIELD_LEN
        + 2
        + RESULT_FIELD_LEN
        + 2
        + SIM_FIELD_LEN
        + 2
        + REAL_FIELD_LEN
        + 2
        + RATIO_FIELD_LEN
        + 3
    )

    LINE_SEP = "*" * LINE_LEN + "\n"

    summary = ""
    summary += LINE_SEP
    summary += "** {a:<{a_len}} {b:^{b_len}} {c:>{c_len}} {d:>{d_len}} {e:>{e_len}} **\n".format(
        **header_dict
    )
    summary += LINE_SEP

    test_line = "** {a:<{a_len}} {start}{b:^{b_len}}{end} {c:>{c_len}.2f} {d:>{d_len}.2f} {e:>{e_len}} **\n"
    for result in self._test_results:
        # result.passed is a tri-state: None = skipped, True/False = ran.
        if result.passed is None:
            ratio = "-.--"
            pass_fail_str = "SKIP"
            hilite = self.COLOR_SKIPPED
        elif result.passed:
            ratio = format(result.ratio, "0.2f")
            pass_fail_str = "PASS"
            hilite = self.COLOR_PASSED
        else:
            ratio = format(result.ratio, "0.2f")
            pass_fail_str = "FAIL"
            hilite = self.COLOR_FAILED
        lolite = ANSI.DEFAULT

        if cocotb_logging.strip_ansi:
            hilite = ""
            lolite = ""

        test_dict = {
            "a": result.test_fullname,
            "b": pass_fail_str,
            "c": result.sim_time_ns,
            "d": result.wall_time_s,
            "e": ratio,
            "a_len": TEST_FIELD_LEN,
            "b_len": RESULT_FIELD_LEN,
            # -1: the ANSI start/end markers occupy no width, but the
            # header row has one more literal space than the data rows.
            "c_len": SIM_FIELD_LEN - 1,
            "d_len": REAL_FIELD_LEN - 1,
            "e_len": RATIO_FIELD_LEN - 1,
            "start": hilite,
            "end": lolite,
        }

        summary += test_line.format(**test_dict)

    summary += LINE_SEP

    summary += test_line.format(
        a=TOTAL_NAME,
        b="",
        c=sim_time_ns,
        d=real_time,
        e=format(ratio_time, "0.2f"),
        a_len=TEST_FIELD_LEN,
        b_len=RESULT_FIELD_LEN,
        c_len=SIM_FIELD_LEN - 1,
        d_len=REAL_FIELD_LEN - 1,
        e_len=RATIO_FIELD_LEN - 1,
        start="",
        end="",
    )

    summary += LINE_SEP

    self.log.info(summary)
|
|
892
|
+
|
|
893
|
+
def _fail_simulation(self, msg: str) -> None:
    # Record a simulator-level failure (presumably invoked when the
    # simulator reports a fatal error — TODO confirm against callers):
    # wrap the message as the regression's failure outcome, abort the
    # test currently running with it, then pump the scheduler's event
    # loop so the abort is processed immediately.
    self._sim_failure = Error(SimFailure(msg))
    self._running_test.abort(self._sim_failure)
    cocotb._scheduler_inst._event_loop()
|