cocotb 1.9.2__cp310-cp310-win32.whl → 2.0.0rc2__cp310-cp310-win32.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cocotb/_ANSI.py +65 -0
- cocotb/__init__.py +81 -327
- cocotb/_base_triggers.py +515 -0
- cocotb/_bridge.py +186 -0
- cocotb/_decorators.py +515 -0
- cocotb/_deprecation.py +3 -3
- cocotb/_exceptions.py +7 -0
- cocotb/_extended_awaitables.py +419 -0
- cocotb/_gpi_triggers.py +385 -0
- cocotb/_init.py +301 -0
- cocotb/_outcomes.py +54 -0
- cocotb/_profiling.py +46 -0
- cocotb/_py_compat.py +114 -29
- cocotb/_scheduler.py +448 -0
- cocotb/_test.py +248 -0
- cocotb/_test_factory.py +312 -0
- cocotb/_test_functions.py +42 -0
- cocotb/_typing.py +7 -0
- cocotb/_utils.py +274 -0
- cocotb/_version.py +3 -7
- cocotb/_xunit_reporter.py +66 -0
- cocotb/clock.py +353 -108
- cocotb/debug.py +24 -0
- cocotb/handle.py +1370 -793
- cocotb/libs/cocotb.dll +0 -0
- cocotb/libs/cocotb.exp +0 -0
- cocotb/libs/cocotb.lib +0 -0
- cocotb/libs/cocotbfli_modelsim.dll +0 -0
- cocotb/libs/cocotbfli_modelsim.exp +0 -0
- cocotb/libs/cocotbfli_modelsim.lib +0 -0
- cocotb/libs/cocotbutils.dll +0 -0
- cocotb/libs/cocotbutils.exp +0 -0
- cocotb/libs/cocotbutils.lib +0 -0
- cocotb/libs/cocotbvhpi_aldec.dll +0 -0
- cocotb/libs/cocotbvhpi_aldec.exp +0 -0
- cocotb/libs/cocotbvhpi_aldec.lib +0 -0
- cocotb/libs/cocotbvhpi_modelsim.dll +0 -0
- cocotb/libs/cocotbvhpi_modelsim.exp +0 -0
- cocotb/libs/cocotbvhpi_modelsim.lib +0 -0
- cocotb/libs/cocotbvpi_aldec.dll +0 -0
- cocotb/libs/cocotbvpi_aldec.exp +0 -0
- cocotb/libs/cocotbvpi_aldec.lib +0 -0
- cocotb/libs/cocotbvpi_ghdl.dll +0 -0
- cocotb/libs/cocotbvpi_ghdl.exp +0 -0
- cocotb/libs/cocotbvpi_ghdl.lib +0 -0
- cocotb/libs/cocotbvpi_icarus.exp +0 -0
- cocotb/libs/cocotbvpi_icarus.lib +0 -0
- cocotb/libs/cocotbvpi_icarus.vpl +0 -0
- cocotb/libs/cocotbvpi_modelsim.dll +0 -0
- cocotb/libs/cocotbvpi_modelsim.exp +0 -0
- cocotb/libs/cocotbvpi_modelsim.lib +0 -0
- cocotb/libs/embed.dll +0 -0
- cocotb/libs/embed.exp +0 -0
- cocotb/libs/embed.lib +0 -0
- cocotb/libs/gpi.dll +0 -0
- cocotb/libs/gpi.exp +0 -0
- cocotb/libs/gpi.lib +0 -0
- cocotb/libs/gpilog.dll +0 -0
- cocotb/libs/gpilog.exp +0 -0
- cocotb/libs/gpilog.lib +0 -0
- cocotb/libs/pygpilog.dll +0 -0
- cocotb/libs/pygpilog.exp +0 -0
- cocotb/libs/pygpilog.lib +0 -0
- cocotb/logging.py +424 -0
- cocotb/queue.py +103 -57
- cocotb/regression.py +680 -717
- cocotb/result.py +17 -188
- cocotb/share/def/aldec.exp +0 -0
- cocotb/share/def/aldec.lib +0 -0
- cocotb/share/def/ghdl.exp +0 -0
- cocotb/share/def/ghdl.lib +0 -0
- cocotb/share/def/icarus.exp +0 -0
- cocotb/share/def/icarus.lib +0 -0
- cocotb/share/def/modelsim.def +1 -0
- cocotb/share/def/modelsim.exp +0 -0
- cocotb/share/def/modelsim.lib +0 -0
- cocotb/share/include/cocotb_utils.h +9 -32
- cocotb/share/include/embed.h +7 -30
- cocotb/share/include/gpi.h +331 -137
- cocotb/share/include/gpi_logging.h +221 -142
- cocotb/share/include/py_gpi_logging.h +8 -5
- cocotb/share/include/vpi_user_ext.h +4 -26
- cocotb/share/lib/verilator/verilator.cpp +80 -67
- cocotb/simtime.py +230 -0
- cocotb/simulator.cp310-win32.exp +0 -0
- cocotb/simulator.cp310-win32.lib +0 -0
- cocotb/simulator.cp310-win32.pyd +0 -0
- cocotb/simulator.pyi +107 -0
- cocotb/task.py +478 -213
- cocotb/triggers.py +55 -1092
- cocotb/types/__init__.py +28 -47
- cocotb/types/_abstract_array.py +151 -0
- cocotb/types/_array.py +295 -0
- cocotb/types/_indexing.py +17 -0
- cocotb/types/_logic.py +333 -0
- cocotb/types/_logic_array.py +868 -0
- cocotb/types/{range.py → _range.py} +47 -48
- cocotb/types/_resolve.py +76 -0
- cocotb/utils.py +58 -646
- cocotb-2.0.0rc2.dist-info/METADATA +60 -0
- cocotb-2.0.0rc2.dist-info/RECORD +146 -0
- {cocotb-1.9.2.dist-info → cocotb-2.0.0rc2.dist-info}/WHEEL +1 -1
- cocotb-2.0.0rc2.dist-info/entry_points.txt +2 -0
- {cocotb-1.9.2.dist-info → cocotb-2.0.0rc2.dist-info/licenses}/LICENSE +1 -0
- {cocotb-1.9.2.dist-info → cocotb-2.0.0rc2.dist-info}/top_level.txt +1 -0
- cocotb_tools/__init__.py +0 -0
- cocotb_tools/_coverage.py +33 -0
- cocotb_tools/_vendor/__init__.py +3 -0
- cocotb_tools/check_results.py +65 -0
- cocotb_tools/combine_results.py +152 -0
- cocotb_tools/config.py +241 -0
- {cocotb → cocotb_tools}/ipython_support.py +29 -22
- cocotb_tools/makefiles/Makefile.deprecations +27 -0
- {cocotb/share → cocotb_tools}/makefiles/Makefile.inc +77 -55
- {cocotb/share → cocotb_tools}/makefiles/Makefile.sim +16 -33
- {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.activehdl +9 -16
- cocotb_tools/makefiles/simulators/Makefile.cvc +61 -0
- cocotb_tools/makefiles/simulators/Makefile.dsim +39 -0
- {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.ghdl +13 -42
- cocotb_tools/makefiles/simulators/Makefile.icarus +80 -0
- cocotb_tools/makefiles/simulators/Makefile.ius +93 -0
- cocotb_tools/makefiles/simulators/Makefile.modelsim +9 -0
- cocotb_tools/makefiles/simulators/Makefile.nvc +60 -0
- cocotb_tools/makefiles/simulators/Makefile.questa +29 -0
- cocotb/share/makefiles/simulators/Makefile.questa → cocotb_tools/makefiles/simulators/Makefile.questa-compat +26 -54
- cocotb_tools/makefiles/simulators/Makefile.questa-qisqrun +149 -0
- {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.riviera +17 -56
- cocotb_tools/makefiles/simulators/Makefile.vcs +65 -0
- {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.verilator +15 -22
- {cocotb/share → cocotb_tools}/makefiles/simulators/Makefile.xcelium +20 -52
- cocotb_tools/py.typed +0 -0
- cocotb_tools/runner.py +1868 -0
- cocotb/_sim_versions.py → cocotb_tools/sim_versions.py +16 -21
- pygpi/entry.py +34 -18
- pygpi/py.typed +0 -0
- cocotb/ANSI.py +0 -92
- cocotb/binary.py +0 -858
- cocotb/config.py +0 -289
- cocotb/decorators.py +0 -332
- cocotb/log.py +0 -303
- cocotb/memdebug.py +0 -35
- cocotb/outcomes.py +0 -56
- cocotb/runner.py +0 -1400
- cocotb/scheduler.py +0 -1099
- cocotb/share/makefiles/Makefile.deprecations +0 -12
- cocotb/share/makefiles/simulators/Makefile.cvc +0 -94
- cocotb/share/makefiles/simulators/Makefile.icarus +0 -111
- cocotb/share/makefiles/simulators/Makefile.ius +0 -125
- cocotb/share/makefiles/simulators/Makefile.modelsim +0 -32
- cocotb/share/makefiles/simulators/Makefile.nvc +0 -64
- cocotb/share/makefiles/simulators/Makefile.vcs +0 -98
- cocotb/types/array.py +0 -309
- cocotb/types/logic.py +0 -292
- cocotb/types/logic_array.py +0 -298
- cocotb/wavedrom.py +0 -199
- cocotb/xunit_reporter.py +0 -80
- cocotb-1.9.2.dist-info/METADATA +0 -168
- cocotb-1.9.2.dist-info/RECORD +0 -121
- cocotb-1.9.2.dist-info/entry_points.txt +0 -2
- /cocotb/{_vendor/__init__.py → py.typed} +0 -0
- {cocotb → cocotb_tools}/_vendor/distutils_version.py +0 -0
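The headline structural change above is the split of build-and-run tooling out of the installed `cocotb` package into a new `cocotb_tools` package: `cocotb/runner.py` (−1400 lines) is superseded by `cocotb_tools/runner.py` (+1868), `cocotb/config.py` by `cocotb_tools/config.py`, and the makefiles move from `cocotb/share/makefiles/` to `cocotb_tools/makefiles/`. A minimal sketch of the import-path migration this implies for downstream code, assuming the `get_runner` entry point of the 1.x experimental runner carries over under the new package name (this diff view does not show the 2.0 runner's API surface):

    # Hypothetical compatibility shim for the cocotb -> cocotb_tools move listed above.
    # get_runner is the cocotb 1.x runner entry point; its presence in
    # cocotb_tools.runner is assumed here, not confirmed by this diff.
    try:
        from cocotb_tools.runner import get_runner  # cocotb 2.0 layout
    except ImportError:
        from cocotb.runner import get_runner  # cocotb 1.x layout

    runner = get_runner("icarus")  # "icarus" is just an example simulator name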
cocotb/regression.py
CHANGED
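One behavioral note before the hunks: the rewrite makes per-test RNG seeding deterministic. `_schedule_next_test` (added below) derives each test's seed from the global random seed plus a SHA-1 hash of the test's full name, so a given test reseeds identically across runs regardless of test ordering. A standalone sketch mirroring that added code (the seed value and test name here are hypothetical):

    # Mirrors the seeding logic added in _schedule_next_test below:
    # seed = RANDOM_SEED + int(sha1(test_fullname), 16), then random.seed(seed).
    import hashlib
    import random

    def per_test_seed(random_seed: int, test_fullname: str) -> int:
        hasher = hashlib.sha1()
        hasher.update(test_fullname.encode())
        return random_seed + int(hasher.hexdigest(), 16)

    random.seed(per_test_seed(12345, "test_module.my_test"))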
@@ -1,257 +1,267 @@
+# Copyright cocotb contributors
 # Copyright (c) 2013, 2018 Potential Ventures Ltd
 # Copyright (c) 2013 SolarFlare Communications Inc
-#
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above copyright
-#       notice, this list of conditions and the following disclaimer in the
-#       documentation and/or other materials provided with the distribution.
-#     * Neither the name of Potential Ventures Ltd,
-#       SolarFlare Communications Inc nor the
-#       names of its contributors may be used to endorse or promote products
-#       derived from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Licensed under the Revised BSD License, see LICENSE for details.
+# SPDX-License-Identifier: BSD-3-Clause
 
 """All things relating to regression capabilities."""
 
+import functools
 import hashlib
 import inspect
-import
+import logging
 import os
-import pdb
 import random
-import
+import re
 import time
-import
-from
-from
+import warnings
+from enum import auto
+from importlib import import_module
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+    Coroutine,
+    List,
+    Union,
+)
 
 import cocotb
-import cocotb.
+import cocotb._gpi_triggers
+import cocotb.handle
+from cocotb import logging as cocotb_logging
 from cocotb import simulator
-from cocotb.
-from cocotb.
-from cocotb.
-from cocotb.
-from cocotb.
-from cocotb.
+from cocotb._decorators import Parameterized, Test
+from cocotb._extended_awaitables import with_timeout
+from cocotb._gpi_triggers import GPITrigger, Timer
+from cocotb._outcomes import Error, Outcome
+from cocotb._test import RunningTest
+from cocotb._test_factory import TestFactory
+from cocotb._test_functions import Failed
+from cocotb._utils import (
+    DocEnum,
+    remove_traceback_frames,
+    safe_divide,
+)
+from cocotb._xunit_reporter import XUnitReporter
+from cocotb.logging import ANSI
+from cocotb.simtime import get_sim_time
 from cocotb.task import Task
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-else:
-    try:
-        with pytest.raises(Exception):
-            pass
-    except BaseException as _raises_e:
-        _Failed = type(_raises_e)
+
+if TYPE_CHECKING:
+    from cocotb._base_triggers import Trigger
+
+__all__ = (
+    "Parameterized",
+    "RegressionManager",
+    "RegressionMode",
+    "SimFailure",
+    "Test",
+    "TestFactory",
+)
+
+# Set __module__ on re-exports
+Parameterized.__module__ = __name__
+Test.__module__ = __name__
+TestFactory.__module__ = __name__
+
+
+class SimFailure(BaseException):
+    """A Test failure due to simulator failure.
+
+    .. caution::
+        Not to be raised or caught within a test.
+        Only used for marking expected failure with ``expect_error`` in :func:`cocotb.test`.
+    """
+
+
+_logger = logging.getLogger(__name__)
+
+
+def _format_doc(docstring: Union[str, None]) -> str:
+    if docstring is None:
+        return ""
     else:
-
+        brief = docstring.split("\n")[0]
+        return f"\n {brief}"
+
+
+class RegressionMode(DocEnum):
+    """The mode of the :class:`RegressionManager`."""
+
+    REGRESSION = (
+        auto(),
+        """Tests are run if included. Skipped tests are skipped, expected failures and errors are respected.""",
+    )
+
+    TESTCASE = (
+        auto(),
+        """Like :attr:`REGRESSION`, but skipped tests are *not* skipped if included.""",
+    )
+
+
+class _TestResults:
+    # TODO Replace with dataclass in Python 3.7+
+
+    def __init__(
+        self,
+        test_fullname: str,
+        passed: Union[None, bool],
+        wall_time_s: float,
+        sim_time_ns: float,
+    ) -> None:
+        self.test_fullname = test_fullname
+        self.passed = passed
+        self.wall_time_s = wall_time_s
+        self.sim_time_ns = sim_time_ns
+
+    @property
+    def ratio(self) -> float:
+        return safe_divide(self.sim_time_ns, self.wall_time_s)
 
 
 class RegressionManager:
-    """
+    """Object which manages tests.
 
-
-
-
-
-
-
-
-
-
-
-
-
+    This object uses the builder pattern to build up a regression.
+    Tests are added using :meth:`register_test` or :meth:`discover_tests`.
+    Inclusion filters for tests can be added using :meth:`add_filters`.
+    The "mode" of the regression can be controlled using :meth:`set_mode`.
+    These methods can be called in any order any number of times before :meth:`start_regression` is called,
+    and should not be called again after that.
+
+    Once all the tests, filters, and regression behavior configuration is done,
+    the user starts the regression with :meth:`start_regression`.
+    This method must be called exactly once.
+
+    Until the regression is started, :attr:`total_tests`, :attr:`count`, :attr:`passed`,
+    :attr:`skipped`, and :attr:`failures` hold placeholder values.
+    """
+
+    COLOR_TEST = ANSI.BLUE_FG
+    COLOR_PASSED = ANSI.GREEN_FG
+    COLOR_SKIPPED = ANSI.YELLOW_FG
+    COLOR_FAILED = ANSI.RED_FG
+
+    _timer1 = Timer(1)
+
+    def __init__(self) -> None:
+        self._test: Test
+        self._running_test: RunningTest
         self.log = _logger
-        self.
-        self.
+        self._regression_start_time: float
+        self._test_results: List[_TestResults] = []
+        self.total_tests = 0
+        """Total number of tests that will be run or skipped."""
         self.count = 0
+        """The current test count."""
         self.passed = 0
+        """The current number of passed tests."""
        self.skipped = 0
+        """The current number of skipped tests."""
         self.failures = 0
+        """The current number of failed tests."""
         self._tearing_down = False
+        self._test_queue: List[Test] = []
+        self._filters: List[re.Pattern[str]] = []
+        self._mode = RegressionMode.REGRESSION
+        self._included: List[bool]
+        self._sim_failure: Union[Error[None], None] = None
 
         # Setup XUnit
         ###################
 
         results_filename = os.getenv("COCOTB_RESULTS_FILE", "results.xml")
-        suite_name = os.getenv("
-        package_name = os.getenv("
+        suite_name = os.getenv("COCOTB_RESULT_TESTSUITE", "all")
+        package_name = os.getenv("COCOTB_RESULT_TESTPACKAGE", "all")
 
         self.xunit = XUnitReporter(filename=results_filename)
-
         self.xunit.add_testsuite(name=suite_name, package=package_name)
-
         self.xunit.add_property(name="random_seed", value=str(cocotb.RANDOM_SEED))
 
-
-
-
-        if coverage is not None:
-            self.log.info("Enabling coverage collection of Python code")
-            config_filepath = os.getenv("COVERAGE_RCFILE")
-            if config_filepath is None:
-                # Exclude cocotb itself from coverage collection.
-                cocotb_package_dir = os.path.dirname(__file__)
-                self._cov = coverage.coverage(
-                    branch=True, omit=[f"{cocotb_package_dir}/*"]
-                )
-            else:
-                # Allow the config file to handle all configuration
-                self._cov = coverage.coverage()
-            self._cov.start()
-
-        # Test Discovery
-        ####################
-        self._queue = []
-        for test in tests:
-            self.log.info(f"Found test {test.__module__}.{test.__qualname__}")
-            self._queue.append(test)
-        self.ntests = len(self._queue)
+    def discover_tests(self, *modules: str) -> None:
+        """Discover tests in files automatically.
 
-
-            self.log.warning("No tests were discovered")
-
-        self._queue.sort(key=lambda test: (test.stage, test._id))
-
-    @classmethod
-    def from_discovery(cls, dut: SimHandle):
-        """
-        Obtains the test list by discovery.
-
-        See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.
+        Should be called before :meth:`start_regression` is called.
 
         Args:
-
+            modules: Each argument given is the name of a module where tests are found.
         """
-
-
+        for module_name in modules:
+            mod = import_module(module_name)
+
+            found_test: bool = False
+            for obj_name, obj in vars(mod).items():
+                if isinstance(obj, Test):
+                    found_test = True
+                    self.register_test(obj)
+                elif isinstance(obj, Parameterized):
+                    found_test = True
+                    generated_tests: bool = False
+                    for test in obj.generate_tests():
+                        generated_tests = True
+                        self.register_test(test)
+                    if not generated_tests:
+                        warnings.warn(
+                            f"Parametrize object generated no tests: {module_name}.{obj_name}",
+                            stacklevel=2,
+                        )
 
-
-
-
-
+            if not found_test:
+                warnings.warn(
+                    f"No tests were discovered in module: {module_name}", stacklevel=2
+                )
 
-
-
-
-
+        # error if no tests were discovered
+        if not self._test_queue:
+            modules_str = ", ".join(repr(m) for m in modules)
+            raise RuntimeError(f"No tests were discovered in any module: {modules_str}")
 
-
-
-                "Environment variable MODULE, which defines the module(s) to execute, is not defined."
-            )
+    def add_filters(self, *filters: str) -> None:
+        """Add regular expressions to filter-in registered tests.
 
-
+        Only those tests which match at least one of the given filters are included;
+        the rest are excluded.
 
-
+        Should be called before :meth:`start_regression` is called.
 
-
-
-
+        Args:
+            filters: Each argument given is a regex pattern for test names.
+                A match *includes* the test.
+        """
+        for filter in filters:
+            compiled_filter = re.compile(filter)
+            self._filters.append(compiled_filter)
 
-
-
-            _logger.debug("Python Path: " + ",".join(sys.path))
-            _logger.debug("PWD: " + os.getcwd())
-            module = _my_import(module_name)
-        except Exception as E:
-            _logger.critical("Failed to import module %s: %s", module_name, E)
-            _logger.info('MODULE variable was "%s"', ".".join(modules))
-            _logger.info(traceback.format_exc())
-            raise
-
-        if tests is not None:
-            not_found_tests = []
-            # Specific functions specified, don't auto-discover
-            for test_name in tests:
-                try:
-                    test = getattr(module, test_name)
-                except AttributeError:
-                    not_found_tests.append(test_name)
-                    continue
-
-                if not isinstance(test, Test):
-                    _logger.error(
-                        "Requested %s from module %s isn't a cocotb.test decorated coroutine",
-                        test_name,
-                        module_name,
-                    )
-                    raise ImportError(
-                        "Failed to find requested test %s" % test_name
-                    )
+    def set_mode(self, mode: RegressionMode) -> None:
+        """Set the regression mode.
 
-
-
+        See :class:`RegressionMode` for more details on how each mode affects :class:`RegressionManager` behavior.
+        Should be called before :meth:`start_regression` is called.
 
-
+        Args:
+            mode: The regression mode to set.
+        """
+        self._mode = mode
 
-
-
+    def register_test(self, test: Test) -> None:
+        """Register a test with the :class:`RegressionManager`.
 
-
-
-
-
-
-
-
-            if tests:
-                _logger.error(
-                    "Requested test(s) %s wasn't found in module(s) %s", tests, modules
-                )
-                raise AttributeError("Test(s) %s doesn't exist in %s" % (tests, modules))
+        Should be called before :meth:`start_regression` is called.
+
+        Args:
+            test: The test object to register.
+        """
+        self.log.debug("Registered test %r", test.fullname)
+        self._test_queue.append(test)
 
     @classmethod
-    def
+    def setup_pytest_assertion_rewriting(cls) -> None:
+        """Configure pytest to rewrite assertions for better failure messages.
+
+        Must be called before all modules containing tests are imported.
+        """
         try:
-            import pytest
+            import pytest  # noqa: PLC0415
         except ImportError:
             _logger.info(
                 "pytest not found, install it to enable better AssertionError messages"
@@ -260,202 +270,408 @@ class RegressionManager:
         try:
             # Install the assertion rewriting hook, which must be done before we
             # import the test modules.
-            from _pytest.assertion import install_importhook
-            from _pytest.config import Config
+            from _pytest.assertion import install_importhook  # noqa: PLC0415
+            from _pytest.config import Config  # noqa: PLC0415
+
+            python_files = os.getenv("COCOTB_REWRITE_ASSERTION_FILES", "*.py").strip()
+            if not python_files:
+                # Even running the hook causes exceptions in some cases, so if the user
+                # selects nothing, don't install the hook at all.
+                return
 
             pytest_conf = Config.fromdictargs(
-                {}, ["--capture=no", "-o", "python_files
+                {}, ["--capture=no", "-o", f"python_files={python_files}"]
             )
             install_importhook(pytest_conf)
         except Exception:
             _logger.exception(
-                "Configuring the assertion rewrite hook using pytest
-                "Please file a bug report!"
+                "Configuring the assertion rewrite hook using pytest %s failed. "
+                "Please file a bug report!",
+                pytest.__version__,
+            )
+
+    def start_regression(self) -> None:
+        """Start the regression."""
+
+        # sort tests into stages
+        self._test_queue.sort(key=lambda test: test.stage)
+
+        # mark tests for running
+        if self._filters:
+            self._included = [False] * len(self._test_queue)
+            for i, test in enumerate(self._test_queue):
+                for filter in self._filters:
+                    if filter.search(test.fullname):
+                        self._included[i] = True
+        else:
+            self._included = [True] * len(self._test_queue)
+
+        # compute counts
+        self.count = 1
+        self.total_tests = sum(self._included)
+        if self.total_tests == 0:
+            self.log.warning(
+                "No tests left after filtering with: %s",
+                ", ".join(f.pattern for f in self._filters),
             )
 
-
-
-
+        # start write scheduler
+        cocotb.handle._start_write_scheduler()
+
+        # start test loop
+        self._regression_start_time = time.time()
+        self._first_test = True
+        self._execute()
+
+    def _execute(self) -> None:
+        """Run the main regression loop.
+
+        Used by :meth:`start_regression` and :meth:`_test_complete` to continue to the main test running loop,
+        and by :meth:`_fail_regression` to shutdown the regression when a simulation failure occurs.
+        """
+
+        while self._test_queue:
+            self._test = self._test_queue.pop(0)
+            included = self._included.pop(0)
+
+            # if the test is not included, record and continue
+            if not included:
+                self._record_test_excluded()
+                continue
+
+            # if the test is skipped, record and continue
+            if self._test.skip and self._mode != RegressionMode.TESTCASE:
+                self._record_test_skipped()
+                continue
+
+            # if the test should be run, but the simulator has failed, record and continue
+            if self._sim_failure is not None:
+                self._score_test(
+                    self._sim_failure,
+                    0,
+                    0,
+                )
+                continue
+
+            # initialize the test, if it fails, record and continue
+            try:
+                self._running_test = self._init_test()
+            except Exception:
+                self._record_test_init_failed()
+                continue
+
+            self._log_test_start()
+
+            if self._first_test:
+                self._first_test = False
+                return self._schedule_next_test()
+            else:
+                return self._timer1._prime(self._schedule_next_test)
+
+        return self._tear_down()
+
+    def _init_test(self) -> RunningTest:
+        # wrap test function in timeout
+        func: Callable[..., Coroutine[Trigger, None, None]]
+        timeout = self._test.timeout_time
+        if timeout is not None:
+            f = self._test.func
+
+            @functools.wraps(f)
+            async def func(*args: object, **kwargs: object) -> None:
+                await with_timeout(f(*args, **kwargs), timeout, self._test.timeout_unit)
+        else:
+            func = self._test.func
+
+        main_task = Task(func(cocotb.top), name=f"Test {self._test.name}")
+        return RunningTest(self._test_complete, main_task)
+
+    def _schedule_next_test(self, trigger: Union[GPITrigger, None] = None) -> None:
+        if trigger is not None:
+            # TODO move to Trigger object
+            cocotb._gpi_triggers._current_gpi_trigger = trigger
+            trigger._cleanup()
+
+        # seed random number generator based on test module, name, and COCOTB_RANDOM_SEED
+        hasher = hashlib.sha1()
+        hasher.update(self._test.fullname.encode())
+        seed = cocotb.RANDOM_SEED + int(hasher.hexdigest(), 16)
+        random.seed(seed)
+
+        self._start_sim_time = get_sim_time("ns")
+        self._start_time = time.time()
+
+        self._running_test.start()
 
     def _tear_down(self) -> None:
+        """Called by :meth:`_execute` when there are no more tests to run to finalize the regression."""
         # prevent re-entering the tear down procedure
         if not self._tearing_down:
             self._tearing_down = True
         else:
             return
 
-
-
-
-
-                    break
-                self._record_result(
-                    test=test, outcome=Error(SimFailure), wall_time_s=0, sim_time_ns=0
-                )
+        assert not self._test_queue
+
+        # stop the write scheduler
+        cocotb.handle._stop_write_scheduler()
 
         # Write out final log messages
         self._log_test_summary()
 
         # Generate output reports
         self.xunit.write()
-        if self._cov:
-            self._cov.stop()
-            self.log.info("Writing coverage data")
-            self._cov.save()
-            self._cov.html_report()
 
-        #
-
-        cocotb.
+        # TODO refactor initialization and finalization into their own module
+        # to prevent circular imports requiring local imports
+        from cocotb._init import _shutdown_testbench  # noqa: PLC0415
 
-
-    def next_test(self) -> Optional[Test]:
-        return self._next_test()
+        _shutdown_testbench()
 
-
-
-        if not self._queue:
-            return None
-        self.count += 1
-        return self._queue.pop(0)
+        # Setup simulator finalization
+        simulator.stop_simulator()
 
-
-
-        self._handle_result(test)
+    def _test_complete(self) -> None:
+        """Callback given to the test to be called when the test finished."""
 
-
-
+        # compute wall time
+        wall_time = time.time() - self._start_time
+        sim_time_ns = get_sim_time("ns") - self._start_sim_time
 
-
+        # Judge and record pass/fail.
+        self._score_test(
+            self._running_test.result(),
+            wall_time,
+            sim_time_ns,
+        )
 
-
-
-        """
-        assert test is self._test_task
+        # Run next test.
+        return self._execute()
 
-
-
+    def _score_test(
+        self,
+        outcome: Outcome[None],
+        wall_time_s: float,
+        sim_time_ns: float,
+    ) -> None:
+        test = self._test
 
-
-
-
-
-
-
+        # score test
+        passed: bool
+        msg: Union[str, None]
+        exc: Union[BaseException, None]
+        try:
+            outcome.get()
+        except BaseException as e:
+            passed, msg = False, None
+            exc = remove_traceback_frames(e, ["_score_test", "get"])
+        else:
+            passed, msg, exc = True, None, None
+
+        if passed:
+            if test.expect_error:
+                self._record_test_failed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=exc,
+                    msg="passed but we expected an error",
+                )
+                passed = False
 
-
+            elif test.expect_fail:
+                self._record_test_failed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=exc,
+                    msg="passed but we expected a failure",
+                )
+                passed = False
 
-
-
+            else:
+                self._record_test_passed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=None,
+                    msg=msg,
+                )
 
-
-
-
-
+        elif test.expect_fail:
+            if isinstance(exc, (AssertionError, Failed)):
+                self._record_test_passed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=None,
+                    msg="failed as expected",
+                )
 
-
-
-
-
-
-
-
-                i=self.count,
-                total=self.ntests,
-                end=hilight_end,
-                name=test.__qualname__,
+            else:
+                self._record_test_failed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=exc,
+                    msg="expected failure, but errored with unexpected type",
+                )
+                passed = False
 
+        elif test.expect_error:
+            if isinstance(exc, test.expect_error):
+                self._record_test_passed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=None,
+                    msg="errored as expected",
                 )
-            )
-            self._record_result(test, None, 0, 0)
-            return None
 
-
+            else:
+                self._record_test_failed(
+                    wall_time_s=wall_time_s,
+                    sim_time_ns=sim_time_ns,
+                    result=exc,
+                    msg="errored with unexpected type",
+                )
+                passed = False
 
-
-        self.
-
-
+        else:
+            self._record_test_failed(
+                wall_time_s=wall_time_s,
+                sim_time_ns=sim_time_ns,
+                result=exc,
+                msg=msg,
             )
-            self._record_result(test, test_init_outcome, 0, 0)
-            return None
 
-
+    def _get_lineno(self, test: Test) -> int:
+        try:
+            return inspect.getsourcelines(test.func)[1]
+        except OSError:
+            return 1
 
-
-
-
-
-
-
+    def _log_test_start(self) -> None:
+        """Called by :meth:`_execute` to log that a test is starting."""
+        hilight_start = "" if cocotb_logging.strip_ansi else self.COLOR_TEST
+        hilight_end = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
+        self.log.info(
+            "%srunning%s %s (%d/%d)%s",
+            hilight_start,
+            hilight_end,
+            self._test.fullname,
+            self.count,
+            self.total_tests,
+            _format_doc(self._test.doc),
+        )
 
-
+    def _record_test_excluded(self) -> None:
+        """Called by :meth:`_execute` when a test is excluded by filters."""
 
-
-
-
-
+        # write out xunit results
+        lineno = self._get_lineno(self._test)
+        self.xunit.add_testcase(
+            name=self._test.name,
+            classname=self._test.module,
+            file=inspect.getfile(self._test.func),
+            lineno=repr(lineno),
+            time=repr(0),
+            sim_time_ns=repr(0),
+            ratio_time=repr(0),
+        )
+        self.xunit.add_skipped()
 
-        #
-        result_pass = True
-        sim_failed = False
+        # do not log anything, nor save details for the summary
 
-
-
-        except (KeyboardInterrupt, SystemExit):
-            raise
-        except BaseException as e:
-            result = remove_traceback_frames(e, ["_score_test", "get"])
-        else:
-            result = TestSuccess()
-
-        if (
-            isinstance(result, TestSuccess)
-            and not test.expect_fail
-            and not test.expect_error
-        ):
-            self._log_test_passed(test, None, None)
-
-        elif isinstance(result, TestSuccess) and test.expect_error:
-            self._log_test_failed(test, None, "passed but we expected an error")
-            result_pass = False
-
-        elif isinstance(result, TestSuccess):
-            self._log_test_failed(test, None, "passed but we expected a failure")
-            result_pass = False
-
-        elif isinstance(result, SimFailure):
-            if isinstance(result, test.expect_error):
-                self._log_test_passed(test, result, "errored as expected")
-            else:
-                self.log.error("Test error has lead to simulator shutting us down")
-                result_pass = False
-            # whether we expected it or not, the simulation has failed unrecoverably
-            sim_failed = True
+    def _record_test_skipped(self) -> None:
+        """Called by :meth:`_execute` when a test is skipped."""
 
-
-
+        # log test results
+        hilight_start = "" if cocotb_logging.strip_ansi else self.COLOR_SKIPPED
+        hilight_end = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
+        self.log.info(
+            "%sskipping%s %s (%d/%d)%s",
+            hilight_start,
+            hilight_end,
+            self._test.fullname,
+            self.count,
+            self.total_tests,
+            _format_doc(self._test.doc),
+        )
 
-
-
-
-
-
-
+        # write out xunit results
+        lineno = self._get_lineno(self._test)
+        self.xunit.add_testcase(
+            name=self._test.name,
+            classname=self._test.module,
+            file=inspect.getfile(self._test.func),
+            lineno=repr(lineno),
+            time=repr(0),
+            sim_time_ns=repr(0),
+            ratio_time=repr(0),
+        )
+        self.xunit.add_skipped()
+
+        # save details for summary
+        self._test_results.append(
+            _TestResults(
+                test_fullname=self._test.fullname,
+                passed=None,
+                sim_time_ns=0,
+                wall_time_s=0,
+            )
+        )
 
-
-
-
+        # update running passed/failed/skipped counts
+        self.skipped += 1
+        self.count += 1
+
+    def _record_test_init_failed(self) -> None:
+        """Called by :meth:`_execute` when a test initialization fails."""
+
+        # log test results
+        hilight_start = "" if cocotb_logging.strip_ansi else self.COLOR_FAILED
+        hilight_end = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
+        self.log.exception(
+            "%sFailed to initialize%s %s! (%d/%d)%s",
+            hilight_start,
+            hilight_end,
+            self._test.fullname,
+            self.count,
+            self.total_tests,
+            _format_doc(self._test.doc),
+        )
 
-
-
+        # write out xunit results
+        lineno = self._get_lineno(self._test)
+        self.xunit.add_testcase(
+            name=self._test.name,
+            classname=self._test.module,
+            file=inspect.getfile(self._test.func),
+            lineno=repr(lineno),
+            time=repr(0),
+            sim_time_ns=repr(0),
+            ratio_time=repr(0),
+        )
+        self.xunit.add_failure(msg="Test initialization failed")
+
+        # save details for summary
+        self._test_results.append(
+            _TestResults(
+                test_fullname=self._test.fullname,
+                passed=False,
+                sim_time_ns=0,
+                wall_time_s=0,
+            )
+        )
 
-
+        # update running passed/failed/skipped counts
+        self.failures += 1
+        self.count += 1
 
-    def
-        self,
+    def _record_test_passed(
+        self,
+        wall_time_s: float,
+        sim_time_ns: float,
+        result: Union[Exception, None],
+        msg: Union[str, None],
     ) -> None:
-        start_hilight =
-        stop_hilight =
+        start_hilight = "" if cocotb_logging.strip_ansi else self.COLOR_PASSED
+        stop_hilight = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
         if msg is None:
             rest = ""
         else:
@@ -465,119 +681,98 @@ class RegressionManager:
         else:
             result_was = f" (result was {type(result).__qualname__})"
         self.log.info(
-
+            "%s %spassed%s%s%s",
+            self._test.fullname,
+            start_hilight,
+            stop_hilight,
+            rest,
+            result_was,
         )
 
-
-
-
-
-
-
-
-
-
-
-
-            exc_info=result,
+        # write out xunit results
+        ratio_time = safe_divide(sim_time_ns, wall_time_s)
+        lineno = self._get_lineno(self._test)
+        self.xunit.add_testcase(
+            name=self._test.name,
+            classname=self._test.module,
+            file=inspect.getfile(self._test.func),
+            lineno=repr(lineno),
+            time=repr(wall_time_s),
+            sim_time_ns=repr(sim_time_ns),
+            ratio_time=repr(ratio_time),
         )
 
-
+        # update running passed/failed/skipped counts
+        self.passed += 1
+        self.count += 1
+
+        # save details for summary
+        self._test_results.append(
+            _TestResults(
+                test_fullname=self._test.fullname,
+                passed=True,
+                sim_time_ns=sim_time_ns,
+                wall_time_s=wall_time_s,
+            )
+        )
+
+    def _record_test_failed(
         self,
-        test: Test,
-        outcome: Optional[Outcome],
         wall_time_s: float,
         sim_time_ns: float,
+        result: Union[BaseException, None],
+        msg: Union[str, None],
     ) -> None:
+        start_hilight = "" if cocotb_logging.strip_ansi else self.COLOR_FAILED
+        stop_hilight = "" if cocotb_logging.strip_ansi else ANSI.DEFAULT
+        if msg is None:
+            rest = ""
+        else:
+            rest = f": {msg}"
+        self.log.warning(
+            "%s%s %sfailed%s%s",
+            stop_hilight,
+            self._test.fullname,
+            start_hilight,
+            stop_hilight,
+            rest,
+        )
 
-
-
-
-        except OSError:
-            lineno = 1
-
+        # write out xunit results
+        ratio_time = safe_divide(sim_time_ns, wall_time_s)
+        lineno = self._get_lineno(self._test)
         self.xunit.add_testcase(
-            name=
-            classname=
-            file=inspect.getfile(
+            name=self._test.name,
+            classname=self._test.module,
+            file=inspect.getfile(self._test.func),
             lineno=repr(lineno),
             time=repr(wall_time_s),
             sim_time_ns=repr(sim_time_ns),
             ratio_time=repr(ratio_time),
         )
+        self.xunit.add_failure(error_type=type(result).__name__, error_msg=str(result))
 
-
-
-
-            self.skipped += 1
-
-        else:
-            test_pass, sim_failed = self._score_test(test, outcome)
-            if not test_pass:
-                self.xunit.add_failure(
-                    message=f"Test failed with RANDOM_SEED={cocotb.RANDOM_SEED}"
-                )
-                self.failures += 1
-            else:
-                self.passed += 1
-
-        self.test_results.append(
-            {
-                "test": ".".join([test.__module__, test.__qualname__]),
-                "pass": test_pass,
-                "sim": sim_time_ns,
-                "real": wall_time_s,
-                "ratio": ratio_time,
-            }
-        )
-
-        if sim_failed:
-            self._tear_down()
-            return
-
-    @deprecated("This method is now private.")
-    def execute(self) -> None:
-        self._execute()
+        # update running passed/failed/skipped counts
+        self.failures += 1
+        self.count += 1
 
-
-
-
-
-
-
-
-        if self._test_task is not None:
-            return self._start_test()
-
-    def _start_test(self) -> None:
-        # Want this to stand out a little bit
-        start = ""
-        end = ""
-        if want_color_output():
-            start = ANSI.COLOR_TEST
-            end = ANSI.COLOR_DEFAULT
-        self.log.info(
-            "{start}running{end} {name} ({i}/{total}){description}".format(
-                start=start,
-                i=self.count,
-                total=self.ntests,
-                end=end,
-                name=self._test.__qualname__,
-                description=_trim(self._test.__doc__),
+        # save details for summary
+        self._test_results.append(
+            _TestResults(
+                test_fullname=self._test.fullname,
+                passed=False,
+                sim_time_ns=sim_time_ns,
+                wall_time_s=wall_time_s,
             )
         )
 
-        self._test_start_time = time.time()
-        self._test_start_sim_time = get_sim_time("ns")
-        cocotb.scheduler._add_test(self._test_task)
-
     def _log_test_summary(self) -> None:
-
-        real_time = time.time() - self.
+        """Called by :meth:`_tear_down` to log the test summary."""
+        real_time = time.time() - self._regression_start_time
         sim_time_ns = get_sim_time("ns")
-        ratio_time =
+        ratio_time = safe_divide(sim_time_ns, real_time)
 
-        if len(self.
+        if len(self._test_results) == 0:
             return
 
         TEST_FIELD = "TEST"
@@ -585,30 +780,30 @@ class RegressionManager:
         SIM_FIELD = "SIM TIME (ns)"
         REAL_FIELD = "REAL TIME (s)"
         RATIO_FIELD = "RATIO (ns/s)"
-        TOTAL_NAME = f"TESTS={self.
+        TOTAL_NAME = f"TESTS={self.total_tests} PASS={self.passed} FAIL={self.failures} SKIP={self.skipped}"
 
         TEST_FIELD_LEN = max(
             len(TEST_FIELD),
             len(TOTAL_NAME),
-            len(max([x
+            len(max([x.test_fullname for x in self._test_results], key=len)),
         )
         RESULT_FIELD_LEN = len(RESULT_FIELD)
         SIM_FIELD_LEN = len(SIM_FIELD)
         REAL_FIELD_LEN = len(REAL_FIELD)
         RATIO_FIELD_LEN = len(RATIO_FIELD)
 
-        header_dict =
-            a
-            b
-            c
-            d
-            e
-            a_len
-            b_len
-            c_len
-            d_len
-            e_len
-
+        header_dict = {
+            "a": TEST_FIELD,
+            "b": RESULT_FIELD,
+            "c": SIM_FIELD,
+            "d": REAL_FIELD,
+            "e": RATIO_FIELD,
+            "a_len": TEST_FIELD_LEN,
+            "b_len": RESULT_FIELD_LEN,
+            "c_len": SIM_FIELD_LEN,
+            "d_len": REAL_FIELD_LEN,
+            "e_len": RATIO_FIELD_LEN,
+        }
 
         LINE_LEN = (
             3
@@ -634,43 +829,43 @@ class RegressionManager:
         summary += LINE_SEP
 
         test_line = "** {a:<{a_len}} {start}{b:^{b_len}}{end} {c:>{c_len}.2f} {d:>{d_len}.2f} {e:>{e_len}} **\n"
-
-
-
-
-            if result["pass"] is None:
+        hilite: str
+        lolite: str
+        for result in self._test_results:
+            if result.passed is None:
                 ratio = "-.--"
                 pass_fail_str = "SKIP"
-
-
-
-
-                ratio = format(result["ratio"], "0.2f")
+                hilite = self.COLOR_SKIPPED
+                lolite = ANSI.DEFAULT
+            elif result.passed:
+                ratio = format(result.ratio, "0.2f")
                 pass_fail_str = "PASS"
-
-
-                    lolite = ANSI.COLOR_DEFAULT
+                hilite = self.COLOR_PASSED
+                lolite = ANSI.DEFAULT
             else:
-                ratio = format(result
+                ratio = format(result.ratio, "0.2f")
                 pass_fail_str = "FAIL"
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                hilite = self.COLOR_FAILED
+                lolite = ANSI.DEFAULT
+
+            if cocotb_logging.strip_ansi:
+                hilite = ""
+                lolite = ""
+
+            test_dict = {
+                "a": result.test_fullname,
+                "b": pass_fail_str,
+                "c": result.sim_time_ns,
+                "d": result.wall_time_s,
+                "e": ratio,
+                "a_len": TEST_FIELD_LEN,
+                "b_len": RESULT_FIELD_LEN,
+                "c_len": SIM_FIELD_LEN - 1,
+                "d_len": REAL_FIELD_LEN - 1,
+                "e_len": RATIO_FIELD_LEN - 1,
+                "start": hilite,
+                "end": lolite,
+            }
 
             summary += test_line.format(**test_dict)
 
@@ -695,239 +890,7 @@ class RegressionManager:
 
         self.log.info(summary)
 
-
-
-
-
-    except ZeroDivisionError:
-        if a == 0:
-            return float("nan")
-        else:
-            return float("inf")
-
-
-def _create_test(function, name, documentation, mod, *args, **kwargs):
-    """Factory function to create tests, avoids late binding.
-
-    Creates a test dynamically. The test will call the supplied
-    function with the supplied arguments.
-
-    Args:
-        function (function): The test function to run.
-        name (str): The name of the test.
-        documentation (str): The docstring for the test.
-        mod (module): The module this function belongs to.
-        *args: Remaining args to pass to test function.
-        **kwargs: Passed to the test function.
-
-    Returns:
-        Decorated test function
-    """
-
-    async def _my_test(dut):
-        await function(dut, *args, **kwargs)
-
-    _my_test.__name__ = name
-    _my_test.__qualname__ = name
-    _my_test.__doc__ = documentation
-    _my_test.__module__ = mod.__name__
-
-    return cocotb.test()(_my_test)
-
-
-class TestFactory:
-    """Factory to automatically generate tests.
-
-    Args:
-        test_function: A Callable that returns the test Coroutine.
-            Must take *dut* as the first argument.
-        *args: Remaining arguments are passed directly to the test function.
-            Note that these arguments are not varied. An argument that
-            varies with each test must be a keyword argument to the
-            test function.
-        **kwargs: Remaining keyword arguments are passed directly to the test function.
-            Note that these arguments are not varied. An argument that
-            varies with each test must be a keyword argument to the
-            test function.
-
-    Assuming we have a common test function that will run a test. This test
-    function will take keyword arguments (for example generators for each of
-    the input interfaces) and generate tests that call the supplied function.
-
-    This Factory allows us to generate sets of tests based on the different
-    permutations of the possible arguments to the test function.
-
-    For example, if we have a module that takes backpressure, has two configurable
-    features where enabling ``feature_b`` requires ``feature_a`` to be active, and
-    need to test against data generation routines ``gen_a`` and ``gen_b``:
-
-    >>> tf = TestFactory(test_function=run_test)
-    >>> tf.add_option(name='data_in', optionlist=[gen_a, gen_b])
-    >>> tf.add_option('backpressure', [None, random_backpressure])
-    >>> tf.add_option(('feature_a', 'feature_b'), [(False, False), (True, False), (True, True)])
-    >>> tf.generate_tests()
-
-    We would get the following tests:
-
-    * ``gen_a`` with no backpressure and both features disabled
-    * ``gen_a`` with no backpressure and only ``feature_a`` enabled
-    * ``gen_a`` with no backpressure and both features enabled
-    * ``gen_a`` with ``random_backpressure`` and both features disabled
-    * ``gen_a`` with ``random_backpressure`` and only ``feature_a`` enabled
-    * ``gen_a`` with ``random_backpressure`` and both features enabled
-    * ``gen_b`` with no backpressure and both features disabled
-    * ``gen_b`` with no backpressure and only ``feature_a`` enabled
-    * ``gen_b`` with no backpressure and both features enabled
-    * ``gen_b`` with ``random_backpressure`` and both features disabled
-    * ``gen_b`` with ``random_backpressure`` and only ``feature_a`` enabled
-    * ``gen_b`` with ``random_backpressure`` and both features enabled
-
-    The tests are appended to the calling module for auto-discovery.
-
-    Tests are simply named ``test_function_N``. The docstring for the test (hence
-    the test description) includes the name and description of each generator.
-
-    .. versionchanged:: 1.5
-        Groups of options are now supported
-    """
-
-    # Prevent warnings from collection of TestFactories by unit testing frameworks.
-    __test__ = False
-
-    def __init__(self, test_function, *args, **kwargs):
-        self.test_function = test_function
-        self.name = self.test_function.__qualname__
-
-        self.args = args
-        self.kwargs_constant = kwargs
-        self.kwargs = {}
-        self.log = _logger
-
-    def add_option(self, name, optionlist):
-        """Add a named option to the test.
-
-        Args:
-            name (str or iterable of str): An option name, or an iterable of
-                several option names. Passed to test as keyword arguments.
-
-            optionlist (list): A list of possible options for this test knob.
-                If N names were specified, this must be a list of N-tuples or
-                lists, where each element specifies a value for its respective
-                option.
-
-        .. versionchanged:: 1.5
-            Groups of options are now supported
-        """
-        if not isinstance(name, str):
-            name = tuple(name)
-            for opt in optionlist:
-                if len(name) != len(opt):
-                    raise ValueError(
-                        "Mismatch between number of options and number of option values in group"
-                    )
-        self.kwargs[name] = optionlist
-
-    def generate_tests(self, prefix="", postfix=""):
-        """
-        Generate an exhaustive set of tests using the cartesian product of the
-        possible keyword arguments.
-
-        The generated tests are appended to the namespace of the calling
-        module.
-
-        Args:
-            prefix (str): Text string to append to start of ``test_function`` name
-                when naming generated test cases. This allows reuse of
-                a single ``test_function`` with multiple
-                :class:`TestFactories <.TestFactory>` without name clashes.
-            postfix (str): Text string to append to end of ``test_function`` name
-                when naming generated test cases. This allows reuse of
-                a single ``test_function`` with multiple
-                :class:`TestFactories <.TestFactory>` without name clashes.
-        """
-
-        frm = inspect.stack()[1]
-        mod = inspect.getmodule(frm[0])
-
-        d = self.kwargs
-
-        for index, testoptions in enumerate(
-            dict(zip(d, v)) for v in product(*d.values())
-        ):
-
-            name = "%s%s%s_%03d" % (prefix, self.name, postfix, index + 1)
-            doc = "Automatically generated test\n\n"
-
-            # preprocess testoptions to split tuples
-            testoptions_split = {}
-            for optname, optvalue in testoptions.items():
-                if isinstance(optname, str):
-                    testoptions_split[optname] = optvalue
-                else:
-                    # previously checked in add_option; ensure nothing has changed
-                    assert len(optname) == len(optvalue)
-                    for n, v in zip(optname, optvalue):
-                        testoptions_split[n] = v
-
-            for optname, optvalue in testoptions_split.items():
-                if callable(optvalue):
-                    if not optvalue.__doc__:
-                        desc = "No docstring supplied"
-                    else:
-                        desc = optvalue.__doc__.split("\n")[0]
-                    doc += "\t{}: {} ({})\n".format(
-                        optname, optvalue.__qualname__, desc
-                    )
-                else:
-                    doc += "\t{}: {}\n".format(optname, repr(optvalue))
-
-            self.log.debug(
-                'Adding generated test "%s" to module "%s"' % (name, mod.__name__)
-            )
-            kwargs = {}
-            kwargs.update(self.kwargs_constant)
-            kwargs.update(testoptions_split)
-            if hasattr(mod, name):
-                self.log.error(
-                    "Overwriting %s in module %s. "
-                    "This causes a previously defined testcase "
-                    "not to be run. Consider setting/changing "
-                    "name_postfix" % (name, mod)
-                )
-            setattr(
-                mod,
-                name,
-                _create_test(self.test_function, name, doc, mod, *self.args, **kwargs),
-            )
-
-
-def _trim(docstring: Optional[str]) -> str:
-    """Normalizes test docstrings
-
-    Based on https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation.
-    """
-    if docstring is None or docstring == "":
-        return ""
-    # Convert tabs to spaces (following the normal Python rules)
-    # and split into a list of lines:
-    lines = docstring.expandtabs().splitlines()
-    # Determine minimum indentation (first line doesn't count):
-    indent = math.inf
-    for line in lines[1:]:
-        stripped = line.lstrip()
-        if stripped:
-            indent = min(indent, len(line) - len(stripped))
-    # Remove indentation (first line is special):
-    trimmed = [lines[0].strip()]
-    if indent < math.inf:
-        for line in lines[1:]:
-            trimmed.append(line[indent:].rstrip())
-    # Strip off trailing and leading blank lines:
-    while trimmed and not trimmed[-1]:
-        trimmed.pop()
-    while trimmed and not trimmed[0]:
-        trimmed.pop(0)
-    # Add one newline back
-    trimmed.insert(0, "")
-    # Return a single string:
-    return "\n ".join(trimmed)
+    def _fail_simulation(self, msg: str) -> None:
+        self._sim_failure = Error(SimFailure(msg))
+        self._running_test.abort(self._sim_failure)
+        cocotb._scheduler_inst._event_loop()