cocotb-1.9.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of cocotb might be problematic.
- cocotb/ANSI.py +92 -0
- cocotb/__init__.py +371 -0
- cocotb/_deprecation.py +36 -0
- cocotb/_py_compat.py +63 -0
- cocotb/_sim_versions.py +145 -0
- cocotb/_vendor/__init__.py +0 -0
- cocotb/_vendor/distutils_version.py +346 -0
- cocotb/_version.py +8 -0
- cocotb/binary.py +858 -0
- cocotb/clock.py +174 -0
- cocotb/config.py +289 -0
- cocotb/decorators.py +332 -0
- cocotb/handle.py +1175 -0
- cocotb/ipython_support.py +92 -0
- cocotb/libs/libcocotb.so +0 -0
- cocotb/libs/libcocotbfli_modelsim.so +0 -0
- cocotb/libs/libcocotbutils.so +0 -0
- cocotb/libs/libcocotbvhpi_aldec.so +0 -0
- cocotb/libs/libcocotbvhpi_ius.so +0 -0
- cocotb/libs/libcocotbvhpi_modelsim.so +0 -0
- cocotb/libs/libcocotbvhpi_nvc.so +0 -0
- cocotb/libs/libcocotbvpi_aldec.so +0 -0
- cocotb/libs/libcocotbvpi_ghdl.so +0 -0
- cocotb/libs/libcocotbvpi_icarus.vpl +0 -0
- cocotb/libs/libcocotbvpi_ius.so +0 -0
- cocotb/libs/libcocotbvpi_modelsim.so +0 -0
- cocotb/libs/libcocotbvpi_vcs.so +0 -0
- cocotb/libs/libcocotbvpi_verilator.so +0 -0
- cocotb/libs/libembed.so +0 -0
- cocotb/libs/libgpi.so +0 -0
- cocotb/libs/libgpilog.so +0 -0
- cocotb/libs/libpygpilog.so +0 -0
- cocotb/log.py +303 -0
- cocotb/memdebug.py +35 -0
- cocotb/outcomes.py +56 -0
- cocotb/queue.py +179 -0
- cocotb/regression.py +933 -0
- cocotb/result.py +209 -0
- cocotb/runner.py +1400 -0
- cocotb/scheduler.py +1099 -0
- cocotb/share/def/.gitignore +2 -0
- cocotb/share/def/README.md +4 -0
- cocotb/share/def/aldec.def +61 -0
- cocotb/share/def/ghdl.def +43 -0
- cocotb/share/def/icarus.def +43 -0
- cocotb/share/def/modelsim.def +137 -0
- cocotb/share/include/cocotb_utils.h +93 -0
- cocotb/share/include/embed.h +56 -0
- cocotb/share/include/exports.h +20 -0
- cocotb/share/include/gpi.h +265 -0
- cocotb/share/include/gpi_logging.h +212 -0
- cocotb/share/include/py_gpi_logging.h +30 -0
- cocotb/share/include/vhpi_user_ext.h +26 -0
- cocotb/share/include/vpi_user_ext.h +55 -0
- cocotb/share/lib/verilator/verilator.cpp +196 -0
- cocotb/share/makefiles/Makefile.deprecations +12 -0
- cocotb/share/makefiles/Makefile.inc +176 -0
- cocotb/share/makefiles/Makefile.sim +113 -0
- cocotb/share/makefiles/simulators/Makefile.activehdl +79 -0
- cocotb/share/makefiles/simulators/Makefile.cvc +94 -0
- cocotb/share/makefiles/simulators/Makefile.ghdl +113 -0
- cocotb/share/makefiles/simulators/Makefile.icarus +111 -0
- cocotb/share/makefiles/simulators/Makefile.ius +125 -0
- cocotb/share/makefiles/simulators/Makefile.modelsim +32 -0
- cocotb/share/makefiles/simulators/Makefile.nvc +64 -0
- cocotb/share/makefiles/simulators/Makefile.questa +171 -0
- cocotb/share/makefiles/simulators/Makefile.riviera +183 -0
- cocotb/share/makefiles/simulators/Makefile.vcs +98 -0
- cocotb/share/makefiles/simulators/Makefile.verilator +86 -0
- cocotb/share/makefiles/simulators/Makefile.xcelium +136 -0
- cocotb/simulator.cpython-313-x86_64-linux-gnu.so +0 -0
- cocotb/task.py +325 -0
- cocotb/triggers.py +1104 -0
- cocotb/types/__init__.py +50 -0
- cocotb/types/array.py +309 -0
- cocotb/types/logic.py +292 -0
- cocotb/types/logic_array.py +298 -0
- cocotb/types/range.py +198 -0
- cocotb/utils.py +698 -0
- cocotb/wavedrom.py +199 -0
- cocotb/xunit_reporter.py +80 -0
- cocotb-1.9.2.dist-info/LICENSE +28 -0
- cocotb-1.9.2.dist-info/METADATA +168 -0
- cocotb-1.9.2.dist-info/RECORD +89 -0
- cocotb-1.9.2.dist-info/WHEEL +6 -0
- cocotb-1.9.2.dist-info/entry_points.txt +2 -0
- cocotb-1.9.2.dist-info/top_level.txt +21 -0
- pygpi/__init__.py +0 -0
- pygpi/entry.py +26 -0
cocotb/regression.py
ADDED
@@ -0,0 +1,933 @@
+# Copyright (c) 2013, 2018 Potential Ventures Ltd
+# Copyright (c) 2013 SolarFlare Communications Inc
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#     * Neither the name of Potential Ventures Ltd,
+#       SolarFlare Communications Inc nor the
+#       names of its contributors may be used to endorse or promote products
+#       derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""All things relating to regression capabilities."""
+
+import hashlib
+import inspect
+import math
+import os
+import pdb
+import random
+import sys
+import time
+import traceback
+from itertools import product
+from typing import Any, Iterable, Optional, Tuple, Type
+
+import cocotb
+import cocotb.ANSI as ANSI
+from cocotb import simulator
+from cocotb._deprecation import deprecated
+from cocotb.decorators import test as Test
+from cocotb.handle import SimHandle
+from cocotb.log import SimLog
+from cocotb.outcomes import Error, Outcome
+from cocotb.result import SimFailure, TestSuccess
+from cocotb.task import Task
+from cocotb.utils import get_sim_time, remove_traceback_frames, want_color_output
+from cocotb.xunit_reporter import XUnitReporter
+
+_pdb_on_exception = "COCOTB_PDB_ON_EXCEPTION" in os.environ
+
+# Optional support for coverage collection of testbench files
+coverage = None
+if "COVERAGE" in os.environ:
+    try:
+        import coverage
+    except ImportError as e:
+        msg = (
+            "Coverage collection requested but coverage module not available"
+            "\n"
+            "Import error was: %s\n" % repr(e)
+        )
+        sys.stderr.write(msg)
+
+
+def _my_import(name: str) -> Any:
+    mod = __import__(name)
+    components = name.split(".")
+    for comp in components[1:]:
+        mod = getattr(mod, comp)
+    return mod
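A quick illustration of why this helper exists rather than a bare `__import__`: for a dotted name, `__import__` returns the top-level package, and the `getattr` walk descends to the leaf module. A hypothetical interpreter session, not part of the file (the second result is `'posixpath'` on a POSIX build of Python, `'ntpath'` on Windows):

>>> __import__("os.path").__name__   # bare __import__ stops at the top-level package
'os'
>>> _my_import("os.path").__name__   # the getattr walk reaches the leaf module
'posixpath'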
+
+
+_logger = SimLog(__name__)
+
+_Failed: Type[BaseException]
+try:
+    import pytest
+except ModuleNotFoundError:
+    _Failed = AssertionError
+else:
+    try:
+        with pytest.raises(Exception):
+            pass
+    except BaseException as _raises_e:
+        _Failed = type(_raises_e)
+    else:
+        assert False, "pytest.raises doesn't raise an exception when it fails"
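The block above learns pytest's internal "Failed" exception type by deliberately making pytest.raises fail over an empty block and capturing what it throws. The same trick in isolation (a minimal sketch, assuming pytest is installed):

import pytest

try:
    with pytest.raises(Exception):
        pass  # nothing raised, so pytest.raises itself fails on exit
except BaseException as exc:
    failed_type = type(exc)  # pytest's internal "Failed" exception class

print(failed_type.__name__)  # prints "Failed" on recent pytest versions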
+
+
+class RegressionManager:
+    """Encapsulates all regression capability into a single place"""
+
+    def __init__(self, dut: SimHandle, tests: Iterable[Test]):
+        """
+        Args:
+            dut (SimHandle): The root handle to pass into test functions.
+            tests (Iterable[Test]): tests to run
+        """
+        self._dut = dut
+        self._test = None
+        self._test_task = None
+        self._test_start_time = None
+        self._test_start_sim_time = None
+        self._cov = None
+        self.log = _logger
+        self.start_time = time.time()
+        self.test_results = []
+        self.count = 0
+        self.passed = 0
+        self.skipped = 0
+        self.failures = 0
+        self._tearing_down = False
+
+        # Setup XUnit
+        ###################
+
+        results_filename = os.getenv("COCOTB_RESULTS_FILE", "results.xml")
+        suite_name = os.getenv("RESULT_TESTSUITE", "all")
+        package_name = os.getenv("RESULT_TESTPACKAGE", "all")
+
+        self.xunit = XUnitReporter(filename=results_filename)
+
+        self.xunit.add_testsuite(name=suite_name, package=package_name)
+
+        self.xunit.add_property(name="random_seed", value=str(cocotb.RANDOM_SEED))
+
+        # Setup Coverage
+        ####################
+
+        if coverage is not None:
+            self.log.info("Enabling coverage collection of Python code")
+            config_filepath = os.getenv("COVERAGE_RCFILE")
+            if config_filepath is None:
+                # Exclude cocotb itself from coverage collection.
+                cocotb_package_dir = os.path.dirname(__file__)
+                self._cov = coverage.coverage(
+                    branch=True, omit=[f"{cocotb_package_dir}/*"]
+                )
+            else:
+                # Allow the config file to handle all configuration
+                self._cov = coverage.coverage()
+            self._cov.start()
+
+        # Test Discovery
+        ####################
+        self._queue = []
+        for test in tests:
+            self.log.info(f"Found test {test.__module__}.{test.__qualname__}")
+            self._queue.append(test)
+        self.ntests = len(self._queue)
+
+        if not self._queue:
+            self.log.warning("No tests were discovered")
+
+        self._queue.sort(key=lambda test: (test.stage, test._id))
+
+    @classmethod
+    def from_discovery(cls, dut: SimHandle):
+        """
+        Obtains the test list by discovery.
+
+        See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.
+
+        Args:
+            dut (SimHandle): The root handle to pass into test functions.
+        """
+        tests = cls._discover_tests()
+        return cls(dut, tests)
+
+    @classmethod
+    def _discover_tests(cls) -> Iterable[Test]:
+        """
+        Discovers tests in files automatically.
+
+        See :envvar:`MODULE` and :envvar:`TESTCASE` for details on how tests are discovered.
+        """
+        module_str = os.getenv("MODULE")
+        test_str = os.getenv("TESTCASE")
+
+        if module_str is None:
+            raise ValueError(
+                "Environment variable MODULE, which defines the module(s) to execute, is not defined."
+            )
+
+        modules = [s.strip() for s in module_str.split(",") if s.strip()]
+
+        cls._setup_pytest_assertion_rewriting(modules)
+
+        tests = None
+        if test_str:
+            tests = [s.strip() for s in test_str.split(",") if s.strip()]
+
+        for module_name in modules:
+            try:
+                _logger.debug("Python Path: " + ",".join(sys.path))
+                _logger.debug("PWD: " + os.getcwd())
+                module = _my_import(module_name)
+            except Exception as E:
+                _logger.critical("Failed to import module %s: %s", module_name, E)
+                _logger.info('MODULE variable was "%s"', ".".join(modules))
+                _logger.info(traceback.format_exc())
+                raise
+
+            if tests is not None:
+                not_found_tests = []
+                # Specific functions specified, don't auto-discover
+                for test_name in tests:
+                    try:
+                        test = getattr(module, test_name)
+                    except AttributeError:
+                        not_found_tests.append(test_name)
+                        continue
+
+                    if not isinstance(test, Test):
+                        _logger.error(
+                            "Requested %s from module %s isn't a cocotb.test decorated coroutine",
+                            test_name,
+                            module_name,
+                        )
+                        raise ImportError(
+                            "Failed to find requested test %s" % test_name
+                        )
+
+                    # If we request a test manually, it should be run even if skip=True is set.
+                    test.skip = False
+
+                    yield test
+
+                # Use the non-matching test names in the next module search
+                tests = not_found_tests
+
+            else:
+                # auto-discover
+                for thing in vars(module).values():
+                    if isinstance(thing, Test):
+                        yield thing
+
+        # If any test were not found in any module, raise an error
+        if tests:
+            _logger.error(
+                "Requested test(s) %s wasn't found in module(s) %s", tests, modules
+            )
+            raise AttributeError("Test(s) %s doesn't exist in %s" % (tests, modules))
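Seen from the testbench side, the discovery loop above picks up any module-level attribute that is a cocotb.test-decorated coroutine. A minimal sketch of a module it would find (file, module, and test names here are hypothetical):

# test_dff.py -- discovered when MODULE=test_dff is set in the environment.
# Run only this test with e.g.: MODULE=test_dff TESTCASE=my_first_test make
import cocotb
from cocotb.triggers import Timer


@cocotb.test()
async def my_first_test(dut):
    """The decorator returns a Test instance, so auto-discovery yields it."""
    await Timer(1, units="ns")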
+
+    @classmethod
+    def _setup_pytest_assertion_rewriting(cls, test_modules: Iterable[str]) -> None:
+        try:
+            import pytest
+        except ImportError:
+            _logger.info(
+                "pytest not found, install it to enable better AssertionError messages"
+            )
+            return
+        try:
+            # Install the assertion rewriting hook, which must be done before we
+            # import the test modules.
+            from _pytest.assertion import install_importhook
+            from _pytest.config import Config
+
+            pytest_conf = Config.fromdictargs(
+                {}, ["--capture=no", "-o", "python_files=*.py"]
+            )
+            install_importhook(pytest_conf)
+        except Exception:
+            _logger.exception(
+                "Configuring the assertion rewrite hook using pytest {} failed. "
+                "Please file a bug report!".format(pytest.__version__)
+            )
+
+    @deprecated("This method is now private.")
+    def tear_down(self) -> None:
+        self._tear_down()
+
+    def _tear_down(self) -> None:
+        # prevent re-entering the tear down procedure
+        if not self._tearing_down:
+            self._tearing_down = True
+        else:
+            return
+
+        # fail remaining tests
+        while True:
+            test = self._next_test()
+            if test is None:
+                break
+            self._record_result(
+                test=test, outcome=Error(SimFailure), wall_time_s=0, sim_time_ns=0
+            )
+
+        # Write out final log messages
+        self._log_test_summary()
+
+        # Generate output reports
+        self.xunit.write()
+        if self._cov:
+            self._cov.stop()
+            self.log.info("Writing coverage data")
+            self._cov.save()
+            self._cov.html_report()
+
+        # Setup simulator finalization
+        simulator.stop_simulator()
+        cocotb._stop_library_coverage()
+
+    @deprecated("This method is now private.")
+    def next_test(self) -> Optional[Test]:
+        return self._next_test()
+
+    def _next_test(self) -> Optional[Test]:
+        """Get the next test to run"""
+        if not self._queue:
+            return None
+        self.count += 1
+        return self._queue.pop(0)
+
+    @deprecated("This method is now private.")
+    def handle_result(self, test: Task) -> None:
+        self._handle_result(test)
+
+    def _handle_result(self, test: Task) -> None:
+        """Handle a test completing.
+
+        Dump result to XML and schedule the next test (if any). Entered by the scheduler.
+
+        Args:
+            test: The test that completed
+        """
+        assert test is self._test_task
+
+        real_time = time.time() - self._test_start_time
+        sim_time_ns = get_sim_time("ns") - self._test_start_sim_time
+
+        self._record_result(
+            test=self._test,
+            outcome=self._test_task._outcome,
+            wall_time_s=real_time,
+            sim_time_ns=sim_time_ns,
+        )
+
+        self._execute()
+
+    def _init_test(self, test: Test) -> Optional[Task]:
+        """Initialize a test.
+
+        Record outcome if the initialization fails.
+        Record skip if the test is skipped.
+        Save the initialized test if it successfully initializes.
+        """
+
+        if test.skip:
+            hilight_start = ANSI.COLOR_SKIPPED if want_color_output() else ""
+            hilight_end = ANSI.COLOR_DEFAULT if want_color_output() else ""
+            # Want this to stand out a little bit
+            self.log.info(
+                "{start}skipping{end} {name} ({i}/{total})".format(
+                    start=hilight_start,
+                    i=self.count,
+                    total=self.ntests,
+                    end=hilight_end,
+                    name=test.__qualname__,
+                )
+            )
+            self._record_result(test, None, 0, 0)
+            return None
+
+        test_init_outcome = cocotb.outcomes.capture(test, self._dut)
+
+        if isinstance(test_init_outcome, cocotb.outcomes.Error):
+            self.log.error(
+                "Failed to initialize test %s" % test.__qualname__,
+                exc_info=test_init_outcome.error,
+            )
+            self._record_result(test, test_init_outcome, 0, 0)
+            return None
+
+        running_test = test_init_outcome.get()
+
+        # seed random number generator based on test module, name, and RANDOM_SEED
+        hasher = hashlib.sha1()
+        hasher.update(test.__qualname__.encode())
+        hasher.update(test.__module__.encode())
+        seed = cocotb.RANDOM_SEED + int(hasher.hexdigest(), 16)
+        random.seed(seed)
+
+        return running_test
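The seeding step above gives every test a distinct but reproducible random stream: the SHA-1 digest of the test's qualified name and module is added to the global RANDOM_SEED. The same computation in isolation (the names and seed value are illustrative stand-ins):

import hashlib
import random

RANDOM_SEED = 1377424946  # stand-in for cocotb.RANDOM_SEED

hasher = hashlib.sha1()
hasher.update("my_first_test".encode())  # test.__qualname__
hasher.update("test_dff".encode())       # test.__module__
seed = RANDOM_SEED + int(hasher.hexdigest(), 16)
random.seed(seed)  # identical across runs with the same RANDOM_SEED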
+
+    def _score_test(self, test: Test, outcome: Outcome) -> Tuple[bool, bool]:
+        """
+        Given a test and the test's outcome, determine if the test met expectations and log pertinent information
+        """
+
+        # scoring outcomes
+        result_pass = True
+        sim_failed = False
+
+        try:
+            outcome.get()
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except BaseException as e:
+            result = remove_traceback_frames(e, ["_score_test", "get"])
+        else:
+            result = TestSuccess()
+
+        if (
+            isinstance(result, TestSuccess)
+            and not test.expect_fail
+            and not test.expect_error
+        ):
+            self._log_test_passed(test, None, None)
+
+        elif isinstance(result, TestSuccess) and test.expect_error:
+            self._log_test_failed(test, None, "passed but we expected an error")
+            result_pass = False
+
+        elif isinstance(result, TestSuccess):
+            self._log_test_failed(test, None, "passed but we expected a failure")
+            result_pass = False
+
+        elif isinstance(result, SimFailure):
+            if isinstance(result, test.expect_error):
+                self._log_test_passed(test, result, "errored as expected")
+            else:
+                self.log.error("Test error has lead to simulator shutting us down")
+                result_pass = False
+            # whether we expected it or not, the simulation has failed unrecoverably
+            sim_failed = True
+
+        elif isinstance(result, (AssertionError, _Failed)) and test.expect_fail:
+            self._log_test_passed(test, result, "failed as expected")
+
+        elif test.expect_error:
+            if isinstance(result, test.expect_error):
+                self._log_test_passed(test, result, "errored as expected")
+            else:
+                self._log_test_failed(test, result, "errored with unexpected type ")
+                result_pass = False
+
+        else:
+            self._log_test_failed(test, result, None)
+            result_pass = False
+
+            if _pdb_on_exception:
+                pdb.post_mortem(result.__traceback__)
+
+        return result_pass, sim_failed
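In short, the ladder above scores a clean pass as PASS only when neither expectation flag is set; an AssertionError (or pytest's Failed) satisfies expect_fail; an exception whose type appears in expect_error satisfies expect_error; and a SimFailure tears the whole regression down whether expected or not. A hedged sketch of the two knobs as a testbench author would set them:

@cocotb.test(expect_fail=True)
async def test_known_bad(dut):
    assert 1 == 2  # scored "failed as expected" -> PASS


@cocotb.test(expect_error=ValueError)
async def test_known_error(dut):
    raise ValueError("scored 'errored as expected' -> PASS")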
+
+    def _log_test_passed(
+        self, test: Test, result: Optional[Exception] = None, msg: Optional[str] = None
+    ) -> None:
+        start_hilight = ANSI.COLOR_PASSED if want_color_output() else ""
+        stop_hilight = ANSI.COLOR_DEFAULT if want_color_output() else ""
+        if msg is None:
+            rest = ""
+        else:
+            rest = f": {msg}"
+        if result is None:
+            result_was = ""
+        else:
+            result_was = f" (result was {type(result).__qualname__})"
+        self.log.info(
+            f"{test.__qualname__} {start_hilight}passed{stop_hilight}{rest}{result_was}"
+        )
+
+    def _log_test_failed(
+        self, test: Test, result: Optional[Exception] = None, msg: Optional[str] = None
+    ) -> None:
+        start_hilight = ANSI.COLOR_FAILED if want_color_output() else ""
+        stop_hilight = ANSI.COLOR_DEFAULT if want_color_output() else ""
+        if msg is None:
+            rest = ""
+        else:
+            rest = f": {msg}"
+        self.log.info(
+            f"{test.__qualname__} {start_hilight}failed{stop_hilight}{rest}",
+            exc_info=result,
+        )
+
+    def _record_result(
+        self,
+        test: Test,
+        outcome: Optional[Outcome],
+        wall_time_s: float,
+        sim_time_ns: float,
+    ) -> None:
+
+        ratio_time = self._safe_divide(sim_time_ns, wall_time_s)
+        try:
+            lineno = inspect.getsourcelines(test._func)[1]
+        except OSError:
+            lineno = 1
+
+        self.xunit.add_testcase(
+            name=test.__qualname__,
+            classname=test.__module__,
+            file=inspect.getfile(test._func),
+            lineno=repr(lineno),
+            time=repr(wall_time_s),
+            sim_time_ns=repr(sim_time_ns),
+            ratio_time=repr(ratio_time),
+        )
+
+        if outcome is None:  # skipped
+            test_pass, sim_failed = None, False
+            self.xunit.add_skipped()
+            self.skipped += 1
+
+        else:
+            test_pass, sim_failed = self._score_test(test, outcome)
+            if not test_pass:
+                self.xunit.add_failure(
+                    message=f"Test failed with RANDOM_SEED={cocotb.RANDOM_SEED}"
+                )
+                self.failures += 1
+            else:
+                self.passed += 1
+
+        self.test_results.append(
+            {
+                "test": ".".join([test.__module__, test.__qualname__]),
+                "pass": test_pass,
+                "sim": sim_time_ns,
+                "real": wall_time_s,
+                "ratio": ratio_time,
+            }
+        )
+
+        if sim_failed:
+            self._tear_down()
+            return
+
+    @deprecated("This method is now private.")
+    def execute(self) -> None:
+        self._execute()
+
+    def _execute(self) -> None:
+        while True:
+            self._test = self._next_test()
+            if self._test is None:
+                return self._tear_down()
+
+            self._test_task = self._init_test(self._test)
+            if self._test_task is not None:
+                return self._start_test()
+
+    def _start_test(self) -> None:
+        # Want this to stand out a little bit
+        start = ""
+        end = ""
+        if want_color_output():
+            start = ANSI.COLOR_TEST
+            end = ANSI.COLOR_DEFAULT
+        self.log.info(
+            "{start}running{end} {name} ({i}/{total}){description}".format(
+                start=start,
+                i=self.count,
+                total=self.ntests,
+                end=end,
+                name=self._test.__qualname__,
+                description=_trim(self._test.__doc__),
+            )
+        )
+
+        self._test_start_time = time.time()
+        self._test_start_sim_time = get_sim_time("ns")
+        cocotb.scheduler._add_test(self._test_task)
+
+    def _log_test_summary(self) -> None:
+
+        real_time = time.time() - self.start_time
+        sim_time_ns = get_sim_time("ns")
+        ratio_time = self._safe_divide(sim_time_ns, real_time)
+
+        if len(self.test_results) == 0:
+            return
+
+        TEST_FIELD = "TEST"
+        RESULT_FIELD = "STATUS"
+        SIM_FIELD = "SIM TIME (ns)"
+        REAL_FIELD = "REAL TIME (s)"
+        RATIO_FIELD = "RATIO (ns/s)"
+        TOTAL_NAME = f"TESTS={self.ntests} PASS={self.passed} FAIL={self.failures} SKIP={self.skipped}"
+
+        TEST_FIELD_LEN = max(
+            len(TEST_FIELD),
+            len(TOTAL_NAME),
+            len(max([x["test"] for x in self.test_results], key=len)),
+        )
+        RESULT_FIELD_LEN = len(RESULT_FIELD)
+        SIM_FIELD_LEN = len(SIM_FIELD)
+        REAL_FIELD_LEN = len(REAL_FIELD)
+        RATIO_FIELD_LEN = len(RATIO_FIELD)
+
+        header_dict = dict(
+            a=TEST_FIELD,
+            b=RESULT_FIELD,
+            c=SIM_FIELD,
+            d=REAL_FIELD,
+            e=RATIO_FIELD,
+            a_len=TEST_FIELD_LEN,
+            b_len=RESULT_FIELD_LEN,
+            c_len=SIM_FIELD_LEN,
+            d_len=REAL_FIELD_LEN,
+            e_len=RATIO_FIELD_LEN,
+        )
+
+        LINE_LEN = (
+            3
+            + TEST_FIELD_LEN
+            + 2
+            + RESULT_FIELD_LEN
+            + 2
+            + SIM_FIELD_LEN
+            + 2
+            + REAL_FIELD_LEN
+            + 2
+            + RATIO_FIELD_LEN
+            + 3
+        )
+
+        LINE_SEP = "*" * LINE_LEN + "\n"
+
+        summary = ""
+        summary += LINE_SEP
+        summary += "** {a:<{a_len}} {b:^{b_len}} {c:>{c_len}} {d:>{d_len}} {e:>{e_len}} **\n".format(
+            **header_dict
+        )
+        summary += LINE_SEP
+
+        test_line = "** {a:<{a_len}} {start}{b:^{b_len}}{end} {c:>{c_len}.2f} {d:>{d_len}.2f} {e:>{e_len}} **\n"
+        for result in self.test_results:
+            hilite = ""
+            lolite = ""
+
+            if result["pass"] is None:
+                ratio = "-.--"
+                pass_fail_str = "SKIP"
+                if want_color_output():
+                    hilite = ANSI.COLOR_SKIPPED
+                    lolite = ANSI.COLOR_DEFAULT
+            elif result["pass"]:
+                ratio = format(result["ratio"], "0.2f")
+                pass_fail_str = "PASS"
+                if want_color_output():
+                    hilite = ANSI.COLOR_PASSED
+                    lolite = ANSI.COLOR_DEFAULT
+            else:
+                ratio = format(result["ratio"], "0.2f")
+                pass_fail_str = "FAIL"
+                if want_color_output():
+                    hilite = ANSI.COLOR_FAILED
+                    lolite = ANSI.COLOR_DEFAULT
+
+            test_dict = dict(
+                a=result["test"],
+                b=pass_fail_str,
+                c=result["sim"],
+                d=result["real"],
+                e=ratio,
+                a_len=TEST_FIELD_LEN,
+                b_len=RESULT_FIELD_LEN,
+                c_len=SIM_FIELD_LEN - 1,
+                d_len=REAL_FIELD_LEN - 1,
+                e_len=RATIO_FIELD_LEN - 1,
+                start=hilite,
+                end=lolite,
+            )
+
+            summary += test_line.format(**test_dict)
+
+        summary += LINE_SEP
+
+        summary += test_line.format(
+            a=TOTAL_NAME,
+            b="",
+            c=sim_time_ns,
+            d=real_time,
+            e=format(ratio_time, "0.2f"),
+            a_len=TEST_FIELD_LEN,
+            b_len=RESULT_FIELD_LEN,
+            c_len=SIM_FIELD_LEN - 1,
+            d_len=REAL_FIELD_LEN - 1,
+            e_len=RATIO_FIELD_LEN - 1,
+            start="",
+            end="",
+        )
+
+        summary += LINE_SEP
+
+        self.log.info(summary)
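Rendered, those format strings produce the familiar end-of-run box, roughly like the following (values and column widths are illustrative, not from a real run):

**************************************************************************************
** TEST                          STATUS  SIM TIME (ns)  REAL TIME (s)  RATIO (ns/s) **
**************************************************************************************
** test_dff.my_first_test         PASS         1000.00           0.01     96153.85  **
**************************************************************************************
** TESTS=1 PASS=1 FAIL=0 SKIP=0                1000.00           0.01     96153.85  **
**************************************************************************************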
+
+    @staticmethod
+    def _safe_divide(a: float, b: float) -> float:
+        try:
+            return a / b
+        except ZeroDivisionError:
+            if a == 0:
+                return float("nan")
+            else:
+                return float("inf")
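The helper keeps the ratio column well-defined at the edges: zero sim time over zero wall time is NaN, anything else over zero wall time is infinity. Doctest-style, consistent with the code above:

>>> RegressionManager._safe_divide(0.0, 0.0)
nan
>>> RegressionManager._safe_divide(5.0, 0.0)
inf
>>> RegressionManager._safe_divide(10.0, 2.0)
5.0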
+
+
+def _create_test(function, name, documentation, mod, *args, **kwargs):
+    """Factory function to create tests, avoids late binding.
+
+    Creates a test dynamically. The test will call the supplied
+    function with the supplied arguments.
+
+    Args:
+        function (function): The test function to run.
+        name (str): The name of the test.
+        documentation (str): The docstring for the test.
+        mod (module): The module this function belongs to.
+        *args: Remaining args to pass to test function.
+        **kwargs: Passed to the test function.
+
+    Returns:
+        Decorated test function
+    """
+
+    async def _my_test(dut):
+        await function(dut, *args, **kwargs)
+
+    _my_test.__name__ = name
+    _my_test.__qualname__ = name
+    _my_test.__doc__ = documentation
+    _my_test.__module__ = mod.__name__
+
+    return cocotb.test()(_my_test)
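"Avoids late binding" refers to the classic closure pitfall: had the inner coroutine been defined directly inside the generate_tests loop below, every generated test would see the final iteration's values. A standalone sketch of the pitfall and the factory fix (plain Python, unrelated to cocotb itself):

# Late binding: every closure reads k after the loop has finished.
broken = [lambda: k for k in range(3)]
print([f() for f in broken])  # [2, 2, 2]

# A factory function (the role _create_test plays) captures each value.
def make(k):
    return lambda: k

fixed = [make(k) for k in range(3)]
print([f() for f in fixed])  # [0, 1, 2]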
+
+
+class TestFactory:
+    """Factory to automatically generate tests.
+
+    Args:
+        test_function: A Callable that returns the test Coroutine.
+            Must take *dut* as the first argument.
+        *args: Remaining arguments are passed directly to the test function.
+            Note that these arguments are not varied. An argument that
+            varies with each test must be a keyword argument to the
+            test function.
+        **kwargs: Remaining keyword arguments are passed directly to the test function.
+            Note that these arguments are not varied. An argument that
+            varies with each test must be a keyword argument to the
+            test function.
+
+    Assuming we have a common test function that will run a test. This test
+    function will take keyword arguments (for example generators for each of
+    the input interfaces) and generate tests that call the supplied function.
+
+    This Factory allows us to generate sets of tests based on the different
+    permutations of the possible arguments to the test function.
+
+    For example, if we have a module that takes backpressure, has two configurable
+    features where enabling ``feature_b`` requires ``feature_a`` to be active, and
+    need to test against data generation routines ``gen_a`` and ``gen_b``:
+
+    >>> tf = TestFactory(test_function=run_test)
+    >>> tf.add_option(name='data_in', optionlist=[gen_a, gen_b])
+    >>> tf.add_option('backpressure', [None, random_backpressure])
+    >>> tf.add_option(('feature_a', 'feature_b'), [(False, False), (True, False), (True, True)])
+    >>> tf.generate_tests()
+
+    We would get the following tests:
+
+    * ``gen_a`` with no backpressure and both features disabled
+    * ``gen_a`` with no backpressure and only ``feature_a`` enabled
+    * ``gen_a`` with no backpressure and both features enabled
+    * ``gen_a`` with ``random_backpressure`` and both features disabled
+    * ``gen_a`` with ``random_backpressure`` and only ``feature_a`` enabled
+    * ``gen_a`` with ``random_backpressure`` and both features enabled
+    * ``gen_b`` with no backpressure and both features disabled
+    * ``gen_b`` with no backpressure and only ``feature_a`` enabled
+    * ``gen_b`` with no backpressure and both features enabled
+    * ``gen_b`` with ``random_backpressure`` and both features disabled
+    * ``gen_b`` with ``random_backpressure`` and only ``feature_a`` enabled
+    * ``gen_b`` with ``random_backpressure`` and both features enabled
+
+    The tests are appended to the calling module for auto-discovery.
+
+    Tests are simply named ``test_function_N``. The docstring for the test (hence
+    the test description) includes the name and description of each generator.
+
+    .. versionchanged:: 1.5
+        Groups of options are now supported
+    """
+
+    # Prevent warnings from collection of TestFactories by unit testing frameworks.
+    __test__ = False
+
+    def __init__(self, test_function, *args, **kwargs):
+        self.test_function = test_function
+        self.name = self.test_function.__qualname__
+
+        self.args = args
+        self.kwargs_constant = kwargs
+        self.kwargs = {}
+        self.log = _logger
+
+    def add_option(self, name, optionlist):
+        """Add a named option to the test.
+
+        Args:
+            name (str or iterable of str): An option name, or an iterable of
+                several option names. Passed to test as keyword arguments.
+
+            optionlist (list): A list of possible options for this test knob.
+                If N names were specified, this must be a list of N-tuples or
+                lists, where each element specifies a value for its respective
+                option.
+
+        .. versionchanged:: 1.5
+            Groups of options are now supported
+        """
+        if not isinstance(name, str):
+            name = tuple(name)
+            for opt in optionlist:
+                if len(name) != len(opt):
+                    raise ValueError(
+                        "Mismatch between number of options and number of option values in group"
+                    )
+        self.kwargs[name] = optionlist
+
+    def generate_tests(self, prefix="", postfix=""):
+        """
+        Generate an exhaustive set of tests using the cartesian product of the
+        possible keyword arguments.
+
+        The generated tests are appended to the namespace of the calling
+        module.
+
+        Args:
+            prefix (str): Text string to append to start of ``test_function`` name
+                when naming generated test cases. This allows reuse of
+                a single ``test_function`` with multiple
+                :class:`TestFactories <.TestFactory>` without name clashes.
+            postfix (str): Text string to append to end of ``test_function`` name
+                when naming generated test cases. This allows reuse of
+                a single ``test_function`` with multiple
+                :class:`TestFactories <.TestFactory>` without name clashes.
+        """
+
+        frm = inspect.stack()[1]
+        mod = inspect.getmodule(frm[0])
+
+        d = self.kwargs
+
+        for index, testoptions in enumerate(
+            dict(zip(d, v)) for v in product(*d.values())
+        ):
+
+            name = "%s%s%s_%03d" % (prefix, self.name, postfix, index + 1)
+            doc = "Automatically generated test\n\n"
+
+            # preprocess testoptions to split tuples
+            testoptions_split = {}
+            for optname, optvalue in testoptions.items():
+                if isinstance(optname, str):
+                    testoptions_split[optname] = optvalue
+                else:
+                    # previously checked in add_option; ensure nothing has changed
+                    assert len(optname) == len(optvalue)
+                    for n, v in zip(optname, optvalue):
+                        testoptions_split[n] = v
+
+            for optname, optvalue in testoptions_split.items():
+                if callable(optvalue):
+                    if not optvalue.__doc__:
+                        desc = "No docstring supplied"
+                    else:
+                        desc = optvalue.__doc__.split("\n")[0]
+                    doc += "\t{}: {} ({})\n".format(
+                        optname, optvalue.__qualname__, desc
+                    )
+                else:
+                    doc += "\t{}: {}\n".format(optname, repr(optvalue))
+
+            self.log.debug(
+                'Adding generated test "%s" to module "%s"' % (name, mod.__name__)
+            )
+            kwargs = {}
+            kwargs.update(self.kwargs_constant)
+            kwargs.update(testoptions_split)
+            if hasattr(mod, name):
+                self.log.error(
+                    "Overwriting %s in module %s. "
+                    "This causes a previously defined testcase "
+                    "not to be run. Consider setting/changing "
+                    "name_postfix" % (name, mod)
+                )
+            setattr(
+                mod,
+                name,
+                _create_test(self.test_function, name, doc, mod, *self.args, **kwargs),
+            )
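With the docstring's example, the naming scheme above (prefix + function name + postfix + a three-digit counter) yields run_test_001 through run_test_012; the prefix/postfix arguments exist so two factories over the same function don't overwrite each other's generated tests. A sketch, reusing run_test, gen_a, and gen_b from the class docstring:

tf_nominal = TestFactory(test_function=run_test)
tf_nominal.add_option("data_in", [gen_a, gen_b])
tf_nominal.generate_tests(postfix="_nominal")  # run_test_nominal_001, _002

tf_stress = TestFactory(test_function=run_test)
tf_stress.add_option("data_in", [gen_a, gen_b])
tf_stress.generate_tests(postfix="_stress")    # run_test_stress_001, _002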
+
+
+def _trim(docstring: Optional[str]) -> str:
+    """Normalizes test docstrings
+
+    Based on https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation.
+    """
+    if docstring is None or docstring == "":
+        return ""
+    # Convert tabs to spaces (following the normal Python rules)
+    # and split into a list of lines:
+    lines = docstring.expandtabs().splitlines()
+    # Determine minimum indentation (first line doesn't count):
+    indent = math.inf
+    for line in lines[1:]:
+        stripped = line.lstrip()
+        if stripped:
+            indent = min(indent, len(line) - len(stripped))
+    # Remove indentation (first line is special):
+    trimmed = [lines[0].strip()]
+    if indent < math.inf:
+        for line in lines[1:]:
+            trimmed.append(line[indent:].rstrip())
+    # Strip off trailing and leading blank lines:
+    while trimmed and not trimmed[-1]:
+        trimmed.pop()
+    while trimmed and not trimmed[0]:
+        trimmed.pop(0)
+    # Add one newline back
+    trimmed.insert(0, "")
+    # Return a single string:
+    return "\n ".join(trimmed)
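For a sense of what _trim feeds into the "running" log line: it strips the common indentation, drops surrounding blank lines, and prepends one newline so the description starts on its own line. A doctest-style sketch, consistent with the join string shown above:

>>> _trim("""Check the FIFO flags.
...     Overflow and underflow cases.
... """)
'\n Check the FIFO flags.\n Overflow and underflow cases.'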