qa-testing-utils 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qa_testing_utils-0.1.0/.gitignore +89 -0
- qa_testing_utils-0.1.0/PKG-INFO +15 -0
- qa_testing_utils-0.1.0/README.md +1 -0
- qa_testing_utils-0.1.0/pyproject.toml +40 -0
- qa_testing_utils-0.1.0/src/qa/testing/utils/__init__.py +3 -0
- qa_testing_utils-0.1.0/src/qa/testing/utils/exception_utils.py +50 -0
- qa_testing_utils-0.1.0/src/qa/testing/utils/exceptions.py +12 -0
- qa_testing_utils-0.1.0/src/qa/testing/utils/file_utils.py +196 -0
- qa_testing_utils-0.1.0/src/qa/testing/utils/logger.py +140 -0
- qa_testing_utils-0.1.0/src/qa/testing/utils/matchers.py +371 -0
- qa_testing_utils-0.1.0/src/qa/testing/utils/object_utils.py +185 -0
- qa_testing_utils-0.1.0/src/qa/testing/utils/stream_utils.py +24 -0
- qa_testing_utils-0.1.0/src/qa/testing/utils/string_utils.py +69 -0
- qa_testing_utils-0.1.0/src/qa/testing/utils/thread_utils.py +13 -0
- qa_testing_utils-0.1.0/src/qa/testing/utils/tuple_utils.py +49 -0
- qa_testing_utils-0.1.0/tests/assertion_tests.py +56 -0
- qa_testing_utils-0.1.0/tests/exception_utils_tests.py +14 -0
- qa_testing_utils-0.1.0/tests/logger_tests.py +48 -0
- qa_testing_utils-0.1.0/tests/matchers_tests.py +137 -0
- qa_testing_utils-0.1.0/tests/object_utils_tests.py +113 -0
- qa_testing_utils-0.1.0/tests/self_tests.py +82 -0
- qa_testing_utils-0.1.0/tests/stream_utils_tests.py +71 -0
- qa_testing_utils-0.1.0/tests/string_utils_tests.py +43 -0
- qa_testing_utils-0.1.0/tests/tuple_utils_tests.py +17 -0
@@ -0,0 +1,89 @@
|
|
1
|
+
# Byte-compiled / optimized / DLL files
|
2
|
+
__pycache__/
|
3
|
+
*.py[cod]
|
4
|
+
*$py.class
|
5
|
+
|
6
|
+
# C extensions
|
7
|
+
*.so
|
8
|
+
|
9
|
+
# Hatch & Python environments
|
10
|
+
.hatch/
|
11
|
+
.env
|
12
|
+
.venv
|
13
|
+
env/
|
14
|
+
venv/
|
15
|
+
ENV/
|
16
|
+
env.bak/
|
17
|
+
venv.bak/
|
18
|
+
|
19
|
+
# VSCode settings (optional: keep .vscode/settings.json)
|
20
|
+
.vscode/*
|
21
|
+
!.vscode/settings.json
|
22
|
+
|
23
|
+
# Distribution / packaging
|
24
|
+
build/
|
25
|
+
dist/
|
26
|
+
*.egg-info/
|
27
|
+
.eggs/
|
28
|
+
*.egg
|
29
|
+
MANIFEST
|
30
|
+
|
31
|
+
# PyInstaller
|
32
|
+
*.spec
|
33
|
+
|
34
|
+
# Installer logs
|
35
|
+
pip-log.txt
|
36
|
+
pip-delete-this-directory.txt
|
37
|
+
|
38
|
+
# Unit test / coverage reports
|
39
|
+
.pytest_cache/
|
40
|
+
.tox/
|
41
|
+
.nox/
|
42
|
+
.coverage
|
43
|
+
.coverage.*
|
44
|
+
.cache
|
45
|
+
nosetests.xml
|
46
|
+
coverage.xml
|
47
|
+
*.cover
|
48
|
+
*.py,cover
|
49
|
+
.hypothesis/
|
50
|
+
htmlcov/
|
51
|
+
|
52
|
+
# Mypy
|
53
|
+
.mypy_cache/
|
54
|
+
.dmypy.json
|
55
|
+
dmypy.json
|
56
|
+
|
57
|
+
# Pyre / Pytype
|
58
|
+
.pyre/
|
59
|
+
.pytype/
|
60
|
+
|
61
|
+
# Ruff
|
62
|
+
.ruff_cache/
|
63
|
+
|
64
|
+
# Debugging & development
|
65
|
+
*.log
|
66
|
+
|
67
|
+
# Notebook / IPython
|
68
|
+
.ipynb_checkpoints
|
69
|
+
profile_default/
|
70
|
+
ipython_config.py
|
71
|
+
|
72
|
+
# JupyterLite / Sphinx / docs
|
73
|
+
docs/_build/
|
74
|
+
site/
|
75
|
+
|
76
|
+
# Cython
|
77
|
+
cython_debug/
|
78
|
+
|
79
|
+
# Other tools & IDEs
|
80
|
+
.ropeproject
|
81
|
+
.spyderproject
|
82
|
+
.spyproject
|
83
|
+
|
84
|
+
# JetBrains / PyCharm (optional - uncomment to ignore everything)
|
85
|
+
#.idea/
|
86
|
+
|
87
|
+
# Miscellaneous
|
88
|
+
*.pypirc
|
89
|
+
UNKNOWN.egg-info/
|
@@ -0,0 +1,15 @@
|
|
1
|
+
Metadata-Version: 2.4
|
2
|
+
Name: qa-testing-utils
|
3
|
+
Version: 0.1.0
|
4
|
+
Summary: Testing utilities library for QA automation
|
5
|
+
Author-email: Adrian Herscu <adrian.herscu@gmail.com>
|
6
|
+
Requires-Python: >=3.13
|
7
|
+
Requires-Dist: allure-pytest
|
8
|
+
Requires-Dist: more-itertools
|
9
|
+
Requires-Dist: ppretty
|
10
|
+
Requires-Dist: pyfunctional
|
11
|
+
Requires-Dist: pyhamcrest
|
12
|
+
Requires-Dist: returns
|
13
|
+
Description-Content-Type: text/markdown
|
14
|
+
|
15
|
+
# qa-testing-utils
|
@@ -0,0 +1 @@
|
|
1
|
+
# qa-testing-utils
|
@@ -0,0 +1,40 @@
|
|
1
|
+
[project]
|
2
|
+
name = "qa-testing-utils"
|
3
|
+
version = "0.1.0"
|
4
|
+
description = "Testing utilities library for QA automation"
|
5
|
+
authors = [{ name = "Adrian Herscu", email = "adrian.herscu@gmail.com" }]
|
6
|
+
readme = "README.md"
|
7
|
+
requires-python = ">=3.13"
|
8
|
+
dependencies = [
|
9
|
+
"PyHamcrest",
|
10
|
+
"pyfunctional",
|
11
|
+
"ppretty",
|
12
|
+
"allure-pytest",
|
13
|
+
"more-itertools",
|
14
|
+
"returns"
|
15
|
+
]
|
16
|
+
|
17
|
+
[build-system]
|
18
|
+
requires = ["hatchling"]
|
19
|
+
build-backend = "hatchling.build"
|
20
|
+
|
21
|
+
[tool.hatch.metadata]
|
22
|
+
allow-direct-references = true
|
23
|
+
|
24
|
+
[tool.hatch.build.targets.wheel]
|
25
|
+
packages = ["src/qa/testing/utils"]
|
26
|
+
|
27
|
+
[tool.hatch.envs.default]
|
28
|
+
path = ".hatch/envs/default"
|
29
|
+
dependencies = [
|
30
|
+
"pytest",
|
31
|
+
"pytest-cov",
|
32
|
+
"mypy",
|
33
|
+
"isort",
|
34
|
+
"autopep8"
|
35
|
+
]
|
36
|
+
|
37
|
+
[tool.hatch.envs.default.scripts]
|
38
|
+
test = "pytest"
|
39
|
+
lint = "mypy src"
|
40
|
+
format = "autopep8 --in-place --recursive src"
|
@@ -0,0 +1,50 @@
|
|
1
|
+
# SPDX-FileCopyrightText: 2025 Adrian Herscu
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
import functools
|
6
|
+
import logging
|
7
|
+
from typing import Any, Callable
|
8
|
+
|
9
|
+
from returns.maybe import Maybe, Nothing, Some
|
10
|
+
from qa.testing.utils.object_utils import Supplier
|
11
|
+
|
12
|
+
|
13
|
+
def safely[T](supplier: Supplier[T]) -> Maybe[T]:
|
14
|
+
"""
|
15
|
+
Calls a function safely, wrapping its result in Maybe, and swallowing any exceptions.
|
16
|
+
The function should be a no-argument callable::
|
17
|
+
|
18
|
+
safely(lambda: call_something_that_may_fail(params))
|
19
|
+
|
20
|
+
Args:
|
21
|
+
supplier (Supplier[T]): The supplier to be called.
|
22
|
+
|
23
|
+
Returns:
|
24
|
+
Maybe[T]: The result wrapped in Maybe, or Nothing if an exception occurs.
|
25
|
+
"""
|
26
|
+
try:
|
27
|
+
result = supplier()
|
28
|
+
return Some(result)
|
29
|
+
except Exception as e:
|
30
|
+
logging.exception(f"Exception occurred: {e}")
|
31
|
+
return Nothing
|
32
|
+
|
33
|
+
|
34
|
+
def swallow(func: Callable[..., Any]) -> Callable[..., Any]:
    """
    Decorator that suppresses any exception raised by the wrapped function.

    When the wrapped call raises, the exception is logged (via `safely`) and
    None is returned in place of a result.

    Args:
        func (Callable): the function, supplied by the run-time

    Returns:
        Callable: the decorated function
    """
    @functools.wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        outcome = safely(lambda: func(*args, **kwargs))
        return outcome.value_or(None)

    return wrapper
|
@@ -0,0 +1,12 @@
|
|
1
|
+
# SPDX-FileCopyrightText: 2025 Adrian Herscu
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
class TestException(Exception):
    """
    Signals an exception raised by the test infrastructure itself.

    Useful to differentiate unexpected run-time exceptions — which should be
    treated as programming errors — from legitimate run-time failures such as
    time-outs or not-found conditions; the latter may be handled via a retry
    mechanism.
    """
|
@@ -0,0 +1,196 @@
|
|
1
|
+
# SPDX-FileCopyrightText: 2025 Adrian Herscu
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
import csv
|
6
|
+
from io import DEFAULT_BUFFER_SIZE, BufferedReader, RawIOBase
|
7
|
+
from lzma import LZMADecompressor
|
8
|
+
from pathlib import Path
|
9
|
+
from tarfile import TarInfo, open
|
10
|
+
from typing import BinaryIO, Iterable, Iterator, Tuple, final, override
|
11
|
+
from zlib import crc32
|
12
|
+
|
13
|
+
from more_itertools import peekable
|
14
|
+
from qa.testing.utils.logger import *
|
15
|
+
from qa.testing.utils.object_utils import *
|
16
|
+
from qa.testing.utils.string_utils import *
|
17
|
+
|
18
|
+
LAUNCHING_DIR = Path.cwd()
|
19
|
+
|
20
|
+
|
21
|
+
@final
class IterableReader(RawIOBase, LoggerMixin, ImmutableMixin):
    """
    Read-only raw I/O stream backed by an iterable of byte chunks,
    enabling streaming-mode consumption without materializing the data.
    """

    def __init__(self, chunks: Iterable[bytes]):
        # source iterator plus a carry-over buffer for bytes not yet consumed
        self._chunks = iter(chunks)
        self._accumulated_buffer = bytearray()

    @override
    def readable(self) -> bool:
        # this stream supports reading only
        return True

    @override
    def readinto(self, output_buffer: memoryview) -> int:  # type: ignore
        wanted = len(output_buffer)

        # pull chunks until the request can be satisfied or the source is dry
        while len(self._accumulated_buffer) < wanted:
            chunk = next(self._chunks, None)
            if chunk is None:
                break
            self.log.debug(f"buffered chunk with length={len(chunk)}")
            self._accumulated_buffer.extend(chunk)

        # hand over as many buffered bytes as fit into the output buffer
        consumed_bytes = min(len(self._accumulated_buffer), wanted)
        output_buffer[:consumed_bytes] = self._accumulated_buffer[:consumed_bytes]

        # drop the handed-over prefix, shifting the remainder left
        del self._accumulated_buffer[:consumed_bytes]

        self.log.debug(f"consumed {consumed_bytes} bytes")
        return consumed_bytes

    @staticmethod
    def from_(
            chunks: Iterable[bytes],
            buffer_size: int = DEFAULT_BUFFER_SIZE) -> BinaryIO:
        """
        Wraps a stream of binary chunks in a BufferedReader.

        The caller is responsible for closing the returned reader.

        Args:
            chunks (Iterable[bytes]): stream of binary chunks
            buffer_size (int, optional): buffer size for the reader.
                Defaults to DEFAULT_BUFFER_SIZE.

        Returns:
            io.BufferedReader: buffered reader over the chunk stream.
        """
        return BufferedReader(IterableReader(chunks), buffer_size)
|
69
|
+
|
70
|
+
|
71
|
+
# TODO perhaps there should be a writable stream to iterator utility too...
|
72
|
+
|
73
|
+
|
74
|
+
def stream_file(
        file_path: Path,
        chunk_size: int = DEFAULT_BUFFER_SIZE) -> Iterator[bytes]:
    """
    Streams a binary file from disk as an iterator of chunks.

    If the iterator is not consumed, the file will be closed when the
    iterator is garbage collected.

    Args:
        file_path (Path): path to file
        chunk_size (int, optional): the chunk size. Defaults to
            DEFAULT_BUFFER_SIZE.

    Yields:
        Iterator[bytes]: the binary chunks stream
    """
    with file_path.open('rb') as handle:
        while True:
            chunk = handle.read(chunk_size)
            if chunk == EMPTY_BYTE_STRING:  # end of file reached
                break
            yield chunk
|
92
|
+
|
93
|
+
|
94
|
+
def read_lines(
        byte_stream: Iterable[bytes],
        encoding: str = UTF_8,
        eol: str = LF) -> Iterator[str]:
    """
    Converts a stream of binary chunks into a stream of text lines,
    correctly handling lines that span chunk boundaries.

    Args:
        byte_stream (Iterable[bytes]): the binary (chunks) stream
        encoding (str, optional): expected text encoding. Defaults to 'utf-8'.
        eol (str, optional): expected line-ending. Default to LF.

    Yields:
        Iterator[str]: stream of text lines, not terminated by EOL marker
    """
    buffer = bytearray()
    eol_bytes = eol.encode(encoding)
    has_content = False

    for chunk in byte_stream:
        print(DOT, end=SPACE)  # progress indicator on console
        has_content = True
        buffer.extend(chunk)
        # split off complete lines; the trailing partial line stays buffered
        *lines, buffer = buffer.split(eol_bytes)
        trace(f"streaming {len(lines)} lines; leftover {len(buffer)} chars")
        for line in lines:
            yield line.decode(encoding)

    if buffer:  # flush the trailing partial line, if any
        yield buffer.decode(encoding)

    if not has_content:
        trace("no lines")
|
127
|
+
|
128
|
+
|
129
|
+
def decompress_xz_stream(compressed_chunks: Iterable[bytes]) -> Iterator[bytes]:
    """
    Lazily decompresses an XZ stream, chunk by chunk.

    Args:
        compressed_chunks (Iterable[bytes]): stream of binary compressed chunks

    Yields:
        Iterator[bytes]: the decompressed stream
    """
    # a single decompressor instance carries state across chunk boundaries
    decompressor = LZMADecompressor()
    return (decompressor.decompress(chunk) for chunk in compressed_chunks)
|
141
|
+
|
142
|
+
|
143
|
+
def extract_files_from_tar(tar_chunks: Iterable[bytes]) -> Iterator[Tuple[TarInfo, bytes]]:
    """
    Extracts regular files from a decompressed TAR stream.

    Args:
        tar_chunks (Iterable[bytes]): stream of decompressed TAR chunks

    Yields:
        Iterator[Tuple[tarfile.TarInfo, bytes]]: \
            streams tuples of meta-data and data for each file
    """
    # 'r|*' = non-seekable streaming mode with transparent compression
    with open(fileobj=IterableReader.from_(tar_chunks),
              mode='r|*') as tar:
        for entry in tar:
            if not entry.isfile():
                continue  # skip directories, links, etc.
            payload = tar.extractfile(entry)
            if payload:
                yield entry, payload.read()
|
161
|
+
|
162
|
+
|
163
|
+
def crc32_of(file: BinaryIO, chunk_size: int = DEFAULT_BUFFER_SIZE) -> int:
    """
    Calculates the CRC-32 of a binary stream from its current position to
    its tail, reading in chunks.

    Args:
        file (BinaryIO): The file object to read data from, starting from its current position.
        chunk_size (int): The size of chunks to read at a time (default is DEFAULT_BUFFER_SIZE).

    Returns:
        int: Calculated CRC value of the remaining file content.
    """
    checksum = 0

    # fold each chunk into the running checksum until EOF
    for block in iter(lambda: file.read(chunk_size), b''):
        checksum = crc32(block, checksum)

    return checksum & 0xFFFFFFFF  # ensure 32-bit unsigned
|
181
|
+
|
182
|
+
|
183
|
+
def write_csv(file_path: Path, data_stream: Iterable[dict]):
    """
    Writes a stream of flattened telemetry packets to a CSV file.

    Column headers are taken from the keys of the first row. If the stream
    is empty, nothing is written and no file is created.

    Args:
        file_path: Path to the CSV file to be written.
        data_stream: Iterable of dictionaries representing the rows to be written.
    """
    rows = iter(data_stream)
    try:
        # take the first row to derive the header fields
        first_row = next(rows)
    except StopIteration:
        # BUG FIX: the previous implementation called peekable.peek() here,
        # which raised StopIteration on an empty stream (after having
        # already created an empty file); now we simply write nothing.
        return

    with file_path.open(mode="w", newline="") as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=list(first_row.keys()))
        writer.writeheader()
        writer.writerow(first_row)  # the peeked row must still be written
        writer.writerows(rows)
|
@@ -0,0 +1,140 @@
|
|
1
|
+
# SPDX-FileCopyrightText: 2025 Adrian Herscu
|
2
|
+
#
|
3
|
+
# SPDX-License-Identifier: Apache-2.0
|
4
|
+
|
5
|
+
import inspect
|
6
|
+
import logging
|
7
|
+
from functools import cached_property, wraps
|
8
|
+
from typing import Callable, ParamSpec, TypeVar, cast, final
|
9
|
+
|
10
|
+
import allure
|
11
|
+
from qa.testing.utils.string_utils import EMPTY, LF
|
12
|
+
|
13
|
+
|
14
|
+
def trace[T](value: T) -> T:
|
15
|
+
"""Logs at debug level using the invoking module name as the logger."""
|
16
|
+
frame = inspect.currentframe()
|
17
|
+
try:
|
18
|
+
if frame is not None:
|
19
|
+
caller_frame = frame.f_back
|
20
|
+
if caller_frame is not None:
|
21
|
+
caller_module = inspect.getmodule(caller_frame)
|
22
|
+
logger_name = caller_module.__name__ if caller_module else '__main__'
|
23
|
+
logger = logging.getLogger(logger_name)
|
24
|
+
logger.debug(f"=== {value}")
|
25
|
+
else:
|
26
|
+
logging.getLogger(__name__).debug(f"=== {value}")
|
27
|
+
else:
|
28
|
+
logging.getLogger(__name__).debug(f"=== {value}")
|
29
|
+
finally:
|
30
|
+
del frame
|
31
|
+
|
32
|
+
return value
|
33
|
+
|
34
|
+
|
35
|
+
def logger[T:type](cls: T) -> T:
|
36
|
+
"""
|
37
|
+
Class decorator that injects a logger into annotated class.
|
38
|
+
|
39
|
+
Args:
|
40
|
+
cls (type): automatically provided by the runtime
|
41
|
+
|
42
|
+
Returns:
|
43
|
+
_type_: the decorated class
|
44
|
+
"""
|
45
|
+
cls._logger = logging.getLogger(cls.__name__)
|
46
|
+
|
47
|
+
@property
|
48
|
+
def log(self: T) -> logging.Logger:
|
49
|
+
return cast(logging.Logger, getattr(self, '_logger', None))
|
50
|
+
|
51
|
+
cls.log = log
|
52
|
+
|
53
|
+
return cls
|
54
|
+
|
55
|
+
|
56
|
+
class LoggerMixin:
|
57
|
+
"""
|
58
|
+
Yet another way of adding logging by deriving from this one.
|
59
|
+
"""
|
60
|
+
@final
|
61
|
+
@cached_property
|
62
|
+
def log(self) -> logging.Logger:
|
63
|
+
return logging.getLogger(self.__class__.__name__)
|
64
|
+
|
65
|
+
@final
|
66
|
+
def trace[T](self, value: T) -> T:
|
67
|
+
"""
|
68
|
+
Logs value at DEBUG level using this logger.
|
69
|
+
|
70
|
+
Use to log something as a value, usually in a lambda expression::
|
71
|
+
|
72
|
+
then.eventually_assert_that(
|
73
|
+
lambda: self.trace(...call some API...),
|
74
|
+
greater_that(0)) \
|
75
|
+
|
76
|
+
.and_....other verifications may follow...
|
77
|
+
|
78
|
+
Args:
|
79
|
+
value (T): the value
|
80
|
+
|
81
|
+
Returns:
|
82
|
+
T: the value
|
83
|
+
"""
|
84
|
+
self.log.debug(f"=== {value}")
|
85
|
+
return value
|
86
|
+
|
87
|
+
|
88
|
+
P = ParamSpec('P')
|
89
|
+
R = TypeVar('R')
|
90
|
+
|
91
|
+
|
92
|
+
def traced(func: Callable[P, R]) -> Callable[P, R]:
    """
    Method decorator that logs the function call with its arguments and the
    return value at DEBUG level.

    For instance methods (first parameter named ``self``), the logger is
    named after the instance's class, positional arguments after ``self``
    are logged, and the call is additionally wrapped in an allure report
    step. For plain functions, the logger is named after the function and
    all arguments are logged.

    Args:
        func (Callable[P, R]): The function to be decorated.

    Returns:
        Callable[P, R]: The decorated function.
    """
    @wraps(func)
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        # NOTE: each time a decorated function is called this logic will be
        # re-evaluated.
        signature = inspect.signature(func)
        parameters = list(signature.parameters.keys())

        # heuristic: a first parameter literally named 'self' marks an
        # instance method — NOTE(review): misses methods using another name
        if parameters and parameters[0] == 'self' and len(args) > 0:
            instance = args[0]
            logger = logging.getLogger(f"{instance.__class__.__name__}")
            logger.debug(
                f">>> {func.__name__} "
                f"{", ".join([str(arg) for arg in args[1:]])} "
                f"{LF.join(
                    f"{key}={str(value)}"
                    for key, value in kwargs.items()) if kwargs else EMPTY}")

            # only the instance-method branch is reported as an allure step
            with allure.step(  # type: ignore
                    f"{func.__name__} "
                    f"{', '.join([str(arg) for arg in args[1:]])}"):
                result = func(*args, **kwargs)

            # fluent-API detection: omit the result when the method returns
            # its own instance — NOTE(review): '==' may invoke a custom
            # __eq__; presumably identity was intended — confirm
            if result == instance:
                logger.debug(f"<<< {func.__name__}")
            else:
                logger.debug(f"<<< {func.__name__} {result}")

            return result
        else:
            # plain function: log everything, no allure step
            logger = logging.getLogger(func.__name__)
            logger.debug(f">>> {func.__name__} {args} {kwargs}")
            result = func(*args, **kwargs)
            logger.debug(f"<<< {func.__name__} {result}")
            return result

    return wrapper
|