coverage 7.13.1__cp314-cp314t-musllinux_1_2_aarch64.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- a1_coverage.pth +1 -0
- coverage/__init__.py +38 -0
- coverage/__main__.py +12 -0
- coverage/annotate.py +113 -0
- coverage/bytecode.py +197 -0
- coverage/cmdline.py +1220 -0
- coverage/collector.py +487 -0
- coverage/config.py +732 -0
- coverage/context.py +74 -0
- coverage/control.py +1514 -0
- coverage/core.py +139 -0
- coverage/data.py +251 -0
- coverage/debug.py +669 -0
- coverage/disposition.py +59 -0
- coverage/env.py +135 -0
- coverage/exceptions.py +85 -0
- coverage/execfile.py +329 -0
- coverage/files.py +553 -0
- coverage/html.py +860 -0
- coverage/htmlfiles/coverage_html.js +735 -0
- coverage/htmlfiles/favicon_32.png +0 -0
- coverage/htmlfiles/index.html +199 -0
- coverage/htmlfiles/keybd_closed.png +0 -0
- coverage/htmlfiles/pyfile.html +149 -0
- coverage/htmlfiles/style.css +389 -0
- coverage/htmlfiles/style.scss +844 -0
- coverage/inorout.py +590 -0
- coverage/jsonreport.py +200 -0
- coverage/lcovreport.py +218 -0
- coverage/misc.py +381 -0
- coverage/multiproc.py +120 -0
- coverage/numbits.py +146 -0
- coverage/parser.py +1215 -0
- coverage/patch.py +118 -0
- coverage/phystokens.py +197 -0
- coverage/plugin.py +617 -0
- coverage/plugin_support.py +299 -0
- coverage/pth_file.py +16 -0
- coverage/py.typed +1 -0
- coverage/python.py +272 -0
- coverage/pytracer.py +370 -0
- coverage/regions.py +127 -0
- coverage/report.py +298 -0
- coverage/report_core.py +117 -0
- coverage/results.py +502 -0
- coverage/sqldata.py +1212 -0
- coverage/sqlitedb.py +226 -0
- coverage/sysmon.py +509 -0
- coverage/templite.py +319 -0
- coverage/tomlconfig.py +212 -0
- coverage/tracer.cpython-314t-aarch64-linux-musl.so +0 -0
- coverage/tracer.pyi +43 -0
- coverage/types.py +214 -0
- coverage/version.py +35 -0
- coverage/xmlreport.py +263 -0
- coverage-7.13.1.dist-info/METADATA +200 -0
- coverage-7.13.1.dist-info/RECORD +61 -0
- coverage-7.13.1.dist-info/WHEEL +5 -0
- coverage-7.13.1.dist-info/entry_points.txt +4 -0
- coverage-7.13.1.dist-info/licenses/LICENSE.txt +177 -0
- coverage-7.13.1.dist-info/top_level.txt +1 -0
coverage/patch.py
ADDED
@@ -0,0 +1,118 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt

"""Invasive patches for coverage.py."""

from __future__ import annotations

import contextlib
import os
from typing import TYPE_CHECKING, Any, NoReturn

from coverage import env
from coverage.debug import DevNullDebug
from coverage.exceptions import ConfigError, CoverageException

if TYPE_CHECKING:
    from coverage import Coverage
    from coverage.config import CoverageConfig
    from coverage.types import TDebugCtl


def apply_patches(
    cov: Coverage,
    config: CoverageConfig,
    debug: TDebugCtl,
) -> None:
    """Apply invasive patches requested by `[run] patch=`."""
    debug = debug if debug.should("patch") else DevNullDebug()
    for patch in sorted(set(config.patch)):
        match patch:
            case "_exit":
                _patch__exit(cov, debug)

            case "execv":
                _patch_execv(cov, config, debug)

            case "fork":
                _patch_fork(debug)

            case "subprocess":
                _patch_subprocess(config, debug)

            case _:
                raise ConfigError(f"Unknown patch {patch!r}")


def _patch__exit(cov: Coverage, debug: TDebugCtl) -> None:
    """Patch os._exit."""
    debug.write("Patching _exit")

    old_exit = os._exit

    def coverage_os_exit_patch(status: int) -> NoReturn:
        with contextlib.suppress(Exception):
            debug.write(f"Using _exit patch with {cov = }")
        with contextlib.suppress(Exception):
            cov.save()
        old_exit(status)

    os._exit = coverage_os_exit_patch


def _patch_execv(cov: Coverage, config: CoverageConfig, debug: TDebugCtl) -> None:
    """Patch the execv family of functions."""
    if env.WINDOWS:
        raise CoverageException("patch=execv isn't supported yet on Windows.")

    debug.write("Patching execv")

    def make_execv_patch(fname: str, old_execv: Any) -> Any:
        def coverage_execv_patch(*args: Any, **kwargs: Any) -> Any:
            with contextlib.suppress(Exception):
                debug.write(f"Using execv patch for {fname} with {cov = }")
            with contextlib.suppress(Exception):
                cov.save()

            if fname.endswith("e"):
                # Assume the `env` argument is passed positionally.
                new_env = args[-1]
                # Pass our configuration in the new environment.
                new_env["COVERAGE_PROCESS_CONFIG"] = config.serialize()
                if env.TESTING:
                    # The subprocesses need to use the same core as the main process.
                    new_env["COVERAGE_CORE"] = os.getenv("COVERAGE_CORE")

                    # When testing locally, we need to honor the pyc file location
                    # or they get written to the .tox directories and pollute the
                    # next run with a different core.
                    if (cache_prefix := os.getenv("PYTHONPYCACHEPREFIX")) is not None:
                        new_env["PYTHONPYCACHEPREFIX"] = cache_prefix

                    # Without this, it fails on PyPy and Ubuntu.
                    new_env["PATH"] = os.getenv("PATH")
            old_execv(*args, **kwargs)

        return coverage_execv_patch

    # All the exec* and spawn* functions eventually call execv or execve.
    os.execv = make_execv_patch("execv", os.execv)
    os.execve = make_execv_patch("execve", os.execve)


def _patch_fork(debug: TDebugCtl) -> None:
    """Ensure Coverage is properly reset after a fork."""
    from coverage.control import _after_fork_in_child

    if env.WINDOWS:
        raise CoverageException("patch=fork isn't supported yet on Windows.")

    debug.write("Patching fork")
    os.register_at_fork(after_in_child=_after_fork_in_child)


def _patch_subprocess(config: CoverageConfig, debug: TDebugCtl) -> None:
    """Write .pth files and set environment vars to measure subprocesses."""
    debug.write("Patching subprocess")
    assert config.config_file is not None
    os.environ["COVERAGE_PROCESS_CONFIG"] = config.serialize()
coverage/phystokens.py
ADDED
@@ -0,0 +1,197 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt

"""Better tokenizing for coverage.py."""

from __future__ import annotations

import ast
import io
import keyword
import re
import sys
import token
import tokenize
from collections.abc import Iterable

from coverage import env
from coverage.types import TLineNo, TSourceTokenLines

TokenInfos = Iterable[tokenize.TokenInfo]


def _phys_tokens(toks: TokenInfos) -> TokenInfos:
    """Return all physical tokens, even line continuations.

    tokenize.generate_tokens() doesn't return a token for the backslash that
    continues lines. This wrapper provides those tokens so that we can
    re-create a faithful representation of the original source.

    Returns the same values as generate_tokens()

    """
    last_line: str | None = None
    last_lineno = -1
    last_ttext: str = ""
    for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
        if last_lineno != elineno:
            if last_line and last_line.endswith("\\\n"):
                # We are at the beginning of a new line, and the last line
                # ended with a backslash. We probably have to inject a
                # backslash token into the stream. Unfortunately, there's more
                # to figure out. This code::
                #
                #   usage = """\
                #   HEY THERE
                #   """
                #
                # triggers this condition, but the token text is::
                #
                #   '"""\\\nHEY THERE\n"""'
                #
                # so we need to figure out if the backslash is already in the
                # string token or not.
                inject_backslash = True
                if last_ttext.endswith("\\"):
                    inject_backslash = False
                elif ttype == token.STRING:
                    if ( # pylint: disable=simplifiable-if-statement
                        last_line.endswith("\\\n")
                        and last_line.rstrip(" \\\n").endswith(last_ttext)
                    ):
                        # Deal with special cases like such code::
                        #
                        #   a = ["aaa",\ # there may be zero or more blanks between "," and "\".
                        #        "bbb \
                        #        ccc"]
                        #
                        inject_backslash = True
                    else:
                        # It's a multi-line string and the first line ends with
                        # a backslash, so we don't need to inject another.
                        inject_backslash = False
                elif env.PYBEHAVIOR.fstring_syntax and ttype == token.FSTRING_MIDDLE:
                    inject_backslash = False
                if inject_backslash:
                    # Figure out what column the backslash is in.
                    ccol = len(last_line.split("\n")[-2]) - 1
                    # Yield the token, with a fake token type.
                    yield tokenize.TokenInfo(
                        99999,
                        "\\\n",
                        (slineno, ccol),
                        (slineno, ccol + 2),
                        last_line,
                    )
            last_line = ltext
        if ttype not in (tokenize.NEWLINE, tokenize.NL):
            last_ttext = ttext
        yield tokenize.TokenInfo(ttype, ttext, (slineno, scol), (elineno, ecol), ltext)
        last_lineno = elineno


def find_soft_key_lines(source: str) -> set[TLineNo]:
    """Helper for finding lines with soft keywords, like match/case lines."""
    soft_key_lines: set[TLineNo] = set()

    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Match):
            soft_key_lines.add(node.lineno)
            for case in node.cases:
                soft_key_lines.add(case.pattern.lineno)
        elif sys.version_info >= (3, 12) and isinstance(node, ast.TypeAlias):
            soft_key_lines.add(node.lineno)

    return soft_key_lines


def source_token_lines(source: str) -> TSourceTokenLines:
    """Generate a series of lines, one for each line in `source`.

    Each line is a list of pairs, each pair is a token::

        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

    Each pair has a token class, and the token text.

    If you concatenate all the token texts, and then join them with newlines,
    you should have your original `source` back, with two differences:
    trailing white space is not preserved, and a final line with no newline
    is indistinguishable from a final line with a newline.

    """

    ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL}
    line: list[tuple[str, str]] = []
    col = 0

    source = source.expandtabs(8).replace("\r\n", "\n")
    tokgen = generate_tokens(source)

    soft_key_lines = find_soft_key_lines(source)

    for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen):
        mark_start = True
        for part in re.split("(\n)", ttext):
            if part == "\n":
                yield line
                line = []
                col = 0
                mark_end = False
            elif part == "":
                mark_end = False
            elif ttype in ws_tokens:
                mark_end = False
            else:
                if env.PYBEHAVIOR.fstring_syntax and ttype == token.FSTRING_MIDDLE:
                    part = part.replace("{", "{{").replace("}", "}}")
                    ecol = scol + len(part)
                if mark_start and scol > col:
                    line.append(("ws", " " * (scol - col)))
                    mark_start = False
                tok_class = tokenize.tok_name.get(ttype, "xx").lower()[:3]
                if ttype == token.NAME:
                    if keyword.iskeyword(ttext):
                        # Hard keywords are always keywords.
                        tok_class = "key"
                    elif keyword.issoftkeyword(ttext):
                        # Soft keywords appear at the start of their line.
                        if len(line) == 0:
                            is_start_of_line = True
                        elif (len(line) == 1) and line[0][0] == "ws":
                            is_start_of_line = True
                        else:
                            is_start_of_line = False
                        if is_start_of_line and sline in soft_key_lines:
                            tok_class = "key"
                line.append((tok_class, part))
                mark_end = True
            scol = 0
        if mark_end:
            col = ecol

    if line:
        yield line


def generate_tokens(text: str) -> TokenInfos:
    """A helper around `tokenize.generate_tokens`.

    Originally this was used to cache the results, but it didn't seem to make
    reporting go faster, and caused issues with using too much memory.

    """
    readline = io.StringIO(text).readline
    return tokenize.generate_tokens(readline)


def source_encoding(source: bytes) -> str:
    """Determine the encoding for `source`, according to PEP 263.

    `source` is a byte string: the text of the program.

    Returns a string, the name of the encoding.

    """
    readline = iter(source.splitlines(True)).__next__
    return tokenize.detect_encoding(readline)[0]