coverage-7.13.1-cp313-cp313-musllinux_1_2_riscv64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. a1_coverage.pth +1 -0
  2. coverage/__init__.py +38 -0
  3. coverage/__main__.py +12 -0
  4. coverage/annotate.py +113 -0
  5. coverage/bytecode.py +197 -0
  6. coverage/cmdline.py +1220 -0
  7. coverage/collector.py +487 -0
  8. coverage/config.py +732 -0
  9. coverage/context.py +74 -0
  10. coverage/control.py +1514 -0
  11. coverage/core.py +139 -0
  12. coverage/data.py +251 -0
  13. coverage/debug.py +669 -0
  14. coverage/disposition.py +59 -0
  15. coverage/env.py +135 -0
  16. coverage/exceptions.py +85 -0
  17. coverage/execfile.py +329 -0
  18. coverage/files.py +553 -0
  19. coverage/html.py +860 -0
  20. coverage/htmlfiles/coverage_html.js +735 -0
  21. coverage/htmlfiles/favicon_32.png +0 -0
  22. coverage/htmlfiles/index.html +199 -0
  23. coverage/htmlfiles/keybd_closed.png +0 -0
  24. coverage/htmlfiles/pyfile.html +149 -0
  25. coverage/htmlfiles/style.css +389 -0
  26. coverage/htmlfiles/style.scss +844 -0
  27. coverage/inorout.py +590 -0
  28. coverage/jsonreport.py +200 -0
  29. coverage/lcovreport.py +218 -0
  30. coverage/misc.py +381 -0
  31. coverage/multiproc.py +120 -0
  32. coverage/numbits.py +146 -0
  33. coverage/parser.py +1215 -0
  34. coverage/patch.py +118 -0
  35. coverage/phystokens.py +197 -0
  36. coverage/plugin.py +617 -0
  37. coverage/plugin_support.py +299 -0
  38. coverage/pth_file.py +16 -0
  39. coverage/py.typed +1 -0
  40. coverage/python.py +272 -0
  41. coverage/pytracer.py +370 -0
  42. coverage/regions.py +127 -0
  43. coverage/report.py +298 -0
  44. coverage/report_core.py +117 -0
  45. coverage/results.py +502 -0
  46. coverage/sqldata.py +1212 -0
  47. coverage/sqlitedb.py +226 -0
  48. coverage/sysmon.py +509 -0
  49. coverage/templite.py +319 -0
  50. coverage/tomlconfig.py +212 -0
  51. coverage/tracer.cpython-313-riscv64-linux-musl.so +0 -0
  52. coverage/tracer.pyi +43 -0
  53. coverage/types.py +214 -0
  54. coverage/version.py +35 -0
  55. coverage/xmlreport.py +263 -0
  56. coverage-7.13.1.dist-info/METADATA +200 -0
  57. coverage-7.13.1.dist-info/RECORD +61 -0
  58. coverage-7.13.1.dist-info/WHEEL +5 -0
  59. coverage-7.13.1.dist-info/entry_points.txt +4 -0
  60. coverage-7.13.1.dist-info/licenses/LICENSE.txt +177 -0
  61. coverage-7.13.1.dist-info/top_level.txt +1 -0
coverage/core.py ADDED
@@ -0,0 +1,139 @@
+ # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+ # For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt
+
+ """Management of core choices."""
+
+ from __future__ import annotations
+
+ import os
+ import sys
+ from typing import Any
+
+ from coverage import env
+ from coverage.config import CoverageConfig
+ from coverage.disposition import FileDisposition
+ from coverage.exceptions import ConfigError
+ from coverage.misc import isolate_module
+ from coverage.pytracer import PyTracer
+ from coverage.sysmon import SysMonitor
+ from coverage.types import TDebugCtl, TFileDisposition, Tracer, TWarnFn
+
+ os = isolate_module(os)
+
+ IMPORT_ERROR: str = ""
+
+ try:
+     # Use the C extension code when we can, for speed.
+     import coverage.tracer
+
+     CTRACER_FILE: str | None = getattr(coverage.tracer, "__file__", "unknown")
+ except ImportError as imp_err:
+     # Couldn't import the C extension, maybe it isn't built.
+     # We still need to check the environment variable directly here,
+     # as this code runs before configuration is loaded.
+     if os.getenv("COVERAGE_CORE") == "ctrace":  # pragma: part covered
+         # During testing, we use the COVERAGE_CORE environment variable
+         # to indicate that we've fiddled with the environment to test this
+         # fallback code. If we thought we had a C tracer, but couldn't import
+         # it, then exit quickly and clearly instead of dribbling confusing
+         # errors. I'm using sys.exit here instead of an exception because an
+         # exception here causes all sorts of other noise in unittest.
+         sys.stderr.write("*** COVERAGE_CORE is 'ctrace' but can't import CTracer!\n")
+         sys.exit(1)
+     IMPORT_ERROR = str(imp_err)
+     CTRACER_FILE = None
+
+
+ class Core:
+     """Information about the central technology enabling execution measurement."""
+
+     tracer_class: type[Tracer]
+     tracer_kwargs: dict[str, Any]
+     file_disposition_class: type[TFileDisposition]
+     supports_plugins: bool
+     packed_arcs: bool
+     systrace: bool
+
+     def __init__(
+         self,
+         *,
+         warn: TWarnFn,
+         debug: TDebugCtl | None,
+         config: CoverageConfig,
+         dynamic_contexts: bool,
+         metacov: bool,
+     ) -> None:
+         def _debug(msg: str) -> None:
+             if debug:
+                 debug.write(msg)
+
+         _debug("in core.py")
+
+         # Check the conditions that preclude us from using sys.monitoring.
+         reason_no_sysmon = ""
+         if not env.PYBEHAVIOR.pep669:
+             reason_no_sysmon = "sys.monitoring isn't available in this version"
+         elif config.branch and not env.PYBEHAVIOR.branch_right_left:
+             reason_no_sysmon = "sys.monitoring can't measure branches in this version"
+         elif dynamic_contexts:
+             reason_no_sysmon = "it doesn't yet support dynamic contexts"
+         elif any((bad := c) in config.concurrency for c in ["greenlet", "eventlet", "gevent"]):
+             reason_no_sysmon = f"it doesn't support concurrency={bad}"
+
+         core_name: str | None = None
+         if config.timid:
+             core_name = "pytrace"
+             _debug("core.py: Using pytrace because timid=True")
+         elif core_name is None:
+             # This could still leave core_name as None.
+             core_name = config.core
+             _debug(f"core.py: core from config is {core_name!r}")
+
+         if core_name == "sysmon" and reason_no_sysmon:
+             _debug(f"core.py: defaulting because sysmon not usable: {reason_no_sysmon}")
+             warn(f"Can't use core=sysmon: {reason_no_sysmon}, using default core", slug="no-sysmon")
+             core_name = None
+
+         if core_name is None:
+             if env.SYSMON_DEFAULT and not reason_no_sysmon:
+                 core_name = "sysmon"
+                 _debug("core.py: Using sysmon because SYSMON_DEFAULT is set")
+             else:
+                 core_name = "ctrace"
+                 _debug("core.py: Defaulting to ctrace core")
+
+         if core_name == "ctrace":
+             if not CTRACER_FILE:
+                 if IMPORT_ERROR and env.SHIPPING_WHEELS:
+                     warn(f"Couldn't import C tracer: {IMPORT_ERROR}", slug="no-ctracer", once=True)
+                 core_name = "pytrace"
+                 _debug("core.py: Falling back to pytrace because C tracer not available")
+
+         _debug(f"core.py: Using core={core_name}")
+
+         self.tracer_kwargs = {}
+
+         if core_name == "sysmon":
+             self.tracer_class = SysMonitor
+             self.tracer_kwargs["tool_id"] = 3 if metacov else 1
+             self.file_disposition_class = FileDisposition
+             self.supports_plugins = False
+             self.packed_arcs = False
+             self.systrace = False
+         elif core_name == "ctrace":
+             self.tracer_class = coverage.tracer.CTracer
+             self.file_disposition_class = coverage.tracer.CFileDisposition
+             self.supports_plugins = True
+             self.packed_arcs = True
+             self.systrace = True
+         elif core_name == "pytrace":
+             self.tracer_class = PyTracer
+             self.file_disposition_class = FileDisposition
+             self.supports_plugins = False
+             self.packed_arcs = False
+             self.systrace = True
+         else:
+             raise ConfigError(f"Unknown core value: {core_name!r}")
+
+     def __repr__(self) -> str:
+         return f"<Core tracer_class={self.tracer_class.__name__}>"
coverage/data.py ADDED
@@ -0,0 +1,251 @@
+ # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
+ # For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt
+
+ """Coverage data for coverage.py.
+
+ This file had the 4.x JSON data support, which is now gone. This file still
+ has storage-agnostic helpers, and is kept to avoid changing too many imports.
+ CoverageData is now defined in sqldata.py, and imported here to keep the
+ imports working.
+
+ """
+
+ from __future__ import annotations
+
+ import functools
+ import glob
+ import hashlib
+ import os.path
+ from collections.abc import Callable, Iterable
+ from typing import Literal
+
+ from coverage.exceptions import CoverageException, NoDataError
+ from coverage.files import PathAliases
+ from coverage.misc import Hasher, file_be_gone, human_sorted, plural
+ from coverage.sqldata import CoverageData as CoverageData  # pylint: disable=useless-import-alias
+ from coverage.sqldata import filename_match
+
+
+ def line_counts(data: CoverageData, fullpath: bool = False) -> dict[str, int]:
+     """Return a dict summarizing the line coverage data.
+
+     Keys are based on the file names, and values are the number of executed
+     lines. If `fullpath` is true, then the keys are the full pathnames of
+     the files, otherwise they are the basenames of the files.
+
+     Returns a dict mapping file names to counts of lines.
+
+     """
+     summ = {}
+     filename_fn: Callable[[str], str]
+     if fullpath:
+         # pylint: disable=unnecessary-lambda-assignment
+         filename_fn = lambda f: f
+     else:
+         filename_fn = os.path.basename
+     for filename in data.measured_files():
+         lines = data.lines(filename)
+         assert lines is not None
+         summ[filename_fn(filename)] = len(lines)
+     return summ
+
+
+ def add_data_to_hash(data: CoverageData, filename: str, hasher: Hasher) -> None:
+     """Contribute `filename`'s data to the `hasher`.
+
+     `hasher` is a `coverage.misc.Hasher` instance to be updated with
+     the file's data. It should only get the results data, not the run
+     data.
+
+     """
+     if data.has_arcs():
+         hasher.update(sorted(data.arcs(filename) or []))
+     else:
+         hasher.update(sorted_lines(data, filename))
+     hasher.update(data.file_tracer(filename))
+
+
+ def combinable_files(data_file: str, data_paths: Iterable[str] | None = None) -> list[str]:
+     """Make a list of data files to be combined.
+
+     `data_file` is a path to a data file. `data_paths` is a list of files or
+     directories of files.
+
+     Returns a list of absolute file paths.
+     """
+     data_dir, local = os.path.split(os.path.abspath(data_file))
+
+     data_paths = data_paths or [data_dir]
+     files_to_combine = []
+     for p in data_paths:
+         if os.path.isfile(p):
+             files_to_combine.append(os.path.abspath(p))
+         elif os.path.isdir(p):
+             pattern = glob.escape(os.path.join(os.path.abspath(p), local)) + ".*"
+             files_to_combine.extend(glob.glob(pattern))
+         else:
+             raise NoDataError(f"Couldn't combine from non-existent path '{p}'")
+
+     # SQLite might have made journal files alongside our database files.
+     # We never want to combine those.
+     files_to_combine = [fnm for fnm in files_to_combine if not fnm.endswith("-journal")]
+
+     # Sorting isn't usually needed, since it shouldn't matter what order files
+     # are combined, but sorting makes tests more predictable, and makes
+     # debugging more understandable when things go wrong.
+     return sorted(files_to_combine)
+
+
+ def hash_for_data_file(dbfilename: str) -> str:
+     """Get the hash of the data in the file."""
+     m = filename_match(dbfilename)
+     if m and m["hash"]:
+         return m["hash"]
+     else:
+         with open(dbfilename, "rb") as fobj:
+             hasher = hashlib.new("sha3_256", usedforsecurity=False)
+             hasher.update(fobj.read())
+         return hasher.hexdigest()
+
+
+ class DataFileClassifier:
+     """Track what files to combine and which to skip."""
+
+     def __init__(self) -> None:
+         self.file_hashes: set[str] = set()
+
+     def classify(self, f: str) -> Literal["combine", "skip"]:
+         """Determine whether to combine or skip this file."""
+         sha = hash_for_data_file(f)
+         if sha in self.file_hashes:
+             return "skip"
+         else:
+             self.file_hashes.add(sha)
+             return "combine"
+
+
+ def combine_parallel_data(
+     data: CoverageData,
+     aliases: PathAliases | None = None,
+     data_paths: Iterable[str] | None = None,
+     strict: bool = False,
+     keep: bool = False,
+     message: Callable[[str], None] | None = None,
+ ) -> None:
+     """Combine a number of data files together.
+
+     `data` is a CoverageData.
+
+     Treat `data.filename` as a file prefix, and combine the data from all
+     of the data files starting with that prefix plus a dot.
+
+     If `aliases` is provided, it's a `PathAliases` object that is used to
+     re-map paths to match the local machine's.
+
+     If `data_paths` is provided, it is a list of directories or files to
+     combine. Directories are searched for files that start with
+     `data.filename` plus dot as a prefix, and those files are combined.
+
+     If `data_paths` is not provided, then the directory portion of
+     `data.filename` is used as the directory to search for data files.
+
+     Unless `keep` is True every data file found and combined is then deleted
+     from disk. If a file cannot be read, a warning will be issued, and the
+     file will not be deleted.
+
+     If `strict` is true, and no files are found to combine, an error is
+     raised.
+
+     `message` is a function to use for printing messages to the user.
+
+     """
+     files_to_combine = combinable_files(data.base_filename(), data_paths)
+
+     if strict and not files_to_combine:
+         raise NoDataError("No data to combine")
+
+     if aliases is None:
+         map_path = None
+     else:
+         map_path = functools.cache(aliases.map)
+
+     classifier = DataFileClassifier()
+     combined_any = False
+
+     for f in files_to_combine:
+         if f == data.data_filename():
+             # Sometimes we are combining into a file which is one of the
+             # parallel files. Skip that file.
+             if data._debug.should("dataio"):
+                 data._debug.write(f"Skipping combining ourself: {f!r}")
+             continue
+
+         try:
+             rel_file_name = os.path.relpath(f)
+         except ValueError:
+             # ValueError can be raised under Windows when os.getcwd() returns a
+             # folder from a different drive than the drive of f, in which case
+             # we print the original value of f instead of its relative path.
+             rel_file_name = f
+
+         file_action = classifier.classify(f)
+
+         delete_this_one = not keep
+         if file_action == "combine":
+             if data._debug.should("dataio"):
+                 data._debug.write(f"Combining data file {f!r}")
+             try:
+                 new_data = CoverageData(f, debug=data._debug)
+                 new_data.read()
+             except CoverageException as exc:
+                 if data._warn:
+                     # The CoverageException has the file name in it, so just
+                     # use the message as the warning.
+                     data._warn(str(exc))
+                 if message:
+                     message(f"Couldn't combine data file {rel_file_name}: {exc}")
+                 delete_this_one = False
+             else:
+                 data.update(new_data, map_path=map_path)
+                 combined_any = True
+                 if message:
+                     message(f"Combined data file {rel_file_name}")
+         else:
+             if message:
+                 message(f"Skipping duplicate data {rel_file_name}")
+
+         if delete_this_one:
+             if data._debug.should("dataio"):
+                 data._debug.write(f"Deleting data file {f!r}")
+             file_be_gone(f)
+
+     if strict and not combined_any:
+         raise NoDataError("No usable data files")
+
+
+ def debug_data_file(filename: str) -> None:
+     """Implementation of 'coverage debug data'."""
+     data = CoverageData(filename)
+     filename = data.data_filename()
+     print(f"path: {filename}")
+     if not os.path.exists(filename):
+         print("No data collected: file doesn't exist")
+         return
+     data.read()
+     print(f"has_arcs: {data.has_arcs()!r}")
+     summary = line_counts(data, fullpath=True)
+     filenames = human_sorted(summary.keys())
+     nfiles = len(filenames)
+     print(f"{nfiles} file{plural(nfiles)}:")
+     for f in filenames:
+         line = f"{f}: {summary[f]} line{plural(summary[f])}"
+         plugin = data.file_tracer(f)
+         if plugin:
+             line += f" [{plugin}]"
+         print(line)
+
+
+ def sorted_lines(data: CoverageData, filename: str) -> list[int]:
+     """Get the sorted lines for a file, for tests."""
+     lines = data.lines(filename)
+     return sorted(lines or [])
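Note: the helpers above all operate on CoverageData, which is re-exported here from sqldata.py. A minimal sketch of summarizing an existing data file with line_counts, assuming a .coverage file has already been produced by a coverage run in the current directory:

    from coverage.data import CoverageData, line_counts

    data = CoverageData()  # defaults to the ".coverage" file
    data.read()
    # Map each measured file to its count of executed lines.
    for name, count in sorted(line_counts(data, fullpath=True).items()):
        print(f"{name}: {count} executed lines")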