coverage: 7.10.6-cp314-cp314-macosx_11_0_arm64.whl → 7.11.0-cp314-cp314-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- coverage/cmdline.py +4 -2
- coverage/collector.py +2 -2
- coverage/config.py +11 -11
- coverage/control.py +6 -5
- coverage/debug.py +9 -8
- coverage/env.py +1 -64
- coverage/exceptions.py +6 -8
- coverage/html.py +12 -4
- coverage/jsonreport.py +14 -5
- coverage/lcovreport.py +5 -2
- coverage/misc.py +26 -25
- coverage/parser.py +63 -206
- coverage/patch.py +11 -10
- coverage/phystokens.py +3 -6
- coverage/python.py +1 -1
- coverage/results.py +99 -40
- coverage/sqldata.py +6 -2
- coverage/tracer.cpython-314-darwin.so +0 -0
- coverage/types.py +8 -12
- coverage/version.py +2 -16
- {coverage-7.10.6.dist-info → coverage-7.11.0.dist-info}/METADATA +13 -13
- {coverage-7.10.6.dist-info → coverage-7.11.0.dist-info}/RECORD +26 -26
- {coverage-7.10.6.dist-info → coverage-7.11.0.dist-info}/WHEEL +0 -0
- {coverage-7.10.6.dist-info → coverage-7.11.0.dist-info}/entry_points.txt +0 -0
- {coverage-7.10.6.dist-info → coverage-7.11.0.dist-info}/licenses/LICENSE.txt +0 -0
- {coverage-7.10.6.dist-info → coverage-7.11.0.dist-info}/top_level.txt +0 -0
coverage/parser.py
CHANGED
@@ -10,7 +10,6 @@ import collections
 import functools
 import os
 import re
-import sys
 import token
 import tokenize
 from collections.abc import Iterable, Sequence
@@ -299,10 +298,9 @@ class PythonParser:
         aaa = AstArcAnalyzer(self.filename, self._ast_root, self.raw_statements, self._multiline)
         aaa.analyze()
         arcs = aaa.arcs
-
-
-
-            arcs = self.fix_with_jumps(arcs)
+        self._with_jump_fixers = aaa.with_jump_fixers()
+        if self._with_jump_fixers:
+            arcs = self.fix_with_jumps(arcs)
 
         self._all_arcs = set()
         for l1, l2 in arcs:
@@ -451,33 +449,11 @@ class ByteParser:
     def _line_numbers(self) -> Iterable[TLineNo]:
         """Yield the line numbers possible in this code object.
 
-        Uses
-        line numbers. Produces a sequence: l0, l1, ...
+        Uses co_lines() to produce a sequence: l0, l1, ...
         """
-
-
-
-                if line:
-                    yield line
-        else:
-            # Adapted from dis.py in the standard library.
-            byte_increments = self.code.co_lnotab[0::2]
-            line_increments = self.code.co_lnotab[1::2]
-
-            last_line_num: TLineNo | None = None
-            line_num = self.code.co_firstlineno
-            byte_num = 0
-            for byte_incr, line_incr in zip(byte_increments, line_increments):
-                if byte_incr:
-                    if line_num != last_line_num:
-                        yield line_num
-                        last_line_num = line_num
-                byte_num += byte_incr
-                if line_incr >= 0x80:
-                    line_incr -= 0x100
-                line_num += line_incr
-            if line_num != last_line_num:
-                yield line_num
+        for _, _, line in self.code.co_lines():
+            if line:
+                yield line
 
     def _find_statements(self) -> Iterable[TLineNo]:
         """Find the statements in `self.code`.
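Note on the hunk above: the deleted branch decoded co_lnotab by hand (adapted from dis.py); co_lines() is its PEP 626 replacement, available since Python 3.10. A minimal standalone sketch of that API, not taken from coverage:

def demo() -> int:
    x = 1
    return x

# co_lines() yields (bytecode_start, bytecode_end, line) triples; line can be
# None for instructions that map to no source line, hence the "if line" guard.
for start, end, line in demo.__code__.co_lines():
    print(f"bytes [{start}:{end}) -> line {line}")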
@@ -650,19 +626,6 @@ class TryBlock(Block):
         return True
 
 
-class NodeList(ast.AST):
-    """A synthetic fictitious node, containing a sequence of nodes.
-
-    This is used when collapsing optimized if-statements, to represent the
-    unconditional execution of one of the clauses.
-
-    """
-
-    def __init__(self, body: Sequence[ast.AST]) -> None:
-        self.body = body
-        self.lineno = body[0].lineno  # type: ignore[attr-defined]
-
-
 # TODO: Shouldn't the cause messages join with "and" instead of "or"?
 
 
@@ -673,20 +636,22 @@ def is_constant_test_expr(node: ast.AST) -> tuple[bool, bool]:
     handle the kinds of constant expressions people might actually use.
 
     """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    match node:
+        case ast.Constant():
+            return True, bool(node.value)
+        case ast.Name():
+            if node.id in ["True", "False", "None", "__debug__"]:
+                return True, eval(node.id)  # pylint: disable=eval-used
+        case ast.UnaryOp():
+            if isinstance(node.op, ast.Not):
+                is_constant, val = is_constant_test_expr(node.operand)
+                return is_constant, not val
+        case ast.BoolOp():
+            rets = [is_constant_test_expr(v) for v in node.values]
+            is_constant = all(is_const for is_const, _ in rets)
+            if is_constant:
+                op = any if isinstance(node.op, ast.Or) else all
+                return True, op(v for _, v in rets)
     return False, False
 
 
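The rewrite swaps an if/elif chain (the deleted lines' text did not survive in this view) for a match statement. A quick sketch of what it classifies, assuming `is_constant_test_expr` imports cleanly from `coverage.parser` as the hunk header indicates; each test expression maps to an (is_constant, value) pair:

import ast

from coverage.parser import is_constant_test_expr

for src in ["True", "not False", "0", "__debug__", "True or x"]:
    node = ast.parse(src, mode="eval").body
    print(f"{src!r} -> {is_constant_test_expr(node)}")
# "True or x" reports (False, False): "x" is not a constant, so the BoolOp
# case declines and the function falls through to the default.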
@@ -890,14 +855,8 @@ class AstArcAnalyzer:
         else:
             return node.lineno
 
-    def _line__Module(self, node: ast.Module) -> TLineNo:
-
-            return 1
-        elif node.body:
-            return self.line_for_node(node.body[0])
-        else:
-            # Empty modules have no line number, they always start at 1.
-            return 1
+    def _line__Module(self, node: ast.Module) -> TLineNo:  # pylint: disable=unused-argument
+        return 1
 
     # The node types that just flow to the next node with no complications.
     OK_TO_DEFAULT = {
@@ -982,98 +941,12 @@ class AstArcAnalyzer:
         for body_node in body:
             lineno = self.line_for_node(body_node)
             if lineno not in self.statements:
-
-                if maybe_body_node is None:
-                    continue
-                body_node = maybe_body_node
-                lineno = self.line_for_node(body_node)
+                continue
             for prev_start in prev_starts:
                 self.add_arc(prev_start.lineno, lineno, prev_start.cause)
             prev_starts = self.node_exits(body_node)
         return prev_starts
 
-    def find_non_missing_node(self, node: ast.AST) -> ast.AST | None:
-        """Search `node` looking for a child that has not been optimized away.
-
-        This might return the node you started with, or it will work recursively
-        to find a child node in self.statements.
-
-        Returns a node, or None if none of the node remains.
-
-        """
-        # This repeats work just done in process_body, but this duplication
-        # means we can avoid a function call in the 99.9999% case of not
-        # optimizing away statements.
-        lineno = self.line_for_node(node)
-        if lineno in self.statements:
-            return node
-
-        missing_fn = cast(
-            Optional[Callable[[ast.AST], Optional[ast.AST]]],
-            getattr(self, f"_missing__{node.__class__.__name__}", None),
-        )
-        if missing_fn is not None:
-            ret_node = missing_fn(node)
-        else:
-            ret_node = None
-        return ret_node
-
-    # Missing nodes: _missing__*
-    #
-    # Entire statements can be optimized away by Python. They will appear in
-    # the AST, but not the bytecode. These functions are called (by
-    # find_non_missing_node) to find a node to use instead of the missing
-    # node. They can return None if the node should truly be gone.
-
-    def _missing__If(self, node: ast.If) -> ast.AST | None:
-        # If the if-node is missing, then one of its children might still be
-        # here, but not both. So return the first of the two that isn't missing.
-        # Use a NodeList to hold the clauses as a single node.
-        non_missing = self.find_non_missing_node(NodeList(node.body))
-        if non_missing:
-            return non_missing
-        if node.orelse:
-            return self.find_non_missing_node(NodeList(node.orelse))
-        return None
-
-    def _missing__NodeList(self, node: NodeList) -> ast.AST | None:
-        # A NodeList might be a mixture of missing and present nodes. Find the
-        # ones that are present.
-        non_missing_children = []
-        for child in node.body:
-            maybe_child = self.find_non_missing_node(child)
-            if maybe_child is not None:
-                non_missing_children.append(maybe_child)
-
-        # Return the simplest representation of the present children.
-        if not non_missing_children:
-            return None
-        if len(non_missing_children) == 1:
-            return non_missing_children[0]
-        return NodeList(non_missing_children)
-
-    def _missing__While(self, node: ast.While) -> ast.AST | None:
-        body_nodes = self.find_non_missing_node(NodeList(node.body))
-        if not body_nodes:
-            return None
-        # Make a synthetic While-true node.
-        new_while = ast.While()  # type: ignore[call-arg]
-        new_while.lineno = body_nodes.lineno  # type: ignore[attr-defined]
-        new_while.test = ast.Name()  # type: ignore[call-arg]
-        new_while.test.lineno = body_nodes.lineno  # type: ignore[attr-defined]
-        new_while.test.id = "True"
-        assert hasattr(body_nodes, "body")
-        new_while.body = body_nodes.body
-        new_while.orelse = []
-        return new_while
-
-    # In the fullness of time, these might be good tests to write:
-    # while EXPR:
-    # while False:
-    # listcomps hidden deep in other expressions
-    # listcomps hidden in lists: x = [[i for i in range(10)]]
-    # nested function definitions
-
     # Exit processing: process_*_exits
     #
     # These functions process the four kinds of jump exits: break, continue,
@@ -1192,41 +1065,34 @@ class AstArcAnalyzer:
             exits |= self.process_body(node.orelse, from_start=from_start)
         return exits
 
-
-
-
-
-
-
-
-
-
-
-                case_start,
-                cause="the pattern on line {lineno} never matched",
-            )
-            exits |= self.process_body(case.body, from_start=from_start)
-            last_start = case_start
-
-        # case is now the last case, check for wildcard match.
-        pattern = case.pattern  # pylint: disable=undefined-loop-variable
-        while isinstance(pattern, ast.MatchOr):
-            pattern = pattern.patterns[-1]
-        while isinstance(pattern, ast.MatchAs) and pattern.pattern is not None:
-            pattern = pattern.pattern
-        had_wildcard = (
-            isinstance(pattern, ast.MatchAs) and pattern.pattern is None and case.guard is None  # pylint: disable=undefined-loop-variable
+    def _handle__Match(self, node: ast.Match) -> set[ArcStart]:
+        start = self.line_for_node(node)
+        last_start = start
+        exits = set()
+        for case in node.cases:
+            case_start = self.line_for_node(case.pattern)
+            self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched")
+            from_start = ArcStart(
+                case_start,
+                cause="the pattern on line {lineno} never matched",
             )
+            exits |= self.process_body(case.body, from_start=from_start)
+            last_start = case_start
+
+        # case is now the last case, check for wildcard match.
+        pattern = case.pattern  # pylint: disable=undefined-loop-variable
+        while isinstance(pattern, ast.MatchOr):
+            pattern = pattern.patterns[-1]
+        while isinstance(pattern, ast.MatchAs) and pattern.pattern is not None:
+            pattern = pattern.pattern
+        had_wildcard = (
+            isinstance(pattern, ast.MatchAs) and pattern.pattern is None and case.guard is None  # pylint: disable=undefined-loop-variable
+        )
 
-
-
-
-
-        return exits
-
-    def _handle__NodeList(self, node: NodeList) -> set[ArcStart]:
-        start = self.line_for_node(node)
-        exits = self.process_body(node.body, from_start=ArcStart(start))
+        if not had_wildcard:
+            exits.add(
+                ArcStart(case_start, cause="the pattern on line {lineno} always matched"),
+            )
        return exits
 
     def _handle__Raise(self, node: ast.Raise) -> set[ArcStart]:
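The rewritten handler keeps the wildcard check after the per-case loop. A standalone sketch (toy source, not coverage's code) of how that walk unwraps `ast.MatchOr`/`ast.MatchAs` chains to decide whether a final `case _:` makes fall-through impossible:

import ast

code = """\
match command:
    case "go" | "run":
        pass
    case _:
        pass
"""
match_node = ast.parse(code).body[0]
assert isinstance(match_node, ast.Match)

# Same walk as the diff: unwrap or-patterns and as-patterns to find the core.
last_case = match_node.cases[-1]
pattern = last_case.pattern
while isinstance(pattern, ast.MatchOr):
    pattern = pattern.patterns[-1]
while isinstance(pattern, ast.MatchAs) and pattern.pattern is not None:
    pattern = pattern.pattern
had_wildcard = (
    isinstance(pattern, ast.MatchAs) and pattern.pattern is None and last_case.guard is None
)
print(had_wildcard)  # True: the bare "case _:" always matches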
@@ -1301,13 +1167,6 @@ class AstArcAnalyzer:
     def _handle__While(self, node: ast.While) -> set[ArcStart]:
         start = to_top = self.line_for_node(node.test)
         constant_test, _ = is_constant_test_expr(node.test)
-        top_is_body0 = False
-        if constant_test:
-            top_is_body0 = True
-        if env.PYBEHAVIOR.keep_constant_test:
-            top_is_body0 = False
-        if top_is_body0:
-            to_top = self.line_for_node(node.body[0])
         self.block_stack.append(LoopBlock(start=to_top))
         from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
         exits = self.process_body(node.body, from_start=from_start)
@@ -1332,22 +1191,20 @@ class AstArcAnalyzer:
             starts = [self.line_for_node(item.context_expr) for item in node.items]
         else:
             starts = [self.line_for_node(node)]
-
-
-
-            self.all_with_starts.add(start)
+        for start in starts:
+            self.current_with_starts.add(start)
+            self.all_with_starts.add(start)
 
         exits = self.process_body(node.body, from_start=ArcStart(starts[-1]))
 
-
-
-
-
-
-
-
-
-        exits = with_exit
+        start = starts[-1]
+        self.current_with_starts.remove(start)
+        with_exit = {ArcStart(start)}
+        if exits:
+            for xit in exits:
+                self.add_arc(xit.lineno, start)
+                self.with_exits.add((xit.lineno, start))
+        exits = with_exit
 
         return exits
 
coverage/patch.py
CHANGED
@@ -32,20 +32,21 @@ def apply_patches(
     """Apply invasive patches requested by `[run] patch=`."""
     debug = debug if debug.should("patch") else DevNullDebug()
     for patch in sorted(set(config.patch)):
-
-
+        match patch:
+            case "_exit":
+                _patch__exit(cov, debug)
 
-
-
+            case "execv":
+                _patch_execv(cov, config, debug)
 
-
-
+            case "fork":
+                _patch_fork(debug)
 
-
-
+            case "subprocess":
+                _patch_subprocess(config, debug, make_pth_file)
 
-
-
+            case _:
+                raise ConfigError(f"Unknown patch {patch!r}")
 
 
 def _patch__exit(cov: Coverage, debug: TDebugCtl) -> None:
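For orientation, this loop dispatches on coverage's `[run] patch=` setting; in a .coveragerc it looks like the snippet below (the particular selection of patches is illustrative):

[run]
patch =
    _exit
    subprocess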
coverage/phystokens.py
CHANGED
@@ -95,7 +95,7 @@ def find_soft_key_lines(source: str) -> set[TLineNo]:
     soft_key_lines: set[TLineNo] = set()
 
     for node in ast.walk(ast.parse(source)):
-        if
+        if isinstance(node, ast.Match):
             soft_key_lines.add(node.lineno)
             for case in node.cases:
                 soft_key_lines.add(case.pattern.lineno)
@@ -128,10 +128,7 @@ def source_token_lines(source: str) -> TSourceTokenLines:
     source = source.expandtabs(8).replace("\r\n", "\n")
     tokgen = generate_tokens(source)
 
-
-        soft_key_lines = find_soft_key_lines(source)
-    else:
-        soft_key_lines = set()
+    soft_key_lines = find_soft_key_lines(source)
 
     for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen):
         mark_start = True
@@ -157,7 +154,7 @@ def source_token_lines(source: str) -> TSourceTokenLines:
         if keyword.iskeyword(ttext):
             # Hard keywords are always keywords.
             tok_class = "key"
-        elif
+        elif keyword.issoftkeyword(ttext):
             # Soft keywords appear at the start of their line.
             if len(line) == 0:
                 is_start_of_line = True
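The deleted guards (their text did not survive in this view) appear to have gated soft-keyword handling behind a conditional; the simplified code calls the stdlib predicate unconditionally. A small standalone sketch of that predicate, not from coverage:

import keyword

print(keyword.iskeyword("match"))      # False: "match" is not a hard keyword
print(keyword.issoftkeyword("match"))  # True: a soft keyword since Python 3.10
print(keyword.issoftkeyword("for"))    # False: "for" is a hard keyword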
coverage/python.py
CHANGED
@@ -61,7 +61,7 @@ def get_python_source(filename: str) -> str:
             break
     else:
         # Couldn't find source.
-        raise NoSource(f"No source for code: '{filename}'.")
+        raise NoSource(f"No source for code: '{filename}'.", slug="no-source")
 
     # Replace \f because of http://bugs.python.org/issue19035
     source_bytes = source_bytes.replace(b"\f", b" ")
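Both here and in coverage/sqldata.py below, exception constructors gain a `slug=` keyword; the real signature lives in coverage/exceptions.py, which this diff touches but does not show. A hypothetical illustration only of how such a keyword can work:

# Hypothetical sketch, not coverage's actual implementation: a slug attaches a
# stable identifier to an error kind (for example, to key a docs anchor).
class NoSource(Exception):
    def __init__(self, message: str, slug: str = "") -> None:
        super().__init__(message)
        self.slug = slug  # assumed: a stable key identifying the error kind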
coverage/results.py
CHANGED
@@ -7,7 +7,7 @@ from __future__ import annotations
 
 import collections
 import dataclasses
-from collections.abc import
+from collections.abc import Iterable
 from typing import TYPE_CHECKING
 
 from coverage.exceptions import ConfigError
@@ -113,45 +113,6 @@ class Analysis:
             n_missing_branches=n_missing_branches,
         )
 
-    def narrow(self, lines: Container[TLineNo]) -> Analysis:
-        """Create a narrowed Analysis.
-
-        The current analysis is copied to make a new one that only considers
-        the lines in `lines`.
-        """
-
-        statements = {lno for lno in self.statements if lno in lines}
-        excluded = {lno for lno in self.excluded if lno in lines}
-        executed = {lno for lno in self.executed if lno in lines}
-
-        if self.has_arcs:
-            arc_possibilities_set = {
-                (a, b) for a, b in self.arc_possibilities_set if a in lines or b in lines
-            }
-            arcs_executed_set = {
-                (a, b) for a, b in self.arcs_executed_set if a in lines or b in lines
-            }
-            exit_counts = {lno: num for lno, num in self.exit_counts.items() if lno in lines}
-            no_branch = {lno for lno in self.no_branch if lno in lines}
-        else:
-            arc_possibilities_set = set()
-            arcs_executed_set = set()
-            exit_counts = {}
-            no_branch = set()
-
-        return Analysis(
-            precision=self.precision,
-            filename=self.filename,
-            has_arcs=self.has_arcs,
-            statements=statements,
-            excluded=excluded,
-            executed=executed,
-            arc_possibilities_set=arc_possibilities_set,
-            arcs_executed_set=arcs_executed_set,
-            exit_counts=exit_counts,
-            no_branch=no_branch,
-        )
-
     def missing_formatted(self, branches: bool = False) -> str:
         """The missing line numbers, formatted nicely.
 
@@ -236,6 +197,104 @@ class Analysis:
         return stats
 
 
+TRegionLines = frozenset[TLineNo]
+
+
+class AnalysisNarrower:
+    """
+    For reducing an `Analysis` to a subset of its lines.
+
+    Originally this was a simpler method on Analysis, but that led to quadratic
+    behavior. This class does the bulk of the work up-front to provide the
+    same results in linear time.
+
+    Create an AnalysisNarrower from an Analysis, bulk-add region lines to it
+    with `add_regions`, then individually request new narrowed Analysis objects
+    for each region with `narrow`. Doing most of the work in limited calls to
+    `add_regions` lets us avoid poor performance.
+    """
+
+    # In this class, regions are represented by a frozenset of their lines.
+
+    def __init__(self, analysis: Analysis) -> None:
+        self.analysis = analysis
+        self.region2arc_possibilities: dict[TRegionLines, set[TArc]] = collections.defaultdict(set)
+        self.region2arc_executed: dict[TRegionLines, set[TArc]] = collections.defaultdict(set)
+        self.region2exit_counts: dict[TRegionLines, dict[TLineNo, int]] = collections.defaultdict(
+            dict
+        )
+
+    def add_regions(self, liness: Iterable[set[TLineNo]]) -> None:
+        """
+        Pre-process a number of sets of line numbers. Later calls to `narrow`
+        with one of these sets will provide a narrowed Analysis.
+        """
+        if self.analysis.has_arcs:
+            line2region: dict[TLineNo, TRegionLines] = {}
+
+            for lines in liness:
+                fzlines = frozenset(lines)
+                for line in lines:
+                    line2region[line] = fzlines
+
+            def collect_arcs(
+                arc_set: set[TArc],
+                region2arcs: dict[TRegionLines, set[TArc]],
+            ) -> None:
+                for a, b in arc_set:
+                    if r := line2region.get(a):
+                        region2arcs[r].add((a, b))
+                    if r := line2region.get(b):
+                        region2arcs[r].add((a, b))
+
+            collect_arcs(self.analysis.arc_possibilities_set, self.region2arc_possibilities)
+            collect_arcs(self.analysis.arcs_executed_set, self.region2arc_executed)
+
+            for lno, num in self.analysis.exit_counts.items():
+                if r := line2region.get(lno):
+                    self.region2exit_counts[r][lno] = num
+
+    def narrow(self, lines: set[TLineNo]) -> Analysis:
+        """Create a narrowed Analysis.
+
+        The current analysis is copied to make a new one that only considers
+        the lines in `lines`.
+        """
+
+        # Technically, the set intersections in this method are still O(N**2)
+        # since this method is called N times, but they're very fast and moving
+        # them to `add_regions` won't avoid the quadratic time.
+
+        statements = self.analysis.statements & lines
+        excluded = self.analysis.excluded & lines
+        executed = self.analysis.executed & lines
+
+        if self.analysis.has_arcs:
+            fzlines = frozenset(lines)
+            arc_possibilities_set = self.region2arc_possibilities[fzlines]
+            arcs_executed_set = self.region2arc_executed[fzlines]
+            exit_counts = self.region2exit_counts[fzlines]
+            no_branch = self.analysis.no_branch & lines
+        else:
+            arc_possibilities_set = set()
+            arcs_executed_set = set()
+            exit_counts = {}
+            no_branch = set()
+
+        return Analysis(
+            precision=self.analysis.precision,
+            filename=self.analysis.filename,
+            has_arcs=self.analysis.has_arcs,
+            statements=statements,
+            excluded=excluded,
+            executed=executed,
+            arc_possibilities_set=arc_possibilities_set,
+            arcs_executed_set=arcs_executed_set,
+            exit_counts=exit_counts,
+            no_branch=no_branch,
+        )
+
+
 @dataclasses.dataclass
 class Numbers:
     """The numerical results of measuring coverage.
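The new class replaces per-region rescans with one up-front pass. A self-contained sketch (toy data, not coverage's objects) of the core trick: index every line by its region once, then bucket each arc in a single sweep instead of filtering all arcs once per region:

import collections

# Toy regions (sets of line numbers) and arcs (pairs of line numbers).
regions = [frozenset({10, 11, 12}), frozenset({20, 21})]
arcs = {(10, 11), (11, 20), (21, -1), (5, 6)}

# One pass: map each line to the region that owns it.
line2region = {line: region for region in regions for line in region}

# One pass: bucket each arc into the region(s) touching either endpoint.
region2arcs = collections.defaultdict(set)
for a, b in arcs:
    if r := line2region.get(a):
        region2arcs[r].add((a, b))
    if r := line2region.get(b):
        region2arcs[r].add((a, b))

for region in regions:
    print(sorted(region), "->", sorted(region2arcs[region]))

Each later narrow(lines) call is then a cheap dictionary lookup plus small set intersections, which is what turns the old quadratic behavior into roughly linear work.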
coverage/sqldata.py
CHANGED
@@ -706,9 +706,13 @@ class CoverageData:
             )
         )
         if self._has_lines and other_data._has_arcs:
-            raise DataError(
+            raise DataError(
+                "Can't combine branch coverage data with statement data", slug="cant-combine"
+            )
         if self._has_arcs and other_data._has_lines:
-            raise DataError(
+            raise DataError(
+                "Can't combine statement coverage data with branch data", slug="cant-combine"
+            )
 
         map_path = map_path or (lambda p: p)
 
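A quick illustration of when these errors fire, sketched with coverage's public data API (file names are hypothetical):

from coverage import CoverageData

a = CoverageData("stmt.dat")
a.add_lines({"/src/mod.py": [1, 2, 3]})        # statement (line) data
b = CoverageData("branch.dat")
b.add_arcs({"/src/mod.py": [(1, 2), (2, 3)]})  # branch (arc) data
a.update(b)  # raises DataError: mixing statement and branch data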
coverage/tracer.cpython-314-darwin.so
CHANGED

Binary file
coverage/types.py
CHANGED
@@ -11,7 +11,7 @@ import os
 import pathlib
 from collections.abc import Iterable, Mapping
 from types import FrameType, ModuleType
-from typing import TYPE_CHECKING, Any, Callable, Optional, Protocol
+from typing import TYPE_CHECKING, Any, Callable, Optional, Protocol
 
 if TYPE_CHECKING:
     from coverage.plugin import FileTracer
@@ -22,14 +22,10 @@ AnyCallable = Callable[..., Any]
 ## File paths
 
 # For arguments that are file paths:
-
-    FilePath = Union[str, os.PathLike[str]]
-else:
-    # PathLike < python3.9 doesn't support subscription
-    FilePath = Union[str, os.PathLike]
+FilePath = str | os.PathLike[str]
 # For testing FilePath arguments
 FilePathClasses = [str, pathlib.Path]
-FilePathType =
+FilePathType = type[str] | type[pathlib.Path]
 
 ## Python tracing
 
@@ -77,14 +73,14 @@ class TFileDisposition(Protocol):
 # - If measuring arcs in the C tracer, the values are sets of packed arcs (two
 # line numbers combined into one integer).
 
-TTraceFileData =
+TTraceFileData = set[TLineNo] | set[TArc] | set[int]
 
 TTraceData = dict[str, TTraceFileData]
 
 # Functions passed into collectors.
 TShouldTraceFn = Callable[[str, FrameType], TFileDisposition]
 TCheckIncludeFn = Callable[[str, FrameType], bool]
-TShouldStartContextFn = Callable[[FrameType],
+TShouldStartContextFn = Callable[[FrameType], str | None]
 
 
 class Tracer(Protocol):
@@ -127,8 +123,8 @@ TCovKwargs = Any
 ## Configuration
 
 # One value read from a config file.
-TConfigValueIn = Optional[
-TConfigValueOut = Optional[
+TConfigValueIn = Optional[bool | int | float | str | Iterable[str] | Mapping[str, Iterable[str]]]
+TConfigValueOut = Optional[bool | int | float | str | list[str] | dict[str, list[str]]]
 # An entire config section, mapping option names to values.
 TConfigSectionIn = Mapping[str, TConfigValueIn]
 TConfigSectionOut = Mapping[str, TConfigValueOut]
@@ -169,7 +165,7 @@ class TPluginConfig(Protocol):
 
 ## Parsing
 
-TMorf =
+TMorf = ModuleType | str
 
 TSourceTokenLines = Iterable[list[tuple[str, str]]]
 
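An aside on the pattern across these hunks: the Union[...] forms and version-conditional definitions become plain PEP 604 unions, which are evaluated at import time in assignments like these, so the module needs Python 3.10+ at runtime. That is consistent with the match statements introduced elsewhere in this release. A tiny standalone check, not from the diff:

import os
import pathlib

FilePath = str | os.PathLike[str]              # TypeError on Python 3.9 and earlier
FilePathType = type[str] | type[pathlib.Path]  # likewise needs 3.10+
print(FilePath)      # str | os.PathLike[str]
print(FilePathType)  # type[str] | type[pathlib.Path]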