coverage 7.13.0__cp312-cp312-win_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- a1_coverage.pth +1 -0
- coverage/__init__.py +38 -0
- coverage/__main__.py +12 -0
- coverage/annotate.py +114 -0
- coverage/bytecode.py +196 -0
- coverage/cmdline.py +1198 -0
- coverage/collector.py +486 -0
- coverage/config.py +732 -0
- coverage/context.py +74 -0
- coverage/control.py +1513 -0
- coverage/core.py +139 -0
- coverage/data.py +227 -0
- coverage/debug.py +669 -0
- coverage/disposition.py +59 -0
- coverage/env.py +135 -0
- coverage/exceptions.py +85 -0
- coverage/execfile.py +329 -0
- coverage/files.py +553 -0
- coverage/html.py +860 -0
- coverage/htmlfiles/coverage_html.js +735 -0
- coverage/htmlfiles/favicon_32.png +0 -0
- coverage/htmlfiles/index.html +199 -0
- coverage/htmlfiles/keybd_closed.png +0 -0
- coverage/htmlfiles/pyfile.html +149 -0
- coverage/htmlfiles/style.css +389 -0
- coverage/htmlfiles/style.scss +844 -0
- coverage/inorout.py +614 -0
- coverage/jsonreport.py +192 -0
- coverage/lcovreport.py +219 -0
- coverage/misc.py +373 -0
- coverage/multiproc.py +120 -0
- coverage/numbits.py +146 -0
- coverage/parser.py +1215 -0
- coverage/patch.py +118 -0
- coverage/phystokens.py +197 -0
- coverage/plugin.py +617 -0
- coverage/plugin_support.py +299 -0
- coverage/pth_file.py +16 -0
- coverage/py.typed +1 -0
- coverage/python.py +272 -0
- coverage/pytracer.py +369 -0
- coverage/regions.py +127 -0
- coverage/report.py +298 -0
- coverage/report_core.py +117 -0
- coverage/results.py +502 -0
- coverage/sqldata.py +1153 -0
- coverage/sqlitedb.py +239 -0
- coverage/sysmon.py +517 -0
- coverage/templite.py +318 -0
- coverage/tomlconfig.py +212 -0
- coverage/tracer.cp312-win_arm64.pyd +0 -0
- coverage/tracer.pyi +43 -0
- coverage/types.py +206 -0
- coverage/version.py +35 -0
- coverage/xmlreport.py +264 -0
- coverage-7.13.0.dist-info/METADATA +200 -0
- coverage-7.13.0.dist-info/RECORD +61 -0
- coverage-7.13.0.dist-info/WHEEL +5 -0
- coverage-7.13.0.dist-info/entry_points.txt +4 -0
- coverage-7.13.0.dist-info/licenses/LICENSE.txt +177 -0
- coverage-7.13.0.dist-info/top_level.txt +1 -0
coverage/jsonreport.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
|
2
|
+
# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt
|
|
3
|
+
|
|
4
|
+
"""Json reporting for coverage.py"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import datetime
|
|
9
|
+
import json
|
|
10
|
+
import sys
|
|
11
|
+
from collections.abc import Iterable
|
|
12
|
+
from typing import IO, TYPE_CHECKING, Any
|
|
13
|
+
|
|
14
|
+
from coverage import __version__
|
|
15
|
+
from coverage.report_core import get_analysis_to_report
|
|
16
|
+
from coverage.results import Analysis, AnalysisNarrower, Numbers
|
|
17
|
+
from coverage.types import TLineNo, TMorf
|
|
18
|
+
|
|
19
|
+
if TYPE_CHECKING:
|
|
20
|
+
from coverage import Coverage
|
|
21
|
+
from coverage.data import CoverageData
|
|
22
|
+
from coverage.plugin import FileReporter
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
# A type for data that can be JSON-serialized.
|
|
26
|
+
JsonObj = dict[str, Any]
|
|
27
|
+
|
|
28
|
+
# "Version 1" had no format number at all.
|
|
29
|
+
# 2: add the meta.format field.
|
|
30
|
+
# 3: add region information (functions, classes)
|
|
31
|
+
FORMAT_VERSION = 3
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class JsonReporter:
    """A reporter for writing JSON coverage results."""

    # Label used by the reporting machinery when announcing this report.
    report_type = "JSON report"

    def __init__(self, coverage: Coverage) -> None:
        self.coverage = coverage
        self.config = self.coverage.config
        # Running totals across every reported file, accumulated in
        # report_one_file() and summarized at the end of report().
        self.total = Numbers(self.config.precision)
        # The whole JSON document, built up incrementally by report().
        self.report_data: JsonObj = {}

    def make_summary(self, nums: Numbers) -> JsonObj:
        """Create a dict summarizing `nums`."""
        return {
            "covered_lines": nums.n_executed,
            "num_statements": nums.n_statements,
            "percent_covered": nums.pc_covered,
            "percent_covered_display": nums.pc_covered_str,
            "missing_lines": nums.n_missing,
            "excluded_lines": nums.n_excluded,
            "percent_statements_covered": nums.pc_statements,
            "percent_statements_covered_display": nums.pc_statements_str,
        }

    def make_branch_summary(self, nums: Numbers) -> JsonObj:
        """Create a dict summarizing the branch info in `nums`."""
        return {
            "num_branches": nums.n_branches,
            "num_partial_branches": nums.n_partial_branches,
            "covered_branches": nums.n_executed_branches,
            "missing_branches": nums.n_missing_branches,
            "percent_branches_covered": nums.pc_branches,
            "percent_branches_covered_display": nums.pc_branches_str,
        }

    def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
        """Generate a json report for `morfs`.

        `morfs` is a list of modules or file names.

        `outfile` is a file object to write the json to.

        Returns the total percent covered (0 when there are no statements).
        """
        outfile = outfile or sys.stdout
        coverage_data = self.coverage.get_data()
        coverage_data.set_query_contexts(self.config.report_contexts)
        self.report_data["meta"] = {
            "format": FORMAT_VERSION,
            "version": __version__,
            "timestamp": datetime.datetime.now().isoformat(),
            "branch_coverage": coverage_data.has_arcs(),
            "show_contexts": self.config.json_show_contexts,
        }

        # One entry per measured file, keyed by its relative filename.
        # report_one_file() also folds each file's numbers into self.total.
        measured_files = {}
        for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs):
            measured_files[file_reporter.relative_filename()] = self.report_one_file(
                coverage_data,
                analysis,
                file_reporter,
            )

        self.report_data["files"] = measured_files
        self.report_data["totals"] = self.make_summary(self.total)

        if coverage_data.has_arcs():
            self.report_data["totals"].update(self.make_branch_summary(self.total))

        json.dump(
            self.report_data,
            outfile,
            indent=(4 if self.config.json_pretty_print else None),
        )

        # 0 (an int) when nothing was measured, else the percent covered.
        return self.total.n_statements and self.total.pc_covered

    def report_one_file(
        self, coverage_data: CoverageData, analysis: Analysis, file_reporter: FileReporter
    ) -> JsonObj:
        """Extract the relevant report data for a single file."""
        nums = analysis.numbers
        # Side effect: accumulate this file's numbers into the grand total.
        self.total += nums
        summary = self.make_summary(nums)
        reported_file: JsonObj = {
            "executed_lines": sorted(analysis.executed),
            "summary": summary,
            "missing_lines": sorted(analysis.missing),
            "excluded_lines": sorted(analysis.excluded),
        }
        if self.config.json_show_contexts:
            reported_file["contexts"] = coverage_data.contexts_by_lineno(analysis.filename)
        if coverage_data.has_arcs():
            # Note: `summary` is already referenced from reported_file, so
            # updating it in place also updates the reported data.
            summary.update(self.make_branch_summary(nums))
            reported_file["executed_branches"] = list(
                _convert_branch_arcs(analysis.executed_branch_arcs()),
            )
            reported_file["missing_branches"] = list(
                _convert_branch_arcs(analysis.missing_branch_arcs()),
            )

        num_lines = len(file_reporter.source().splitlines())
        regions = file_reporter.code_regions()
        # For each region kind (e.g. function/class), report each region plus
        # a catch-all "" entry for the lines outside every region of that kind.
        for noun, plural in file_reporter.code_region_kinds():
            outside_lines = set(range(1, num_lines + 1))
            for region in regions:
                if region.kind != noun:
                    continue
                outside_lines -= region.lines

            narrower = AnalysisNarrower(analysis)
            narrower.add_regions(r.lines for r in regions if r.kind == noun)
            narrower.add_regions([outside_lines])

            reported_file[plural] = region_data = {}
            for region in regions:
                if region.kind != noun:
                    continue
                region_data[region.name] = self.make_region_data(
                    coverage_data,
                    narrower.narrow(region.lines),
                )

            # The empty-name entry covers the lines in no region of this kind.
            region_data[""] = self.make_region_data(
                coverage_data,
                narrower.narrow(outside_lines),
            )
        return reported_file

    def make_region_data(self, coverage_data: CoverageData, narrowed_analysis: Analysis) -> JsonObj:
        """Create the data object for one region of a file."""
        narrowed_nums = narrowed_analysis.numbers
        narrowed_summary = self.make_summary(narrowed_nums)
        this_region = {
            "executed_lines": sorted(narrowed_analysis.executed),
            "summary": narrowed_summary,
            "missing_lines": sorted(narrowed_analysis.missing),
            "excluded_lines": sorted(narrowed_analysis.excluded),
        }
        if self.config.json_show_contexts:
            contexts = coverage_data.contexts_by_lineno(narrowed_analysis.filename)
            this_region["contexts"] = contexts
        if coverage_data.has_arcs():
            # Mutating narrowed_summary in place also updates this_region.
            narrowed_summary.update(self.make_branch_summary(narrowed_nums))
            this_region["executed_branches"] = list(
                _convert_branch_arcs(narrowed_analysis.executed_branch_arcs()),
            )
            this_region["missing_branches"] = list(
                _convert_branch_arcs(narrowed_analysis.missing_branch_arcs()),
            )
        return this_region
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def _convert_branch_arcs(
|
|
187
|
+
branch_arcs: dict[TLineNo, list[TLineNo]],
|
|
188
|
+
) -> Iterable[tuple[TLineNo, TLineNo]]:
|
|
189
|
+
"""Convert branch arcs to a list of two-element tuples."""
|
|
190
|
+
for source, targets in branch_arcs.items():
|
|
191
|
+
for target in targets:
|
|
192
|
+
yield source, target
|
coverage/lcovreport.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
1
|
+
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
|
2
|
+
# For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt
|
|
3
|
+
|
|
4
|
+
"""LCOV reporting for coverage.py."""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import base64
|
|
9
|
+
import hashlib
|
|
10
|
+
import sys
|
|
11
|
+
from collections.abc import Iterable
|
|
12
|
+
from typing import IO, TYPE_CHECKING
|
|
13
|
+
|
|
14
|
+
from coverage.plugin import FileReporter
|
|
15
|
+
from coverage.report_core import get_analysis_to_report
|
|
16
|
+
from coverage.results import Analysis, AnalysisNarrower, Numbers
|
|
17
|
+
from coverage.types import TMorf
|
|
18
|
+
|
|
19
|
+
if TYPE_CHECKING:
|
|
20
|
+
from coverage import Coverage
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def line_hash(line: str) -> str:
    """Produce a hash of a source line for use in the LCOV file."""
    # LCOV optionally fingerprints each source line with an MD5 hash.
    # This is not a security use; `usedforsecurity=False` keeps security
    # scanners from raising false alarms about MD5 here.
    # LCOV specifies an unusual encoding for the hash: base64, with the
    # trailing "=" padding characters removed.
    digest = hashlib.md5(line.encode("utf-8"), usedforsecurity=False).digest()
    encoded = base64.b64encode(digest).decode("ascii")
    return encoded.rstrip("=")
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def lcov_lines(
    analysis: Analysis,
    lines: list[int],
    source_lines: list[str],
    outfile: IO[str],
) -> None:
    """Emit line coverage records for an analyzed file."""
    missing = analysis.missing
    for lineno in lines:
        # With checksums enabled (source_lines non-empty), each DA record
        # carries a hash of the corresponding source line.
        suffix = ""
        if source_lines:
            suffix = "," + line_hash(source_lines[lineno - 1])
        # Q: can we get info about the number of times a statement is
        # executed? If so, that should be recorded here.
        executed = 0 if lineno in missing else 1
        outfile.write(f"DA:{lineno},{executed}{suffix}\n")

    nums = analysis.numbers
    if nums.n_statements > 0:
        outfile.write(f"LF:{nums.n_statements}\n")
        outfile.write(f"LH:{nums.n_executed}\n")
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def lcov_functions(
    fr: FileReporter,
    file_analysis: Analysis,
    outfile: IO[str],
) -> None:
    """Emit function coverage records for an analyzed file."""
    # lcov 2.2 introduces a new format for function coverage records.
    # We keep emitting the old format because we can't know which version
    # of the lcov tools will read this report.

    # The "not region.lines" filter avoids a crash due to a bug in PyPy 3.8
    # where, when collecting data in --branch mode, top-level functions can
    # have an empty lines array; we simply skip emitting records for those.

    # suppressions because of https://github.com/pylint-dev/pylint/issues/9923
    functions = []
    for region in fr.code_regions():
        if region.kind != "function" or not region.lines:
            continue
        lo = min(region.start, min(region.lines))  # pylint: disable=nested-min-max
        hi = max(region.start, max(region.lines))  # pylint: disable=nested-min-max
        functions.append((lo, hi, region))

    if not functions:
        return

    narrower = AnalysisNarrower(file_analysis)
    narrower.add_regions(region.lines for _, _, region in functions)

    functions.sort()
    hit_count = 0
    for first_line, last_line, region in functions:
        # A function counts as having been executed if any of it has been
        # executed.
        narrowed = narrower.narrow(region.lines)
        was_hit = 1 if narrowed.numbers.n_executed > 0 else 0
        hit_count += was_hit

        outfile.write(f"FN:{first_line},{last_line},{region.name}\n")
        outfile.write(f"FNDA:{was_hit},{region.name}\n")

    outfile.write(f"FNF:{len(functions)}\n")
    outfile.write(f"FNH:{hit_count}\n")
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def lcov_arcs(
    fr: FileReporter,
    analysis: Analysis,
    lines: list[int],
    outfile: IO[str],
) -> None:
    """Emit branch coverage records for an analyzed file."""
    branch_stats = analysis.branch_stats()
    executed_arcs = analysis.executed_branch_arcs()
    missing_arcs = analysis.missing_branch_arcs()

    for line in lines:
        if line not in branch_stats:
            continue

        # This is only one of several possible ways to map our sets of
        # executed and not-executed arcs to BRDA codes. It seems to produce
        # reasonable results when fed through genhtml.
        _, taken = branch_stats[line]

        if taken == 0:
            # When _none_ of the out arcs from 'line' were executed,
            # it can mean the line always raised an exception.
            assert len(executed_arcs[line]) == 0
            destinations = [(dst, "-") for dst in missing_arcs[line]]
        else:
            # Q: can we get counts of the number of times each arc was
            # executed? branch_stats has "total" and "taken" counts per
            # branch, but not "taken" broken down by destination.
            destinations = [(dst, "1") for dst in executed_arcs[line]]
            destinations += [(dst, "0") for dst in missing_arcs[line]]

        # Sort exit arcs (negative destinations) after normal arcs. Exit
        # arcs typically come from an if statement at the end of a function
        # with no else clause; presenting them second reads naturally, after
        # the alternative that enters the if clause.
        destinations = sorted(destinations, key=lambda d: (d[0] < 0, d))

        for dst, hit in destinations:
            branch = fr.arc_description(line, dst)
            outfile.write(f"BRDA:{line},0,{branch},{hit}\n")

    # Summary of the branch coverage: BRH is the total minus the misses,
    # i.e. the sum of the "taken" counts.
    brf = sum(total for total, _ in branch_stats.values())
    brh = brf - sum(total - taken for total, taken in branch_stats.values())
    if brf > 0:
        outfile.write(f"BRF:{brf}\n")
        outfile.write(f"BRH:{brh}\n")
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
class LcovReporter:
    """A reporter for writing LCOV coverage reports."""

    # Label used by the reporting machinery when announcing this report.
    report_type = "LCOV report"

    def __init__(self, coverage: Coverage) -> None:
        self.coverage = coverage
        self.config = coverage.config
        # Running totals across every reported file.
        self.total = Numbers(self.coverage.config.precision)

    def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
        """Renders the full lcov report.

        `morfs` is a list of modules or filenames

        outfile is the file object to write the file into.
        """

        self.coverage.get_data()
        outfile = outfile or sys.stdout

        # Ensure file records are sorted by the _relative_ filename,
        # not the full path.
        to_report = sorted(
            (fr.relative_filename(), fr, analysis)
            for fr, analysis in get_analysis_to_report(self.coverage, morfs)
        )

        for fname, fr, analysis in to_report:
            self.total += analysis.numbers
            self.lcov_file(fname, fr, analysis, outfile)

        # 0 (an int) when nothing was measured, else the percent covered.
        return self.total.n_statements and self.total.pc_covered

    def lcov_file(
        self,
        rel_fname: str,
        fr: FileReporter,
        analysis: Analysis,
        outfile: IO[str],
    ) -> None:
        """Produces the lcov data for a single file.

        This currently supports both line and branch coverage,
        however function coverage is not supported.
        """

        if analysis.numbers.n_statements == 0 and self.config.skip_empty:
            return

        outfile.write(f"SF:{rel_fname}\n")

        lines = sorted(analysis.statements)
        if self.config.lcov_line_checksums:
            source_lines = fr.source().splitlines()
        else:
            source_lines = []

        lcov_lines(analysis, lines, source_lines, outfile)
        lcov_functions(fr, analysis, outfile)
        if analysis.has_arcs:
            lcov_arcs(fr, analysis, lines, outfile)

        outfile.write("end_of_record\n")
|