coverage 7.13.1__cp313-cp313-musllinux_1_2_riscv64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. a1_coverage.pth +1 -0
  2. coverage/__init__.py +38 -0
  3. coverage/__main__.py +12 -0
  4. coverage/annotate.py +113 -0
  5. coverage/bytecode.py +197 -0
  6. coverage/cmdline.py +1220 -0
  7. coverage/collector.py +487 -0
  8. coverage/config.py +732 -0
  9. coverage/context.py +74 -0
  10. coverage/control.py +1514 -0
  11. coverage/core.py +139 -0
  12. coverage/data.py +251 -0
  13. coverage/debug.py +669 -0
  14. coverage/disposition.py +59 -0
  15. coverage/env.py +135 -0
  16. coverage/exceptions.py +85 -0
  17. coverage/execfile.py +329 -0
  18. coverage/files.py +553 -0
  19. coverage/html.py +860 -0
  20. coverage/htmlfiles/coverage_html.js +735 -0
  21. coverage/htmlfiles/favicon_32.png +0 -0
  22. coverage/htmlfiles/index.html +199 -0
  23. coverage/htmlfiles/keybd_closed.png +0 -0
  24. coverage/htmlfiles/pyfile.html +149 -0
  25. coverage/htmlfiles/style.css +389 -0
  26. coverage/htmlfiles/style.scss +844 -0
  27. coverage/inorout.py +590 -0
  28. coverage/jsonreport.py +200 -0
  29. coverage/lcovreport.py +218 -0
  30. coverage/misc.py +381 -0
  31. coverage/multiproc.py +120 -0
  32. coverage/numbits.py +146 -0
  33. coverage/parser.py +1215 -0
  34. coverage/patch.py +118 -0
  35. coverage/phystokens.py +197 -0
  36. coverage/plugin.py +617 -0
  37. coverage/plugin_support.py +299 -0
  38. coverage/pth_file.py +16 -0
  39. coverage/py.typed +1 -0
  40. coverage/python.py +272 -0
  41. coverage/pytracer.py +370 -0
  42. coverage/regions.py +127 -0
  43. coverage/report.py +298 -0
  44. coverage/report_core.py +117 -0
  45. coverage/results.py +502 -0
  46. coverage/sqldata.py +1212 -0
  47. coverage/sqlitedb.py +226 -0
  48. coverage/sysmon.py +509 -0
  49. coverage/templite.py +319 -0
  50. coverage/tomlconfig.py +212 -0
  51. coverage/tracer.cpython-313-riscv64-linux-musl.so +0 -0
  52. coverage/tracer.pyi +43 -0
  53. coverage/types.py +214 -0
  54. coverage/version.py +35 -0
  55. coverage/xmlreport.py +263 -0
  56. coverage-7.13.1.dist-info/METADATA +200 -0
  57. coverage-7.13.1.dist-info/RECORD +61 -0
  58. coverage-7.13.1.dist-info/WHEEL +5 -0
  59. coverage-7.13.1.dist-info/entry_points.txt +4 -0
  60. coverage-7.13.1.dist-info/licenses/LICENSE.txt +177 -0
  61. coverage-7.13.1.dist-info/top_level.txt +1 -0
coverage/parser.py ADDED
@@ -0,0 +1,1215 @@
1
+ # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
2
+ # For details: https://github.com/coveragepy/coveragepy/blob/main/NOTICE.txt
3
+
4
+ """Code parsing for coverage.py."""
5
+
6
+ from __future__ import annotations
7
+
8
+ import ast
9
+ import collections
10
+ import functools
11
+ import os
12
+ import re
13
+ import token
14
+ import tokenize
15
+ from collections.abc import Callable, Iterable, Sequence
16
+ from dataclasses import dataclass
17
+ from types import CodeType
18
+ from typing import Optional, Protocol, cast
19
+
20
+ from coverage import env
21
+ from coverage.bytecode import code_objects
22
+ from coverage.debug import short_stack
23
+ from coverage.exceptions import NoSource, NotPython
24
+ from coverage.misc import isolate_module, nice_pair
25
+ from coverage.phystokens import generate_tokens
26
+ from coverage.types import TArc, TLineNo
27
+
28
+ os = isolate_module(os)
29
+
30
+
31
+ class PythonParser:
32
+ """Parse code to find executable lines, excluded lines, etc.
33
+
34
+ This information is all based on static analysis: no code execution is
35
+ involved.
36
+
37
+ """
38
+
39
+ def __init__(
40
+ self,
41
+ text: str | None = None,
42
+ filename: str | None = None,
43
+ exclude: str | None = None,
44
+ ) -> None:
45
+ """
46
+ Source can be provided as `text`, the text itself, or `filename`, from
47
+ which the text will be read. Excluded lines are those that match
48
+ `exclude`, a regex string.
49
+
50
+ """
51
+ assert text or filename, "PythonParser needs either text or filename"
52
+ self.filename = filename or "<code>"
53
+ if text is not None:
54
+ self.text: str = text
55
+ else:
56
+ from coverage.python import get_python_source
57
+
58
+ try:
59
+ self.text = get_python_source(self.filename)
60
+ except OSError as err:
61
+ raise NoSource(f"No source for code: '{self.filename}': {err}") from err
62
+
63
+ self.exclude = exclude
64
+
65
+ # The parsed AST of the text.
66
+ self._ast_root: ast.AST | None = None
67
+
68
+ # The normalized line numbers of the statements in the code. Exclusions
69
+ # are taken into account, and statements are adjusted to their first
70
+ # lines.
71
+ self.statements: set[TLineNo] = set()
72
+
73
+ # The normalized line numbers of the excluded lines in the code,
74
+ # adjusted to their first lines.
75
+ self.excluded: set[TLineNo] = set()
76
+
77
+ # The raw_* attributes are only used in this class, and in
78
+ # lab/parser.py to show how this class is working.
79
+
80
+ # The line numbers that start statements, as reported by the line
81
+ # number table in the bytecode.
82
+ self.raw_statements: set[TLineNo] = set()
83
+
84
+ # The raw line numbers of excluded lines of code, as marked by pragmas.
85
+ self.raw_excluded: set[TLineNo] = set()
86
+
87
+ # The line numbers of docstring lines.
88
+ self.raw_docstrings: set[TLineNo] = set()
89
+
90
+ # Internal detail, used by lab/parser.py.
91
+ self.show_tokens = False
92
+
93
+ # A dict mapping line numbers to lexical statement starts for
94
+ # multi-line statements.
95
+ self.multiline_map: dict[TLineNo, TLineNo] = {}
96
+
97
+ # Lazily-created arc data, and missing arc descriptions.
98
+ self._all_arcs: set[TArc] | None = None
99
+ self._missing_arc_fragments: TArcFragments | None = None
100
+ self._with_jump_fixers: dict[TArc, tuple[TArc, TArc]] = {}
101
+
102
+ def lines_matching(self, regex: str) -> set[TLineNo]:
103
+ """Find the lines matching a regex.
104
+
105
+ Returns a set of line numbers, the lines that contain a match for
106
+ `regex`. The entire line needn't match, just a part of it.
107
+ Handles multiline regex patterns.
108
+
109
+ """
110
+ matches: set[TLineNo] = set()
111
+
112
+ last_start = 0
113
+ last_start_line = 0
114
+ for match in re.finditer(regex, self.text, flags=re.MULTILINE):
115
+ start, end = match.span()
116
+ start_line = last_start_line + self.text.count("\n", last_start, start)
117
+ end_line = last_start_line + self.text.count("\n", last_start, end)
118
+ matches.update(
119
+ self.multiline_map.get(i, i) for i in range(start_line + 1, end_line + 2)
120
+ )
121
+ last_start = start
122
+ last_start_line = start_line
123
+ return matches
124
+
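A minimal illustration (editor's sketch, not part of the wheel contents) of how lines_matching maps a regex hit back to a line number, assuming a parser built directly from text:

    p = PythonParser(text="a = 1\nb = 2  # nocov\nc = 3\n")
    p.lines_matching(r"#\s*nocov")   # -> {2}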
125
+ def _raw_parse(self) -> None:
126
+ """Parse the source to find the interesting facts about its lines.
127
+
128
+ A handful of attributes are updated.
129
+
130
+ """
131
+ # Find lines which match an exclusion pattern.
132
+ if self.exclude:
133
+ self.raw_excluded = self.lines_matching(self.exclude)
134
+ self.excluded = set(self.raw_excluded)
135
+
136
+ # The current number of indents.
137
+ indent: int = 0
138
+ # An exclusion comment will exclude an entire clause at this indent.
139
+ exclude_indent: int = 0
140
+ # Are we currently excluding lines?
141
+ excluding: bool = False
142
+ # The line number of the first line in a multi-line statement.
143
+ first_line: int = 0
144
+ # Is the file empty?
145
+ empty: bool = True
146
+ # Parenthesis (and bracket) nesting level.
147
+ nesting: int = 0
148
+
149
+ assert self.text is not None
150
+ tokgen = generate_tokens(self.text)
151
+ for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
152
+ if self.show_tokens: # pragma: debugging
153
+ print(
154
+ "%10s %5s %-20r %r"
155
+ % (
156
+ tokenize.tok_name.get(toktype, toktype),
157
+ nice_pair((slineno, elineno)),
158
+ ttext,
159
+ ltext,
160
+ )
161
+ )
162
+ if toktype == token.INDENT:
163
+ indent += 1
164
+ elif toktype == token.DEDENT:
165
+ indent -= 1
166
+ elif toktype == token.OP:
167
+ if ttext == ":" and nesting == 0:
168
+ should_exclude = self.excluded.intersection(range(first_line, elineno + 1))
169
+ if not excluding and should_exclude:
170
+ # Start excluding a suite. We trigger off of the colon
171
+ # token so that the #pragma comment will be recognized on
172
+ # the same line as the colon.
173
+ self.excluded.add(elineno)
174
+ exclude_indent = indent
175
+ excluding = True
176
+ elif ttext in "([{":
177
+ nesting += 1
178
+ elif ttext in ")]}":
179
+ nesting -= 1
180
+ elif toktype == token.NEWLINE:
181
+ if first_line and elineno != first_line:
182
+ # We're at the end of a line, and we've ended on a
183
+ # different line than the first line of the statement,
184
+ # so record a multi-line range.
185
+ for l in range(first_line, elineno + 1):
186
+ self.multiline_map[l] = first_line
187
+ first_line = 0
188
+
189
+ if ttext.strip() and toktype != tokenize.COMMENT:
190
+ # A non-white-space token.
191
+ empty = False
192
+ if not first_line:
193
+ # The token is not white space, and is the first in a statement.
194
+ first_line = slineno
195
+ # Check whether to end an excluded suite.
196
+ if excluding and indent <= exclude_indent:
197
+ excluding = False
198
+ if excluding:
199
+ self.excluded.add(elineno)
200
+
201
+ # Find the starts of the executable statements.
202
+ if not empty:
203
+ byte_parser = ByteParser(self.text, filename=self.filename)
204
+ self.raw_statements.update(byte_parser._find_statements())
205
+
206
+ self.excluded = self.first_lines(self.excluded)
207
+
208
+ # AST lets us find classes, docstrings, and decorator-affected
209
+ # functions and classes.
210
+ assert self._ast_root is not None
211
+ for node in ast.walk(self._ast_root):
212
+ # Find docstrings.
213
+ if isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef, ast.Module)):
214
+ if node.body:
215
+ first = node.body[0]
216
+ if (
217
+ isinstance(first, ast.Expr)
218
+ and isinstance(first.value, ast.Constant)
219
+ and isinstance(first.value.value, str)
220
+ ):
221
+ self.raw_docstrings.update(
222
+ range(first.lineno, cast(int, first.end_lineno) + 1)
223
+ )
224
+ # Exclusions carry from decorators and signatures to the bodies of
225
+ # functions and classes.
226
+ if isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)):
227
+ first_line = min((d.lineno for d in node.decorator_list), default=node.lineno)
228
+ if self.excluded.intersection(range(first_line, node.lineno + 1)):
229
+ self.excluded.update(range(first_line, cast(int, node.end_lineno) + 1))
230
+
231
+ @functools.lru_cache(maxsize=1000)
232
+ def first_line(self, lineno: TLineNo) -> TLineNo:
233
+ """Return the first line number of the statement including `lineno`."""
234
+ if lineno < 0:
235
+ lineno = -self.multiline_map.get(-lineno, -lineno)
236
+ else:
237
+ lineno = self.multiline_map.get(lineno, lineno)
238
+ return lineno
239
+
240
+ def first_lines(self, linenos: Iterable[TLineNo]) -> set[TLineNo]:
241
+ """Map the line numbers in `linenos` to the correct first line of the
242
+ statement.
243
+
244
+ Returns a set of the first lines.
245
+
246
+ """
247
+ return {self.first_line(l) for l in linenos}
248
+
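A small sketch (editor's illustration) of the normalization above: when a statement spans physical lines 1-3, _raw_parse maps each of those lines to line 1 in multiline_map, so:

    p.multiline_map        # {1: 1, 2: 1, 3: 1}
    p.first_line(3)        # -> 1
    p.first_line(-3)       # -> -1  (negative "exit" line numbers stay negative)
    p.first_lines([2, 3])  # -> {1}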
249
+ def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
250
+ """Implement `FileReporter.translate_lines`."""
251
+ return self.first_lines(lines)
252
+
253
+ def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
254
+ """Implement `FileReporter.translate_arcs`."""
255
+ return {(self.first_line(a), self.first_line(b)) for (a, b) in self.fix_with_jumps(arcs)}
256
+
257
+ def parse_source(self) -> None:
258
+ """Parse source text to find executable lines, excluded lines, etc.
259
+
260
+ Sets the .excluded and .statements attributes, normalized to the first
261
+ line of multi-line statements.
262
+
263
+ """
264
+ try:
265
+ self._ast_root = ast.parse(self.text)
266
+ self._raw_parse()
267
+ except (tokenize.TokenError, IndentationError, SyntaxError) as err:
268
+ if hasattr(err, "lineno"):
269
+ lineno = err.lineno # IndentationError
270
+ else:
271
+ lineno = err.args[1][0] # TokenError
272
+ raise NotPython(
273
+ f"Couldn't parse '{self.filename}' as Python source: "
274
+ + f"{err.args[0]!r} at line {lineno}",
275
+ ) from err
276
+
277
+ ignore = self.excluded | self.raw_docstrings
278
+ starts = self.raw_statements - ignore
279
+ self.statements = self.first_lines(starts) - ignore
280
+
281
+ def arcs(self) -> set[TArc]:
282
+ """Get information about the arcs available in the code.
283
+
284
+ Returns a set of line number pairs. Line numbers have been normalized
285
+ to the first line of multi-line statements.
286
+
287
+ """
288
+ if self._all_arcs is None:
289
+ self._analyze_ast()
290
+ assert self._all_arcs is not None
291
+ return self._all_arcs
292
+
293
+ def _analyze_ast(self) -> None:
294
+ """Run the AstArcAnalyzer and save its results.
295
+
296
+ `_all_arcs` is the set of arcs in the code.
297
+
298
+ """
299
+ assert self._ast_root is not None
300
+ aaa = AstArcAnalyzer(self.filename, self._ast_root, self.raw_statements, self.multiline_map)
301
+ aaa.analyze()
302
+ arcs = aaa.arcs
303
+ self._with_jump_fixers = aaa.with_jump_fixers()
304
+ if self._with_jump_fixers:
305
+ arcs = self.fix_with_jumps(arcs)
306
+
307
+ self._all_arcs = set()
308
+ for l1, l2 in arcs:
309
+ fl1 = self.first_line(l1)
310
+ fl2 = self.first_line(l2)
311
+ if fl1 != fl2:
312
+ self._all_arcs.add((fl1, fl2))
313
+
314
+ self._missing_arc_fragments = aaa.missing_arc_fragments
315
+
316
+ def fix_with_jumps(self, arcs: Iterable[TArc]) -> set[TArc]:
317
+ """Adjust arcs to fix jumps leaving `with` statements.
318
+
319
+ Consider this code:
320
+
321
+ with open("/tmp/test", "w") as f1:
322
+ a = 2
323
+ b = 3
324
+ print(4)
325
+
326
+ In 3.10+, we get traces for lines 1, 2, 3, 1, 4. But we want to present
327
+ it to the user as if it had been 1, 2, 3, 4. The arc 3->1 should be
328
+ replaced with 3->4, and 1->4 should be removed.
329
+
330
+ For this code, the fixers dict is {(3, 1): ((1, 4), (3, 4))}. The key
331
+ is the actual measured arc from the end of the with block back to the
332
+ start of the with-statement. The values are start_next (the with
333
+ statement to the next statement after the with), and end_next (the end
334
+ of the with-statement to the next statement after the with).
335
+
336
+ With nested with-statements, we have to trace through a few levels to
337
+ correct a longer chain of arcs.
338
+
339
+ """
340
+ to_remove = set()
341
+ to_add = set()
342
+ for arc in arcs:
343
+ if arc in self._with_jump_fixers:
344
+ end0 = arc[0]
345
+ to_remove.add(arc)
346
+ start_next, end_next = self._with_jump_fixers[arc]
347
+ while start_next in self._with_jump_fixers:
348
+ to_remove.add(start_next)
349
+ start_next, end_next = self._with_jump_fixers[start_next]
350
+ to_remove.add(end_next)
351
+ to_add.add((end0, end_next[1]))
352
+ to_remove.add(start_next)
353
+ arcs = (set(arcs) | to_add) - to_remove
354
+ return arcs
355
+
356
+ @functools.lru_cache
357
+ def exit_counts(self) -> dict[TLineNo, int]:
358
+ """Get a count of exits from that each line.
359
+
360
+ Excluded lines are excluded.
361
+
362
+ """
363
+ exit_counts: dict[TLineNo, int] = collections.defaultdict(int)
364
+ for l1, l2 in self.arcs():
365
+ assert l1 > 0, f"{l1=} should be greater than zero in {self.filename}"
366
+ if l1 in self.excluded:
367
+ # Don't report excluded lines as line numbers.
368
+ continue
369
+ if l2 in self.excluded:
370
+ # Arcs to excluded lines shouldn't count.
371
+ continue
372
+ exit_counts[l1] += 1
373
+
374
+ return exit_counts
375
+
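A worked sketch (editor's example, assuming nothing is excluded): for source such as

    x = 1          # line 1
    if x:          # line 2
        y = 2      # line 3
    else:
        y = 3      # line 5

exit_counts() reports two exits from line 2, since both branch arcs (2, 3) and (2, 5) leave it: roughly {1: 1, 2: 2, 3: 1, 5: 1}.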
376
+ def _finish_action_msg(self, action_msg: str | None, end: TLineNo) -> str:
377
+ """Apply some defaulting and formatting to an arc's description."""
378
+ if action_msg is None:
379
+ if end < 0:
380
+ action_msg = "jump to the function exit"
381
+ else:
382
+ action_msg = "jump to line {lineno}"
383
+ action_msg = action_msg.format(lineno=end)
384
+ return action_msg
385
+
386
+ def missing_arc_description(self, start: TLineNo, end: TLineNo) -> str:
387
+ """Provide an English sentence describing a missing arc."""
388
+ if self._missing_arc_fragments is None:
389
+ self._analyze_ast()
390
+ assert self._missing_arc_fragments is not None
391
+
392
+ fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)])
393
+
394
+ msgs = []
395
+ for missing_cause_msg, action_msg in fragment_pairs:
396
+ action_msg = self._finish_action_msg(action_msg, end)
397
+ msg = f"line {start} didn't {action_msg}"
398
+ if missing_cause_msg is not None:
399
+ msg += f" because {missing_cause_msg.format(lineno=start)}"
400
+
401
+ msgs.append(msg)
402
+
403
+ return " or ".join(msgs)
404
+
405
+ def arc_description(self, start: TLineNo, end: TLineNo) -> str:
406
+ """Provide an English description of an arc's effect."""
407
+ if self._missing_arc_fragments is None:
408
+ self._analyze_ast()
409
+ assert self._missing_arc_fragments is not None
410
+
411
+ fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)])
412
+ action_msg = self._finish_action_msg(fragment_pairs[0][1], end)
413
+ return action_msg
414
+
415
+
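Putting the pieces together, a minimal end-to-end sketch (editor's illustration, not part of the package) of the PythonParser API defined above:

    src = "x = 1\nif x:\n    y = 2\nelse:\n    y = 3\n"
    p = PythonParser(text=src)
    p.parse_source()
    p.statements                     # {1, 2, 3, 5}
    (2, 5) in p.arcs()               # True: the else branch is a possible arc
    p.missing_arc_description(2, 5)  # "line 2 didn't jump to line 5 because
                                     #  the condition on line 2 was always true"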
416
+ class ByteParser:
417
+ """Parse bytecode to understand the structure of code."""
418
+
419
+ def __init__(
420
+ self,
421
+ text: str,
422
+ code: CodeType | None = None,
423
+ filename: str | None = None,
424
+ ) -> None:
425
+ self.text = text
426
+ if code is not None:
427
+ self.code = code
428
+ else:
429
+ assert filename is not None
430
+ # We only get here if earlier ast parsing succeeded, so no need to
431
+ # catch errors.
432
+ self.code = compile(text, filename, "exec", dont_inherit=True)
433
+
434
+ def child_parsers(self) -> Iterable[ByteParser]:
435
+ """Iterate over all the code objects nested within this one.
436
+
437
+ The iteration includes `self` as its first value.
438
+
439
+ We skip code objects named `__annotate__` since they are deferred
440
+ annotations that usually are never run. If there are errors in the
441
+ annotations, they will be caught by type checkers or other tools that
442
+ use annotations.
443
+
444
+ """
445
+ return (
446
+ ByteParser(self.text, code=c)
447
+ for c in code_objects(self.code)
448
+ if c.co_name != "__annotate__"
449
+ )
450
+
451
+ def _line_numbers(self) -> Iterable[TLineNo]:
452
+ """Yield the line numbers possible in this code object.
453
+
454
+ Uses co_lines() to produce a sequence: l0, l1, ...
455
+ """
456
+ for _, _, line in self.code.co_lines():
457
+ if line:
458
+ yield line
459
+
460
+ def _find_statements(self) -> Iterable[TLineNo]:
461
+ """Find the statements in `self.code`.
462
+
463
+ Produce a sequence of line numbers that start statements. Recurses
464
+ into all code objects reachable from `self.code`.
465
+
466
+ """
467
+ for bp in self.child_parsers():
468
+ # Get all of the lineno information from this code.
469
+ yield from bp._line_numbers()
470
+
471
+
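A quick sketch (editor's illustration; "<sketch>" is a made-up filename) of what the bytecode walk yields:

    bp = ByteParser("x = 1\nif x:\n    y = 2\n", filename="<sketch>")
    sorted(set(bp._find_statements()))   # -> [1, 2, 3]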
472
+ #
473
+ # AST analysis
474
+ #
475
+
476
+
477
+ @dataclass(frozen=True, order=True)
478
+ class ArcStart:
479
+ """The information needed to start an arc.
480
+
481
+ `lineno` is the line number the arc starts from.
482
+
483
+ `cause` is an English text fragment used as the `missing_cause_msg` for
484
+ AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an
485
+ arc wasn't executed, so should fit well into a sentence of the form,
486
+ "Line 17 didn't run because {cause}." The fragment can include "{lineno}"
487
+ to have `lineno` interpolated into it.
488
+
489
+ As an example, this code::
490
+
491
+ if something(x): # line 1
492
+ func(x) # line 2
493
+ more_stuff() # line 3
494
+
495
+ would have two ArcStarts:
496
+
497
+ - ArcStart(1, "the condition on line 1 was always true")
498
+ - ArcStart(1, "the condition on line 1 was never true")
499
+
500
+ The first would be used to create an arc from 1 to 3, creating a message like
501
+ "line 1 didn't jump to line 3 because the condition on line 1 was always true."
502
+
503
+ The second would be used for the arc from 1 to 2, creating a message like
504
+ "line 1 didn't jump to line 2 because the condition on line 1 was never true."
505
+
506
+ """
507
+
508
+ lineno: TLineNo
509
+ cause: str = ""
510
+
511
+
512
+ class TAddArcFn(Protocol):
513
+ """The type for AstArcAnalyzer.add_arc()."""
514
+
515
+ def __call__(
516
+ self,
517
+ start: TLineNo,
518
+ end: TLineNo,
519
+ missing_cause_msg: str | None = None,
520
+ action_msg: str | None = None,
521
+ ) -> None:
522
+ """
523
+ Record an arc from `start` to `end`.
524
+
525
+ `missing_cause_msg` is a description of the reason the arc wasn't
526
+ taken if it wasn't taken. For example, "the condition on line 10 was
527
+ never true."
528
+
529
+ `action_msg` is a description of what the arc does, like "jump to line
530
+ 10" or "exit from function 'fooey'."
531
+
532
+ """
533
+
534
+
535
+ TArcFragments = dict[TArc, list[tuple[Optional[str], Optional[str]]]]
536
+
537
+
538
+ class Block:
539
+ """
540
+ Blocks need to handle various exiting statements in their own ways.
541
+
542
+ All of these methods take a list of exits, and a callable `add_arc`
543
+ function that they can use to add arcs if needed. They return True if the
544
+ exits are handled, or False if the search should continue up the block
545
+ stack.
546
+ """
547
+
548
+ # pylint: disable=unused-argument
549
+ def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
550
+ """Process break exits."""
551
+ return False
552
+
553
+ def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
554
+ """Process continue exits."""
555
+ return False
556
+
557
+ def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
558
+ """Process raise exits."""
559
+ return False
560
+
561
+ def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
562
+ """Process return exits."""
563
+ return False
564
+
565
+
566
+ class LoopBlock(Block):
567
+ """A block on the block stack representing a `for` or `while` loop."""
568
+
569
+ def __init__(self, start: TLineNo) -> None:
570
+ # The line number where the loop starts.
571
+ self.start = start
572
+ # A set of ArcStarts, the arcs from break statements exiting this loop.
573
+ self.break_exits: set[ArcStart] = set()
574
+
575
+ def process_break_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
576
+ self.break_exits.update(exits)
577
+ return True
578
+
579
+ def process_continue_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
580
+ for xit in exits:
581
+ add_arc(xit.lineno, self.start, xit.cause)
582
+ return True
583
+
584
+
585
+ class FunctionBlock(Block):
586
+ """A block on the block stack representing a function definition."""
587
+
588
+ def __init__(self, start: TLineNo, name: str) -> None:
589
+ # The line number where the function starts.
590
+ self.start = start
591
+ # The name of the function.
592
+ self.name = name
593
+
594
+ def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
595
+ for xit in exits:
596
+ add_arc(
597
+ xit.lineno,
598
+ -self.start,
599
+ xit.cause,
600
+ f"except from function {self.name!r}",
601
+ )
602
+ return True
603
+
604
+ def process_return_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
605
+ for xit in exits:
606
+ add_arc(
607
+ xit.lineno,
608
+ -self.start,
609
+ xit.cause,
610
+ f"return from function {self.name!r}",
611
+ )
612
+ return True
613
+
614
+
615
+ class TryBlock(Block):
616
+ """A block on the block stack representing a `try` block."""
617
+
618
+ def __init__(self, handler_start: TLineNo | None, final_start: TLineNo | None) -> None:
619
+ # The line number of the first "except" handler, if any.
620
+ self.handler_start = handler_start
621
+ # The line number of the "finally:" clause, if any.
622
+ self.final_start = final_start
623
+
624
+ def process_raise_exits(self, exits: set[ArcStart], add_arc: TAddArcFn) -> bool:
625
+ if self.handler_start is not None:
626
+ for xit in exits:
627
+ add_arc(xit.lineno, self.handler_start, xit.cause)
628
+ return True
629
+
630
+
631
+ # TODO: Shouldn't the cause messages join with "and" instead of "or"?
632
+
633
+
634
+ def is_constant_test_expr(node: ast.AST) -> tuple[bool, bool]:
635
+ """Is this a compile-time constant test expression?
636
+
637
+ We don't try to mimic all of CPython's optimizations. We just have to
638
+ handle the kinds of constant expressions people might actually use.
639
+
640
+ """
641
+ match node:
642
+ case ast.Constant():
643
+ return True, bool(node.value)
644
+ case ast.Name():
645
+ if node.id in ["True", "False", "None", "__debug__"]:
646
+ return True, eval(node.id) # pylint: disable=eval-used
647
+ case ast.UnaryOp():
648
+ if isinstance(node.op, ast.Not):
649
+ is_constant, val = is_constant_test_expr(node.operand)
650
+ return is_constant, not val
651
+ case ast.BoolOp():
652
+ rets = [is_constant_test_expr(v) for v in node.values]
653
+ is_constant = all(is_const for is_const, _ in rets)
654
+ if is_constant:
655
+ op = any if isinstance(node.op, ast.Or) else all
656
+ return True, op(v for _, v in rets)
657
+ return False, False
658
+
659
+
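A few illustrative evaluations (editor's sketch; `check` is a throwaway helper) of the predicate above:

    import ast
    def check(expr: str) -> tuple[bool, bool]:
        return is_constant_test_expr(ast.parse(expr, mode="eval").body)

    check("not False")   # -> (True, True)
    check("__debug__")   # -> (True, True), assuming -O is not in effect
    check("0")           # -> (True, False)
    check("flag")        # -> (False, False): not a compile-time constant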
660
+ class AstArcAnalyzer:
661
+ """Analyze source text with an AST to find executable code paths.
662
+
663
+ The .analyze() method does the work, and populates these attributes:
664
+
665
+ `arcs`: a set of (from, to) pairs of the arcs possible in the code.
666
+
667
+ `missing_arc_fragments`: a dict mapping (from, to) arcs to lists of
668
+ message fragments explaining why the arc is missing from execution::
669
+
670
+ { (start, end): [(missing_cause_msg, action_msg), ...], }
671
+
672
+ For an arc starting from line 17, they should be usable to form complete
673
+ sentences like: "Line 17 didn't {action_msg} because {missing_cause_msg}".
674
+
675
+ NOTE: Starting in July 2024, I've been whittling this down to only report
676
+ arcs that are part of true branches. It's not clear how far this work will
677
+ go.
678
+
679
+ """
680
+
681
+ def __init__(
682
+ self,
683
+ filename: str,
684
+ root_node: ast.AST,
685
+ statements: set[TLineNo],
686
+ multiline: dict[TLineNo, TLineNo],
687
+ ) -> None:
688
+ self.filename = filename
689
+ self.root_node = root_node
690
+ self.statements = {multiline.get(l, l) for l in statements}
691
+ self.multiline = multiline
692
+
693
+ # Turn on AST dumps with an environment variable.
694
+ # $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code.
695
+ dump_ast = bool(int(os.getenv("COVERAGE_AST_DUMP", "0")))
696
+
697
+ if dump_ast: # pragma: debugging
698
+ # Dump the AST so that failing tests have helpful output.
699
+ print(f"Statements: {self.statements}")
700
+ print(f"Multiline map: {self.multiline}")
701
+ print(ast.dump(self.root_node, include_attributes=True, indent=4))
702
+
703
+ self.arcs: set[TArc] = set()
704
+ self.missing_arc_fragments: TArcFragments = collections.defaultdict(list)
705
+ self.block_stack: list[Block] = []
706
+
707
+ # If `with` clauses jump to their start on the way out, we need
708
+ # information to be able to skip over that jump. We record the arcs
709
+ # from `with` into the clause (with_entries), and the arcs from the
710
+ # clause to the `with` (with_exits).
711
+ self.current_with_starts: set[TLineNo] = set()
712
+ self.all_with_starts: set[TLineNo] = set()
713
+ self.with_entries: set[TArc] = set()
714
+ self.with_exits: set[TArc] = set()
715
+
716
+ # $set_env.py: COVERAGE_TRACK_ARCS - Trace possible arcs added while parsing code.
717
+ self.debug = bool(int(os.getenv("COVERAGE_TRACK_ARCS", "0")))
718
+
719
+ def analyze(self) -> None:
720
+ """Examine the AST tree from `self.root_node` to determine possible arcs."""
721
+ for node in ast.walk(self.root_node):
722
+ node_name = node.__class__.__name__
723
+ code_object_handler = getattr(self, f"_code_object__{node_name}", None)
724
+ if code_object_handler is not None:
725
+ code_object_handler(node)
726
+
727
+ def with_jump_fixers(self) -> dict[TArc, tuple[TArc, TArc]]:
728
+ """Get a dict with data for fixing jumps out of with statements.
729
+
730
+ Returns a dict. The keys are arcs leaving a with-statement by jumping
731
+ back to its start. The values are pairs: first, the arc from the start
732
+ to the next statement, then the arc that exits the with without going
733
+ to the start.
734
+
735
+ """
736
+ fixers = {}
737
+ with_nexts = {
738
+ arc
739
+ for arc in self.arcs
740
+ if arc[0] in self.all_with_starts and arc not in self.with_entries
741
+ }
742
+ for start in self.all_with_starts:
743
+ nexts = {arc[1] for arc in with_nexts if arc[0] == start}
744
+ if not nexts:
745
+ continue
746
+ assert len(nexts) == 1, f"Expected one arc, got {nexts} with {start = }"
747
+ nxt = nexts.pop()
748
+ ends = {arc[0] for arc in self.with_exits if arc[1] == start}
749
+ for end in ends:
750
+ fixers[(end, start)] = ((start, nxt), (end, nxt))
751
+ return fixers
752
+
753
+ # Code object dispatchers: _code_object__*
754
+ #
755
+ # These methods are used by analyze() as the start of the analysis.
756
+ # There is one for each construct with a code object.
757
+
758
+ def _code_object__Module(self, node: ast.Module) -> None:
759
+ start = self.line_for_node(node)
760
+ if node.body:
761
+ exits = self.process_body(node.body)
762
+ for xit in exits:
763
+ self.add_arc(xit.lineno, -start, xit.cause, "exit the module")
764
+ else:
765
+ # Empty module.
766
+ self.add_arc(start, -start)
767
+
768
+ def _code_object__FunctionDef(self, node: ast.FunctionDef) -> None:
769
+ start = self.line_for_node(node)
770
+ self.block_stack.append(FunctionBlock(start=start, name=node.name))
771
+ exits = self.process_body(node.body)
772
+ self.process_return_exits(exits)
773
+ self.block_stack.pop()
774
+
775
+ _code_object__AsyncFunctionDef = _code_object__FunctionDef
776
+
777
+ def _code_object__ClassDef(self, node: ast.ClassDef) -> None:
778
+ start = self.line_for_node(node)
779
+ exits = self.process_body(node.body)
780
+ for xit in exits:
781
+ self.add_arc(xit.lineno, -start, xit.cause, f"exit class {node.name!r}")
782
+
783
+ def add_arc(
784
+ self,
785
+ start: TLineNo,
786
+ end: TLineNo,
787
+ missing_cause_msg: str | None = None,
788
+ action_msg: str | None = None,
789
+ ) -> None:
790
+ """Add an arc, including message fragments to use if it is missing."""
791
+ if self.debug: # pragma: debugging
792
+ print(f"Adding possible arc: ({start}, {end}): {missing_cause_msg!r}, {action_msg!r}")
793
+ print(short_stack(), end="\n\n")
794
+ self.arcs.add((start, end))
795
+ if start in self.current_with_starts:
796
+ self.with_entries.add((start, end))
797
+
798
+ if missing_cause_msg is not None or action_msg is not None:
799
+ self.missing_arc_fragments[(start, end)].append((missing_cause_msg, action_msg))
800
+
801
+ def nearest_blocks(self) -> Iterable[Block]:
802
+ """Yield the blocks in nearest-to-farthest order."""
803
+ return reversed(self.block_stack)
804
+
805
+ def line_for_node(self, node: ast.AST) -> TLineNo:
806
+ """What is the right line number to use for this node?
807
+
808
+ This dispatches to _line__Node functions where needed.
809
+
810
+ """
811
+ node_name = node.__class__.__name__
812
+ handler = cast(
813
+ Optional[Callable[[ast.AST], TLineNo]],
814
+ getattr(self, f"_line__{node_name}", None),
815
+ )
816
+ if handler is not None:
817
+ line = handler(node)
818
+ else:
819
+ line = node.lineno # type: ignore[attr-defined]
820
+ return self.multiline.get(line, line)
821
+
822
+ # First lines: _line__*
823
+ #
824
+ # Dispatched by line_for_node, each method knows how to identify the first
825
+ # line number in the node, as Python will report it.
826
+
827
+ def _line_decorated(self, node: ast.FunctionDef) -> TLineNo:
828
+ """Compute first line number for things that can be decorated (classes and functions)."""
829
+ if node.decorator_list:
830
+ lineno = node.decorator_list[0].lineno
831
+ else:
832
+ lineno = node.lineno
833
+ return lineno
834
+
835
+ def _line__Assign(self, node: ast.Assign) -> TLineNo:
836
+ return self.line_for_node(node.value)
837
+
838
+ _line__ClassDef = _line_decorated
839
+
840
+ def _line__Dict(self, node: ast.Dict) -> TLineNo:
841
+ if node.keys:
842
+ if node.keys[0] is not None:
843
+ return node.keys[0].lineno
844
+ else:
845
+ # Unpacked dict literals `{**{"a":1}}` have None as the key,
846
+ # use the value in that case.
847
+ return node.values[0].lineno
848
+ else:
849
+ return node.lineno
850
+
851
+ _line__FunctionDef = _line_decorated
852
+ _line__AsyncFunctionDef = _line_decorated
853
+
854
+ def _line__List(self, node: ast.List) -> TLineNo:
855
+ if node.elts:
856
+ return self.line_for_node(node.elts[0])
857
+ else:
858
+ return node.lineno
859
+
860
+ def _line__Module(self, node: ast.Module) -> TLineNo: # pylint: disable=unused-argument
861
+ return 1
862
+
863
+ # The node types that just flow to the next node with no complications.
864
+ OK_TO_DEFAULT = {
865
+ "AnnAssign",
866
+ "Assign",
867
+ "Assert",
868
+ "AugAssign",
869
+ "Delete",
870
+ "Expr",
871
+ "Global",
872
+ "Import",
873
+ "ImportFrom",
874
+ "Nonlocal",
875
+ "Pass",
876
+ }
877
+
878
+ def node_exits(self, node: ast.AST) -> set[ArcStart]:
879
+ """Find the set of arc starts that exit this node.
880
+
881
+ Return a set of ArcStarts, exits from this node to the next. Because a
882
+ node represents an entire sub-tree (including its children), the exits
883
+ from a node can be arbitrarily complex::
884
+
885
+ if something(1):
886
+ if other(2):
887
+ doit(3)
888
+ else:
889
+ doit(5)
890
+
891
+ There are three exits from line 1: they start at lines 1, 3 and 5.
892
+ There are two exits from line 2: lines 3 and 5.
893
+
894
+ """
895
+ node_name = node.__class__.__name__
896
+ handler = cast(
897
+ Optional[Callable[[ast.AST], set[ArcStart]]],
898
+ getattr(self, f"_handle__{node_name}", None),
899
+ )
900
+ if handler is not None:
901
+ arc_starts = handler(node)
902
+ else:
903
+ # No handler: either it's something that's ok to default (a simple
904
+ # statement), or it's something we overlooked.
905
+ if env.TESTING:
906
+ if node_name not in self.OK_TO_DEFAULT:
907
+ raise RuntimeError(f"*** Unhandled: {node}") # pragma: only failure
908
+
909
+ # Default for simple statements: one exit from this node.
910
+ arc_starts = {ArcStart(self.line_for_node(node))}
911
+ return arc_starts
912
+
913
+ def process_body(
914
+ self,
915
+ body: Sequence[ast.AST],
916
+ from_start: ArcStart | None = None,
917
+ prev_starts: set[ArcStart] | None = None,
918
+ ) -> set[ArcStart]:
919
+ """Process the body of a compound statement.
920
+
921
+ `body` is the body node to process.
922
+
923
+ `from_start` is a single `ArcStart` that starts an arc into this body.
924
+ `prev_starts` is a set of ArcStarts that can all be the start of arcs
925
+ into this body. Only one of `from_start` and `prev_starts` should be
926
+ given.
927
+
928
+ Records arcs within the body by calling `self.add_arc`.
929
+
930
+ Returns a set of ArcStarts, the exits from this body.
931
+
932
+ """
933
+ if prev_starts is None:
934
+ if from_start is None:
935
+ prev_starts = set()
936
+ else:
937
+ prev_starts = {from_start}
938
+ else:
939
+ assert from_start is None
940
+
941
+ # Loop over the nodes in the body, making arcs from each one's exits to
942
+ # the next node.
943
+ for body_node in body:
944
+ lineno = self.line_for_node(body_node)
945
+ if lineno not in self.statements:
946
+ continue
947
+ for prev_start in prev_starts:
948
+ self.add_arc(prev_start.lineno, lineno, prev_start.cause)
949
+ prev_starts = self.node_exits(body_node)
950
+ return prev_starts
951
+
952
+ # Exit processing: process_*_exits
953
+ #
954
+ # These functions process the four kinds of jump exits: break, continue,
955
+ # raise, and return. To figure out where an exit goes, we have to look at
956
+ # the block stack context. For example, a break will jump to the nearest
957
+ # enclosing loop block, or the nearest enclosing finally block, whichever
958
+ # is nearer.
959
+
960
+ def process_break_exits(self, exits: set[ArcStart]) -> None:
961
+ """Add arcs due to jumps from `exits` being breaks."""
962
+ for block in self.nearest_blocks(): # pragma: always breaks
963
+ if block.process_break_exits(exits, self.add_arc):
964
+ break
965
+
966
+ def process_continue_exits(self, exits: set[ArcStart]) -> None:
967
+ """Add arcs due to jumps from `exits` being continues."""
968
+ for block in self.nearest_blocks(): # pragma: always breaks
969
+ if block.process_continue_exits(exits, self.add_arc):
970
+ break
971
+
972
+ def process_raise_exits(self, exits: set[ArcStart]) -> None:
973
+ """Add arcs due to jumps from `exits` being raises."""
974
+ for block in self.nearest_blocks():
975
+ if block.process_raise_exits(exits, self.add_arc):
976
+ break
977
+
978
+ def process_return_exits(self, exits: set[ArcStart]) -> None:
979
+ """Add arcs due to jumps from `exits` being returns."""
980
+ for block in self.nearest_blocks(): # pragma: always breaks
981
+ if block.process_return_exits(exits, self.add_arc):
982
+ break
983
+
984
+ # Node handlers: _handle__*
985
+ #
986
+ # Each handler deals with a specific AST node type, dispatched from
987
+ # node_exits. Handlers return the set of exits from that node, and can
988
+ # also call self.add_arc to record arcs they find. These functions mirror
989
+ # the Python semantics of each syntactic construct. See the docstring
990
+ # for node_exits to understand the concept of exits from a node.
991
+ #
992
+ # Every node type that represents a statement should have a handler, or it
993
+ # should be listed in OK_TO_DEFAULT.
994
+
995
+ def _handle__Break(self, node: ast.Break) -> set[ArcStart]:
996
+ here = self.line_for_node(node)
997
+ break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed")
998
+ self.process_break_exits({break_start})
999
+ return set()
1000
+
1001
+ def _handle_decorated(self, node: ast.FunctionDef) -> set[ArcStart]:
1002
+ """Add arcs for things that can be decorated (classes and functions)."""
1003
+ main_line: TLineNo = node.lineno
1004
+ last: TLineNo | None = node.lineno
1005
+ decs = node.decorator_list
1006
+ if decs:
1007
+ last = None
1008
+ for dec_node in decs:
1009
+ dec_start = self.line_for_node(dec_node)
1010
+ if last is not None and dec_start != last:
1011
+ self.add_arc(last, dec_start)
1012
+ last = dec_start
1013
+ assert last is not None
1014
+ self.add_arc(last, main_line)
1015
+ last = main_line
1016
+ # The definition line may have been missed, but we should have it
1017
+ # in `self.statements`. For some constructs, `line_for_node` is
1018
+ # not what we'd think of as the first line in the statement, so map
1019
+ # it to the first one.
1020
+ assert node.body, f"Oops: {node.body = } in {self.filename}@{node.lineno}"
1021
+ # The body is handled in collect_arcs.
1022
+ assert last is not None
1023
+ return {ArcStart(last)}
1024
+
1025
+ _handle__ClassDef = _handle_decorated
1026
+
1027
+ def _handle__Continue(self, node: ast.Continue) -> set[ArcStart]:
1028
+ here = self.line_for_node(node)
1029
+ continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed")
1030
+ self.process_continue_exits({continue_start})
1031
+ return set()
1032
+
1033
+ def _handle__For(self, node: ast.For) -> set[ArcStart]:
1034
+ start = self.line_for_node(node.iter)
1035
+ self.block_stack.append(LoopBlock(start=start))
1036
+ from_start = ArcStart(start, cause="the loop on line {lineno} never started")
1037
+ exits = self.process_body(node.body, from_start=from_start)
1038
+ # Any exit from the body will go back to the top of the loop.
1039
+ for xit in exits:
1040
+ self.add_arc(xit.lineno, start, xit.cause)
1041
+ my_block = self.block_stack.pop()
1042
+ assert isinstance(my_block, LoopBlock)
1043
+ exits = my_block.break_exits
1044
+ from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete")
1045
+ if node.orelse:
1046
+ else_exits = self.process_body(node.orelse, from_start=from_start)
1047
+ exits |= else_exits
1048
+ else:
1049
+ # No else clause: exit from the for line.
1050
+ exits.add(from_start)
1051
+ return exits
1052
+
1053
+ _handle__AsyncFor = _handle__For
1054
+
1055
+ _handle__FunctionDef = _handle_decorated
1056
+ _handle__AsyncFunctionDef = _handle_decorated
1057
+
1058
+ def _handle__If(self, node: ast.If) -> set[ArcStart]:
1059
+ start = self.line_for_node(node.test)
1060
+ constant_test, val = is_constant_test_expr(node.test)
1061
+ exits = set()
1062
+ if not constant_test or val:
1063
+ from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
1064
+ exits |= self.process_body(node.body, from_start=from_start)
1065
+ if not constant_test or not val:
1066
+ from_start = ArcStart(start, cause="the condition on line {lineno} was always true")
1067
+ exits |= self.process_body(node.orelse, from_start=from_start)
1068
+ return exits
1069
+
1070
+ def _handle__Match(self, node: ast.Match) -> set[ArcStart]:
1071
+ start = self.line_for_node(node)
1072
+ last_start = start
1073
+ exits = set()
1074
+ for case in node.cases:
1075
+ case_start = self.line_for_node(case.pattern)
1076
+ self.add_arc(last_start, case_start, "the pattern on line {lineno} always matched")
1077
+ from_start = ArcStart(
1078
+ case_start,
1079
+ cause="the pattern on line {lineno} never matched",
1080
+ )
1081
+ exits |= self.process_body(case.body, from_start=from_start)
1082
+ last_start = case_start
1083
+
1084
+ # case is now the last case, check for wildcard match.
1085
+ pattern = case.pattern # pylint: disable=undefined-loop-variable
1086
+ while isinstance(pattern, ast.MatchOr):
1087
+ pattern = pattern.patterns[-1]
1088
+ while isinstance(pattern, ast.MatchAs) and pattern.pattern is not None:
1089
+ pattern = pattern.pattern
1090
+ had_wildcard = (
1091
+ isinstance(pattern, ast.MatchAs) and pattern.pattern is None and case.guard is None # pylint: disable=undefined-loop-variable
1092
+ )
1093
+
1094
+ if not had_wildcard:
1095
+ exits.add(
1096
+ ArcStart(case_start, cause="the pattern on line {lineno} always matched"),
1097
+ )
1098
+ return exits
1099
+
1100
+ def _handle__Raise(self, node: ast.Raise) -> set[ArcStart]:
1101
+ here = self.line_for_node(node)
1102
+ raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed")
1103
+ self.process_raise_exits({raise_start})
1104
+ # `raise` statement jumps away, no exits from here.
1105
+ return set()
1106
+
1107
+ def _handle__Return(self, node: ast.Return) -> set[ArcStart]:
1108
+ here = self.line_for_node(node)
1109
+ return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed")
1110
+ self.process_return_exits({return_start})
1111
+ # `return` statement jumps away, no exits from here.
1112
+ return set()
1113
+
1114
+ def _handle__Try(self, node: ast.Try) -> set[ArcStart]:
1115
+ if node.handlers:
1116
+ handler_start = self.line_for_node(node.handlers[0])
1117
+ else:
1118
+ handler_start = None
1119
+
1120
+ if node.finalbody:
1121
+ final_start = self.line_for_node(node.finalbody[0])
1122
+ else:
1123
+ final_start = None
1124
+
1125
+ # This is true by virtue of Python syntax: have to have either except
1126
+ # or finally, or both.
1127
+ assert handler_start is not None or final_start is not None
1128
+ try_block = TryBlock(handler_start, final_start)
1129
+ self.block_stack.append(try_block)
1130
+
1131
+ start = self.line_for_node(node)
1132
+ exits = self.process_body(node.body, from_start=ArcStart(start))
1133
+
1134
+ # We're done with the `try` body, so this block no longer handles
1135
+ # exceptions. We keep the block so the `finally` clause can pick up
1136
+ # flows from the handlers and `else` clause.
1137
+ if node.finalbody:
1138
+ try_block.handler_start = None
1139
+ else:
1140
+ self.block_stack.pop()
1141
+
1142
+ handler_exits: set[ArcStart] = set()
1143
+
1144
+ if node.handlers:
1145
+ for handler_node in node.handlers:
1146
+ handler_start = self.line_for_node(handler_node)
1147
+ from_cause = "the exception caught by line {lineno} didn't happen"
1148
+ from_start = ArcStart(handler_start, cause=from_cause)
1149
+ handler_exits |= self.process_body(handler_node.body, from_start=from_start)
1150
+
1151
+ if node.orelse:
1152
+ exits = self.process_body(node.orelse, prev_starts=exits)
1153
+
1154
+ exits |= handler_exits
1155
+
1156
+ if node.finalbody:
1157
+ self.block_stack.pop()
1158
+ final_from = exits
1159
+
1160
+ final_exits = self.process_body(node.finalbody, prev_starts=final_from)
1161
+
1162
+ if exits:
1163
+ # The finally clause's exits are only exits for the try block
1164
+ # as a whole if the try block had some exits to begin with.
1165
+ exits = final_exits
1166
+
1167
+ return exits
1168
+
1169
+ _handle__TryStar = _handle__Try
1170
+
1171
+ def _handle__While(self, node: ast.While) -> set[ArcStart]:
1172
+ start = to_top = self.line_for_node(node.test)
1173
+ constant_test, _ = is_constant_test_expr(node.test)
1174
+ self.block_stack.append(LoopBlock(start=to_top))
1175
+ from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
1176
+ exits = self.process_body(node.body, from_start=from_start)
1177
+ for xit in exits:
1178
+ self.add_arc(xit.lineno, to_top, xit.cause)
1179
+ exits = set()
1180
+ my_block = self.block_stack.pop()
1181
+ assert isinstance(my_block, LoopBlock)
1182
+ exits.update(my_block.break_exits)
1183
+ from_start = ArcStart(start, cause="the condition on line {lineno} was always true")
1184
+ if node.orelse:
1185
+ else_exits = self.process_body(node.orelse, from_start=from_start)
1186
+ exits |= else_exits
1187
+ else:
1188
+ # No `else` clause: you can exit from the start.
1189
+ if not constant_test:
1190
+ exits.add(from_start)
1191
+ return exits
1192
+
1193
+ def _handle__With(self, node: ast.With) -> set[ArcStart]:
1194
+ if env.PYBEHAVIOR.exit_with_through_ctxmgr:
1195
+ starts = [self.line_for_node(item.context_expr) for item in node.items]
1196
+ else:
1197
+ starts = [self.line_for_node(node)]
1198
+ for start in starts:
1199
+ self.current_with_starts.add(start)
1200
+ self.all_with_starts.add(start)
1201
+
1202
+ exits = self.process_body(node.body, from_start=ArcStart(starts[-1]))
1203
+
1204
+ start = starts[-1]
1205
+ self.current_with_starts.remove(start)
1206
+ with_exit = {ArcStart(start)}
1207
+ if exits:
1208
+ for xit in exits:
1209
+ self.add_arc(xit.lineno, start)
1210
+ self.with_exits.add((xit.lineno, start))
1211
+ exits = with_exit
1212
+
1213
+ return exits
1214
+
1215
+ _handle__AsyncWith = _handle__With