passagemath-repl 10.4.62 (passagemath_repl-10.4.62-py3-none-any.whl)

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (162)
  1. passagemath_repl-10.4.62.data/scripts/sage-cachegrind +25 -0
  2. passagemath_repl-10.4.62.data/scripts/sage-callgrind +16 -0
  3. passagemath_repl-10.4.62.data/scripts/sage-cleaner +230 -0
  4. passagemath_repl-10.4.62.data/scripts/sage-coverage +327 -0
  5. passagemath_repl-10.4.62.data/scripts/sage-eval +14 -0
  6. passagemath_repl-10.4.62.data/scripts/sage-fixdoctests +708 -0
  7. passagemath_repl-10.4.62.data/scripts/sage-inline-fortran +12 -0
  8. passagemath_repl-10.4.62.data/scripts/sage-ipynb2rst +50 -0
  9. passagemath_repl-10.4.62.data/scripts/sage-ipython +16 -0
  10. passagemath_repl-10.4.62.data/scripts/sage-massif +25 -0
  11. passagemath_repl-10.4.62.data/scripts/sage-notebook +267 -0
  12. passagemath_repl-10.4.62.data/scripts/sage-omega +25 -0
  13. passagemath_repl-10.4.62.data/scripts/sage-preparse +302 -0
  14. passagemath_repl-10.4.62.data/scripts/sage-run +27 -0
  15. passagemath_repl-10.4.62.data/scripts/sage-run-cython +10 -0
  16. passagemath_repl-10.4.62.data/scripts/sage-runtests +9 -0
  17. passagemath_repl-10.4.62.data/scripts/sage-startuptime.py +163 -0
  18. passagemath_repl-10.4.62.data/scripts/sage-valgrind +34 -0
  19. passagemath_repl-10.4.62.dist-info/METADATA +77 -0
  20. passagemath_repl-10.4.62.dist-info/RECORD +162 -0
  21. passagemath_repl-10.4.62.dist-info/WHEEL +5 -0
  22. passagemath_repl-10.4.62.dist-info/top_level.txt +1 -0
  23. sage/all__sagemath_repl.py +119 -0
  24. sage/doctest/__init__.py +4 -0
  25. sage/doctest/__main__.py +236 -0
  26. sage/doctest/all.py +4 -0
  27. sage/doctest/check_tolerance.py +261 -0
  28. sage/doctest/control.py +1727 -0
  29. sage/doctest/external.py +534 -0
  30. sage/doctest/fixtures.py +383 -0
  31. sage/doctest/forker.py +2665 -0
  32. sage/doctest/marked_output.py +102 -0
  33. sage/doctest/parsing.py +1708 -0
  34. sage/doctest/parsing_test.py +79 -0
  35. sage/doctest/reporting.py +733 -0
  36. sage/doctest/rif_tol.py +124 -0
  37. sage/doctest/sources.py +1657 -0
  38. sage/doctest/test.py +584 -0
  39. sage/doctest/tests/1second.rst +4 -0
  40. sage/doctest/tests/99seconds.rst +4 -0
  41. sage/doctest/tests/abort.rst +5 -0
  42. sage/doctest/tests/atexit.rst +7 -0
  43. sage/doctest/tests/fail_and_die.rst +6 -0
  44. sage/doctest/tests/initial.rst +15 -0
  45. sage/doctest/tests/interrupt.rst +7 -0
  46. sage/doctest/tests/interrupt_diehard.rst +14 -0
  47. sage/doctest/tests/keyboardinterrupt.rst +11 -0
  48. sage/doctest/tests/longtime.rst +5 -0
  49. sage/doctest/tests/nodoctest +5 -0
  50. sage/doctest/tests/random_seed.rst +4 -0
  51. sage/doctest/tests/show_skipped.rst +18 -0
  52. sage/doctest/tests/sig_on.rst +9 -0
  53. sage/doctest/tests/simple_failure.rst +8 -0
  54. sage/doctest/tests/sleep_and_raise.rst +106 -0
  55. sage/doctest/tests/tolerance.rst +31 -0
  56. sage/doctest/util.py +750 -0
  57. sage/interfaces/cleaner.py +48 -0
  58. sage/interfaces/quit.py +163 -0
  59. sage/misc/all__sagemath_repl.py +51 -0
  60. sage/misc/banner.py +235 -0
  61. sage/misc/benchmark.py +221 -0
  62. sage/misc/classgraph.py +131 -0
  63. sage/misc/copying.py +22 -0
  64. sage/misc/cython.py +694 -0
  65. sage/misc/dev_tools.py +745 -0
  66. sage/misc/edit_module.py +304 -0
  67. sage/misc/explain_pickle.py +3079 -0
  68. sage/misc/gperftools.py +361 -0
  69. sage/misc/inline_fortran.py +212 -0
  70. sage/misc/messaging.py +86 -0
  71. sage/misc/pager.py +21 -0
  72. sage/misc/profiler.py +179 -0
  73. sage/misc/python.py +70 -0
  74. sage/misc/remote_file.py +53 -0
  75. sage/misc/sage_eval.py +246 -0
  76. sage/misc/sage_input.py +3621 -0
  77. sage/misc/sagedoc.py +1742 -0
  78. sage/misc/sh.py +38 -0
  79. sage/misc/trace.py +90 -0
  80. sage/repl/__init__.py +16 -0
  81. sage/repl/all.py +15 -0
  82. sage/repl/attach.py +625 -0
  83. sage/repl/configuration.py +186 -0
  84. sage/repl/display/__init__.py +1 -0
  85. sage/repl/display/fancy_repr.py +354 -0
  86. sage/repl/display/formatter.py +318 -0
  87. sage/repl/display/jsmol_iframe.py +290 -0
  88. sage/repl/display/pretty_print.py +153 -0
  89. sage/repl/display/util.py +163 -0
  90. sage/repl/image.py +302 -0
  91. sage/repl/inputhook.py +91 -0
  92. sage/repl/interface_magic.py +298 -0
  93. sage/repl/interpreter.py +854 -0
  94. sage/repl/ipython_extension.py +593 -0
  95. sage/repl/ipython_kernel/__init__.py +1 -0
  96. sage/repl/ipython_kernel/__main__.py +4 -0
  97. sage/repl/ipython_kernel/all_jupyter.py +10 -0
  98. sage/repl/ipython_kernel/install.py +301 -0
  99. sage/repl/ipython_kernel/interact.py +278 -0
  100. sage/repl/ipython_kernel/kernel.py +217 -0
  101. sage/repl/ipython_kernel/widgets.py +466 -0
  102. sage/repl/ipython_kernel/widgets_sagenb.py +587 -0
  103. sage/repl/ipython_tests.py +163 -0
  104. sage/repl/load.py +326 -0
  105. sage/repl/preparse.py +2218 -0
  106. sage/repl/prompts.py +90 -0
  107. sage/repl/rich_output/__init__.py +4 -0
  108. sage/repl/rich_output/backend_base.py +648 -0
  109. sage/repl/rich_output/backend_doctest.py +316 -0
  110. sage/repl/rich_output/backend_emacs.py +151 -0
  111. sage/repl/rich_output/backend_ipython.py +596 -0
  112. sage/repl/rich_output/buffer.py +311 -0
  113. sage/repl/rich_output/display_manager.py +829 -0
  114. sage/repl/rich_output/example.avi +0 -0
  115. sage/repl/rich_output/example.canvas3d +1 -0
  116. sage/repl/rich_output/example.dvi +0 -0
  117. sage/repl/rich_output/example.flv +0 -0
  118. sage/repl/rich_output/example.gif +0 -0
  119. sage/repl/rich_output/example.jpg +0 -0
  120. sage/repl/rich_output/example.mkv +0 -0
  121. sage/repl/rich_output/example.mov +0 -0
  122. sage/repl/rich_output/example.mp4 +0 -0
  123. sage/repl/rich_output/example.ogv +0 -0
  124. sage/repl/rich_output/example.pdf +0 -0
  125. sage/repl/rich_output/example.png +0 -0
  126. sage/repl/rich_output/example.svg +54 -0
  127. sage/repl/rich_output/example.webm +0 -0
  128. sage/repl/rich_output/example.wmv +0 -0
  129. sage/repl/rich_output/example_jmol.spt.zip +0 -0
  130. sage/repl/rich_output/example_wavefront_scene.mtl +7 -0
  131. sage/repl/rich_output/example_wavefront_scene.obj +17 -0
  132. sage/repl/rich_output/output_basic.py +391 -0
  133. sage/repl/rich_output/output_browser.py +103 -0
  134. sage/repl/rich_output/output_catalog.py +54 -0
  135. sage/repl/rich_output/output_graphics.py +320 -0
  136. sage/repl/rich_output/output_graphics3d.py +345 -0
  137. sage/repl/rich_output/output_video.py +231 -0
  138. sage/repl/rich_output/preferences.py +432 -0
  139. sage/repl/rich_output/pretty_print.py +339 -0
  140. sage/repl/rich_output/test_backend.py +201 -0
  141. sage/repl/user_globals.py +214 -0
  142. sage/tests/__init__.py +1 -0
  143. sage/tests/all.py +3 -0
  144. sage/tests/article_heuberger_krenn_kropf_fsm-in-sage.py +630 -0
  145. sage/tests/arxiv_0812_2725.py +351 -0
  146. sage/tests/benchmark.py +1923 -0
  147. sage/tests/book_schilling_zabrocki_kschur_primer.py +795 -0
  148. sage/tests/book_stein_ent.py +651 -0
  149. sage/tests/book_stein_modform.py +558 -0
  150. sage/tests/cmdline.py +790 -0
  151. sage/tests/combinatorial_hopf_algebras.py +52 -0
  152. sage/tests/finite_poset.py +623 -0
  153. sage/tests/functools_partial_src.py +27 -0
  154. sage/tests/gosper-sum.py +218 -0
  155. sage/tests/lazy_imports.py +28 -0
  156. sage/tests/modular_group_cohomology.py +80 -0
  157. sage/tests/numpy.py +21 -0
  158. sage/tests/parigp.py +76 -0
  159. sage/tests/startup.py +27 -0
  160. sage/tests/symbolic-series.py +76 -0
  161. sage/tests/sympy.py +16 -0
  162. sage/tests/test_deprecation.py +31 -0
sage/doctest/reporting.py
@@ -0,0 +1,733 @@
+# sage_setup: distribution = sagemath-repl
+r"""
+Reporting doctest results
+
+This module determines how doctest results are reported to the user.
+
+It also computes the exit status in the ``error_status`` attribute of
+:class:`DocTestReporter`. This is a bitwise OR of the following bits:
+
+- 1: Doctest failure
+- 2: Bad command line syntax or invalid options
+- 4: Test timed out
+- 8: Test exited with nonzero status
+- 16: Test crashed with a signal (e.g. segmentation fault)
+- 32: TAB character found
+- 64: Internal error in the doctesting framework
+- 128: Testing interrupted, not all tests run
+- 256: Doctest contains explicit source line number
+
+AUTHORS:
+
+- David Roe (2012-03-27) -- initial version, based on Robert Bradshaw's code.
+"""
+
+# ****************************************************************************
+#       Copyright (C) 2012-2013 David Roe <roed.math@gmail.com>
+#                     2012      Robert Bradshaw <robertwb@gmail.com>
+#                     2012      William Stein <wstein@gmail.com>
+#                     2013      R. Andrew Ohana
+#                     2013      Jeroen Demeyer <jdemeyer@cage.ugent.be>
+#                     2013-2017 Volker Braun
+#                     2018      Julian Rüth
+#                     2018-2021 Sébastien Labbé
+#                     2020      Samuel Lelièvre
+#                     2022      Matthias Koeppe
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#                   https://www.gnu.org/licenses/
+# ****************************************************************************
+
+import re
+from sys import stdout
+from signal import (SIGABRT, SIGALRM, SIGBUS, SIGFPE, SIGHUP, SIGILL,
+                    SIGINT, SIGKILL, SIGPIPE, SIGQUIT, SIGSEGV, SIGTERM)
+from sage.structure.sage_object import SageObject
+from sage.doctest.util import count_noun
+from sage.doctest.sources import DictAsObject
+from .external import available_software
+
+def signal_name(sig):
+    """
+    Return a string describing a signal number.
+
+    EXAMPLES::
+
+        sage: from signal import SIGSEGV
+        sage: from sage.doctest.reporting import signal_name
+        sage: signal_name(SIGSEGV)
+        'segmentation fault'
+        sage: signal_name(9)
+        'kill signal'
+        sage: signal_name(12345)
+        'signal 12345'
+    """
+    if sig == SIGHUP:
+        return "hangup"
+    if sig == SIGINT:
+        return "interrupt"
+    if sig == SIGQUIT:
+        return "quit"
+    if sig == SIGILL:
+        return "illegal instruction"
+    if sig == SIGABRT:
+        return "abort"
+    if sig == SIGFPE:
+        return "floating point exception"
+    if sig == SIGKILL:
+        return "kill signal"
+    if sig == SIGSEGV:
+        return "segmentation fault"
+    if sig == SIGPIPE:
+        return "broken pipe"
+    if sig == SIGALRM:
+        return "alarm"
+    if sig == SIGTERM:
+        return "terminate"
+    if sig == SIGBUS:
+        return "bus error"
+    return "signal %s" % sig
+
+class DocTestReporter(SageObject):
+    """
+    This class reports to the users on the results of doctests.
+    """
+    def __init__(self, controller):
+        """
+        Initialize the reporter.
+
+        INPUT:
+
+        - ``controller`` -- a
+          :class:`sage.doctest.control.DocTestController` instance;
+          Note that some methods assume that appropriate tests have
+          been run by the controller
+
+        EXAMPLES::
+
+            sage: from sage.doctest.reporting import DocTestReporter
+            sage: from sage.doctest.control import DocTestController, DocTestDefaults
+            sage: filename = sage.doctest.reporting.__file__
+            sage: DC = DocTestController(DocTestDefaults(), [filename])
+            sage: DTR = DocTestReporter(DC)
+        """
+        self.controller = controller
+        self.postscript = {"lines": [], "cputime": 0, "walltime": 0}
+        self.sources_completed = 0
+        self.stats = {}
+        self.error_status = 0
+
+    def were_doctests_with_optional_tag_run(self, tag):
+        r"""
+        Return whether doctests marked with this tag were run.
+
+        INPUT:
+
+        - ``tag`` -- string
+
+        EXAMPLES::
+
+            sage: from sage.doctest.reporting import DocTestReporter
+            sage: from sage.doctest.control import DocTestController, DocTestDefaults
+            sage: filename = sage.doctest.reporting.__file__
+            sage: DC = DocTestController(DocTestDefaults(), [filename])
+            sage: DTR = DocTestReporter(DC)
+
+        ::
+
+            sage: DTR.were_doctests_with_optional_tag_run('sage')
+            True
+            sage: DTR.were_doctests_with_optional_tag_run('nice_unavailable_package')
+            False
+
+        When latex is available, doctests marked with optional tag
+        ``latex`` are run by default since :issue:`32174`::
+
+            sage: # needs SAGE_SRC
+            sage: filename = os.path.join(SAGE_SRC, 'sage', 'misc', 'latex.py')
+            sage: DC = DocTestController(DocTestDefaults(), [filename])
+            sage: DTR = DocTestReporter(DC)
+            sage: DTR.were_doctests_with_optional_tag_run('latex')  # optional - latex
+            True
+        """
+        if self.controller.options.optional is True or tag in self.controller.options.optional:
+            return True
+        if tag in available_software.seen():
+            return True
+        return False
+
+    def report_head(self, source, fail_msg=None):
+        """
+        Return the ``sage -t [options] file.py`` line as string.
+
+        INPUT:
+
+        - ``source`` -- a source from :mod:`sage.doctest.sources`
+
+        - ``fail_msg`` -- ``None`` or a string
+
+        EXAMPLES::
+
+            sage: from sage.doctest.reporting import DocTestReporter
+            sage: from sage.doctest.control import DocTestController, DocTestDefaults
+            sage: from sage.doctest.sources import FileDocTestSource
+            sage: from sage.doctest.forker import SageDocTestRunner
+            sage: filename = sage.doctest.reporting.__file__
+            sage: DD = DocTestDefaults()
+            sage: FDS = FileDocTestSource(filename, DD)
+            sage: DC = DocTestController(DD, [filename])
+            sage: DTR = DocTestReporter(DC)
+            sage: print(DTR.report_head(FDS))
+            sage -t .../sage/doctest/reporting.py
+
+        The same with various options::
+
+            sage: DD.long = True
+            sage: print(DTR.report_head(FDS))
+            sage -t --long .../sage/doctest/reporting.py
+            sage: print(DTR.report_head(FDS, "Failed by self-sabotage"))
+            sage -t --long .../sage/doctest/reporting.py # Failed by self-sabotage
+        """
+        cmd = "sage -t"
+        if self.controller.options.long:
+            cmd += " --long"
+
+        warnlong = self.controller.options.warn_long
+        if warnlong >= 0:
+            cmd += " --warn-long"
+            if warnlong != 1.0:
+                cmd += " %.1f" % (warnlong)
+        seed = self.controller.options.random_seed
+        cmd += " --random-seed={}".format(seed)
+        environment = self.controller.options.environment
+        if environment != "sage.repl.ipython_kernel.all_jupyter":
+            cmd += f" --environment={environment}"
+        cmd += " " + source.printpath
+        baseline = self.controller.source_baseline(source)
+        if fail_msg:
+            cmd += " # " + fail_msg
+        if failed := baseline.get('failed', False):
+            if not fail_msg:
+                cmd += " #"
+            if failed is True:
+                cmd += " [failed in baseline]"
+            else:
+                cmd += f" [failed in baseline: {failed}]"
+        return cmd
+
+    def _log_failure(self, source, fail_msg, event, output=None):
+        r"""
+        Report on the result of a failed doctest run.
+
+        INPUT:
+
+        - ``source`` -- a source from :mod:`sage.doctest.sources`
+
+        - ``fail_msg`` -- string
+
+        - ``event`` -- string
+
+        - ``output`` -- (optional) string
+
+        EXAMPLES::
+
+            sage: from sage.doctest.reporting import DocTestReporter
+            sage: from sage.doctest.control import DocTestController, DocTestDefaults
+            sage: from sage.doctest.sources import FileDocTestSource
+            sage: from sage.env import SAGE_SRC
+            sage: import os
+            sage: filename = os.path.join(SAGE_SRC, 'sage', 'doctest', 'reporting.py')
+            sage: DD = DocTestDefaults()
+            sage: FDS = FileDocTestSource(filename, DD)
+            sage: DC = DocTestController(DD,[filename])
+            sage: DTR = DocTestReporter(DC)
+            sage: DTR._log_failure(FDS, "Timed out", "process (pid=1234) timed out", "Output so far...")
+                Timed out
+            **********************************************************************
+            Tests run before process (pid=1234) timed out:
+            Output so far...
+            **********************************************************************
+        """
+        log = self.controller.log
+        format = self.controller.options.format
+        if format == 'sage':
+            stars = "*" * 70
+            log(f"    {fail_msg}\n{stars}\n")
+            if output:
+                log(f"Tests run before {event}:")
+                log(output)
+                log(stars)
+        elif format == 'github':
+            # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#using-workflow-commands-to-access-toolkit-functions
+            command = f'::error title={fail_msg}'
+            command += f',file={source.printpath}'
+            if output:
+                if m := re.search("## line ([0-9]+) ##\n-{40,100}\n(.*)", output, re.MULTILINE | re.DOTALL):
+                    lineno = m.group(1)
+                    message = m.group(2)
+                    command += f',line={lineno}'
+                else:
+                    message = output
+                # Urlencoding trick for multi-line annotations
+                # https://github.com/actions/starter-workflows/issues/68#issuecomment-581479448
+                message = message.replace('\n', '%0A')
+            else:
+                message = ""
+            command += f'::{message}'
+            log(command)
+        else:
+            raise ValueError(f'unknown format option: {format}')
+
+    def report(self, source, timeout, return_code, results, output, pid=None):
+        """
+        Report on the result of running doctests on a given source.
+
+        This doesn't print the :meth:`report_head`, which is assumed
+        to be printed already.
+
+        INPUT:
+
+        - ``source`` -- a source from :mod:`sage.doctest.sources`
+
+        - ``timeout`` -- boolean; whether doctests timed out
+
+        - ``return_code`` -- integer; the return code of the process
+          running doctests on that file
+
+        - ``results`` -- (irrelevant if ``timeout`` or ``return_code``) a tuple
+
+          - ``ntests`` -- the number of doctests
+
+          - ``timings`` -- a
+            :class:`sage.doctest.sources.DictAsObject` instance
+            storing timing data
+
+        - ``output`` -- string; printed if there was some kind of failure
+
+        - ``pid`` -- integer (default: ``None``); the pid of the worker process
+
+        EXAMPLES::
+
+            sage: from sage.doctest.reporting import DocTestReporter
+            sage: from sage.doctest.control import DocTestController, DocTestDefaults
+            sage: from sage.doctest.sources import FileDocTestSource, DictAsObject
+            sage: from sage.doctest.forker import SageDocTestRunner
+            sage: from sage.doctest.parsing import SageOutputChecker
+            sage: from sage.doctest.util import Timer
+            sage: import doctest
+            sage: filename = sage.doctest.reporting.__file__
+            sage: DD = DocTestDefaults()
+            sage: FDS = FileDocTestSource(filename, DD)
+            sage: DC = DocTestController(DD, [filename])
+            sage: DTR = DocTestReporter(DC)
+
+        You can report a timeout::
+
+            sage: DTR.report(FDS, True, 0, None, "Output so far...", pid=1234)
+                Timed out
+            **********************************************************************
+            Tests run before process (pid=1234) timed out:
+            Output so far...
+            **********************************************************************
+            sage: DTR.stats
+            {'sage.doctest.reporting': {'failed': True,
+                                        'ntests': 0,
+                                        'walltime': 1000000.0}}
+
+        Or a process that returned a bad exit code::
+
+            sage: DTR.report(FDS, False, 3, None, "Output before trouble")
+                Bad exit: 3
+            **********************************************************************
+            Tests run before process failed:
+            Output before trouble
+            **********************************************************************
+            sage: DTR.stats
+            {'sage.doctest.reporting': {'failed': True,
+                                        'ntests': 0,
+                                        'walltime': 1000000.0}}
+
+        Or a process that segfaulted::
+
+            sage: from signal import SIGSEGV
+            sage: DTR.report(FDS, False, -SIGSEGV, None, "Output before trouble")
+                Killed due to segmentation fault
+            **********************************************************************
+            Tests run before process failed:
+            Output before trouble
+            **********************************************************************
+            sage: DTR.stats
+            {'sage.doctest.reporting': {'failed': True,
+                                        'ntests': 0,
+                                        'walltime': 1000000.0}}
+
+        Report a timeout with results and a ``SIGKILL``::
+
+            sage: from signal import SIGKILL
+            sage: DTR.report(FDS, True, -SIGKILL, (1,None), "Output before trouble")
+                Timed out after testing finished (and interrupt failed)
+            **********************************************************************
+            Tests run before process timed out:
+            Output before trouble
+            **********************************************************************
+            sage: DTR.stats
+            {'sage.doctest.reporting': {'failed': True,
+                                        'ntests': 1,
+                                        'walltime': 1000000.0}}
+
+        This is an internal error since results is None::
+
+            sage: DTR.report(FDS, False, 0, None, "All output")
+                Error in doctesting framework (bad result returned)
+            **********************************************************************
+            Tests run before error:
+            All output
+            **********************************************************************
+            sage: DTR.stats
+            {'sage.doctest.reporting': {'failed': True,
+                                        'ntests': 1,
+                                        'walltime': 1000000.0}}
+
+        Or tell the user that everything succeeded::
+
+            sage: doctests, extras = FDS.create_doctests(globals())
+            sage: runner = SageDocTestRunner(
+            ....:     SageOutputChecker(), verbose=False, sage_options=DD,
+            ....:     optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS)
+            sage: Timer().start().stop().annotate(runner)
+            sage: D = DictAsObject({'err':None})
+            sage: runner.update_results(D)
+            0
+            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D),
+            ....:            "Good tests")
+                [... tests, ...s wall]
+            sage: DTR.stats
+            {'sage.doctest.reporting': {'ntests': ..., 'walltime': ...}}
+
+        Or inform the user that some doctests failed::
+
+            sage: runner.failures = 1
+            sage: runner.update_results(D)
+            1
+            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D),
+            ....:            "Doctest output including the failure...")
+                [... tests, 1 failure, ...s wall]
+
+        If the user has requested that we report on skipped doctests,
+        we do so::
+
+            sage: DC.options = DocTestDefaults(show_skipped=True)
+            sage: from collections import defaultdict
+            sage: optionals = defaultdict(int)
+            sage: optionals['magma'] = 5; optionals['long time'] = 4; optionals[''] = 1; optionals['not tested'] = 2
+            sage: D = DictAsObject(dict(err=None, optionals=optionals))
+            sage: runner.failures = 0
+            sage: runner.update_results(D)
+            0
+            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D), "Good tests")
+                1 unlabeled test not run
+                4 long tests not run
+                5 magma tests not run
+                2 not tested tests not run
+                0 tests not run because we ran out of time
+                [... tests, ...s wall]
+
+        Test an internal error in the reporter::
+
+            sage: DTR.report(None, None, None, None, None)
+            Traceback (most recent call last):
+            ...
+            AttributeError: 'NoneType' object has no attribute 'basename'...
+
+        The only-errors mode does not output anything on success::
+
+            sage: DD = DocTestDefaults(only_errors=True)
+            sage: FDS = FileDocTestSource(filename, DD)
+            sage: DC = DocTestController(DD, [filename])
+            sage: DTR = DocTestReporter(DC)
+            sage: doctests, extras = FDS.create_doctests(globals())
+            sage: runner = SageDocTestRunner(
+            ....:     SageOutputChecker(), verbose=False, sage_options=DD,
+            ....:     optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS)
+            sage: Timer().start().stop().annotate(runner)
+            sage: D = DictAsObject({'err':None})
+            sage: runner.update_results(D)
+            0
+            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D),
+            ....:            "Good tests")
+
+        However, failures are still output in the errors-only mode::
+
+            sage: runner.failures = 1
+            sage: runner.update_results(D)
+            1
+            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D),
+            ....:            "Failed test")
+                [... tests, 1 failure, ...s wall]
+        """
+        log = self.controller.log
+        process_name = 'process (pid={0})'.format(pid) if pid else 'process'
+        try:
+            postscript = self.postscript
+            stats = self.stats
+            basename = source.basename
+            baseline = self.controller.source_baseline(source)
+            cmd = self.report_head(source)
+            try:
+                ntests, result_dict = results
+            except (TypeError, ValueError):
+                ntests = 0
+                result_dict = DictAsObject({"err": 'badresult'})
+            if timeout:
+                fail_msg = "Timed out"
+                if ntests > 0:
+                    fail_msg += " after testing finished"
+                if return_code > 0:
+                    fail_msg += " (with error after interrupt)"
+                elif return_code < 0:
+                    sig = -return_code
+                    if sig == SIGQUIT:
+                        pass  # and interrupt succeeded
+                    elif sig == SIGKILL:
+                        fail_msg += " (and interrupt failed)"
+                    else:
+                        fail_msg += " (with %s after interrupt)" % signal_name(sig)
+                self._log_failure(source, fail_msg, f"{process_name} timed out", output)
+                postscript['lines'].append(self.report_head(source, fail_msg))
+                stats[basename] = {"failed": True, "walltime": 1e6, "ntests": ntests}
+                if not baseline.get('failed', False):
+                    self.error_status |= 4
+            elif return_code:
+                if return_code > 0:
+                    fail_msg = "Bad exit: %s" % return_code
+                else:
+                    fail_msg = "Killed due to %s" % signal_name(-return_code)
+                if ntests > 0:
+                    fail_msg += " after testing finished"
+                self._log_failure(source, fail_msg, f"{process_name} failed", output)
+                postscript['lines'].append(self.report_head(source, fail_msg))
+                stats[basename] = {"failed": True, "walltime": 1e6, "ntests": ntests}
+                if not baseline.get('failed', False):
+                    self.error_status |= (8 if return_code > 0 else 16)
+            else:
+                if hasattr(result_dict, 'walltime') and hasattr(result_dict.walltime, '__len__') and len(result_dict.walltime) > 0:
+                    wall = sum(result_dict.walltime) / len(result_dict.walltime)
+                else:
+                    wall = 1e6
+                if hasattr(result_dict, 'cputime') and hasattr(result_dict.cputime, '__len__') and len(result_dict.cputime) > 0:
+                    cpu = sum(result_dict.cputime) / len(result_dict.cputime)
+                else:
+                    cpu = 1e6
+                if result_dict.err == 'badresult':
+                    self._log_failure(source, "Error in doctesting framework (bad result returned)", "error", output)
+                    postscript['lines'].append(self.report_head(source, "Testing error: bad result"))
+                    self.error_status |= 64
+                elif result_dict.err == 'noresult':
+                    self._log_failure(source, "Error in doctesting framework (no result returned)", "error", output)
+                    postscript['lines'].append(self.report_head(source, "Testing error: no result"))
+                    self.error_status |= 64
+                elif result_dict.err == 'tab':
+                    if len(result_dict.tab_linenos) > 5:
+                        result_dict.tab_linenos[3:-1] = "..."
+                    tabs = " " + ",".join(result_dict.tab_linenos)
+                    if len(result_dict.tab_linenos) > 1:
+                        tabs = "s" + tabs
+                    log("    Error: TAB character found at line%s" % (tabs))
+                    postscript['lines'].append(self.report_head(source, "Tab character found"))
+                    self.error_status |= 32
+                elif result_dict.err == 'line_number':
+                    log("    Error: Source line number found")
+                    postscript['lines'].append(self.report_head(source, "Source line number found"))
+                    self.error_status |= 256
+                elif result_dict.err is not None:
+                    # This case should not occur
+                    if result_dict.err is True:
+                        fail_msg = "Error in doctesting framework"
+                    else:
+                        if hasattr(result_dict.err, '__name__'):
+                            err = result_dict.err.__name__
+                        else:
+                            err = repr(result_dict.err)
+                        fail_msg = "%s in doctesting framework" % err
+                    self._log_failure(source, fail_msg, "exception", output)
+                    postscript['lines'].append(self.report_head(source, fail_msg))
+                    if hasattr(result_dict, 'tb'):
+                        log(result_dict.tb)
+                    if hasattr(result_dict, 'walltime'):
+                        stats[basename] = {"failed": True, "walltime": wall, "ntests": ntests}
+                    else:
+                        stats[basename] = {"failed": True, "walltime": 1e6, "ntests": ntests}
+                    # This codepath is triggered by doctests that test some timeout
+                    # ("AlarmInterrupt in doctesting framework") or other signal handling
+                    # behavior. This is why we handle the baseline in this codepath,
+                    # in contrast to other "Error in doctesting framework" codepaths.
+                    if not baseline.get('failed', False):
+                        self.error_status |= 64
+                if result_dict.err is None or result_dict.err == 'tab':
+                    f = result_dict.failures
+                    if f:
+                        fail_msg = "%s failed" % (count_noun(f, "doctest"))
+                        postscript['lines'].append(self.report_head(source, fail_msg))
+                        if not baseline.get('failed', False):
+                            self.error_status |= 1
+                    if f or result_dict.err == 'tab':
+                        stats[basename] = {"failed": True, "walltime": wall, "ntests": ntests}
+                    else:
+                        stats[basename] = {"walltime": wall, "ntests": ntests}
+                    postscript['cputime'] += cpu
+                    postscript['walltime'] += wall
+
+                    try:
+                        optionals = result_dict.optionals
+                    except AttributeError:
+                        optionals = {}
+                    for tag in sorted(optionals):
+                        nskipped = optionals[tag]
+                        if tag == "long time":
+                            if not self.controller.options.long:
+                                if self.controller.options.show_skipped:
+                                    log("    %s not run" % (count_noun(nskipped, "long test")))
+                        elif tag == "not tested":
+                            if self.controller.options.show_skipped:
+                                log("    %s not run" % (count_noun(nskipped, "not tested test")))
+                        elif tag == "not implemented":
+                            if self.controller.options.show_skipped:
+                                log("    %s for not implemented functionality not run" % (count_noun(nskipped, "test")))
+                        else:
+                            if not self.were_doctests_with_optional_tag_run(tag):
+                                if tag == "bug":
+                                    if self.controller.options.show_skipped:
+                                        log("    %s not run due to known bugs" % (count_noun(nskipped, "test")))
+                                elif tag == "":
+                                    if self.controller.options.show_skipped:
+                                        log("    %s not run" % (count_noun(nskipped, "unlabeled test")))
+                                else:
+                                    if self.controller.options.show_skipped:
+                                        log("    %s not run" % (count_noun(nskipped, tag + " test")))
+
+                    nskipped = result_dict.walltime_skips
+                    if self.controller.options.show_skipped:
+                        log("    %s not run because we ran out of time" % (count_noun(nskipped, "test")))
+
+                    if nskipped != 0:
+                        # It would be nice to report "a/b tests run" instead of
+                        # the percentage that is printed here. However, it is
+                        # not clear how to pull out the actual part of "ntests"
+                        # that has been run for a variety of reasons, such as
+                        # the sig_on_count() tests, the possibility to run
+                        # tests multiple times, and some other unclear mangling
+                        # of these numbers that was not clear to the author.
+                        ntests_run = result_dict.tests
+                        total = "%d%% of tests run" % (round(100*ntests_run/float(ntests_run + nskipped)))
+                    else:
+                        total = count_noun(ntests, "test")
+                    if not (self.controller.options.only_errors and not f):
+                        log("    [%s, %s%.2fs wall]" % (total, "%s, " % (count_noun(f, "failure")) if f else "", wall))
+
+            self.sources_completed += 1
+
+        except Exception:
+            import traceback
+            log(traceback.format_exc(), end="")
+
+    def finalize(self):
+        """
+        Print out the postscript that summarizes the doctests that were run.
+
+        EXAMPLES:
+
+        First we have to set up a bunch of stuff::
+
+            sage: from sage.doctest.reporting import DocTestReporter
+            sage: from sage.doctest.control import DocTestController, DocTestDefaults
+            sage: from sage.doctest.sources import FileDocTestSource, DictAsObject
+            sage: from sage.doctest.forker import SageDocTestRunner
+            sage: from sage.doctest.parsing import SageOutputChecker
+            sage: from sage.doctest.util import Timer
+            sage: import doctest
+            sage: filename = sage.doctest.reporting.__file__
+            sage: DD = DocTestDefaults()
+            sage: FDS = FileDocTestSource(filename, DD)
+            sage: DC = DocTestController(DD, [filename])
+            sage: DTR = DocTestReporter(DC)
+
+        Now we pretend to run some doctests::
+
+            sage: DTR.report(FDS, True, 0, None, "Output so far...", pid=1234)
+                Timed out
+            **********************************************************************
+            Tests run before process (pid=1234) timed out:
+            Output so far...
+            **********************************************************************
+            sage: DTR.report(FDS, False, 3, None, "Output before bad exit")
+                Bad exit: 3
+            **********************************************************************
+            Tests run before process failed:
+            Output before bad exit
+            **********************************************************************
+            sage: doctests, extras = FDS.create_doctests(globals())
+            sage: runner = SageDocTestRunner(
+            ....:     SageOutputChecker(), verbose=False, sage_options=DD,
+            ....:     optionflags=doctest.NORMALIZE_WHITESPACE|doctest.ELLIPSIS)
+            sage: t = Timer().start().stop()
+            sage: t.annotate(runner)
+            sage: DC.timer = t
+            sage: D = DictAsObject({'err':None})
+            sage: runner.update_results(D)
+            0
+            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D),
+            ....:            "Good tests")
+                [... tests, ...s wall]
+            sage: runner.failures = 1
+            sage: runner.update_results(D)
+            1
+            sage: DTR.report(FDS, False, 0, (sum([len(t.examples) for t in doctests]), D),
+            ....:            "Doctest output including the failure...")
+                [... tests, 1 failure, ...s wall]
+
+        Now we can show the output of finalize::
+
+            sage: DC.sources = [None] * 4  # to fool the finalize method
+            sage: DTR.finalize()
+            ----------------------------------------------------------------------
+            sage -t .../sage/doctest/reporting.py # Timed out
+            sage -t .../sage/doctest/reporting.py # Bad exit: 3
+            sage -t .../sage/doctest/reporting.py # 1 doctest failed
+            ----------------------------------------------------------------------
+            Total time for all tests: 0.0 seconds
+                cpu time: 0.0 seconds
+                cumulative wall time: 0.0 seconds
+
+        If we interrupted doctests, then the number of files tested
+        will not match the number of sources on the controller::
+
+            sage: DC.sources = [None] * 6
+            sage: DTR.finalize()
+            <BLANKLINE>
+            ----------------------------------------------------------------------
+            sage -t .../sage/doctest/reporting.py # Timed out
+            sage -t .../sage/doctest/reporting.py # Bad exit: 3
+            sage -t .../sage/doctest/reporting.py # 1 doctest failed
+            Doctests interrupted: 4/6 files tested
+            ----------------------------------------------------------------------
+            Total time for all tests: 0.0 seconds
+                cpu time: 0.0 seconds
+                cumulative wall time: 0.0 seconds
+        """
+        log = self.controller.log
+        postscript = self.postscript
+        if self.sources_completed < len(self.controller.sources) * self.controller.options.global_iterations:
+            postscript['lines'].append("Doctests interrupted: %s/%s files tested" % (self.sources_completed, len(self.controller.sources)))
+            self.error_status |= 128
+        elif not postscript['lines']:
+            postscript['lines'].append("All tests passed!")
+        log('-' * 70)
+        log("\n".join(postscript['lines']))
+        log('-' * 70)
+        log("Total time for all tests: %.1f seconds" % self.controller.timer.walltime)
+        log("    cpu time: %.1f seconds" % postscript['cputime'])
+        log("    cumulative wall time: %.1f seconds" % postscript['walltime'])
+        stdout.flush()
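
A note on the ``error_status`` bits: as documented in the module docstring of ``sage/doctest/reporting.py`` above, the exit status of a doctest run is the bitwise OR of one bit per failure class, so a single integer encodes every kind of problem that occurred. A minimal standalone sketch of decoding such a status (the bit table is copied from the docstring; the ``describe_error_status`` helper is illustrative and not part of the package)::

    # Decode the bitwise-OR exit status documented in sage/doctest/reporting.py.
    ERROR_BITS = {
        1: "Doctest failure",
        2: "Bad command line syntax or invalid options",
        4: "Test timed out",
        8: "Test exited with nonzero status",
        16: "Test crashed with a signal (e.g. segmentation fault)",
        32: "TAB character found",
        64: "Internal error in the doctesting framework",
        128: "Testing interrupted, not all tests run",
        256: "Doctest contains explicit source line number",
    }

    def describe_error_status(status):
        """Return the human-readable reasons encoded in an exit status."""
        return [msg for bit, msg in ERROR_BITS.items() if status & bit]

    # A run that timed out (4) and was interrupted before all tests ran (128)
    # exits with status 4 | 128 == 132:
    print(describe_error_status(132))
    # ['Test timed out', 'Testing interrupted, not all tests run']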
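
Relatedly, ``report()`` distinguishes a plain bad exit from death by a signal using the sign of the worker's return code: by the usual POSIX/multiprocessing convention, an exit code of ``-signum`` means the process was killed by that signal, which is why ``DTR.report(FDS, False, -SIGSEGV, ...)`` in the doctests above prints "Killed due to segmentation fault". A minimal sketch of that classification, assuming POSIX signal numbers (the ``classify_return_code`` helper is illustrative, not part of the package)::

    from signal import Signals

    def classify_return_code(return_code):
        """Mirror report()'s branch on a worker process's return code."""
        if return_code == 0:
            return "ok"
        if return_code < 0:
            # Negative return code: the worker was killed by signal -return_code.
            return "Killed due to %s" % Signals(-return_code).name
        return "Bad exit: %s" % return_code

    print(classify_return_code(-11))  # Killed due to SIGSEGV
    print(classify_return_code(3))    # Bad exit: 3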
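
Finally, the ``github`` format in ``DocTestReporter._log_failure`` emits GitHub Actions workflow commands, and its urlencoding trick (replacing newlines with ``%0A``) is what lets a multi-line failure report survive as a single ``::error`` annotation. A minimal sketch of the same idea (the ``github_error_annotation`` helper is illustrative, not the package's API)::

    def github_error_annotation(title, file, message, line=None):
        """Build one ::error workflow command from a multi-line message."""
        command = "::error title={},file={}".format(title, file)
        if line is not None:
            command += ",line={}".format(line)
        # GitHub parses the annotation as a single line, so newlines in the
        # message body must be urlencoded as %0A.
        return command + "::" + message.replace("\n", "%0A")

    print(github_error_annotation(
        "Timed out", "sage/doctest/reporting.py",
        "Tests run before timeout:\nOutput so far..."))
    # ::error title=Timed out,file=sage/doctest/reporting.py::Tests run before timeout:%0AOutput so far...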