rbx.cp 0.5.0 (rbx_cp-0.5.0-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (164)
  1. rbx/__init__.py +0 -0
  2. rbx/annotations.py +127 -0
  3. rbx/autoenum.py +333 -0
  4. rbx/box/__init__.py +0 -0
  5. rbx/box/builder.py +77 -0
  6. rbx/box/cd.py +37 -0
  7. rbx/box/checkers.py +134 -0
  8. rbx/box/code.py +185 -0
  9. rbx/box/compile.py +56 -0
  10. rbx/box/conftest.py +42 -0
  11. rbx/box/contest/__init__.py +0 -0
  12. rbx/box/contest/build_contest_statements.py +347 -0
  13. rbx/box/contest/contest_package.py +76 -0
  14. rbx/box/contest/contest_utils.py +20 -0
  15. rbx/box/contest/main.py +179 -0
  16. rbx/box/contest/schema.py +155 -0
  17. rbx/box/contest/statements.py +82 -0
  18. rbx/box/creation.py +72 -0
  19. rbx/box/download.py +64 -0
  20. rbx/box/environment.py +345 -0
  21. rbx/box/extensions.py +26 -0
  22. rbx/box/generators.py +478 -0
  23. rbx/box/generators_test.py +63 -0
  24. rbx/box/main.py +449 -0
  25. rbx/box/package.py +316 -0
  26. rbx/box/packaging/boca/extension.py +27 -0
  27. rbx/box/packaging/boca/packager.py +245 -0
  28. rbx/box/packaging/contest_main.py +82 -0
  29. rbx/box/packaging/main.py +68 -0
  30. rbx/box/packaging/packager.py +117 -0
  31. rbx/box/packaging/polygon/packager.py +320 -0
  32. rbx/box/packaging/polygon/test.py +81 -0
  33. rbx/box/packaging/polygon/xml_schema.py +106 -0
  34. rbx/box/presets/__init__.py +503 -0
  35. rbx/box/presets/fetch.py +70 -0
  36. rbx/box/presets/lock_schema.py +20 -0
  37. rbx/box/presets/schema.py +59 -0
  38. rbx/box/schema.py +394 -0
  39. rbx/box/solutions.py +792 -0
  40. rbx/box/solutions_test.py +41 -0
  41. rbx/box/statements/__init__.py +0 -0
  42. rbx/box/statements/build_statements.py +359 -0
  43. rbx/box/statements/builders.py +375 -0
  44. rbx/box/statements/joiners.py +113 -0
  45. rbx/box/statements/latex.py +47 -0
  46. rbx/box/statements/latex_jinja.py +214 -0
  47. rbx/box/statements/schema.py +138 -0
  48. rbx/box/stresses.py +292 -0
  49. rbx/box/stressing/__init__.py +0 -0
  50. rbx/box/stressing/finder_parser.py +359 -0
  51. rbx/box/stressing/generator_parser.py +258 -0
  52. rbx/box/testcases.py +54 -0
  53. rbx/box/ui/__init__.py +0 -0
  54. rbx/box/ui/captured_log.py +372 -0
  55. rbx/box/ui/css/app.tcss +48 -0
  56. rbx/box/ui/main.py +38 -0
  57. rbx/box/ui/run.py +209 -0
  58. rbx/box/validators.py +245 -0
  59. rbx/box/validators_test.py +15 -0
  60. rbx/checker.py +128 -0
  61. rbx/clone.py +197 -0
  62. rbx/config.py +271 -0
  63. rbx/conftest.py +38 -0
  64. rbx/console.py +27 -0
  65. rbx/create.py +37 -0
  66. rbx/edit.py +24 -0
  67. rbx/grading/__init__.py +0 -0
  68. rbx/grading/caching.py +356 -0
  69. rbx/grading/conftest.py +33 -0
  70. rbx/grading/judge/__init__.py +0 -0
  71. rbx/grading/judge/cacher.py +503 -0
  72. rbx/grading/judge/digester.py +35 -0
  73. rbx/grading/judge/sandbox.py +748 -0
  74. rbx/grading/judge/sandboxes/__init__.py +0 -0
  75. rbx/grading/judge/sandboxes/isolate.py +683 -0
  76. rbx/grading/judge/sandboxes/stupid_sandbox.py +310 -0
  77. rbx/grading/judge/sandboxes/timeit.py +217 -0
  78. rbx/grading/judge/storage.py +284 -0
  79. rbx/grading/judge/test.py +38 -0
  80. rbx/grading/judge/testiso.py +54 -0
  81. rbx/grading/steps.py +522 -0
  82. rbx/grading/steps_with_caching.py +59 -0
  83. rbx/grading/steps_with_caching_run_test.py +429 -0
  84. rbx/grading_utils.py +148 -0
  85. rbx/hydration.py +101 -0
  86. rbx/main.py +122 -0
  87. rbx/metadata.py +105 -0
  88. rbx/providers/__init__.py +43 -0
  89. rbx/providers/codeforces.py +73 -0
  90. rbx/providers/provider.py +26 -0
  91. rbx/resources/checkers/boilerplate.cpp +20 -0
  92. rbx/resources/default_config.json +48 -0
  93. rbx/resources/envs/default.rbx.yml +37 -0
  94. rbx/resources/envs/isolate.rbx.yml +37 -0
  95. rbx/resources/packagers/boca/checker.sh +43 -0
  96. rbx/resources/packagers/boca/compare +53 -0
  97. rbx/resources/packagers/boca/compile/c +172 -0
  98. rbx/resources/packagers/boca/compile/cc +173 -0
  99. rbx/resources/packagers/boca/compile/cpp +172 -0
  100. rbx/resources/packagers/boca/compile/java +194 -0
  101. rbx/resources/packagers/boca/compile/kt +155 -0
  102. rbx/resources/packagers/boca/compile/pas +172 -0
  103. rbx/resources/packagers/boca/compile/py2 +173 -0
  104. rbx/resources/packagers/boca/compile/py3 +173 -0
  105. rbx/resources/packagers/boca/run/c +128 -0
  106. rbx/resources/packagers/boca/run/cc +128 -0
  107. rbx/resources/packagers/boca/run/cpp +128 -0
  108. rbx/resources/packagers/boca/run/java +194 -0
  109. rbx/resources/packagers/boca/run/kt +159 -0
  110. rbx/resources/packagers/boca/run/py2 +166 -0
  111. rbx/resources/packagers/boca/run/py3 +166 -0
  112. rbx/resources/presets/default/contest/contest.rbx.yml +14 -0
  113. rbx/resources/presets/default/contest/statement/contest.rbx.tex +97 -0
  114. rbx/resources/presets/default/contest/statement/olymp.sty +250 -0
  115. rbx/resources/presets/default/contest/statement/template.rbx.tex +42 -0
  116. rbx/resources/presets/default/preset.rbx.yml +12 -0
  117. rbx/resources/presets/default/problem/.gitignore +6 -0
  118. rbx/resources/presets/default/problem/gen.cpp +9 -0
  119. rbx/resources/presets/default/problem/problem.rbx.yml +44 -0
  120. rbx/resources/presets/default/problem/random.py +3 -0
  121. rbx/resources/presets/default/problem/random.txt +2 -0
  122. rbx/resources/presets/default/problem/sols/main.cpp +9 -0
  123. rbx/resources/presets/default/problem/sols/slow.cpp +15 -0
  124. rbx/resources/presets/default/problem/sols/wa.cpp +9 -0
  125. rbx/resources/presets/default/problem/statement/olymp.sty +250 -0
  126. rbx/resources/presets/default/problem/statement/projecao.png +0 -0
  127. rbx/resources/presets/default/problem/statement/statement.rbx.tex +18 -0
  128. rbx/resources/presets/default/problem/statement/template.rbx.tex +89 -0
  129. rbx/resources/presets/default/problem/tests/samples/000.in +1 -0
  130. rbx/resources/presets/default/problem/tests/samples/001.in +1 -0
  131. rbx/resources/presets/default/problem/validator.cpp +16 -0
  132. rbx/resources/presets/default/problem/wcmp.cpp +34 -0
  133. rbx/resources/templates/template.cpp +19 -0
  134. rbx/run.py +45 -0
  135. rbx/schema.py +64 -0
  136. rbx/submit.py +61 -0
  137. rbx/submitors/__init__.py +18 -0
  138. rbx/submitors/codeforces.py +120 -0
  139. rbx/submitors/submitor.py +25 -0
  140. rbx/test.py +347 -0
  141. rbx/testcase.py +70 -0
  142. rbx/testcase_rendering.py +79 -0
  143. rbx/testdata/box1/gen1.cpp +7 -0
  144. rbx/testdata/box1/gen2.cpp +9 -0
  145. rbx/testdata/box1/genScript.py +2 -0
  146. rbx/testdata/box1/hard-tle.sol.cpp +26 -0
  147. rbx/testdata/box1/ole.cpp +17 -0
  148. rbx/testdata/box1/problem.rbx.yml +39 -0
  149. rbx/testdata/box1/re.sol.cpp +23 -0
  150. rbx/testdata/box1/sol.cpp +22 -0
  151. rbx/testdata/box1/tests/1.in +1 -0
  152. rbx/testdata/box1/tle-and-incorrect.sol.cpp +33 -0
  153. rbx/testdata/box1/tle.sol.cpp +35 -0
  154. rbx/testdata/box1/validator.cpp +11 -0
  155. rbx/testdata/box1/wa.sol.cpp +22 -0
  156. rbx/testdata/caching/executable.py +1 -0
  157. rbx/testdata/compatible +0 -0
  158. rbx/testing_utils.py +65 -0
  159. rbx/utils.py +162 -0
  160. rbx_cp-0.5.0.dist-info/LICENSE +201 -0
  161. rbx_cp-0.5.0.dist-info/METADATA +89 -0
  162. rbx_cp-0.5.0.dist-info/RECORD +164 -0
  163. rbx_cp-0.5.0.dist-info/WHEEL +4 -0
  164. rbx_cp-0.5.0.dist-info/entry_points.txt +4 -0
rbx/box/solutions.py ADDED
@@ -0,0 +1,792 @@
+ from __future__ import generators
+
+ import collections
+ import dataclasses
+ import pathlib
+ import shutil
+ from collections.abc import Iterator
+ from typing import Dict, List, Optional, Set
+
+ import rich
+ import rich.live
+ import rich.table
+ from more_itertools import seekable
+ from pydantic import BaseModel
+
+ from rbx import console
+ from rbx.box import checkers, environment, package
+ from rbx.box.code import compile_item, run_item
+ from rbx.box.environment import EnvironmentSandbox, ExecutionConfig, VerificationLevel
+ from rbx.box.generators import generate_output_for_testcase, generate_standalone
+ from rbx.box.schema import (
+     ExpectedOutcome,
+     GeneratorCall,
+     Solution,
+     Testcase,
+     TestcaseGroup,
+ )
+ from rbx.box.testcases import find_built_testcases
+ from rbx.grading.steps import (
+     DigestOrDest,
+     DigestOrSource,
+     Evaluation,
+     Outcome,
+     TestcaseIO,
+     TestcaseLog,
+ )
+ from rbx.utils import StatusProgress, model_to_yaml
+
+ StructuredEvaluation = Dict[str, Dict[str, List[Optional[Evaluation]]]]
+
+
+ class EvaluationItem(BaseModel):
+     solution_index: int
+     group_name: str
+     testcase_index: int
+     eval: Evaluation
+
+
+ class GroupSkeleton(BaseModel):
+     name: str
+     testcases: List[Testcase]
+
+
+ class SolutionReportSkeleton(BaseModel):
+     solutions: List[Solution]
+     groups: List[GroupSkeleton]
+     group_first: bool
+
+     def find_group_skeleton(self, group_name: str) -> Optional[GroupSkeleton]:
+         groups = [group for group in self.groups if group.name == group_name]
+         if not groups:
+             return None
+         return groups[0]
+
+     def empty_structured_evaluation(self) -> StructuredEvaluation:
+         res: StructuredEvaluation = {}
+         for solution in self.solutions:
+             res[str(solution.path)] = {}
+             for group in self.groups:
+                 res[str(solution.path)][group.name] = [None for _ in group.testcases]
+         return res
+
+
+ @dataclasses.dataclass
+ class RunSolutionResult:
+     skeleton: SolutionReportSkeleton
+     items: Iterator[EvaluationItem]
+
+     def empty_structured_evaluation(self) -> StructuredEvaluation:
+         return self.skeleton.empty_structured_evaluation()
+
+
+ def is_fast(solution: Solution) -> bool:
+     # If solution has TLE tag, it is considered slow.
+     return not solution.outcome.match(Outcome.TIME_LIMIT_EXCEEDED)
+
+
+ def get_matching_solutions(expected_outcome: ExpectedOutcome) -> List[Solution]:
+     res = []
+     for solution in package.get_solutions():
+         if not solution.outcome.intersect(expected_outcome):
+             continue
+         res.append(solution)
+     return res
+
+
+ def compile_solutions(
+     progress: Optional[StatusProgress] = None,
+     tracked_solutions: Optional[Set[str]] = None,
+ ) -> Dict[pathlib.Path, str]:
+     pkg = package.find_problem_package_or_die()
+
+     compiled_solutions = {}
+
+     for solution in pkg.solutions:
+         if (
+             tracked_solutions is not None
+             and str(solution.path) not in tracked_solutions
+         ):
+             continue
+         if progress:
+             progress.update(f'Compiling solution [item]{solution.path}[/item]...')
+         try:
+             compiled_solutions[solution.path] = compile_item(solution)
+         except:
+             console.console.print(
+                 f'[error]Failed compiling solution [item]{solution.path}[/item].[/error]'
+             )
+             raise
+
+     return compiled_solutions
+
+
+ def _run_solution_on_testcase(
+     solution: Solution,
+     compiled_digest: str,
+     checker_digest: Optional[str],
+     testcase: Testcase,
+     output_dir: pathlib.Path,
+     testcase_index: int = 0,
+     verification: VerificationLevel = VerificationLevel.NONE,
+ ) -> Evaluation:
+     pkg = package.find_problem_package_or_die()
+     actual_sandbox = package.get_singleton_sandbox()
+
+     timelimit = pkg.timelimit_for_language(solution.language)
+
+     sandbox = EnvironmentSandbox()
+     sandbox.timeLimit = timelimit
+     if verification.value >= VerificationLevel.FULL.value:
+         # Use double TL.
+         sandbox.timeLimit = sandbox.timeLimit * 2
+     sandbox.wallTimeLimit = (
+         timelimit * 2 if actual_sandbox.use_soft_timeout() else sandbox.timeLimit
+     )
+     sandbox.memoryLimit = pkg.memorylimit_for_language(solution.language)
+     sandbox.fileSizeLimit = pkg.outputLimit
+     extra_config = ExecutionConfig(sandbox=sandbox)
+
+     output_path = output_dir / testcase.inputPath.with_suffix('.out').name
+     error_path = output_path.with_suffix('.err')
+     log_path = output_path.with_suffix('.log')
+     output_path.parent.mkdir(parents=True, exist_ok=True)
+
+     run_log = run_item(
+         solution,
+         DigestOrSource.create(compiled_digest),
+         stdin=DigestOrSource.create(testcase.inputPath),
+         stdout=DigestOrDest.create(output_path),
+         stderr=DigestOrDest.create(error_path),
+         extra_config=extra_config,
+     )
+
+     if checker_digest is not None:
+         checker_result = checkers.check(
+             checker_digest,
+             run_log,
+             testcase,
+             program_output=output_path,
+         )
+     else:
+         checker_result = checkers.check_with_no_output(run_log)
+
+     eval = Evaluation(
+         result=checker_result,
+         testcase=TestcaseIO(
+             index=testcase_index, input=testcase.inputPath, output=testcase.outputPath
+         ),
+         log=TestcaseLog(
+             **(run_log.model_dump() if run_log is not None else {}),
+             stdout_absolute_path=output_path.absolute(),
+             stderr_absolute_path=error_path.absolute(),
+             log_absolute_path=log_path.absolute(),
+         ),
+     )
+
+     log_path.write_text(model_to_yaml(eval))
+     return eval
+
+
+ def _run_solution(
+     solution: Solution,
+     compiled_digest: str,
+     checker_digest: Optional[str],
+     solution_index: int,
+     group_name: str,
+     progress: Optional[StatusProgress] = None,
+     verification: VerificationLevel = VerificationLevel.NONE,
+ ) -> Iterator[Evaluation]:
+     runs_dir = package.get_problem_runs_dir()
+
+     group = package.get_testgroup(group_name)
+     testcases = find_built_testcases(group)
+     for i, testcase in enumerate(testcases):
+         assert testcase.outputPath is not None
+         output_path = runs_dir / f'{solution_index}' / group.name
+
+         if progress:
+             progress.update(
+                 f'Running solution [item]{solution.path}[/item] on test [item]{group.name}[/item] / [item]{i}[/item]...'
+             )
+
+         yield _run_solution_on_testcase(
+             solution,
+             compiled_digest,
+             checker_digest,
+             testcase,
+             output_path,
+             testcase_index=i,
+             verification=verification,
+         )
+
+
+ def convert_list_of_solution_evaluations_to_dict(
+     items: Iterator[EvaluationItem],
+ ) -> List[Dict[str, List[Evaluation]]]:
+     pkg = package.find_problem_package_or_die()
+     res: List[Dict[str, List[Evaluation]]] = [
+         collections.defaultdict(list) for _ in pkg.solutions
+     ]
+
+     for item in items:
+         res[item.solution_index][item.group_name].append(item.eval)
+
+     return res
+
+
+ def _get_report_skeleton(
+     tracked_solutions: Optional[Set[str]] = None,
+     group_first: bool = False,
+     verification: VerificationLevel = VerificationLevel.NONE,
+ ) -> SolutionReportSkeleton:
+     pkg = package.find_problem_package_or_die()
+     solutions = [
+         sol
+         for sol in pkg.solutions
+         if verification.value >= VerificationLevel.ALL_SOLUTIONS.value or is_fast(sol)
+     ]
+     if tracked_solutions is not None:
+         solutions = [
+             solution
+             for solution in solutions
+             if str(solution.path) in tracked_solutions
+         ]
+
+     groups = []
+     for group in pkg.testcases:
+         testcases = find_built_testcases(group)
+         groups.append(GroupSkeleton(name=group.name, testcases=testcases))
+     return SolutionReportSkeleton(
+         solutions=solutions, groups=groups, group_first=group_first
+     )
+
+
+ def _produce_solution_items(
+     progress: Optional[StatusProgress] = None,
+     tracked_solutions: Optional[Set[str]] = None,
+     verification: VerificationLevel = VerificationLevel.NONE,
+     check: bool = True,
+     group_first: bool = False,
+ ) -> Iterator[EvaluationItem]:
+     pkg = package.find_problem_package_or_die()
+
+     checker_digest = checkers.compile_checker() if check else None
+     compiled_solutions = compile_solutions(
+         progress=progress, tracked_solutions=tracked_solutions
+     )
+
+     # Clear run directory and rely on cache to
+     # repopulate it.
+     runs_dir = package.get_problem_runs_dir()
+     shutil.rmtree(str(runs_dir), ignore_errors=True)
+     runs_dir.mkdir(parents=True, exist_ok=True)
+     solutions = list(
+         (i, sol)
+         for i, sol in enumerate(pkg.solutions)
+         if verification.value >= VerificationLevel.ALL_SOLUTIONS.value or is_fast(sol)
+     )
+     if tracked_solutions is not None:
+         solutions = [
+             (i, sol) for i, sol in solutions if str(sol.path) in tracked_solutions
+         ]
+
+     def yield_items(
+         solution_index: int, solution: Solution, group_name: str
+     ) -> Iterator[EvaluationItem]:
+         for i, eval in enumerate(
+             _run_solution(
+                 solution,
+                 compiled_solutions[solution.path],
+                 checker_digest,
+                 solution_index,
+                 group_name,
+                 progress=progress,
+                 verification=verification,
+             )
+         ):
+             yield EvaluationItem(
+                 solution_index=solution_index,
+                 group_name=group_name,
+                 testcase_index=i,
+                 eval=eval,
+             )
+
+     groups = pkg.testcases
+     if group_first:
+         for group in groups:
+             for i, solution in solutions:
+                 yield from yield_items(i, solution, group.name)
+         return
+
+     for i, solution in solutions:
+         for group in groups:
+             yield from yield_items(i, solution, group.name)
+
+
+ def run_solutions(
+     progress: Optional[StatusProgress] = None,
+     tracked_solutions: Optional[Set[str]] = None,
+     verification: VerificationLevel = VerificationLevel.NONE,
+     check: bool = True,
+     group_first: bool = False,
+ ) -> RunSolutionResult:
+     return RunSolutionResult(
+         skeleton=_get_report_skeleton(
+             tracked_solutions, group_first, verification=verification
+         ),
+         items=_produce_solution_items(
+             progress=progress,
+             tracked_solutions=tracked_solutions,
+             verification=verification,
+             check=check,
+             group_first=group_first,
+         ),
+     )
+
+
+ def _run_interactive_solutions(
+     tracked_solutions: Optional[Set[str]] = None,
+     verification: VerificationLevel = VerificationLevel.NONE,
+     generator: Optional[GeneratorCall] = None,
+     check: bool = True,
+ ) -> Iterator[EvaluationItem]:
+     pkg = package.find_problem_package_or_die()
+     main_solution = package.get_main_solution()
+     check = check and main_solution is not None
+
+     checker_digest = checkers.compile_checker() if check else None
+     compiled_solutions = compile_solutions(tracked_solutions=tracked_solutions)
+
+     main_solution_digest = None
+     if check and main_solution is not None:
+         try:
+             main_solution_digest = compile_item(main_solution)
+         except:
+             console.console.print(
+                 '[error]Failed compiling main solution. If you do not want to check against a main solution, run with --nocheck flag.[/error]'
+             )
+             raise
+
+     solutions = list(enumerate(pkg.solutions))
+     if tracked_solutions is not None:
+         solutions = [
+             (i, sol) for i, sol in solutions if str(sol.path) in tracked_solutions
+         ]
+
+     irun_dir = package.get_problem_runs_dir() / '.irun'
+     shutil.rmtree(str(irun_dir), ignore_errors=True)
+     irun_dir.mkdir(parents=True, exist_ok=True)
+     inputs_dir = irun_dir / 'inputs'
+     input_path = inputs_dir / '000.in'
+     output_path = input_path.with_suffix('.out')
+
+     if generator is not None:
+         expanded_call = generate_standalone(generator, input_path)
+         console.console.print(
+             f'Using input from generator call [item]{expanded_call.name} {expanded_call.args}[/item].'
+         )
+     else:
+         input = console.multiline_prompt('Testcase input')
+         input_path.write_text(input)
+     testcase = Testcase(inputPath=input_path, outputPath=output_path if check else None)
+
+     if main_solution_digest is not None:
+         # TODO: Add stderr path
+         generate_output_for_testcase(main_solution_digest, testcase)
+
+     for i, solution in solutions:
+         output_dir = irun_dir / f'{i}'
+
+         yield EvaluationItem(
+             solution_index=i,
+             group_name='irun',
+             testcase_index=0,
+             eval=_run_solution_on_testcase(
+                 solution,
+                 compiled_solutions[solution.path],
+                 checker_digest,
+                 testcase,
+                 output_dir,
+                 verification=verification,
+             ),
+         )
+
+
+ def run_and_print_interactive_solutions(
+     tracked_solutions: Optional[Set[str]] = None,
+     verification: VerificationLevel = VerificationLevel.NONE,
+     generator: Optional[GeneratorCall] = None,
+     check: bool = True,
+     print: bool = False,
+ ):
+     pkg = package.find_problem_package_or_die()
+     items = _run_interactive_solutions(
+         tracked_solutions=tracked_solutions,
+         verification=verification,
+         check=check,
+         generator=generator,
+     )
+
+     for item in items:
+         sol = pkg.solutions[item.solution_index]
+         _print_solution_header(sol, console.console)
+
+         stdout_path = item.eval.log.stdout_absolute_path
+         if print:
+             if (
+                 item.eval.testcase.output is not None
+                 and stdout_path is not None
+                 and stdout_path.is_file()
+             ):
+                 console.console.print(stdout_path.read_text())
+             else:
+                 console.console.print('[warning]Solution produced no output.[/warning]')
+         elif stdout_path is not None:
+             console.console.print(f'Output: {stdout_path}.')
+         console.console.print()
+
+
+ def get_outcome_style_verdict(outcome: Outcome) -> str:
+     if outcome == Outcome.ACCEPTED:
+         return 'green'
+     if outcome == Outcome.WRONG_ANSWER:
+         return 'red'
+     if outcome == Outcome.TIME_LIMIT_EXCEEDED:
+         return 'yellow'
+     if outcome == Outcome.RUNTIME_ERROR:
+         return 'lnumber'
+     if outcome == Outcome.MEMORY_LIMIT_EXCEEDED:
+         return 'cyan'
+     return 'magenta'
+
+
+ def get_testcase_markup_verdict(eval: Evaluation) -> str:
+     res = '✓'
+     if eval.result.outcome != Outcome.ACCEPTED:
+         res = '✗'
+     if eval.result.outcome == Outcome.TIME_LIMIT_EXCEEDED:
+         res = '⧖'
+     if eval.result.outcome == Outcome.RUNTIME_ERROR:
+         res = '✗'
+     style = get_outcome_style_verdict(eval.result.outcome)
+     res = f'[{style}]{res}[/{style}]'
+     # if eval.log.stdout_absolute_path:
+     #     output_path = eval.log.stdout_absolute_path.resolve()
+     #     output_link = f'file://{output_path}'
+     #     res = f'[link={output_link}]{res}[/link]'
+     return res
+
+
+ def _get_evals_time_in_ms(evals: List[Evaluation]) -> int:
+     return max(int((eval.log.time or 0.0) * 1000) for eval in evals)
+
+
+ def _get_evals_memory_in_mb(evals: List[Evaluation]) -> int:
+     return max(int(eval.log.memory or 0) // (1024 * 1024) for eval in evals)
+
+
+ def get_evals_formatted_time(evals: List[Evaluation]) -> str:
+     max_time = _get_evals_time_in_ms(evals)
+     return f'{max_time} ms'
+
+
+ def get_evals_formatted_memory(evals: List[Evaluation]) -> str:
+     max_memory = _get_evals_memory_in_mb(evals)
+     return f'{max_memory} MiB'
+
+
+ def _print_solution_outcome(
+     solution: Solution,
+     evals: List[Evaluation],
+     console: rich.console.Console,
+     verification: VerificationLevel = VerificationLevel.NONE,
+ ) -> bool:
+     pkg = package.find_problem_package_or_die()
+
+     bad_verdicts = set()
+     no_tle_bad_verdicts = set()
+     for eval in evals:
+         if eval.result.outcome != Outcome.ACCEPTED:
+             bad_verdicts.add(eval.result.outcome)
+         if (
+             eval.result.no_tle_outcome is not None
+             and eval.result.no_tle_outcome != Outcome.ACCEPTED
+         ):
+             no_tle_bad_verdicts.add(eval.result.no_tle_outcome)
+
+     unmatched_bad_verdicts = set(
+         v for v in bad_verdicts if not solution.outcome.match(v)
+     )
+     matched_bad_verdicts = bad_verdicts - unmatched_bad_verdicts
+     expected_outcome_is_bad = not solution.outcome.match(Outcome.ACCEPTED)
+
+     if unmatched_bad_verdicts or (expected_outcome_is_bad and not matched_bad_verdicts):
+         console.print('[error]FAILED[/error]', end=' ')
+     else:
+         console.print('[success]OK[/success]', end=' ')
+
+     console.print(f'Expected: {solution.outcome}', end='')
+
+     if unmatched_bad_verdicts:
+         unmatched_bad_verdicts_names = set(v.name for v in unmatched_bad_verdicts)
+         console.print(f', got: {" ".join(unmatched_bad_verdicts_names)}', end='')
+     elif expected_outcome_is_bad and not matched_bad_verdicts:
+         console.print(f', got: {Outcome.ACCEPTED.name}', end='')
+
+     console.print()
+     evals_time = _get_evals_time_in_ms(evals)
+     expected_outcome_is_tle = solution.outcome.match(Outcome.TIME_LIMIT_EXCEEDED)
+     if (
+         # Running verification with double TL.
+         verification.value >= VerificationLevel.FULL.value
+         # Solution expects a TLE.
+         and expected_outcome_is_tle
+         # A TLE (or similar) has happened.
+         and matched_bad_verdicts
+         # The solution has no other bad verdicts except for TLEs in double TL.
+         and not ((bad_verdicts | no_tle_bad_verdicts) - {Outcome.TIME_LIMIT_EXCEEDED})
+         # The solution passes in double TL.
+         and evals_time < pkg.timelimit_for_language(solution.language) * 2
+     ):
+         console.print(
+             '[yellow]WARNING[/yellow] The solution still passed in double TL.'
+         )
+     console.print(f'Time: {get_evals_formatted_time(evals)}')
+     console.print(f'Memory: {get_evals_formatted_memory(evals)}')
+     return len(unmatched_bad_verdicts) == 0
+
+
+ def _consume_and_key_evaluation_items(
+     items: Iterator[EvaluationItem],
+     skeleton: SolutionReportSkeleton,
+ ) -> Iterator[StructuredEvaluation]:
+     """
+     Consumes EvaluationItems from a run_solutions call and builds a
+     structured view of them, marking not-yet-processed entries as None.
+     """
+     pkg = package.find_problem_package_or_die()
+     res = skeleton.empty_structured_evaluation()
+
+     for item in items:
+         solution = pkg.solutions[item.solution_index]
+         res[str(solution.path)][item.group_name][item.testcase_index] = item.eval
+         yield res
+
+
+ def _print_solution_header(solution: Solution, console: rich.console.Console):
+     solutions = package.get_solutions()
+     solution_index = [
+         i for i, sol in enumerate(solutions) if sol.path == solution.path
+     ][0]
+     solution_testdir = package.get_problem_runs_dir() / f'{solution_index}'
+     console.print(f'[item]{solution.path}[/item]', end=' ')
+     console.print(f'({solution_testdir})')
+
+
+ def _print_timing(
+     console: rich.console.Console,
+     skeleton: SolutionReportSkeleton,
+     structured_evaluation: StructuredEvaluation,
+ ):
+     slowest_good = None
+     fastest_slow = None
+     for solution in skeleton.solutions:
+         evals_per_group = structured_evaluation[str(solution.path)]
+         all_evals = []
+         for evals in evals_per_group.values():
+             all_evals.extend([eval for eval in evals if eval is not None])
+         solution_time = _get_evals_time_in_ms(all_evals)
+         if solution.outcome.match(Outcome.ACCEPTED):
+             if slowest_good is None or solution_time > slowest_good:
+                 slowest_good = solution_time
+         if solution.outcome.is_slow():
+             if fastest_slow is None or solution_time < fastest_slow:
+                 fastest_slow = solution_time
+
+     if slowest_good is None and fastest_slow is None:
+         return
+
+     console.print('[status]Timing summary:[/status]')
+     if slowest_good is not None:
+         console.print(f'Slowest [success]OK[/success] solution: {slowest_good} ms')
+     if fastest_slow is not None:
+         console.print(f'Fastest [error]slow[/error] solution: {fastest_slow} ms')
+
+
+ def _render_detailed_group_table(
+     group: TestcaseGroup,
+     skeleton: SolutionReportSkeleton,
+     structured_evaluations: Iterator[StructuredEvaluation],
+     console: rich.console.Console,
+ ):
+     group_skeleton = skeleton.find_group_skeleton(group.name)
+     assert group_skeleton is not None
+
+     def generate_table(
+         structured_evaluation: StructuredEvaluation, group_name: str
+     ) -> rich.table.Table:
+         table = rich.table.Table()
+         for solution in skeleton.solutions:
+             table.add_column(f'[item]{solution.path}[/item]', justify='full')
+
+         evals_per_solution = collections.defaultdict(list)
+         for tc, _ in enumerate(group_skeleton.testcases):
+             row = []
+             for solution in skeleton.solutions:
+                 eval = structured_evaluation[str(solution.path)][group_name][tc]
+                 evals_per_solution[str(solution.path)].append(eval)
+                 if eval is None:
+                     row.append('...')
+                     continue
+                 verdict = get_testcase_markup_verdict(eval)
+                 time = get_evals_formatted_time([eval])
+                 row.append(f'{verdict} {time}')
+             table.add_row(*row)
+
+         if table.row_count > 0:
+             summary_row = []
+             for solution in skeleton.solutions:
+                 evals = evals_per_solution[str(solution.path)]
+                 non_null_evals = [eval for eval in evals if eval is not None]
+                 if not non_null_evals:
+                     summary_row.append('...')
+                     continue
+                 summary_row.append(' ' + get_evals_formatted_time(non_null_evals))
+             table.add_section()
+             table.add_row(*summary_row)
+         return table
+
+     with rich.live.Live(
+         generate_table(skeleton.empty_structured_evaluation(), group.name),
+         refresh_per_second=5,
+         console=console,
+     ) as live:
+         for _ in skeleton.solutions:
+             for _ in group_skeleton.testcases:
+                 structured_evaluation = next(structured_evaluations)
+                 live.update(generate_table(structured_evaluation, group.name))
+                 live.refresh()
+
+
+ def _print_detailed_run_report(
+     result: RunSolutionResult,
+     console: rich.console.Console,
+     structured_evaluations: Iterator[StructuredEvaluation],
+     timing: bool = True,
+ ):
+     structured_evaluations = seekable(structured_evaluations)
+     for group in result.skeleton.groups:
+         console.print(f'[bold][status]{group.name}[/status][/bold]')
+
+         _render_detailed_group_table(
+             package.get_testgroup(group.name),
+             result.skeleton,
+             structured_evaluations,
+             console,
+         )
+         continue
+
+     ok = True
+     structured_evaluations.seek(-1)
+     structured_evaluation = next(structured_evaluations)
+     for solution in result.skeleton.solutions:
+         all_evals = []
+         for evals in structured_evaluation[str(solution.path)].values():
+             all_evals.extend(evals)
+         _print_solution_header(solution, console)
+         cur_ok = _print_solution_outcome(
+             solution,
+             all_evals,
+             console,
+         )
+         ok = ok and cur_ok
+         console.print()
+
+     console.print()
+
+     if timing:
+         _print_timing(console, result.skeleton, structured_evaluation)
+     return ok
+
+
+ def print_run_report(
+     result: RunSolutionResult,
+     console: rich.console.Console,
+     verification: environment.VerificationParam,
+     detailed: bool = False,
+     timing: bool = True,
+ ) -> bool:
+     pkg = package.find_problem_package_or_die()
+     items = seekable(result.items)
+     structured_evaluations = _consume_and_key_evaluation_items(items, result.skeleton)
+     if detailed:
+         return _print_detailed_run_report(
+             result, console, structured_evaluations, timing=timing
+         )
+
+     assert not result.skeleton.group_first
+     # Since we're now streaming the evaluation results, the for-loop is a bit
+     # confusing. We must keep state across the iteration to understand whether
+     # we're seeing a new solution or a new testgroup.
+     ok = True
+     last_solution: Optional[Solution] = None
+     last_group: Optional[str] = None
+     test_index = 0
+     all_evals = []
+     group_evals = []
+
+     def print_last_solution():
+         nonlocal ok
+         if last_solution is None:
+             return
+         cur_ok = _print_solution_outcome(
+             last_solution,
+             all_evals,
+             console,
+             verification=VerificationLevel(verification),
+         )
+         console.print()
+         ok = ok and cur_ok
+
+     for item in items:
+         eval = item.eval
+         solution = pkg.solutions[item.solution_index]
+         is_new_solution = last_solution is None or solution.path != last_solution.path
+         is_new_group = is_new_solution or last_group != item.group_name
+         is_closing_group = last_group is not None and is_new_group
+
+         if is_closing_group:
+             console.print(f'({get_evals_formatted_time(group_evals)})', end='')
+             console.print()
+
+         if is_new_solution:
+             print_last_solution()
+             all_evals = []
+             last_solution = solution
+             _print_solution_header(last_solution, console)
+
+         if is_new_group:
+             group_evals = []
+             last_group = item.group_name
+             test_index = 0
+             console.print(f'[bold][status]{item.group_name}[/status][/bold]', end=' ')
+
+         all_evals.append(eval)
+         group_evals.append(eval)
+         console.print(f'{test_index}/', end='')
+         console.print(get_testcase_markup_verdict(eval), end=' ')
+
+         test_index += 1
+
+     console.print(
+         f'({get_evals_formatted_time(group_evals)}, {get_evals_formatted_memory(group_evals)})',
+         end=' ',
+     )
+     console.print()
+     print_last_solution()
+
+     items.seek(0)
+     _print_timing(console, result.skeleton, list(structured_evaluations)[-1])
+
+     return ok
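
For reference, the public surface of the new module is run_solutions, which lazily streams EvaluationItems, and print_run_report, which consumes and renders them. The following is a minimal driver sketch inferred solely from the signatures visible in this diff; the main() wrapper, the file name, and the choice of FULL verification are illustrative and are not part of the package.

# sketch.py -- hypothetical driver for rbx.box.solutions, based only on
# the signatures shown in this diff; not shipped with the wheel.
from rbx import console
from rbx.box import solutions
from rbx.box.environment import VerificationLevel

def main() -> None:
    # Must run inside a problem package directory, since the module calls
    # package.find_problem_package_or_die() internally.
    result = solutions.run_solutions(
        verification=VerificationLevel.FULL,  # verification run with double TL
        check=True,                           # compile and use the checker
        group_first=False,                    # iterate solution-major
    )
    # print_run_report consumes the streamed EvaluationItems and returns
    # False if any solution missed its expected outcome.
    ok = solutions.print_run_report(
        result,
        console.console,
        verification=VerificationLevel.FULL.value,  # VerificationParam appears int-valued
        detailed=False,
    )
    if not ok:
        raise SystemExit(1)

if __name__ == '__main__':
    main()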