rbx.cp 0.13.3-py3-none-any.whl → 0.13.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. rbx/annotations.py +5 -5
  2. rbx/box/checkers.py +26 -22
  3. rbx/box/cli.py +0 -4
  4. rbx/box/code.py +27 -80
  5. rbx/box/contest/build_contest_statements.py +16 -3
  6. rbx/box/contest/schema.py +1 -2
  7. rbx/box/environment.py +16 -6
  8. rbx/box/fields.py +25 -1
  9. rbx/box/generators.py +31 -5
  10. rbx/box/global_package.py +6 -2
  11. rbx/box/header.py +31 -11
  12. rbx/box/package.py +3 -15
  13. rbx/box/presets/__init__.py +2 -2
  14. rbx/box/schema.py +4 -25
  15. rbx/box/setter_config.py +11 -0
  16. rbx/box/solutions.py +12 -4
  17. rbx/box/statements/build_statements.py +5 -1
  18. rbx/box/statements/builders.py +7 -7
  19. rbx/box/statements/schema.py +11 -2
  20. rbx/box/tasks.py +9 -4
  21. rbx/box/testcase_utils.py +2 -0
  22. rbx/box/testing/__init__.py +0 -0
  23. rbx/box/testing/testing_package.py +246 -0
  24. rbx/box/testing/testing_preset.py +36 -0
  25. rbx/box/testing/testing_shared.py +81 -0
  26. rbx/box/ui/screens/run_explorer.py +0 -8
  27. rbx/box/ui/utils/run_ui.py +7 -3
  28. rbx/box/ui/widgets/test_output_box.py +1 -1
  29. rbx/box/validators.py +5 -2
  30. rbx/grading/caching.py +67 -16
  31. rbx/grading/judge/program.py +268 -0
  32. rbx/grading/judge/sandbox.py +30 -193
  33. rbx/grading/judge/sandboxes/stupid_sandbox.py +232 -241
  34. rbx/grading/judge/sandboxes/tee.py +31 -0
  35. rbx/grading/steps.py +87 -199
  36. rbx/grading/steps_with_caching.py +15 -6
  37. rbx/resources/presets/default/problem/problem.rbx.yml +0 -2
  38. rbx/resources/presets/default/shared/contest_template.rbx.tex +1 -1
  39. rbx/resources/presets/default/shared/problem_template.rbx.tex +5 -1
  40. rbx/resources/templates/rbx.h +43 -2
  41. rbx/testing_utils.py +8 -1
  42. rbx/utils.py +59 -1
  43. {rbx_cp-0.13.3.dist-info → rbx_cp-0.13.5.dist-info}/METADATA +2 -1
  44. {rbx_cp-0.13.3.dist-info → rbx_cp-0.13.5.dist-info}/RECORD +47 -67
  45. rbx/box/conftest.py +0 -42
  46. rbx/box/generators_test.py +0 -67
  47. rbx/box/lazy_importing_test.py +0 -25
  48. rbx/box/solutions_test.py +0 -47
  49. rbx/box/validators_test.py +0 -15
  50. rbx/checker.py +0 -128
  51. rbx/clone.py +0 -197
  52. rbx/conftest.py +0 -38
  53. rbx/create.py +0 -37
  54. rbx/edit.py +0 -24
  55. rbx/grading/conftest.py +0 -33
  56. rbx/grading/judge/sandboxes/isolate.py +0 -695
  57. rbx/grading/judge/testiso.py +0 -54
  58. rbx/grading/steps_with_caching_run_test.py +0 -707
  59. rbx/grading_utils.py +0 -148
  60. rbx/hydration.py +0 -101
  61. rbx/main.py +0 -118
  62. rbx/metadata.py +0 -105
  63. rbx/resources/envs/isolate.rbx.yml +0 -36
  64. rbx/resources/presets/default/problem/sols/slow.cpp +0 -15
  65. rbx/run.py +0 -45
  66. rbx/schema.py +0 -64
  67. rbx/submit.py +0 -61
  68. rbx/test.py +0 -349
  69. rbx/testcase.py +0 -70
  70. rbx/testcase_rendering.py +0 -79
  71. {rbx_cp-0.13.3.dist-info → rbx_cp-0.13.5.dist-info}/LICENSE +0 -0
  72. {rbx_cp-0.13.3.dist-info → rbx_cp-0.13.5.dist-info}/WHEEL +0 -0
  73. {rbx_cp-0.13.3.dist-info → rbx_cp-0.13.5.dist-info}/entry_points.txt +0 -0
rbx/test.py DELETED
@@ -1,349 +0,0 @@
- import atexit
- import pathlib
- import tempfile
- from typing import Dict, List, Optional
-
- import syncer
- from rich.columns import Columns
- from rich.panel import Panel
- from rich.progress import MofNCompleteColumn, Progress, SpinnerColumn
- from rich.text import Text
-
- from rbx import annotations, config, grading_utils, metadata, testcase_rendering
- from rbx.config import Language, get_config
- from rbx.console import console, multiline_prompt
- from rbx.grading import steps
- from rbx.grading.judge.sandbox import SandboxBase
- from rbx.grading.judge.sandboxes import stupid_sandbox
- from rbx.schema import DumpedProblem, Problem
-
-
- def get_testcase_index(path: pathlib.Path) -> int:
-     return int(path.stem.split('.')[-1])
-
-
- def get_testcases_io(
-     problem: DumpedProblem, root: pathlib.Path = pathlib.Path()
- ) -> List[steps.TestcaseIO]:
-     testcases_per_index: Dict[int, steps.TestcaseIO] = {}
-     for input_file in root.glob(f'{problem.code}.*.in'):
-         try:
-             index = get_testcase_index(input_file)
-         except ValueError:
-             continue
-         testcases_per_index[index] = steps.TestcaseIO(index=index, input=input_file)
-
-     for output_file in root.glob(f'{problem.code}.*.out'):
-         index = get_testcase_index(output_file)
-         try:
-             index = get_testcase_index(output_file)
-         except ValueError:
-             continue
-         if index in testcases_per_index:
-             testcases_per_index[index].output = output_file
-             continue
-         testcases_per_index[index] = steps.TestcaseIO(index=index, output=output_file)
-
-     return sorted(testcases_per_index.values(), key=lambda x: x.index)
-
-
- async def _run_testcases(
-     problem: Problem,
-     lang: Language,
-     lang_name: Optional[str],
-     sandbox: SandboxBase,
-     testcases: List[steps.TestcaseIO],
-     persist_root: pathlib.Path = pathlib.Path(),
- ) -> Dict[int, Optional[steps.TestcaseLog]]:
-     logs: Dict[int, Optional[steps.TestcaseLog]] = {}
-
-     # Ensure persist dir exists.
-     persist_root.mkdir(parents=True, exist_ok=True)
-
-     progress = Progress(
-         SpinnerColumn(),
-         *Progress.get_default_columns(),
-         MofNCompleteColumn(),
-         transient=True,
-     )
-     with progress:
-         for testcase in progress.track(testcases, description='Running testcases...'):
-             params = grading_utils.build_run_sandbox_params(
-                 problem, testcase.input is not None
-             )
-             artifacts = grading_utils.build_run_grading_artifacts(
-                 testcase, persist_root
-             )
-             run_log = await steps.run(
-                 lang.exec,
-                 params,
-                 sandbox,
-                 artifacts,
-                 metadata=steps.RunLogMetadata(language=lang_name),
-             )
-             if not run_log:
-                 logs[testcase.index] = None
-                 continue
-             logs[testcase.index] = steps.TestcaseLog(
-                 **run_log.__dict__,
-                 stdout_absolute_path=persist_root / f'stdout-{testcase.index}.txt',
-                 stderr_absolute_path=persist_root / f'stderr-{testcase.index}.txt',
-             )
-
-     return logs
-
-
- def _evaluate_testcases(
-     problem: DumpedProblem,
-     sandbox: SandboxBase,
-     testcases: List[steps.TestcaseIO],
-     testcase_logs: Dict[int, Optional[steps.TestcaseLog]],
-     persist_root: pathlib.Path = pathlib.Path(),
- ) -> List[steps.Evaluation]:
-     evaluations = []
-     artifacts = grading_utils.build_checker_run_grading_artifacts(
-         problem,
-         persist_root,
-     )
-     for testcase in testcases:
-         if testcase.index not in testcase_logs:
-             continue
-
-         log = testcase_logs[testcase.index]
-         evaluations.append(
-             steps.evaluate(
-                 sandbox,
-                 testcase,
-                 log,
-                 artifacts,
-                 should_use_python_checker=not problem.checker,
-             )
-         )
-
-     return evaluations
-
-
- def _pretty_print_output_on_panel(file: Optional[pathlib.Path], title: str) -> Panel:
-     if not file:
-         return Panel('[error]No file to read from.[/error]', title=title, expand=False)
-     return Panel(
-         testcase_rendering.render_from_file(file),
-         title=title,
-         expand=False,
-     )
-
-
- def _pretty_print_side_by_side(result: steps.Evaluation):
-     if not result.testcase.output:
-         return _pretty_print_output_on_panel(result.log.stdout_absolute_path, 'Output')
-     return Columns(
-         [
-             _pretty_print_output_on_panel(result.testcase.output, 'Expected'),
-             _pretty_print_output_on_panel(result.log.stdout_absolute_path, 'Actual'),
-         ],
-         equal=True,
-         expand=False,
-     )
-
-
- def _get_outcome_style(outcome: steps.Outcome) -> str:
-     if outcome == steps.Outcome.ACCEPTED:
-         return 'success'
-     if outcome == steps.Outcome.JUDGE_FAILED or outcome == steps.Outcome.INTERNAL_ERROR:
-         return 'warning'
-     return 'error'
-
-
- def _pretty_print_outcome_panel(
-     problem: DumpedProblem, eval: steps.Evaluation
- ) -> Panel:
-     result: steps.CheckerResult = eval.result
-     is_tle = result.outcome.is_slow() or (
-         problem.timeLimit and (eval.log.time or 0) * 1000 > problem.timeLimit
-     )
-
-     text = Text()
-     text.append('Outcome: ')
-     text.append(
-         result.outcome.value,
-         style=_get_outcome_style(result.outcome),
-     )
-     text.append(' ' * 4)
-     text.append('Time: ')
-     text.append(f'{eval.log.time:.2f}s', style='error' if is_tle else 'item')
-     text.append('\n')
-     if eval.testcase.input:
-         text.append(f'Input path: {eval.testcase.input.absolute()}')
-         text.append('\n')
-     if eval.testcase.output:
-         text.append(f'Expected path: {eval.testcase.output.absolute()}')
-         text.append('\n')
-     text.append(f'Answer path: {eval.log.stdout_absolute_path}')
-     return Panel(
-         text,
-         title=f'[bold]Testcase [item]#{eval.testcase.index}[/item]',
-         expand=False,
-     )
-
-
- def _pretty_print_evaluation_result(
-     problem: DumpedProblem,
-     eval: steps.Evaluation,
-     interactive: bool = False,
- ):
-     console.print(_pretty_print_outcome_panel(problem, eval))
-     if eval.result.outcome != steps.Outcome.ACCEPTED:
-         if interactive:
-             console.print(
-                 _pretty_print_output_on_panel(eval.log.stdout_absolute_path, 'Output')
-             )
-         else:
-             console.print(_pretty_print_side_by_side(eval))
-         if eval.result.message:
-             console.print(
-                 f'[error]Checker message:[/error] {eval.result.message.strip()}'
-             )
-     console.print()
-
-
- def pretty_print_summary(
-     problem: DumpedProblem,
-     lang: Language,
-     evals: List[steps.Evaluation],
-     root: pathlib.Path = pathlib.Path(),
- ):
-     submission_file = root / lang.get_submit_file(problem.code)
-     passed = sum(1 for eval in evals if eval.result.outcome == steps.Outcome.ACCEPTED)
-     total = len(evals)
-     console.print(f'Summary for problem [item]{problem.pretty_name()}[/item]:')
-
-     # Test summary.
-     text = Text()
-     text.append('Passed tests: ')
-     text.append(f'{passed}/{total}', style='success' if passed == total else 'error')
-     console.print(text)
-
-     console.print(f'Submission file: {submission_file.absolute()}')
-
-
- def pretty_print_evaluation_results(
-     problem: DumpedProblem,
-     evals: List[steps.Evaluation],
-     interactive: bool = False,
- ):
-     for eval in evals:
-         _pretty_print_evaluation_result(problem, eval, interactive=interactive)
-
-
- @syncer.sync
- async def main(
-     problem: annotations.Problem,
-     language: annotations.LanguageWithDefault = None,
-     keep_sandbox: bool = False,
-     interactive: bool = False,
-     index: Optional[annotations.TestcaseIndex] = None,
- ):
-     dumped_problem = metadata.find_problem_by_anything(problem)
-     if not dumped_problem:
-         console.print(
-             f'[error]Problem with identifier [item]{problem}[/item] not found.[/error]'
-         )
-         return
-
-     lang = get_config().get_language(language)
-     if not lang:
-         console.print(
-             f'[error]Language {language or get_config().defaultLanguage} not found in config. Please check your configuration.[/error]'
-         )
-         return
-
-     if interactive:
-         testcases = []
-         while True:
-             console.print(
-                 f'Providing IO for testcase [item]#{len(testcases)}[/item]...'
-             )
-             input = multiline_prompt('Testcase input')
-             if not input.strip():
-                 break
-             output = multiline_prompt('Testcase output')
-             input_path = pathlib.Path(tempfile.mktemp())
-             output_path = pathlib.Path(tempfile.mktemp())
-             input_path.write_text(input)
-             output_path.write_text(output)
-             testcases.append(
-                 steps.TestcaseIO(
-                     index=len(testcases), input=input_path, output=output_path
-                 )
-             )
-     else:
-         testcases = get_testcases_io(dumped_problem)
-
-     if index is not None:
-         testcases = [tc for tc in testcases if tc.index == index]
-
-     if not testcases:
-         console.print(
-             f'[error]No testcases found for the problem [item]{dumped_problem.pretty_name()}[/item].[/error]'
-         )
-         return
-
-     box = stupid_sandbox.StupidSandbox()
-     atexit.register(lambda: box.cleanup(delete=not keep_sandbox))
-     persist_root = config.get_empty_app_persist_path()
-
-     with console.status(
-         f'Preprocessing code for problem [item]{dumped_problem.pretty_name()}[/item] in language [item]{language or get_config().defaultLanguage}[/item]...'
-     ):
-         if lang.preprocess:
-             preprocess_cmds = grading_utils.build_preprocess_commands(
-                 dumped_problem, lang
-             )
-             sandbox_params = grading_utils.build_preprocess_sandbox_params()
-             artifacts = grading_utils.build_compile_grading_artifacts(
-                 dumped_problem, lang
-             )
-             if not steps.compile(preprocess_cmds, sandbox_params, box, artifacts):
-                 console.print(
-                     f'[error]Failed to preprocess problem [item]{dumped_problem.pretty_name()}[/item].[/error]'
-                 )
-                 return
-
-     with console.status(
-         f'Compiling checker for problem [item]{dumped_problem.pretty_name()}[/item]...'
-     ):
-         command = '/usr/bin/g++ -std=c++17 -o checker checker.cpp'
-         artifacts = grading_utils.build_checker_compile_grading_artifacts(
-             dumped_problem, persist_root
-         )
-         if dumped_problem.checker and not steps.compile(
-             [command], grading_utils.build_preprocess_sandbox_params(), box, artifacts
-         ):
-             console.print(
-                 f'[error]Failed to compile checker for problem [item]{dumped_problem.pretty_name()}[/item].[/error]'
-             )
-             return
-
-     testcase_logs = await _run_testcases(
-         dumped_problem, lang, language, box, testcases, persist_root
-     )
-
-     if not testcase_logs:
-         console.print(
-             f'[error]Failed to run testcases for problem [item]{dumped_problem.pretty_name()}[/item]. Sandbox probably crashed.[/error]'
-         )
-         return
-
-     with console.status(
-         f'Evaluating testcases for problem [item]{dumped_problem.pretty_name()}[/item]...'
-     ):
-         evals = _evaluate_testcases(
-             dumped_problem, box, testcases, testcase_logs, persist_root
-         )
-         if not evals:
-             console.print(
-                 f'[error]Failed to evaluate testcases for problem [item]{dumped_problem.pretty_name()}[/item].[/error]'
-             )
-             return
-     pretty_print_evaluation_results(dumped_problem, evals, interactive=interactive)
-     pretty_print_summary(dumped_problem, lang, evals)
rbx/testcase.py DELETED
@@ -1,70 +0,0 @@
- import pathlib
-
- import typer
-
- from rbx import annotations, hydration, metadata
- from rbx.console import console, multiline_prompt
- from rbx.schema import Testcase
-
- app = typer.Typer()
-
-
- @app.command()
- def hydrate(problem: annotations.ProblemOption = None):
-     """
-     Populate all samples of a problem (or of all problems in the folder).
-     """
-     hydration.main(problem=problem)
-
-
- @app.command('add, a')
- def add(problem: annotations.Problem):
-     """
-     Add a testcase to a problem.
-     """
-     dumped_problem = metadata.find_problem_by_anything(problem)
-     if dumped_problem is None:
-         console.print(f'[error]Problem [item]{problem}[/item] not found.[/error]')
-         return
-
-     input = multiline_prompt('Testcase input')
-     output = multiline_prompt('Testcase output')
-
-     hydration.add_testcase(
-         pathlib.Path(), dumped_problem, Testcase(input=input, output=output)
-     )
-
-
- @app.command('delete, d')
- def delete(
-     problem: annotations.Problem,
-     i: annotations.TestcaseIndex,
- ):
-     """
-     Remove the i-th testcase from a problem.
-     """
-     if i is None:
-         console.print(f'[error]Index [item]{i}[/item] is invalid.[/error]')
-         return
-     dumped_problem = metadata.find_problem_by_anything(problem)
-     if dumped_problem is None:
-         console.print(f'[error]Problem [item]{problem}[/item] not found.[/error]')
-         return
-
-     hydration.remove_testcase(pathlib.Path(), dumped_problem, i)
-
-
- @app.command('edit, e')
- def edit(problem: annotations.Problem, i: annotations.TestcaseIndex):
-     """
-     Edit the testcases of a problem.
-     """
-     if i is None:
-         console.print(f'[error]Index [item]{i}[/item] is invalid.[/error]')
-         return
-     dumped_problem = metadata.find_problem_by_anything(problem)
-     if dumped_problem is None:
-         console.print(f'[error]Problem [item]{problem}[/item] not found.[/error]')
-         return
-
-     hydration.edit_testcase(pathlib.Path(), dumped_problem, i)
rbx/testcase_rendering.py DELETED
@@ -1,79 +0,0 @@
- import dataclasses
- import pathlib
- import string
- from typing import List, Tuple
-
- from rich.text import Text
-
-
- @dataclasses.dataclass
- class TruncatedOutput:
-     truncate: bool = False
-     lines: List[Tuple[int, str]] = dataclasses.field(default_factory=list)
-
-
- def split_and_truncate_in_lines(
-     s: str, max_line_length: int = 64, max_lines: int = 30
- ) -> TruncatedOutput:
-     lines: List[Tuple[int, str]] = []
-     current_line = []
-     current_line_idx = 1
-
-     def end_line(wrap: bool = False):
-         nonlocal current_line, current_line_idx
-         lines.append((current_line_idx, ''.join(current_line)))
-         current_line = []
-         if not wrap:
-             current_line_idx += 1
-
-     printable = set(string.printable)
-     truncate = False
-     for c in s:
-         if c == '\n':
-             end_line()
-             continue
-         if c not in printable:
-             # TODO: handle better
-             continue
-         if len(current_line) >= max_line_length:
-             end_line(wrap=True)
-         if current_line_idx > max_lines:
-             truncate = True
-             break
-         current_line.append(c)
-
-     if current_line:
-         end_line()
-
-     return TruncatedOutput(truncate=truncate, lines=lines)
-
-
- def _largest_line_number_length(lines: List[Tuple[int, str]]) -> int:
-     return max([len(str(line[0])) for line in lines] + [1])
-
-
- def render(s: str):
-     truncated = split_and_truncate_in_lines(s)
-     number_len = _largest_line_number_length(truncated.lines)
-
-     text = Text()
-
-     last_number = 0
-     for line in truncated.lines:
-         number, content = line
-         number_str = '' if last_number == number else str(number)
-         text.append(f'{number_str:>{number_len}}', style='lnumber')
-         text.append(' ' * 3)
-         text.append(content)
-         text.append('\n')
-
-         last_number = number
-     if truncated.truncate:
-         text.append(f"{'':>{number_len}}", style='lnumber')
-         text.append(' ' * 3)
-         text.append('... (truncated)')
-     return text
-
-
- def render_from_file(file: pathlib.Path):
-     return render(file.read_text())