rbx.cp 0.5.18.tar.gz → 0.5.20.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/PKG-INFO +1 -1
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/pyproject.toml +1 -1
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/builder.py +1 -1
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/code.py +13 -1
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/contest/build_contest_statements.py +1 -1
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/contest/main.py +1 -1
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/environment.py +1 -1
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/generators.py +10 -5
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/main.py +5 -3
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/package.py +6 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/packaging/main.py +1 -1
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/schema.py +16 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/solutions.py +212 -40
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/statements/build_statements.py +1 -1
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/stresses.py +3 -3
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/steps.py +12 -8
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/LICENSE +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/README.md +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/__init__.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/annotations.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/autoenum.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/__init__.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/cd.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/checkers.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/compile.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/conftest.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/contest/__init__.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/contest/contest_package.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/contest/contest_utils.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/contest/schema.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/contest/statements.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/creation.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/deferred.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/download.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/extensions.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/generators_test.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/packaging/boca/extension.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/packaging/boca/packager.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/packaging/contest_main.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/packaging/packager.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/packaging/polygon/packager.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/packaging/polygon/test.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/packaging/polygon/xml_schema.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/presets/__init__.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/presets/fetch.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/presets/lock_schema.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/presets/schema.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/sanitizers/warning_stack.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/setter_config.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/solutions_test.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/statements/__init__.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/statements/builders.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/statements/joiners.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/statements/latex.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/statements/latex_jinja.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/statements/schema.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/stressing/__init__.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/stressing/finder_parser.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/stressing/generator_parser.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/testcases.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/ui/__init__.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/ui/captured_log.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/ui/css/app.tcss +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/ui/main.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/ui/run.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/validators.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/box/validators_test.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/checker.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/clone.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/config.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/conftest.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/console.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/create.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/edit.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/__init__.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/caching.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/conftest.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/judge/__init__.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/judge/cacher.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/judge/digester.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/judge/sandbox.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/judge/sandboxes/__init__.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/judge/sandboxes/isolate.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/judge/sandboxes/stupid_sandbox.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/judge/sandboxes/timeit.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/judge/storage.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/judge/test.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/judge/testiso.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/steps_with_caching.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading/steps_with_caching_run_test.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/grading_utils.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/hydration.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/main.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/metadata.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/providers/__init__.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/providers/codeforces.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/providers/provider.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/checkers/boilerplate.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/default_config.json +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/default_setter_config.mac.yml +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/default_setter_config.yml +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/envs/default.rbx.yml +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/envs/isolate.rbx.yml +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/checker.sh +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/compare +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/compile/c +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/compile/cc +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/compile/cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/compile/java +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/compile/kt +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/compile/pas +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/compile/py2 +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/compile/py3 +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/run/c +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/run/cc +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/run/cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/run/java +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/run/kt +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/run/py2 +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/packagers/boca/run/py3 +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/contest/contest.rbx.yml +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/contest/statement/contest.rbx.tex +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/contest/statement/olymp.sty +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/contest/statement/template.rbx.tex +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/preset.rbx.yml +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/.gitignore +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/gen.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/problem.rbx.yml +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/random.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/random.txt +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/sols/main.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/sols/slow.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/sols/wa.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/statement/olymp.sty +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/statement/projecao.png +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/statement/statement.rbx.tex +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/statement/template.rbx.tex +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/tests/samples/000.in +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/tests/samples/001.in +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/validator.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/presets/default/problem/wcmp.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/resources/templates/template.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/run.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/schema.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/submit.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/submitors/__init__.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/submitors/codeforces.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/submitors/submitor.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/test.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testcase.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testcase_rendering.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/box1/gen1.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/box1/gen2.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/box1/genScript.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/box1/hard-tle.sol.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/box1/ole.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/box1/problem.rbx.yml +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/box1/re.sol.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/box1/sol.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/box1/tests/1.in +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/box1/tle-and-incorrect.sol.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/box1/tle.sol.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/box1/validator.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/box1/wa.sol.cpp +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/caching/executable.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testdata/compatible +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/testing_utils.py +0 -0
- {rbx_cp-0.5.18 → rbx_cp-0.5.20}/rbx/utils.py +0 -0
@@ -106,6 +106,13 @@ def compile_item(
     verbose: bool = False,
 ) -> str:
     generator_path = PosixPath(code.path)
+
+    if not generator_path.is_file():
+        console.console.print(
+            f'[error]Compilation file not found: [item]{generator_path}[/item][/error]'
+        )
+        raise typer.Exit(1)
+
     language = find_language_name(code)
     compilation_options = get_compilation_config(language)
     file_mapping = get_file_mapping(language)
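The new guard follows the CLI's usual error pattern: print a rich-markup message and abort with `typer.Exit`. A minimal standalone sketch of the same pattern (the plain `rich` console below is illustrative; rbx routes output through its own `console` module):

```python
import pathlib

import typer
from rich.console import Console

console = Console()


def ensure_compilable(path: pathlib.Path) -> None:
    # Same shape as the new check in compile_item: fail fast before invoking the compiler.
    if not path.is_file():
        console.print(f'[red]Compilation file not found: {path}[/red]')
        raise typer.Exit(1)
```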
@@ -281,7 +288,12 @@ def run_item(
         sandbox=sandbox,
         artifacts=artifacts,
         dependency_cache=dependency_cache,
-        metadata=RunLogMetadata(
+        metadata=RunLogMetadata(
+            language=code.language,
+            is_sanitized=sanitized,
+            timeLimit=sandbox_params.timeout,
+            memoryLimit=sandbox_params.address_space,
+        ),
     )

     # Find sanitizer logs.
@@ -343,6 +343,6 @@ def build_statement(
     console.console.print(
         f'Statement built successfully for language '
         f'[item]{statement.language}[/item] at '
-        f'[item]{statement_path}[/item]
+        f'[item]{statement_path.resolve()}[/item]'
     )
     return statement_path
@@ -59,7 +59,7 @@ def create(
     fetch_info = get_preset_fetch_info(preset)
     if fetch_info is None:
         console.console.print(
-            f'[error]Invalid preset name/URI [item]{preset}[/item]
+            f'[error]Invalid preset name/URI [item]{preset}[/item][/error]'
         )
         raise typer.Exit(1)

@@ -221,7 +221,7 @@ def install_environment(name: str, file: pathlib.Path):
     get_environment_path(name).parent.mkdir(parents=True, exist_ok=True)
     get_environment_path(name).write_bytes(file.read_bytes())
     console.console.print(
-        f'[success]Environment [item]{name}[/item] was installed from [item]{file}[/item]
+        f'[success]Environment [item]{name}[/item] was installed from [item]{file}[/item]'
     )


@@ -149,13 +149,11 @@ def generate_output_for_testcase(
         console.console.print(
             f'[warning]Message: [info]{checker_result.message}[/info][/warning]',
         )
+        console.console.print(f'Input written at [item]{testcase.inputPath}[/item]')
         console.console.print(
-            f'
+            f'Output written at [item]{testcase.outputPath}[/item]'
         )
-        console.console.print(
-            f'Output written at [item]{testcase.outputPath}[/item].'
-        )
-        console.console.print(f'Stderr written at [item]{stderr_path}[/item].')
+        console.console.print(f'Stderr written at [item]{stderr_path}[/item]')
         raise typer.Exit(1)


@@ -206,6 +204,13 @@ def generate_outputs_for_testcases(

 def _run_generator_script(testcase: TestcaseSubgroup, cacher: FileCacher) -> str:
     assert testcase.generatorScript is not None
+
+    if not testcase.generatorScript.path.is_file():
+        console.console.print(
+            f'[error]Generator script not found: [item]{testcase.generatorScript.path}[/item][/error]'
+        )
+        raise typer.Exit(1)
+
     script_digest = DigestHolder()
     if testcase.generatorScript.path.suffix == '.txt':
         script_digest.value = cacher.put_file_from_path(testcase.generatorScript.path)
@@ -215,8 +215,9 @@ def run(
         print_run_report(
             solution_result,
             console.console,
-            verification,
+            VerificationLevel(verification),
             detailed=detailed,
+            skip_printing_limits=sanitized,
         )
     )

@@ -250,8 +251,9 @@ def _time_impl(check: bool, detailed: bool) -> Optional[int]:
         print_run_report(
             solution_result,
             console.console,
-            verification,
+            VerificationLevel(verification),
             detailed=detailed,
+            skip_printing_limits=True,
         )
     )

@@ -540,7 +542,7 @@ def stress(
             f.write(f'\n{stress_text}\n{finding_text}\n')

             console.console.print(
-                f"Added [item]{len(report.findings)}[/item] tests to test group [item]{testgroup}[/item]'s generatorScript at [item]{subgroup.generatorScript.path}[/item]
+                f"Added [item]{len(report.findings)}[/item] tests to test group [item]{testgroup}[/item]'s generatorScript at [item]{subgroup.generatorScript.path}[/item]"
             )
         except typer.Exit:
             continue
@@ -152,6 +152,12 @@ def get_problem_runs_dir(root: pathlib.Path = pathlib.Path()) -> pathlib.Path:
     return runs_dir


+def get_problem_iruns_dir(root: pathlib.Path = pathlib.Path()) -> pathlib.Path:
+    iruns_dir = get_problem_runs_dir(root) / '.irun'
+    iruns_dir.mkdir(parents=True, exist_ok=True)
+    return iruns_dir
+
+
 @functools.cache
 def get_cache_storage(root: pathlib.Path = pathlib.Path()) -> Storage:
     return FilesystemStorage(get_problem_storage_dir(root))
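`get_problem_iruns_dir` mirrors `get_problem_runs_dir`, nesting interactive-run artifacts under a `.irun` subdirectory created on demand. A standalone sketch of the same pattern with plain `pathlib` (the `build/runs` layout below is illustrative, not necessarily the package's real layout):

```python
import pathlib


def runs_dir(root: pathlib.Path) -> pathlib.Path:
    # Stand-in for package.get_problem_runs_dir(); the real helper resolves the problem root.
    d = root / 'build' / 'runs'
    d.mkdir(parents=True, exist_ok=True)
    return d


def iruns_dir(root: pathlib.Path) -> pathlib.Path:
    # Same shape as the new get_problem_iruns_dir: a '.irun' folder under the runs dir.
    d = runs_dir(root) / '.irun'
    d.mkdir(parents=True, exist_ok=True)
    return d


print(iruns_dir(pathlib.Path('/tmp/demo-problem')))  # /tmp/demo-problem/build/runs/.irun
```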
@@ -50,7 +50,7 @@ def run_packager(
     console.console.print(
         f'[success]Problem packaged for [item]{packager.name()}[/item]![/success]'
     )
-    console.console.print(f'Package was saved at [item]{result_path}[/item]
+    console.console.print(f'Package was saved at [item]{result_path.resolve()}[/item]')
     return result_path


@@ -293,6 +293,22 @@ class Stress(BaseModel):
     )


+class Limits(BaseModel):
+    time: Optional[int] = Field(
+        None, description='Value to override time limit with, in milliseconds.'
+    )
+    memory: Optional[int] = Field(
+        None, description='Value to override memory limit with, in MB.'
+    )
+    output: Optional[int] = Field(
+        None, description='Value to override output limit with, in KB.'
+    )
+
+    isDoubleTL: bool = Field(
+        False, description='Whether to use double TL for this language.'
+    )
+
+
 class LimitModifiers(BaseModel):
     timeMultiplier: Optional[float] = Field(
         None, description='Multiplier for time limit.'
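`Limits` is a plain Pydantic model, so per-language overrides can be constructed and inspected directly; the solution runner keys them by language name (see the `Dict[str, Limits]` field added to `SolutionReportSkeleton` further down). A small sketch, assuming only the fields shown in the hunk above:

```python
from typing import Optional

from pydantic import BaseModel, Field


class Limits(BaseModel):
    # Field set mirrors the hunk above: ms / MB / KB, plus a double-TL flag.
    time: Optional[int] = Field(None, description='Time limit override, in milliseconds.')
    memory: Optional[int] = Field(None, description='Memory limit override, in MB.')
    output: Optional[int] = Field(None, description='Output limit override, in KB.')
    isDoubleTL: bool = Field(False, description='Whether to use double TL for this language.')


# Per-language override map, in the shape the runner consumes it.
limits = {'cpp': Limits(time=1000, memory=256, isDoubleTL=True)}
print(limits['cpp'])  # time=1000 memory=256 output=None isDoubleTL=True
```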
@@ -5,22 +5,29 @@ import dataclasses
 import pathlib
 import shutil
 from collections.abc import Iterator
-from typing import Dict, Iterable, List, Optional, Set
+from typing import Dict, Iterable, List, Optional, Set, Tuple

 import rich
 import rich.live
+import rich.markup
 import rich.table
+import rich.text
 from pydantic import BaseModel

 from rbx import console
-from rbx.box import checkers,
+from rbx.box import checkers, package
 from rbx.box.code import SanitizationLevel, compile_item, find_language_name, run_item
 from rbx.box.deferred import Deferred
-from rbx.box.environment import
+from rbx.box.environment import (
+    EnvironmentSandbox,
+    ExecutionConfig,
+    VerificationLevel,
+)
 from rbx.box.generators import generate_output_for_testcase, generate_standalone
 from rbx.box.schema import (
     ExpectedOutcome,
     GeneratorCall,
+    Limits,
     Solution,
     Testcase,
     TestcaseGroup,
@@ -55,6 +62,7 @@ class GroupSkeleton(BaseModel):
 class SolutionReportSkeleton(BaseModel):
     solutions: List[Solution]
     groups: List[GroupSkeleton]
+    limits: Dict[str, Limits]

     def find_group_skeleton(self, group_name: str) -> Optional[GroupSkeleton]:
         groups = [group for group in self.groups if group.name == group_name]
@@ -135,6 +143,20 @@ def compile_solutions(
     return compiled_solutions


+def get_limits_for_language(
+    lang: Optional[str],
+    verification: VerificationLevel,
+    timelimit_override: Optional[int],
+) -> Limits:
+    pkg = package.find_problem_package_or_die()
+    time = timelimit_override or pkg.timelimit_for_language(lang)
+    isDoubleTL = verification.value >= VerificationLevel.FULL.value
+    memory = pkg.memorylimit_for_language(lang)
+    return Limits(
+        time=time, memory=memory, output=pkg.outputLimit, isDoubleTL=isDoubleTL
+    )
+
+
 def _run_solution_on_testcase(
     solution: Solution,
     compiled_digest: str,
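A rough standalone restatement of the resolution order `get_limits_for_language` implements: an explicit override wins over the package's per-language time limit, and full verification flips the double-TL flag (package lookups are replaced by plain arguments here; the names are illustrative):

```python
from typing import Optional, Tuple


def resolve_limits(
    timelimit_override: Optional[int],
    package_time_ms: int,
    package_memory_mb: Optional[int],
    full_verification: bool,
) -> Tuple[int, Optional[int], bool]:
    # Override beats the package's per-language limit; memory passes through unchanged.
    time = timelimit_override or package_time_ms
    # The double-TL flag is derived from the verification level, as in the hunk above.
    return time, package_memory_mb, full_verification


print(resolve_limits(None, 1000, 256, True))    # (1000, 256, True)
print(resolve_limits(2500, 1000, None, False))  # (2500, None, False)
```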
@@ -145,21 +167,22 @@ def _run_solution_on_testcase(
     verification: VerificationLevel = VerificationLevel.NONE,
     timelimit_override: Optional[int] = None,
 ) -> Evaluation:
-    pkg = package.find_problem_package_or_die()
     actual_sandbox = package.get_singleton_sandbox()

-
+    limits = get_limits_for_language(
+        solution.language, verification, timelimit_override
+    )

     sandbox = EnvironmentSandbox()
-    sandbox.timeLimit =
-    if
-        #
+    sandbox.timeLimit = limits.time
+    if limits.isDoubleTL and sandbox.timeLimit is not None:
+        # Double TL.
         sandbox.timeLimit = sandbox.timeLimit * 2
-    sandbox.wallTimeLimit =
-
-
-    sandbox.memoryLimit =
-    sandbox.fileSizeLimit =
+    sandbox.wallTimeLimit = sandbox.timeLimit
+    if sandbox.timeLimit is not None and actual_sandbox.use_soft_timeout():
+        sandbox.wallTimeLimit = sandbox.timeLimit * 2
+    sandbox.memoryLimit = limits.memory
+    sandbox.fileSizeLimit = limits.output
     extra_config = ExecutionConfig(sandbox=sandbox)

     output_path = output_dir / testcase.inputPath.with_suffix('.out').name
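The sandbox wiring above applies the resolved limits mechanically: double the TL when `isDoubleTL` is set, and grant a 2x wall-clock budget when the underlying sandbox only enforces soft timeouts. A standalone sketch of that mapping (the `SandboxConfig` dataclass is an illustrative stand-in for `EnvironmentSandbox`):

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class SandboxConfig:
    # Illustrative stand-in for EnvironmentSandbox; units follow the diff (ms, MB, KB).
    timeLimit: Optional[int] = None
    wallTimeLimit: Optional[int] = None
    memoryLimit: Optional[int] = None
    fileSizeLimit: Optional[int] = None


def build_sandbox(
    time: Optional[int],
    memory: Optional[int],
    output: Optional[int],
    is_double_tl: bool,
    use_soft_timeout: bool,
) -> SandboxConfig:
    sandbox = SandboxConfig()
    sandbox.timeLimit = time
    if is_double_tl and sandbox.timeLimit is not None:
        sandbox.timeLimit *= 2
    sandbox.wallTimeLimit = sandbox.timeLimit
    if sandbox.timeLimit is not None and use_soft_timeout:
        # Soft-timeout sandboxes get a looser wall-clock budget.
        sandbox.wallTimeLimit = sandbox.timeLimit * 2
    sandbox.memoryLimit = memory
    sandbox.fileSizeLimit = output
    return sandbox


print(build_sandbox(1000, 256, 4096, is_double_tl=True, use_soft_timeout=True))
# SandboxConfig(timeLimit=2000, wallTimeLimit=4000, memoryLimit=256, fileSizeLimit=4096)
```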
@@ -261,6 +284,7 @@ async def convert_list_of_solution_evaluations_to_dict(
 def _get_report_skeleton(
     tracked_solutions: Optional[Set[str]] = None,
     verification: VerificationLevel = VerificationLevel.NONE,
+    timelimit_override: Optional[int] = None,
 ) -> SolutionReportSkeleton:
     pkg = package.find_problem_package_or_die()
     solutions = [
@@ -275,6 +299,13 @@ def _get_report_skeleton(
         if str(solution.path) in tracked_solutions
     ]

+    langs = set(find_language_name(solution) for solution in solutions)
+    limits = {
+        lang: get_limits_for_language(lang, verification, timelimit_override)
+        for lang in langs
+        if lang is not None
+    }
+
     groups = []
     for group in pkg.testcases:
         testcases = find_built_testcases(group)
@@ -282,6 +313,7 @@ def _get_report_skeleton(
     return SolutionReportSkeleton(
         solutions=solutions,
         groups=groups,
+        limits=limits,
     )


@@ -361,7 +393,11 @@ def run_solutions(
     sanitized: bool = False,
 ) -> RunSolutionResult:
     return RunSolutionResult(
-        skeleton=_get_report_skeleton(
+        skeleton=_get_report_skeleton(
+            tracked_solutions,
+            verification=verification,
+            timelimit_override=timelimit_override,
+        ),
         items=_produce_solution_items(
             progress=progress,
             tracked_solutions=tracked_solutions,
@@ -378,6 +414,7 @@ def _run_interactive_solutions(
     verification: VerificationLevel = VerificationLevel.NONE,
     generator: Optional[GeneratorCall] = None,
     check: bool = True,
+    print: bool = False,
     sanitized: bool = False,
 ) -> Iterator[EvaluationItem]:
     pkg = package.find_problem_package_or_die()
@@ -410,7 +447,7 @@ def _run_interactive_solutions(
             (i, sol) for i, sol in solutions if str(sol.path) in tracked_solutions
         ]

-    irun_dir = package.
+    irun_dir = package.get_problem_iruns_dir()
     shutil.rmtree(str(irun_dir), ignore_errors=True)
     irun_dir.mkdir(parents=True, exist_ok=True)
     inputs_dir = irun_dir / 'inputs'
@@ -423,6 +460,13 @@ def _run_interactive_solutions(
         console.console.print(
             f'Using input from generator call [item]{expanded_call.name} {expanded_call.args}[/item].'
         )
+        if print:
+            console.console.print(input_path.read_text())
+        else:
+            console.console.print(
+                f'Input was written to [item]{input_path.resolve()}[/item]'
+            )
+        console.console.print()
     else:
         input = console.multiline_prompt('Testcase input')
         input_path.write_text(input)
@@ -468,11 +512,12 @@ async def run_and_print_interactive_solutions(
         check=check,
         generator=generator,
         sanitized=sanitized,
+        print=print,
     )

     for item in items:
         sol = pkg.solutions[item.solution_index]
-        _print_solution_header(sol, console.console)
+        _print_solution_header(sol, console.console, is_irun=True)

         eval = await item.eval()

@@ -487,7 +532,11 @@ async def run_and_print_interactive_solutions(
             else:
                 console.console.print('[warning]Solution produced no output.[/warning]')
         elif stdout_path is not None:
-            console.console.print(f'Output: {stdout_path}
+            console.console.print(f'[status]Output:[/status] {stdout_path}')
+            if eval.log.stderr_absolute_path is not None:
+                console.console.print(
+                    f'[status]Stderr:[/status] {eval.log.stderr_absolute_path}'
+                )
         console.console.print()


@@ -534,18 +583,53 @@ def _get_evals_memory_in_bytes(evals: List[Evaluation]) -> int:
     return max(int(eval.log.memory or 0) for eval in evals)


+def get_formatted_time(time_in_ms: int) -> str:
+    return f'{time_in_ms} ms'
+
+
 def get_evals_formatted_time(evals: List[Evaluation]) -> str:
     max_time = _get_evals_time_in_ms(evals)
+    return get_formatted_time(max_time)
+
+
+def get_capped_evals_formatted_time(
+    solution: Solution, evals: List[Evaluation], verification: VerificationLevel
+) -> str:
+    pkg = package.find_problem_package_or_die()
+
+    max_time = _get_evals_time_in_ms(evals)
+    has_tle = any(eval.result.outcome == Outcome.TIME_LIMIT_EXCEEDED for eval in evals)
+    timelimits = [
+        eval.log.metadata.timeLimit
+        for eval in evals
+        if eval.log.metadata is not None and eval.log.metadata.timeLimit is not None
+    ]
+    tl = None
+    if timelimits:
+        tl = min(timelimits)
+    if tl is None:
+        tl = pkg.timelimit_for_language(solution.language)
+
+    if verification.value >= VerificationLevel.FULL.value:
+        # Using double TL for verification.
+        tl = tl * 2
+
+    if has_tle and max_time >= tl:
+        return f'>{tl} ms'
     return f'{max_time} ms'


+def get_formatted_memory(memory_in_bytes: int) -> str:
+    if memory_in_bytes < 1024 * 1024:
+        if memory_in_bytes < 1024:
+            return f'{memory_in_bytes} B'
+        return f'{memory_in_bytes // 1024} KiB'
+    return f'{memory_in_bytes // (1024 * 1024)} MiB'
+
+
 def get_evals_formatted_memory(evals: List[Evaluation]) -> str:
     max_memory = _get_evals_memory_in_bytes(evals)
-
-    if max_memory < 1024:
-        return f'{max_memory} B'
-    return f'{max_memory // 1024} KiB'
-    return f'{max_memory // (1024 * 1024)} MiB'
+    return get_formatted_memory(max_memory)


 def _print_solution_outcome(
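The new formatting helpers are pure functions, so their thresholds are easy to verify in isolation. A small sketch reproducing the logic from the hunk above:

```python
def get_formatted_time(time_in_ms: int) -> str:
    return f'{time_in_ms} ms'


def get_formatted_memory(memory_in_bytes: int) -> str:
    # < 1 KiB -> bytes, < 1 MiB -> KiB, otherwise MiB (integer division, as in the diff).
    if memory_in_bytes < 1024 * 1024:
        if memory_in_bytes < 1024:
            return f'{memory_in_bytes} B'
        return f'{memory_in_bytes // 1024} KiB'
    return f'{memory_in_bytes // (1024 * 1024)} MiB'


print(get_formatted_time(137))                 # 137 ms
print(get_formatted_memory(512))               # 512 B
print(get_formatted_memory(300 * 1024))        # 300 KiB
print(get_formatted_memory(42 * 1024 * 1024))  # 42 MiB
```

`get_capped_evals_formatted_time` additionally reports `>TL ms` when a run hit the time limit, using the `timeLimit` now recorded in `RunLogMetadata` by `run_item`.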
@@ -631,7 +715,9 @@ def _print_solution_outcome(
             '[warning]WARNING[/warning] The solution had sanitizer errors or warnings, marked with [warning]*[/warning]. See their stderr for more details.'
         )

-    console.print(
+    console.print(
+        f'Time: {get_capped_evals_formatted_time(solution, evals, verification)}'
+    )
     console.print(f'Memory: {get_evals_formatted_memory(evals)}')
     return len(unmatched_bad_verdicts) == 0

@@ -654,12 +740,18 @@ def _consume_and_key_evaluation_items(
     return res


-def _print_solution_header(
+def _print_solution_header(
+    solution: Solution, console: rich.console.Console, is_irun: bool = False
+):
     solutions = package.get_solutions()
     solution_index = [
         i for i, sol in enumerate(solutions) if sol.path == solution.path
     ][0]
-    solution_testdir =
+    solution_testdir = (
+        package.get_problem_iruns_dir() / f'{solution_index}'
+        if is_irun
+        else package.get_problem_runs_dir() / f'{solution_index}'
+    )
     console.print(f'[item]{solution.path}[/item]', end=' ')
     console.print(f'({solution_testdir})')

@@ -693,11 +785,51 @@ async def _print_timing(
     console.print(f'Fastest [error]slow[/error] solution: {fastest_slow} ms')


+def _length_markup(markup: str) -> int:
+    text = rich.markup.render(markup)
+    return text.cell_len
+
+
+def _length_pointwise(ls: Iterable[str]) -> Tuple[int, ...]:
+    return tuple(_length_markup(x) for x in ls)
+
+
+def _max_pointwise(ls: Iterable[Tuple[int, ...]]) -> Tuple[int, ...]:
+    return tuple(max(x) for x in zip(*ls))
+
+
+def _get_indented_text(s: str, width: int):
+    text = rich.markup.render(s)
+    text.align('right', width=width)
+    return text
+
+
+def _render_padded_rows(
+    rows: List[List[Tuple[str, ...]]],
+) -> List[List[rich.text.Text]]:
+    max_widths_per_column = [
+        _max_pointwise(_length_pointwise(cell) for cell in col) for col in zip(*rows)
+    ]
+    res = []
+    for row in rows:
+        acc_row = []
+        for i, cell in enumerate(row):
+            acc_row.append(
+                rich.text.Text(' ').join(
+                    _get_indented_text(item, width)
+                    for item, width in zip(cell, max_widths_per_column[i])
+                )
+            )
+        res.append(acc_row)
+    return res
+
+
 async def _render_detailed_group_table(
     group: TestcaseGroup,
     skeleton: SolutionReportSkeleton,
     structured_evaluations: StructuredEvaluation,
     console: rich.console.Console,
+    verification: VerificationLevel = VerificationLevel.NONE,
 ):
     group_skeleton = skeleton.find_group_skeleton(group.name)
     assert group_skeleton is not None
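The alignment helpers measure rendered markup (so tags like `[info]…[/info]` do not count toward width) and right-align each cell to the per-column maximum before joining cells into one `rich` Text. A minimal sketch of the same idea, using only the `rich` APIs that appear in the hunk:

```python
import rich
import rich.markup
import rich.text


def cell_width(markup: str) -> int:
    # Width as it will actually render: markup tags contribute nothing.
    return rich.markup.render(markup).cell_len


cells = ['[info]#0[/info]', '[green]AC[/green]', '137 ms']
width = max(cell_width(c) for c in cells)

aligned = []
for cell in cells:
    text = rich.markup.render(cell)
    text.align('right', width=width)  # pad on the left up to the column width
    aligned.append(text)

rich.print(rich.text.Text(' | ').join(aligned))
```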
@@ -709,28 +841,34 @@ async def _render_detailed_group_table(
         for solution in skeleton.solutions:
             table.add_column(f'[item]{solution.path}[/item]', justify='full')

+        padded_rows = []
+
         evals_per_solution = collections.defaultdict(list)
         for tc, _ in enumerate(group_skeleton.testcases):
             row = []
             for solution in skeleton.solutions:
                 eval = structured_evaluation[str(solution.path)][group_name][tc]
                 if eval is None:
-                    row.append('...')
+                    row.append((f'[info]#{tc}[/info]', '', '...', '', '', ''))
                     continue
                 eval = eval.peek()
                 if eval is None:
-                    row.append('...')
+                    row.append((f'[info]#{tc}[/info]', '', '...', '', '', ''))
                     continue

+                evals_per_solution[str(solution.path)].append(eval)
+
                 verdict = get_testcase_markup_verdict(eval)
-                time =
+                time = get_capped_evals_formatted_time(solution, [eval], verification)
+                memory = get_evals_formatted_memory([eval])
+                full_item = (f'[info]#{tc}[/info]', verdict, time, '/', memory, '')
                 if eval.result.sanitizer_warnings:
-
-
-                row.append(
-
+                    full_item = (*full_item[:-1], '[warning]*[/warning]')
+
+                row.append(full_item)
+            padded_rows.append(row)

-        if
+        if padded_rows:
             summary_row = []
             for solution in skeleton.solutions:
                 evals = evals_per_solution[str(solution.path)]
@@ -738,9 +876,18 @@ async def _render_detailed_group_table(
                 if not non_null_evals:
                     summary_row.append('...')
                     continue
-
-
-
+                formatted_time = get_capped_evals_formatted_time(
+                    solution, non_null_evals, verification
+                )
+                formatted_memory = get_evals_formatted_memory(non_null_evals)
+                summary_row.append(('', '', formatted_time, '/', formatted_memory, ''))
+            padded_rows.append(summary_row)
+
+        for row in _render_padded_rows(padded_rows):
+            table.add_row(*row)
+
+        if padded_rows:
+            table.rows[-2].end_section = True
         return table

     with rich.live.Live(
@@ -763,6 +910,7 @@ async def _print_detailed_run_report(
     console: rich.console.Console,
     structured_evaluations: StructuredEvaluation,
     timing: bool = True,
+    verification: VerificationLevel = VerificationLevel.NONE,
 ):
     for group in result.skeleton.groups:
         console.print(f'[bold][status]{group.name}[/status][/bold]')
@@ -772,6 +920,7 @@ async def _print_detailed_run_report(
                 result.skeleton,
                 structured_evaluations,
                 console,
+                verification=verification,
             )
             continue

@@ -788,6 +937,7 @@ async def _print_detailed_run_report(
                 solution,
                 all_evals,
                 console,
+                verification=verification,
             )
             ok = ok and cur_ok
         console.print()
@@ -799,19 +949,41 @@ async def _print_detailed_run_report(
     return ok


+def _print_limits(limits: Dict[str, Limits]):
+    console.console.print(
+        '[bold][success]Running with the following limits (per language):[/success][/bold]'
+    )
+    for lang, limit in limits.items():
+        console.console.print(f'[bold][status]{lang}[/status][/bold]')
+        console.console.print(f'Time: {get_formatted_time(limit.time or int('+inf'))}')
+        memory = limit.memory * 1024 * 1024 if limit.memory is not None else int('+inf')
+        console.console.print(f'Memory: {get_formatted_memory(memory)}')
+        if limit.isDoubleTL:
+            console.console.print('[warning]Running with 2*TL[/warning]')
+        console.console.print()
+
+
 async def print_run_report(
     result: RunSolutionResult,
     console: rich.console.Console,
-    verification:
+    verification: VerificationLevel,
     detailed: bool = False,
     timing: bool = True,
+    skip_printing_limits: bool = False,
 ) -> bool:
+    if not skip_printing_limits:
+        _print_limits(result.skeleton.limits)
+
     structured_evaluations = _consume_and_key_evaluation_items(
         result.items, result.skeleton
     )
     if detailed:
         return await _print_detailed_run_report(
-            result,
+            result,
+            console,
+            structured_evaluations,
+            verification=verification,
+            timing=timing,
         )

     ok = True
@@ -836,7 +1008,7 @@ async def print_run_report(
             solution_evals.append(eval)

         console.print(
-            f'({
+            f'({get_capped_evals_formatted_time(solution, group_evals, verification)}, {get_evals_formatted_memory(group_evals)})',
             end='',
         )
         console.print()
@@ -845,7 +1017,7 @@ async def print_run_report(
             solution,
             solution_evals,
             console,
-            verification=
+            verification=verification,
         )
         console.print()
