rbx.cp 0.5.73-py3-none-any.whl → 0.6.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. rbx/annotations.py +21 -1
  2. rbx/box/cd.py +11 -1
  3. rbx/box/checkers.py +9 -1
  4. rbx/box/cli.py +59 -46
  5. rbx/box/code.py +142 -3
  6. rbx/box/contest/build_contest_statements.py +44 -34
  7. rbx/box/contest/contest_package.py +4 -7
  8. rbx/box/contest/main.py +7 -58
  9. rbx/box/contest/schema.py +52 -8
  10. rbx/box/contest/statements.py +53 -25
  11. rbx/box/creation.py +3 -36
  12. rbx/box/environment.py +21 -9
  13. rbx/box/fields.py +35 -0
  14. rbx/box/lang.py +27 -0
  15. rbx/box/linting.py +26 -0
  16. rbx/box/package.py +4 -35
  17. rbx/box/packaging/boca/packager.py +48 -5
  18. rbx/box/packaging/contest_main.py +13 -0
  19. rbx/box/packaging/main.py +13 -2
  20. rbx/box/packaging/packager.py +4 -4
  21. rbx/box/packaging/pkg/packager.py +142 -0
  22. rbx/box/packaging/polygon/packager.py +2 -24
  23. rbx/box/packaging/polygon/upload.py +35 -17
  24. rbx/box/presets/__init__.py +362 -281
  25. rbx/box/presets/lock_schema.py +1 -2
  26. rbx/box/presets/schema.py +13 -5
  27. rbx/box/remote.py +2 -2
  28. rbx/box/retries.py +8 -0
  29. rbx/box/schema.py +82 -19
  30. rbx/box/solutions.py +77 -15
  31. rbx/box/statements/build_statements.py +44 -27
  32. rbx/box/statements/builders.py +18 -10
  33. rbx/box/statements/expander.py +49 -0
  34. rbx/box/statements/latex_jinja.py +61 -4
  35. rbx/box/statements/schema.py +33 -9
  36. rbx/box/stats.py +92 -0
  37. rbx/box/tasks.py +6 -3
  38. rbx/box/testcase_utils.py +19 -47
  39. rbx/box/tooling/__init__.py +0 -0
  40. rbx/box/tooling/boca/__init__.py +0 -0
  41. rbx/box/tooling/boca/main.py +13 -0
  42. rbx/box/tooling/boca/scrape.py +34 -0
  43. rbx/box/{packaging/boca/upload.py → tooling/boca/scraper.py} +77 -8
  44. rbx/box/tooling/main.py +8 -0
  45. rbx/box/ui/utils/run_ui.py +1 -1
  46. rbx/box/ui/widgets/interaction_box.py +19 -1
  47. rbx/grading/caching.py +18 -2
  48. rbx/grading/judge/sandbox.py +60 -5
  49. rbx/grading/judge/sandboxes/isolate.py +1 -0
  50. rbx/grading/judge/sandboxes/stupid_sandbox.py +11 -5
  51. rbx/grading/judge/sandboxes/timeit.py +36 -15
  52. rbx/grading/processing_context.py +62 -78
  53. rbx/grading/steps.py +92 -40
  54. rbx/resources/packagers/boca/checker.sh +4 -1
  55. rbx/resources/packagers/boca/compile/c +2 -6
  56. rbx/resources/packagers/boca/compile/cc +2 -6
  57. rbx/resources/packagers/boca/compile/cpp +2 -6
  58. rbx/resources/packagers/boca/compile/java +1 -6
  59. rbx/resources/packagers/boca/compile/kt +24 -28
  60. rbx/resources/packagers/boca/compile/py2 +2 -6
  61. rbx/resources/packagers/boca/compile/py3 +2 -6
  62. rbx/resources/packagers/boca/interactive/c +15 -83
  63. rbx/resources/packagers/boca/interactive/cc +15 -83
  64. rbx/resources/packagers/boca/interactive/cpp +15 -83
  65. rbx/resources/packagers/boca/interactive/java +15 -88
  66. rbx/resources/packagers/boca/interactive/kt +15 -88
  67. rbx/resources/packagers/boca/interactive/py2 +15 -88
  68. rbx/resources/packagers/boca/interactive/py3 +15 -88
  69. rbx/resources/packagers/boca/interactor_compile.sh +5 -2
  70. rbx/resources/packagers/boca/interactor_run.sh +174 -0
  71. rbx/resources/packagers/boca/safeexec.c +530 -0
  72. rbx/resources/packagers/boca/safeexec_compile.sh +49 -0
  73. rbx/resources/presets/default/contest/contest.rbx.yml +9 -8
  74. rbx/resources/presets/default/problem/problem.rbx.yml +38 -26
  75. rbx/resources/presets/default/problem/random.txt +3 -1
  76. rbx/resources/presets/default/problem/rbx.h +92 -0
  77. rbx/resources/presets/default/problem/statement/statement.rbx.tex +4 -7
  78. rbx/resources/presets/default/problem/validator.cpp +8 -8
  79. rbx/resources/templates/rbx.h +2 -3
  80. {rbx_cp-0.5.73.dist-info → rbx_cp-0.6.1.dist-info}/METADATA +23 -6
  81. {rbx_cp-0.5.73.dist-info → rbx_cp-0.6.1.dist-info}/RECORD +84 -71
  82. {rbx_cp-0.5.73.dist-info → rbx_cp-0.6.1.dist-info}/WHEEL +1 -1
  83. rbx/resources/packagers/boca/compile/pas +0 -172
  84. rbx/resources/presets/default/problem/statement/projecao.png +0 -0
  85. {rbx_cp-0.5.73.dist-info → rbx_cp-0.6.1.dist-info}/LICENSE +0 -0
  86. {rbx_cp-0.5.73.dist-info → rbx_cp-0.6.1.dist-info}/entry_points.txt +0 -0
rbx/box/presets/lock_schema.py CHANGED
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import List
 
 from pydantic import BaseModel
 
@@ -11,7 +11,6 @@ class LockedAsset(TrackedAsset):
 
 class PresetLock(BaseModel):
     name: str
-    uri: Optional[str] = None
 
     @property
     def preset_name(self) -> str:

rbx/box/presets/schema.py CHANGED
@@ -1,8 +1,10 @@
 import pathlib
 from typing import List, Optional
 
+import typer
 from pydantic import BaseModel, Field
 
+from rbx import console
 from rbx.box.presets.fetch import PresetFetchInfo, get_preset_fetch_info
 
 
@@ -32,8 +34,9 @@ class Preset(BaseModel):
     # Name of the preset, or a GitHub repository containing it.
     name: str = NameField()
 
-    # URI of the preset to be fetched.
-    uri: Optional[str] = None
+    # URI of the preset to be fetched. Uniquely identifies the preset.
+    # Should usually be a GitHub repository.
+    uri: str
 
     # Path to the environment file that will be installed with this preset.
     # When copied to the box environment, the environment will be named `name`.
@@ -52,8 +55,13 @@
 
     @property
     def fetch_info(self) -> PresetFetchInfo:
-        if self.uri is None:
-            return PresetFetchInfo(name=self.name)
         res = get_preset_fetch_info(self.uri)
-        assert res is not None
+        if res is None:
+            console.console.print(
+                f'[error]Preset URI [item]{self.uri}[/item] is not valid.[/error]'
+            )
+            console.console.print(
+                '[error]Please check that the URI is correct and that the directory/asset really exists.[/error]'
+            )
+            raise typer.Exit(1)
         return res

rbx/box/remote.py CHANGED
@@ -52,14 +52,14 @@ class BocaExpander(Expander):
         return [str(self.get_boca_path(run_number, site_number)) + '.*']
 
     def expand(self, path: pathlib.Path) -> Optional[pathlib.Path]:
-        from rbx.box.packaging.boca import upload as boca_upload
+        from rbx.box.tooling.boca import scraper as boca_upload
 
         match = self.get_match(str(path))
         if match is None:
             return None
         run_number, site_number = match
 
-        boca_uploader = boca_upload.get_boca_uploader()
+        boca_uploader = boca_upload.get_boca_scraper()
         boca_uploader.login()
         sol_path = boca_uploader.download_run(
             run_number, site_number, self.get_boca_folder()

rbx/box/retries.py CHANGED
@@ -90,6 +90,14 @@ def _move_logs_to_temp_dir(
     return recover
 
 
+def get_retrier_config(nruns: int = 0) -> RepeatsConfig:
+    if nruns == 0:
+        return get_setter_config().repeats
+    repeats = get_setter_config().repeats.model_copy(deep=True)
+    repeats.reps = nruns
+    return repeats
+
+
 class Retrier:
     def __init__(self, config: Optional[RepeatsConfig] = None, is_stress: bool = False):
         self.config = config or get_setter_config().repeats
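
For context on the new helper: `get_retrier_config` returns the setter's configured repeats, or a deep copy with `reps` overridden when a run count is given. A minimal usage sketch, relying only on the signatures shown above (the call site itself is illustrative and not part of this diff):

```python
from rbx.box.retries import Retrier, get_retrier_config

# Force 5 repetitions per test; with nruns=0 the setter's own config is used as-is.
config = get_retrier_config(nruns=5)
retrier = Retrier(config=config)
```
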
rbx/box/schema.py CHANGED
@@ -2,30 +2,21 @@ from __future__ import annotations
 
 import os
 import pathlib
-from typing import Dict, List, Optional, Union
+import re
+from typing import Annotated, Any, Dict, List, Optional, Union
 
-from pydantic import BaseModel, ConfigDict, Field, model_validator
+from pydantic import AfterValidator, BaseModel, ConfigDict, Field, model_validator
 from pydantic_core import PydanticCustomError
 
 from rbx.autoenum import AutoEnum, alias
+from rbx.box.fields import FNameField, NameField
+from rbx.box.statements.expander import expand_statements
 from rbx.box.statements.schema import Statement
 from rbx.grading.steps import Outcome
 
 Primitive = Union[str, int, float, bool]
 
 
-def NameField(**kwargs):
-    return Field(
-        pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]*$', min_length=3, max_length=32, **kwargs
-    )
-
-
-def FNameField(**kwargs):
-    return Field(
-        pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]*$', min_length=3, max_length=128, **kwargs
-    )
-
-
 def _check_oneof(model_obj: BaseModel, fields: List[str]):
     has = []
     for field in fields:
@@ -56,6 +47,44 @@ def expand_var(value: Primitive) -> Primitive:
     )
 
 
+def expand_vars(vars: Dict[str, Primitive]) -> Dict[str, Primitive]:
+    return {key: expand_var(value) for key, value in vars.items()}
+
+
+def _represents_int(s: str) -> bool:
+    return re.match(r'[-+]?\d+$', s.strip()) is not None
+
+
+def _represents_float(s: str) -> bool:
+    return re.match(r'[-+]?\d+\.\d+$', s.strip()) is not None
+
+
+def _represents_bool(s: str) -> bool:
+    return s.lower().strip() in ['true', 'false', 'True', 'False']
+
+
+def convert_to_primitive(value: Any) -> Primitive:
+    if _represents_int(value):
+        return int(value)
+    if _represents_float(value):
+        return float(value)
+    if _represents_bool(value):
+        return value.lower().strip() == 'true'
+    return str(value)
+
+
+def expand_any_vars(vars: Dict[str, Any]) -> Dict[str, Primitive]:
+    converted_vars = {key: convert_to_primitive(value) for key, value in vars.items()}
+    return expand_vars(converted_vars)
+
+
+def is_unique_by_name(statements: List['Statement']) -> List['Statement']:
+    names = {st.name for st in statements}
+    if len(names) != len(statements):
+        raise ValueError('Statement names must be unique.')
+    return statements
+
+
 class ExpectedOutcome(AutoEnum):
     ANY = alias('any')  # type: ignore
     """Expected outcome for any outcome."""
@@ -204,6 +233,17 @@ Testlib and jngen are already included by default.
     )
 
 
+class Interactor(CodeItem):
+    model_config = ConfigDict(extra='forbid')
+
+    legacy: bool = Field(
+        default=False,
+        description="""
+Whether this interactor is a legacy interactor and needs a checker to be specified.
+""",
+    )
+
+
 class Testcase(BaseModel):
     model_config = ConfigDict(extra='forbid')
 
@@ -429,7 +469,7 @@ class Package(BaseModel):
         default=None, description='The checker for this problem.'
     )
 
-    interactor: Optional[CodeItem] = Field(
+    interactor: Optional[Interactor] = Field(
         default=None, description='The interactor for this problem.'
     )
 
@@ -459,9 +499,10 @@ that is correct and used as reference -- and should have the `accepted` outcome.
         default=[], description='Stress tests for the problem.'
     )
 
-    statements: List[Statement] = Field(
-        default=[], description='Statements for the problem.'
-    )
+    statements: Annotated[
+        List[Statement],
+        AfterValidator(is_unique_by_name),
+    ] = Field(default=[], description='Statements for the problem.')
 
     # Vars to be re-used across the package.
     # - It will be passed as --key=value arguments to the validator.
@@ -475,9 +516,13 @@ that is correct and used as reference -- and should have the `accepted` outcome.
         description='Unit tests for components of this problem.',
     )
 
+    @property
+    def expanded_statements(self) -> List[Statement]:
+        return expand_statements(self.statements)
+
     @property
     def expanded_vars(self) -> Dict[str, Primitive]:
-        return {key: expand_var(value) for key, value in self.vars.items()}
+        return expand_vars(self.vars)
 
     def timelimit_for_language(self, language: Optional[str]) -> int:
         res = self.timeLimit
@@ -525,3 +570,21 @@ that is correct and used as reference -- and should have the `accepted` outcome.
                 {'i': i + 1},
             )
         return self
+
+    @model_validator(mode='after')
+    def check_checker_and_interactor_for_task_type(self):
+        if self.type == TaskType.BATCH:
+            if self.interactor is not None:
+                raise PydanticCustomError(
+                    'INTERACTOR_NOT_ALLOWED',
+                    'Interactor is not allowed for batch problems. Change the task type to COMMUNICATION.',
+                )
+        if self.type == TaskType.COMMUNICATION:
+            if self.checker is not None and (
+                self.interactor is None or not self.interactor.legacy
+            ):
+                raise PydanticCustomError(
+                    'CHECKER_NOT_ALLOWED',
+                    'Checkers should not be specified for communication problems.',
+                )
+        return self
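
To make the new var-expansion helpers concrete, here is a small sketch of how `convert_to_primitive` and `expand_any_vars` behave, inferred from the regexes in `_represents_int`/`_represents_float`/`_represents_bool` added above (the example keys and values are hypothetical):

```python
from rbx.box.schema import convert_to_primitive, expand_any_vars

# Strings are coerced to the narrowest matching primitive.
assert convert_to_primitive('42') == 42          # matches [-+]?\d+$
assert convert_to_primitive('0.5') == 0.5        # matches [-+]?\d+\.\d+$
assert convert_to_primitive('True') is True      # case-insensitive true/false
assert convert_to_primitive('hello') == 'hello'  # anything else stays a string

# expand_any_vars applies the conversion above and then the package's regular
# var expansion (expand_vars/expand_var) on the converted values.
print(expand_any_vars({'MAX_N': '1000', 'EPS': '0.25'}))
```
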
rbx/box/solutions.py CHANGED
@@ -48,6 +48,7 @@ from rbx.box.tasks import (
 from rbx.box.testcase_extractors import extract_generation_testcases
 from rbx.box.testcase_utils import (
     TestcaseEntry,
+    TestcaseInteractionParsingError,
     find_built_testcases,
     parse_interaction,
     print_interaction,
@@ -191,6 +192,7 @@ def _run_solution(
     progress: Optional[StatusProgress] = None,
     verification: VerificationLevel = VerificationLevel.NONE,
     timelimit_override: Optional[int] = None,
+    nruns: int = 0,
 ) -> List[Deferred[Evaluation]]:
     group = package.get_testgroup(group_name)
     testcases = find_built_testcases(group)
@@ -216,6 +218,7 @@
             testcase_index=i,
             verification=verification,
             timelimit_override=timelimit_override,
+            nruns=nruns,
         )
 
         res.append(Deferred(run_fn))
@@ -314,6 +317,7 @@ def _produce_solution_items(
     check: bool = True,
     timelimit_override: Optional[int] = None,
     sanitized: bool = False,
+    nruns: int = 0,
 ) -> List[EvaluationItem]:
     pkg = package.find_problem_package_or_die()
 
@@ -347,6 +351,7 @@
             progress=progress,
             verification=verification,
             timelimit_override=timelimit_override,
+            nruns=nruns,
         )
     ):
         res.append(
@@ -374,7 +379,11 @@ def print_best_output(output_files: List[pathlib.Path], empty_warning: bool = Fa
         if not output_file.is_file():
             continue
         if output_file.suffix == '.pio':
-            print_interaction(parse_interaction(output_file))
+            try:
+                print_interaction(parse_interaction(output_file))
+            except TestcaseInteractionParsingError:
+                # Ignore parsing errors and proceed to next file.
+                continue
         else:
             console.console.print(output_file.read_text())
         return
@@ -389,6 +398,7 @@ def run_solutions(
     check: bool = True,
     timelimit_override: Optional[int] = None,
     sanitized: bool = False,
+    nruns: int = 0,
 ) -> RunSolutionResult:
     skeleton = _get_report_skeleton(
         tracked_solutions,
@@ -404,6 +414,7 @@
             check=check,
             timelimit_override=timelimit_override,
             sanitized=sanitized,
+            nruns=nruns,
         ),
     )
     return result
@@ -679,7 +690,7 @@ async def run_and_print_interactive_solutions(
        console.console.print(get_testcase_markup_verdict(eval), end=' ')
        _print_solution_header(sol, console.console)
        _print_solution_outcome(
-            sol, [eval], console.console, verification, subset=True
+            sol, skeleton, [eval], console.console, verification, subset=True
        )
 
        stdout_path = eval.log.stdout_absolute_path
@@ -907,6 +918,7 @@ class SolutionOutcomeReport(BaseModel):
     solution: Solution
     evals: List[Evaluation]
     ok: bool
+    message: Optional[Tuple[TestcaseEntry, str]]
     expectedOutcome: Optional[ExpectedOutcome]
     gotVerdicts: Set[Outcome]
     runUnderDoubleTl: bool
@@ -944,15 +956,22 @@
            res += '\n[bold yellow]WARNING[/bold yellow] The solution had sanitizer errors or warnings, marked with [bold yellow]*[/bold yellow]. See their stderr for more details.'
        return res
 
-    def get_outcome_markup(self) -> str:
+    def get_outcome_markup(self, print_message: bool = True) -> str:
        res = self.get_verdict_markup_with_warnings()
        res += f'\nTime: {get_capped_evals_formatted_time(self.solution, self.evals, self.verification)}'
        res += f'\nMemory: {get_evals_formatted_memory(self.evals)}'
+        if print_message and self.message is not None:
+            tc, msg = self.message
+            if msg:
+                if len(msg) > 100:
+                    msg = msg[:100] + '... (truncated)'
+                res += f'\nMessage for {tc}: {msg}'
        return res
 
 
 def get_solution_outcome_report(
     solution: Solution,
+    skeleton: SolutionReportSkeleton,
     evals: List[Evaluation],
     verification: VerificationLevel = VerificationLevel.NONE,
     subset: bool = False,
@@ -964,7 +983,8 @@ def get_solution_outcome_report(
     bad_verdicts = set()
     no_tle_bad_verdicts = set()
     has_sanitizer_warnings = False
-    for eval in evals:
+    message: Optional[Tuple[TestcaseEntry, str]] = None
+    for eval, entry in zip(evals, skeleton.entries):
         all_verdicts.add(eval.result.outcome)
         if eval.result.outcome != Outcome.ACCEPTED:
             bad_verdicts.add(eval.result.outcome)
@@ -979,6 +999,16 @@
         has_sanitizer_warnings = (
             has_sanitizer_warnings or eval.result.sanitizer_warnings
         )
+        if (
+            eval.result.outcome
+            in [
+                Outcome.WRONG_ANSWER,
+                Outcome.JUDGE_FAILED,
+            ]
+            and message is None
+        ):
+            message = (entry, eval.result.message)
+
     unmatched_bad_verdicts = set(
         v for v in bad_verdicts if not solution.outcome.match(v)
     )
@@ -1037,6 +1067,7 @@
         solution=solution,
         evals=evals,
         ok=not has_failed,
+        message=message,
         expectedOutcome=report_expected_outcome,
         gotVerdicts=report_got_verdicts,
         runUnderDoubleTl=report_run_under_double_tl,
@@ -1048,13 +1079,17 @@
 
 def _print_solution_outcome(
     solution: Solution,
+    skeleton: SolutionReportSkeleton,
     evals: List[Evaluation],
     console: rich.console.Console,
     verification: VerificationLevel = VerificationLevel.NONE,
     subset: bool = False,
+    print_message: bool = True,
 ) -> bool:
-    report = get_solution_outcome_report(solution, evals, verification, subset)
-    console.print(report.get_outcome_markup())
+    report = get_solution_outcome_report(
+        solution, skeleton, evals, verification, subset
+    )
+    console.print(report.get_outcome_markup(print_message))
     return report.ok
 
 
@@ -1338,6 +1373,7 @@ async def _print_detailed_run_report(
         _print_solution_header(solution, console)
         cur_ok = _print_solution_outcome(
             solution,
+            result.skeleton,
             all_evals,
             console,
             verification=verification,
@@ -1399,44 +1435,70 @@ async def print_run_report(
     )
 
     ok = True
+    single_solution = len(result.skeleton.solutions) == 1
 
     for solution in result.skeleton.solutions:
         _print_solution_header(solution, console)
+        if single_solution:
+            console.print()
         solution_evals = []
         for group in result.skeleton.groups:
-            console.print(f'[bold][status]{group.name}[/status][/bold] ', end='')
+            if not single_solution:
+                console.print(f'[bold][status]{group.name}[/status][/bold] ', end='')
            group_evals = []
            for i, _ in enumerate(group.testcases):
                eval = structured_evaluations[str(solution.path)][group.name][i]
                if eval is None:
                    continue
                eval = await eval()
-                console.print(f'{i}/', end='')
-                console.print(get_testcase_markup_verdict(eval), end='')
-                if eval.result.sanitizer_warnings:
-                    console.print('[warning]*[/warning]', end='')
-                console.print('', end=' ')
+                if single_solution:
+                    console.print(get_testcase_markup_verdict(eval), end=' ')
+                    console.print(f'{group.name}/{i}', end='')
+                    if eval.result.sanitizer_warnings:
+                        console.print('[warning]*[/warning]', end='')
+                    time = get_capped_evals_formatted_time(
+                        solution, [eval], verification
+                    )
+                    memory = get_evals_formatted_memory([eval])
+                    console.print(f' ({time}, {memory})', end='')
+                    checker_msg = eval.result.message
+                    if checker_msg:
+                        console.print(f': [i]{checker_msg}[/i]', end='')
+                else:
+                    console.print(f'{i}/', end='')
+                    console.print(get_testcase_markup_verdict(eval), end='')
+                    if eval.result.sanitizer_warnings:
+                        console.print('[warning]*[/warning]', end='')
+
+                console.print('', end='\n' if single_solution else ' ')
                group_evals.append(eval)
                solution_evals.append(eval)
 
+            if single_solution:
+                console.print(f' [status]{group.name}[/status]', end=' ')
            console.print(
                f'({get_capped_evals_formatted_time(solution, group_evals, verification)}, {get_evals_formatted_memory(group_evals)})',
                end='',
            )
            console.print()
+        if single_solution:
+            console.print()
 
         cur_ok = _print_solution_outcome(
             solution,
+            result.skeleton,
             solution_evals,
             console,
             verification=verification,
+            print_message=not single_solution,
         )
         ok = ok and cur_ok
         console.print()
 
-    await _print_timing(
-        console, result.skeleton, structured_evaluations, verification=verification
-    )
+    if not single_solution:
+        await _print_timing(
+            console, result.skeleton, structured_evaluations, verification=verification
+        )
 
     return ok
rbx/box/statements/build_statements.py CHANGED
@@ -1,7 +1,7 @@
 import pathlib
 import tempfile
 import typing
-from typing import Annotated, Dict, List, Optional, Tuple
+from typing import Annotated, Any, Dict, List, Optional, Tuple
 
 import syncer
 import typer
@@ -9,7 +9,7 @@ import typer
 from rbx import annotations, console
 from rbx.box import environment, naming, package
 from rbx.box.formatting import href
-from rbx.box.schema import Package
+from rbx.box.schema import Package, expand_any_vars
 from rbx.box.statements.builders import (
     BUILDER_LIST,
     PROBLEM_BUILDER_LIST,
@@ -217,7 +217,7 @@ def build_statement_bytes(
     overridden_params: Optional[Dict[ConversionType, ConversionStep]] = None,
     overridden_assets: Optional[List[Tuple[pathlib.Path, pathlib.Path]]] = None,
     use_samples: bool = True,
-    is_editorial: bool = False,
+    custom_vars: Optional[Dict[str, Any]] = None,
 ) -> Tuple[bytes, StatementType]:
     overridden_params = overridden_params or {}
     overridden_assets = overridden_assets or []
@@ -258,10 +258,11 @@
         output = bdr.build(
             input=last_content,
             context=StatementBuilderContext(
+                lang=statement.language,
                 languages=get_environment_languages_for_statement(),
                 params=params,
                 root=pathlib.Path(td),
-                editorial=is_editorial,
+                custom_vars=custom_vars,
             ),
             item=StatementBuilderProblem(
                 package=pkg,
@@ -284,24 +285,23 @@ def build_statement(
     pkg: Package,
     output_type: Optional[StatementType] = None,
     use_samples: bool = True,
-    is_editorial: bool = False,
+    custom_vars: Optional[Dict[str, Any]] = None,
 ) -> pathlib.Path:
     last_content, last_output = build_statement_bytes(
         statement,
         pkg,
         output_type=output_type,
         use_samples=use_samples,
-        is_editorial=is_editorial,
+        custom_vars=custom_vars,
         short_name=naming.get_problem_shortname(),
     )
-    statement_path = (
-        package.get_build_path()
-        / f'{statement.path.stem}{last_output.get_file_suffix()}'
+    statement_path = (package.get_build_path() / statement.name).with_suffix(
+        last_output.get_file_suffix()
     )
     statement_path.parent.mkdir(parents=True, exist_ok=True)
     statement_path.write_bytes(last_content)
     console.console.print(
-        f'Statement built successfully for language '
+        f'Statement [item]{statement.name}[/item] built successfully for language '
         f'[item]{statement.language}[/item] at '
         f'{href(statement_path)}'
     )
@@ -313,13 +313,18 @@
 @syncer.sync
 async def build(
     verification: environment.VerificationParam,
+    names: Annotated[
+        Optional[List[str]],
+        typer.Argument(
+            help='Names of statements to build.',
+        ),
+    ] = None,
     languages: Annotated[
         Optional[List[str]],
         typer.Option(
-            default_factory=list,
             help='Languages to build statements for. If not specified, build statements for all available languages.',
         ),
-    ],
+    ] = None,
     output: Annotated[
         Optional[StatementType],
         typer.Option(
@@ -331,9 +336,14 @@
         bool,
         typer.Option(help='Whether to build the statement with samples or not.'),
     ] = True,
-    editorial: Annotated[
-        bool, typer.Option(help='Whether to add editorial blocks to the statements.')
-    ] = False,
+    vars: Annotated[
+        Optional[List[str]],
+        typer.Option(
+            '-v',
+            '--vars',
+            help='Variables to be used in the statements.',
+        ),
+    ] = None,
 ):
     # At most run the validators, only in samples.
     if samples:
@@ -350,24 +360,31 @@
         raise typer.Exit(1)
 
     pkg = package.find_problem_package_or_die()
-    candidate_languages = languages
-    if not candidate_languages:
-        candidate_languages = sorted(set([st.language for st in pkg.statements]))
+    candidate_languages = set(languages or [])
+    candidate_names = set(names or [])
 
-    for language in candidate_languages:
-        candidates_for_lang = [st for st in pkg.statements if st.language == language]
-        if not candidates_for_lang:
-            console.console.print(
-                f'[error]No statement found for language [item]{language}[/item].[/error]',
-            )
-            raise typer.Exit(1)
+    def should_process(st: Statement) -> bool:
+        if candidate_languages and st.language not in candidate_languages:
+            return False
+        if candidate_names and st.name not in candidate_names:
+            return False
+        return True
+
+    valid_statements = [st for st in pkg.expanded_statements if should_process(st)]
+
+    if not valid_statements:
+        console.console.print(
+            '[error]No statement found according to the specified criteria.[/error]',
+        )
+        raise typer.Exit(1)
 
+    for statement in valid_statements:
         build_statement(
-            candidates_for_lang[0],
+            statement,
             pkg,
             output_type=output,
             use_samples=samples,
-            is_editorial=editorial,
+            custom_vars=expand_any_vars(annotations.parse_dictionary_items(vars)),
        )
 
 
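The `build` command now filters statements by name and language and accepts repeated `-v`/`--vars` options in place of the old `--editorial` flag, with values coerced through `expand_any_vars` before reaching the builders. A minimal sketch of the equivalent programmatic call, relying only on the signatures shown in this diff (the variable name and value are illustrative):

```python
from rbx.box import package
from rbx.box.schema import expand_any_vars
from rbx.box.statements.build_statements import build_statement

# Illustrative only: build the first expanded statement of the current package
# with a custom var, mirroring what the updated `build` command does with `-v`.
pkg = package.find_problem_package_or_die()
statement = pkg.expanded_statements[0]
build_statement(
    statement,
    pkg,
    custom_vars=expand_any_vars({'MAX_N': '1000'}),
)
```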