rbx.cp 0.6.0-py3-none-any.whl → 0.7.0-py3-none-any.whl

@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import List
 
 from pydantic import BaseModel
 
@@ -11,7 +11,6 @@ class LockedAsset(TrackedAsset):
 
 class PresetLock(BaseModel):
     name: str
-    uri: Optional[str] = None
 
     @property
     def preset_name(self) -> str:
rbx/box/presets/schema.py CHANGED
@@ -1,8 +1,10 @@
 import pathlib
 from typing import List, Optional
 
+import typer
 from pydantic import BaseModel, Field
 
+from rbx import console
 from rbx.box.presets.fetch import PresetFetchInfo, get_preset_fetch_info
 
 
@@ -32,8 +34,9 @@ class Preset(BaseModel):
     # Name of the preset, or a GitHub repository containing it.
     name: str = NameField()
 
-    # URI of the preset to be fetched.
-    uri: Optional[str] = None
+    # URI of the preset to be fetched. Uniquely identifies the preset.
+    # Should usually be a GitHub repository.
+    uri: str
 
     # Path to the environment file that will be installed with this preset.
     # When copied to the box environment, the environment will be named `name`.
@@ -52,8 +55,13 @@ class Preset(BaseModel):
 
     @property
     def fetch_info(self) -> PresetFetchInfo:
-        if self.uri is None:
-            return PresetFetchInfo(name=self.name)
         res = get_preset_fetch_info(self.uri)
-        assert res is not None
+        if res is None:
+            console.console.print(
+                f'[error]Preset URI [item]{self.uri}[/item] is not valid.[/error]'
+            )
+            console.console.print(
+                '[error]Please check that the URI is correct and that the directory/asset really exists.[/error]'
+            )
+            raise typer.Exit(1)
         return res
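
Note: `uri` is now required on `Preset`, and an unparseable URI aborts with a console error and exit code 1 instead of failing an `assert`. A minimal sketch of the new failure mode (field values are illustrative, and any `Preset` fields not shown here are assumed to be optional):

    from rbx.box.presets.schema import Preset

    preset = Preset(name='default', uri='rsalesc/rbx/rbx/resources/presets/default')
    info = preset.fetch_info  # raises typer.Exit(1) when the URI cannot be parsed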
rbx/box/retries.py CHANGED
@@ -90,6 +90,14 @@ def _move_logs_to_temp_dir(
     return recover
 
 
+def get_retrier_config(nruns: int = 0) -> RepeatsConfig:
+    if nruns == 0:
+        return get_setter_config().repeats
+    repeats = get_setter_config().repeats.model_copy(deep=True)
+    repeats.reps = nruns
+    return repeats
+
+
 class Retrier:
     def __init__(self, config: Optional[RepeatsConfig] = None, is_stress: bool = False):
         self.config = config or get_setter_config().repeats
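
Note: `get_retrier_config` lets callers override the configured repeat count without mutating global state. A minimal usage sketch (assuming `RepeatsConfig` exposes the `reps` field the function above copies):

    from rbx.box.retries import Retrier, get_retrier_config

    # nruns=0 keeps the setter-configured repeats; nruns>0 overrides the
    # count on a deep copy, leaving the global config untouched.
    retrier = Retrier(get_retrier_config(nruns=3))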
rbx/box/schema.py CHANGED
@@ -233,6 +233,17 @@ Testlib and jngen are already included by default.
     )
 
 
+class Interactor(CodeItem):
+    model_config = ConfigDict(extra='forbid')
+
+    legacy: bool = Field(
+        default=False,
+        description="""
+Whether this interactor is a legacy interactor and needs a checker to be specified.
+""",
+    )
+
+
 class Testcase(BaseModel):
     model_config = ConfigDict(extra='forbid')
 
@@ -458,7 +469,7 @@ class Package(BaseModel):
         default=None, description='The checker for this problem.'
     )
 
-    interactor: Optional[CodeItem] = Field(
+    interactor: Optional[Interactor] = Field(
         default=None, description='The interactor for this problem.'
     )
 
@@ -569,7 +580,9 @@ that is correct and used as reference -- and should have the `accepted` outcome.
                 'Interactor is not allowed for batch problems. Change the task type to COMMUNICATION.',
             )
         if self.type == TaskType.COMMUNICATION:
-            if self.checker is not None:
+            if self.checker is not None and (
+                self.interactor is None or not self.interactor.legacy
+            ):
                 raise PydanticCustomError(
                     'CHECKER_NOT_ALLOWED',
                     'Checkers should not be specified for communication problems.',
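
Note: the validation above now permits a checker on COMMUNICATION problems only when the interactor is marked `legacy`. A sketch of the two configurations (values are illustrative; `path` is assumed to be inherited from `CodeItem`):

    from rbx.box.schema import Interactor

    Interactor(path='interactor.cpp')               # default: no separate checker allowed
    Interactor(path='interactor.cpp', legacy=True)  # legacy: a checker must also be specified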
rbx/box/solutions.py CHANGED
@@ -192,6 +192,7 @@ def _run_solution(
     progress: Optional[StatusProgress] = None,
     verification: VerificationLevel = VerificationLevel.NONE,
     timelimit_override: Optional[int] = None,
+    nruns: int = 0,
 ) -> List[Deferred[Evaluation]]:
     group = package.get_testgroup(group_name)
     testcases = find_built_testcases(group)
@@ -217,6 +218,7 @@
             testcase_index=i,
             verification=verification,
             timelimit_override=timelimit_override,
+            nruns=nruns,
         )
 
         res.append(Deferred(run_fn))
@@ -315,6 +317,7 @@ def _produce_solution_items(
     check: bool = True,
     timelimit_override: Optional[int] = None,
     sanitized: bool = False,
+    nruns: int = 0,
 ) -> List[EvaluationItem]:
     pkg = package.find_problem_package_or_die()
 
@@ -348,6 +351,7 @@
             progress=progress,
             verification=verification,
             timelimit_override=timelimit_override,
+            nruns=nruns,
         )
     ):
         res.append(
@@ -394,6 +398,7 @@ def run_solutions(
     check: bool = True,
     timelimit_override: Optional[int] = None,
     sanitized: bool = False,
+    nruns: int = 0,
 ) -> RunSolutionResult:
     skeleton = _get_report_skeleton(
         tracked_solutions,
@@ -409,6 +414,7 @@
             check=check,
             timelimit_override=timelimit_override,
             sanitized=sanitized,
+            nruns=nruns,
         ),
     )
     return result
@@ -684,7 +690,7 @@ async def run_and_print_interactive_solutions(
         console.console.print(get_testcase_markup_verdict(eval), end=' ')
         _print_solution_header(sol, console.console)
         _print_solution_outcome(
-            sol, [eval], console.console, verification, subset=True
+            sol, skeleton, [eval], console.console, verification, subset=True
         )
 
         stdout_path = eval.log.stdout_absolute_path
@@ -912,6 +918,7 @@ class SolutionOutcomeReport(BaseModel):
     solution: Solution
     evals: List[Evaluation]
    ok: bool
+    message: Optional[Tuple[TestcaseEntry, str]]
     expectedOutcome: Optional[ExpectedOutcome]
     gotVerdicts: Set[Outcome]
     runUnderDoubleTl: bool
@@ -949,15 +956,22 @@ class SolutionOutcomeReport(BaseModel):
             res += '\n[bold yellow]WARNING[/bold yellow] The solution had sanitizer errors or warnings, marked with [bold yellow]*[/bold yellow]. See their stderr for more details.'
         return res
 
-    def get_outcome_markup(self) -> str:
+    def get_outcome_markup(self, print_message: bool = True) -> str:
         res = self.get_verdict_markup_with_warnings()
         res += f'\nTime: {get_capped_evals_formatted_time(self.solution, self.evals, self.verification)}'
         res += f'\nMemory: {get_evals_formatted_memory(self.evals)}'
+        if print_message and self.message is not None:
+            tc, msg = self.message
+            if msg:
+                if len(msg) > 100:
+                    msg = msg[:100] + '... (truncated)'
+                res += f'\nMessage for {tc}: {msg}'
         return res
 
 
 def get_solution_outcome_report(
     solution: Solution,
+    skeleton: SolutionReportSkeleton,
     evals: List[Evaluation],
     verification: VerificationLevel = VerificationLevel.NONE,
     subset: bool = False,
@@ -969,7 +983,8 @@ def get_solution_outcome_report(
     bad_verdicts = set()
     no_tle_bad_verdicts = set()
     has_sanitizer_warnings = False
-    for eval in evals:
+    message: Optional[Tuple[TestcaseEntry, str]] = None
+    for eval, entry in zip(evals, skeleton.entries):
         all_verdicts.add(eval.result.outcome)
         if eval.result.outcome != Outcome.ACCEPTED:
             bad_verdicts.add(eval.result.outcome)
@@ -984,6 +999,16 @@
         has_sanitizer_warnings = (
             has_sanitizer_warnings or eval.result.sanitizer_warnings
         )
+        if (
+            eval.result.outcome
+            in [
+                Outcome.WRONG_ANSWER,
+                Outcome.JUDGE_FAILED,
+            ]
+            and message is None
+        ):
+            message = (entry, eval.result.message)
+
     unmatched_bad_verdicts = set(
         v for v in bad_verdicts if not solution.outcome.match(v)
     )
@@ -1042,6 +1067,7 @@
         solution=solution,
         evals=evals,
         ok=not has_failed,
+        message=message,
         expectedOutcome=report_expected_outcome,
         gotVerdicts=report_got_verdicts,
         runUnderDoubleTl=report_run_under_double_tl,
@@ -1053,13 +1079,17 @@
 
 def _print_solution_outcome(
     solution: Solution,
+    skeleton: SolutionReportSkeleton,
     evals: List[Evaluation],
     console: rich.console.Console,
     verification: VerificationLevel = VerificationLevel.NONE,
     subset: bool = False,
+    print_message: bool = True,
 ) -> bool:
-    report = get_solution_outcome_report(solution, evals, verification, subset)
-    console.print(report.get_outcome_markup())
+    report = get_solution_outcome_report(
+        solution, skeleton, evals, verification, subset
+    )
+    console.print(report.get_outcome_markup(print_message))
     return report.ok
 
 
@@ -1343,6 +1373,7 @@ async def _print_detailed_run_report(
     _print_solution_header(solution, console)
     cur_ok = _print_solution_outcome(
         solution,
+        result.skeleton,
         all_evals,
         console,
         verification=verification,
@@ -1404,44 +1435,70 @@ async def print_run_report(
     )
 
     ok = True
+    single_solution = len(result.skeleton.solutions) == 1
 
     for solution in result.skeleton.solutions:
         _print_solution_header(solution, console)
+        if single_solution:
+            console.print()
         solution_evals = []
         for group in result.skeleton.groups:
-            console.print(f'[bold][status]{group.name}[/status][/bold] ', end='')
+            if not single_solution:
+                console.print(f'[bold][status]{group.name}[/status][/bold] ', end='')
             group_evals = []
             for i, _ in enumerate(group.testcases):
                 eval = structured_evaluations[str(solution.path)][group.name][i]
                 if eval is None:
                     continue
                 eval = await eval()
-                console.print(f'{i}/', end='')
-                console.print(get_testcase_markup_verdict(eval), end='')
-                if eval.result.sanitizer_warnings:
-                    console.print('[warning]*[/warning]', end='')
-                console.print('', end=' ')
+                if single_solution:
+                    console.print(get_testcase_markup_verdict(eval), end=' ')
+                    console.print(f'{group.name}/{i}', end='')
+                    if eval.result.sanitizer_warnings:
+                        console.print('[warning]*[/warning]', end='')
+                    time = get_capped_evals_formatted_time(
+                        solution, [eval], verification
+                    )
+                    memory = get_evals_formatted_memory([eval])
+                    console.print(f' ({time}, {memory})', end='')
+                    checker_msg = eval.result.message
+                    if checker_msg:
+                        console.print(f': [i]{checker_msg}[/i]', end='')
+                else:
+                    console.print(f'{i}/', end='')
+                    console.print(get_testcase_markup_verdict(eval), end='')
+                    if eval.result.sanitizer_warnings:
+                        console.print('[warning]*[/warning]', end='')
+
+                console.print('', end='\n' if single_solution else ' ')
                 group_evals.append(eval)
                 solution_evals.append(eval)
 
+            if single_solution:
+                console.print(f' [status]{group.name}[/status]', end=' ')
             console.print(
                 f'({get_capped_evals_formatted_time(solution, group_evals, verification)}, {get_evals_formatted_memory(group_evals)})',
                 end='',
            )
             console.print()
+            if single_solution:
+                console.print()
 
         cur_ok = _print_solution_outcome(
             solution,
+            result.skeleton,
             solution_evals,
             console,
             verification=verification,
+            print_message=not single_solution,
        )
         ok = ok and cur_ok
         console.print()
 
-    await _print_timing(
-        console, result.skeleton, structured_evaluations, verification=verification
-    )
+    if not single_solution:
+        await _print_timing(
+            console, result.skeleton, structured_evaluations, verification=verification
+        )
 
     return ok
 
rbx/box/stats.py ADDED
@@ -0,0 +1,92 @@
+import pathlib
+from typing import Iterable, List, Tuple
+
+from rbx import console
+from rbx.box.cd import (
+    find_all_ancestor_packages,
+    is_contest_package,
+    is_problem_package,
+)
+from rbx.box.contest.contest_package import find_contest, find_contest_package_or_die
+from rbx.box.formatting import get_formatted_memory
+
+
+def find_problem_packages_from_contest(
+    root: pathlib.Path = pathlib.Path(),
+) -> Iterable[pathlib.Path]:
+    contest_path = find_contest(root)
+    contest = find_contest_package_or_die(contest_path)
+    for problem in contest.problems:
+        yield contest_path / problem.get_path()
+
+
+def find_all_reachable_packages(
+    root: pathlib.Path = pathlib.Path(),
+) -> List[pathlib.Path]:
+    packages = find_all_ancestor_packages(root)
+
+    for package in list(packages):
+        if is_contest_package(package):
+            packages.extend(find_problem_packages_from_contest(package))
+    return packages
+
+
+def find_and_group_all_reachable_packages(
+    root: pathlib.Path = pathlib.Path(),
+) -> Tuple[List[pathlib.Path], List[pathlib.Path]]:
+    packages = find_all_reachable_packages(root)
+    contest_packages = set(pkg for pkg in packages if is_contest_package(pkg))
+    problem_packages = set(pkg for pkg in packages if is_problem_package(pkg))
+    return sorted(contest_packages), sorted(problem_packages)
+
+
+def get_dir_size(path: pathlib.Path) -> int:
+    if not path.is_dir():
+        return 0
+    return sum(
+        f.stat().st_size
+        for f in path.glob('**/*')
+        if f.is_file() and not f.is_symlink()
+    )
+
+
+def get_cache_size(root: pathlib.Path = pathlib.Path()) -> int:
+    cache_dir = root / '.box'
+    return get_dir_size(cache_dir)
+
+
+def get_build_size(root: pathlib.Path = pathlib.Path()) -> int:
+    build_dir = root / 'build'
+    return get_dir_size(build_dir)
+
+
+def print_package_stats(root: pathlib.Path = pathlib.Path()) -> int:
+    if is_contest_package(root):
+        console.console.print(f'[status]Contest package[/status]: [item]{root}[/item]')
+    else:
+        console.console.print(f'[status]Problem package[/status]: [item]{root}[/item]')
+
+    cache_size = get_cache_size(root)
+    build_size = get_build_size(root)
+    console.console.print(
+        f'[status]Cache size[/status]: [item]{get_formatted_memory(cache_size)}[/item]'
+    )
+    console.console.print(
+        f'[status]Build size[/status]: [item]{get_formatted_memory(build_size)}[/item]'
+    )
+
+    return cache_size + build_size
+
+
+def print_reachable_package_stats(root: pathlib.Path = pathlib.Path()) -> None:
+    contest_packages, problem_packages = find_and_group_all_reachable_packages(root)
+    total_size = 0
+    for pkg in contest_packages:
+        total_size += print_package_stats(pkg)
+        console.console.print()
+    for pkg in problem_packages:
+        total_size += print_package_stats(pkg)
+        console.console.print()
+    console.console.print(
+        f'[status]Total size[/status]: [item]{get_formatted_memory(total_size)}[/item]'
+    )
rbx/box/tasks.py CHANGED
@@ -4,7 +4,7 @@ from typing import Optional
 from rbx.box import checkers, package, state
 from rbx.box.code import CommunicationItem, run_communication, run_item
 from rbx.box.environment import EnvironmentSandbox, ExecutionConfig, VerificationLevel
-from rbx.box.retries import Retrier
+from rbx.box.retries import Retrier, get_retrier_config
 from rbx.box.schema import Solution, Testcase
 from rbx.grading.judge.sandbox import SandboxBase
 from rbx.grading.limits import Limits
@@ -51,6 +51,7 @@ async def run_solution_on_testcase(
     use_retries: bool = True,
     use_timelimit: bool = True,
     capture_pipes: bool = False,
+    nruns: int = 0,
 ) -> Evaluation:
     if interactor_digest is not None:
         return await _run_communication_solution_on_testcase(
@@ -66,6 +67,7 @@
             use_retries=use_retries,
             use_timelimit=use_timelimit,
             capture_pipes=capture_pipes,
+            nruns=nruns,
         )
 
     async def run_fn(retry_index: int) -> Evaluation:
@@ -132,7 +134,7 @@
     if not use_retries:
         return await run_fn(0)
 
-    retrier = Retrier()
+    retrier = Retrier(get_retrier_config(nruns))
     return await retrier.repeat(run_fn)
 
 
@@ -166,6 +168,7 @@ async def _run_communication_solution_on_testcase(
     use_retries: bool = True,
     use_timelimit: bool = True,
     capture_pipes: bool = False,
+    nruns: int = 0,
 ) -> Evaluation:
     capture_pipes = capture_pipes or state.STATE.debug_logs
 
@@ -291,5 +294,5 @@
     if not use_retries:
         return await run_fn(0)
 
-    retrier = Retrier()
+    retrier = Retrier(get_retrier_config(nruns))
     return await retrier.repeat(run_fn)
@@ -52,7 +52,7 @@ def get_solution_markup(
 
     evals = get_solution_evals_or_null(skeleton, solution)
     report = solutions.get_solution_outcome_report(
-        solution, evals or [], skeleton.verification, subset=False
+        solution, skeleton, evals or [], skeleton.verification, subset=False
     )
     if evals is None:
         return header + '\n' + report.get_verdict_markup(incomplete=True)
@@ -25,17 +25,29 @@ MERGE_STDERR = pathlib.PosixPath('/dev/stdout')
 
 # Thread-safe version of asyncio.Event.
 class Event_ts(asyncio.Event):
+    def __init__(self):
+        super().__init__()
+        self._inherited_loop = asyncio.get_event_loop()
+
     def get_loop(self):
-        if self._loop is None:
-            return asyncio.get_event_loop()
-        else:
-            return self._loop
+        if self._inherited_loop is None:
+            return None
+        if self._inherited_loop.is_closed():
+            return None
+        return self._inherited_loop
+
+    def set_loop(self, loop):
+        self._inherited_loop = loop
 
     def set(self):
-        self.get_loop().call_soon_threadsafe(super().set)
+        loop = self.get_loop()
+        if loop is not None:
+            loop.call_soon_threadsafe(super().set)
 
     def clear(self):
-        self.get_loop().call_soon_threadsafe(super().clear)
+        loop = self.get_loop()
+        if loop is not None:
+            loop.call_soon_threadsafe(super().clear)
 
 
 def wait_without_std(
@@ -245,7 +257,7 @@ class SandboxBase(abc.ABC):
 
         self.params = params or SandboxParams()
         self.pid = None
-        self._pid_event = Event_ts()
+        self.pid_event = Event_ts()
 
         # Set common environment variables.
         # Specifically needed by Python, that searches the home for
@@ -352,7 +364,7 @@ class SandboxBase(abc.ABC):
 
         """
        self.pid = pid
-        self._pid_event.set()
+        self.pid_event.set()
 
     async def get_pid(self) -> int:
         """Return the PID of the sandboxed process.
@@ -362,13 +374,13 @@
         return (int): the PID of the sandboxed process.
 
         """
-        await self._pid_event.wait()
+        await self.pid_event.wait()
         assert self.pid is not None
         return self.pid
 
     def clear_pid(self):
         """Clear the PID of the sandboxed process."""
-        self._pid_event.clear()
+        self.pid_event.clear()
         self.pid = None
 
     def use_pgid(self) -> bool:
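
Note: `Event_ts` now captures its event loop at construction instead of peeking at asyncio.Event's internal `_loop` attribute, so `set()`/`clear()` from a worker thread are marshalled onto the loop that owns the event and become no-ops once that loop is closed. A minimal sketch of the intended pattern, using only the class above:

    import asyncio
    import threading

    async def main():
        ev = Event_ts()  # binds to the currently running loop
        # Safe from another thread: set() goes through call_soon_threadsafe.
        threading.Thread(target=ev.set).start()
        await ev.wait()

    asyncio.run(main())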
rbx/grading/steps.py CHANGED
@@ -709,6 +709,7 @@ async def run(
     if is_java_like_command(get_exe_from_command(command)):
         sandbox.params.address_space = None
 
+    sandbox.pid_event.set_loop(asyncio.get_event_loop())
     if not await asyncio.to_thread(sandbox.execute_without_std, cmd):
         console.print(
             '[error]Sandbox crashed while processing command:[/error]',
@@ -1,14 +1,15 @@
+---
+# yaml-language-server: $schema=https://rsalesc.github.io/rbx/schemas/Contest.json
 # Add problems by running `rbx contest add <problem-name> <short-name>`
-
 name: "new-contest"
 statements:
   - name: "statement-en"
     title: "New contest"
-    language: en
+    language: "en"
     path: "statement/contest.rbx.tex"
     type: "jinja-tex"
     assets: ["statement/olymp.sty", "statement/*.png"]
-    joiner: { type: "tex2pdf" }
+    joiner: {type: "tex2pdf"}
     override:
       configure:
         - type: "rbx-tex" # Convert rbxTeX to TeX
@@ -1,12 +1,14 @@
+---
+# yaml-language-server: $schema=https://rsalesc.github.io/rbx/schemas/Preset.json
 name: "default"
-uri: rsalesc/rbx/rbx/resources/presets/default
+uri: "rsalesc/rbx/rbx/resources/presets/default"
 problem: "problem"
 contest: "contest"
 tracking:
   problem:
-    - path: statement/template.rbx.tex
-    - path: statement/olymp.sty
+    - path: "statement/template.rbx.tex"
+    - path: "statement/olymp.sty"
   contest:
-    - path: statement/template.rbx.tex
-    - path: statement/olymp.sty
-    - path: statement/contest.rbx.tex
+    - path: "statement/template.rbx.tex"
+    - path: "statement/olymp.sty"
+    - path: "statement/contest.rbx.tex"
@@ -1,10 +1,10 @@
-# yaml-language-server: $schema=/home/rsalesc/.config/rbx/schemas/Package.json
-
+---
+# yaml-language-server: $schema=https://rsalesc.github.io/rbx/schemas/Package.json
 name: "new-problem"
 timeLimit: 1000
 memoryLimit: 256
-checker: { path: "wcmp.cpp" } # Download others from testlib with `rbx download checker`
-validator: { path: "validator.cpp" }
+checker: {path: "wcmp.cpp"} # Download others from testlib with `rbx download checker`
+validator: {path: "validator.cpp"}
 generators:
   - path: "gen.cpp"
     name: "gen"
@@ -19,16 +19,16 @@ testcases:
       path: "random.py" # Generator script written programatically.
 solutions:
   - path: "sols/main.cpp"
-    outcome: ACCEPTED
+    outcome: "ACCEPTED"
   - path: "sols/wa.cpp"
-    outcome: WRONG_ANSWER
+    outcome: "WRONG_ANSWER"
   - path: "sols/slow.cpp"
-    outcome: TLE_OR_RTE # Can be TLE too
+    outcome: "TLE_OR_RTE" # Can be TLE too
 statements:
   - name: "statement-en"
     title: "New Problem"
     path: "statement/statement.rbx.tex" # Open this file to edit your statement.
-    type: rbxTeX
+    type: "rbxTeX"
     language: "en"
     assets: ["statement/olymp.sty", "statement/*.png"]
     configure:
@@ -40,6 +40,16 @@ stresses:
       name: "gen"
       args: "[1..<MAX_N>] @" # `@` generates a random string
     finder: "[sols/wa.cpp] ~ INCORRECT"
+unitTests:
+  validator:
+    - glob: "unit/validator/valid*.in"
+      outcome: "VALID"
+    - glob: "unit/validator/invalid*.in"
+      outcome: "INVALID"
+  checker:
+    - glob: "unit/checker/ac*"
+      outcome: "ACCEPTED"
+    - glob: "unit/checker/wa*"
+      outcome: "WRONG_ANSWER"
 vars:
-  "MAX_N": 1000000000 # Can be used in the validator, in stress tests and in the statement.
-
+  MAX_N: 1000000000 # Can be used in the validator, in stress tests and in the statement.
@@ -1,2 +1,4 @@
 gen 123456
-gen 12345678
+gen 12345678
+# Obtained by running `rbx stress -g 'gen [1..<MAX_N>] @' -f '[sols/wa.cpp] ~ INCORRECT'`
+gen 149403982 b139a2bd