rbx.cp 0.5.65-py3-none-any.whl → 0.5.67-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rbx/box/solutions.py CHANGED
@@ -16,7 +16,7 @@ import typer
 from pydantic import BaseModel

 from rbx import console, utils
-from rbx.box import checkers, environment, package, state
+from rbx.box import checkers, environment, package, remote, state
 from rbx.box.code import (
     SanitizationLevel,
     compile_item,
@@ -130,7 +130,7 @@ class RunSolutionResult:

 def is_fast(solution: Solution) -> bool:
     # If solution has TLE tag, it is considered slow.
-    return not solution.outcome.match(Outcome.TIME_LIMIT_EXCEEDED)
+    return not solution.outcome.is_slow()


 def get_matching_solutions(expected_outcome: ExpectedOutcome) -> List[Solution]:
@@ -159,12 +159,10 @@ def compile_solutions(

     compiled_solutions = {}

-    for solution in pkg.solutions:
-        if (
-            tracked_solutions is not None
-            and str(solution.path) not in tracked_solutions
-        ):
-            continue
+    if tracked_solutions is None:
+        tracked_solutions = set(str(sol.path) for sol in pkg.solutions)
+
+    for solution in expand_solutions(list(tracked_solutions)):
         if progress:
             progress.update(f'Compiling solution {href(solution.path)}...')
         try:
@@ -254,11 +252,7 @@ def _get_solutions_for_skeleton(
         if verification.value >= VerificationLevel.ALL_SOLUTIONS.value or is_fast(sol)
     ]
     if tracked_solutions is not None:
-        solutions = [
-            solution
-            for solution in solutions
-            if str(solution.path) in tracked_solutions
-        ]
+        solutions = expand_solutions(list(tracked_solutions))
     return solutions


@@ -726,28 +720,42 @@ def _get_solution_repr(sol: Solution) -> List[Tuple[str, str]]:
     ]


-async def expand_solutions(sols: List[str]) -> List[Solution]:
+def expand_solutions_with_source(sols: List[str]) -> List[Tuple[Solution, bool]]:
     pkg = package.find_problem_package_or_die()
-    seen_sols = set(str(sol.path) for sol in pkg.solutions)
+    pkg_sols = {str(sol.path): sol for sol in pkg.solutions}

-    # Dedup sols.
-    sols = [sol for sol in sols if str(sol) not in seen_sols]
+    # Download remote sols.
+    sols = remote.expand_files(sols)

     # Ensure sols exist.
     sols = [sol for sol in sols if pathlib.Path(sol).is_file()]

-    return [
-        Solution(path=pathlib.Path(sol), outcome=ExpectedOutcome.ACCEPTED)
-        for sol in sols
-    ]
+    seen_sols = set()
+    res: List[Tuple[Solution, bool]] = []
+    for sol in sols:
+        if sol in seen_sols:
+            # This solution was already added.
+            continue
+        if sol in pkg_sols:
+            # This solution is in the package.
+            res.append((pkg_sols[sol], False))
+        else:
+            # This solution is fetched from some source.
+            res.append(
+                (Solution(path=pathlib.Path(sol), outcome=ExpectedOutcome.ANY), True)
+            )
+        seen_sols.add(sol)
+    return res
+
+
+def expand_solutions(sols: List[str]) -> List[Solution]:
+    return [sol for sol, _ in expand_solutions_with_source(sols)]


 async def pick_solutions(
     tracked_solutions: Optional[Set[str]],
-    extra_solutions: Optional[Iterable[Solution]] = None,
+    extra_solutions: Optional[List[str]] = None,
 ) -> List[str]:
-    extra_sols = set(extra_solutions) if extra_solutions is not None else set()
-
     pkg = package.find_problem_package_or_die()
     # Store in a separate list to maintain order with the package declaration.
     import questionary
@@ -763,13 +771,15 @@ async def pick_solutions(

     seen_sols = set(str(sol.path) for sol in pkg.solutions)

-    if extra_sols:
+    if extra_solutions is not None:
         # Add only new solutions.
         choices.extend(
             questionary.Choice(
-                title=_get_solution_repr(sol), value=str(sol.path), checked=True
+                title=_get_solution_repr(sol),
+                value=str(sol.path),
+                checked=True,
             )
-            for sol in extra_sols
+            for sol in expand_solutions(extra_solutions)
             if str(sol.path) not in seen_sols
         )
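
A note on the reworked helpers above: solution lists are now plain path strings, remote specifiers are materialized through `remote.expand_files`, and each resulting `Solution` carries a flag telling whether it came from the package or from an external source. A minimal usage sketch, assuming it runs inside a directory containing an rbx problem package (the `sols/main.cpp` path is a placeholder):

```python
# Hypothetical usage sketch for the helpers introduced above; assumes an rbx
# problem package in the current working directory.
from rbx.box.solutions import expand_solutions, expand_solutions_with_source

# Entries may be package solutions or external files; remote specifiers are
# downloaded via remote.expand_files, and duplicates are skipped.
for solution, is_external in expand_solutions_with_source(['sols/main.cpp']):
    origin = 'external' if is_external else 'package'
    print(f'{solution.path} ({origin}) -> expected outcome: {solution.outcome}')

# expand_solutions() returns just the Solution objects, dropping the flag.
solutions = expand_solutions(['sols/main.cpp'])
```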
 
@@ -784,7 +794,7 @@ def get_outcome_style_verdict(outcome: Outcome) -> str:
         return 'green'
     if outcome == Outcome.WRONG_ANSWER:
         return 'red'
-    if outcome == Outcome.TIME_LIMIT_EXCEEDED:
+    if outcome.is_slow():
         return 'yellow'
     if outcome == Outcome.RUNTIME_ERROR:
         return 'blue'
@@ -797,7 +807,7 @@ def get_outcome_markup_verdict(outcome: Outcome) -> str:
     res = '✓'
     if outcome != Outcome.ACCEPTED:
         res = '✗'
-    if outcome == Outcome.TIME_LIMIT_EXCEEDED:
+    if outcome.is_slow():
         res = '⧖'
     if outcome == Outcome.RUNTIME_ERROR:
         res = '✗'
@@ -827,6 +837,19 @@ def get_full_testcase_markup_verdict(eval: Evaluation) -> str:
 def _get_evals_time_in_ms(evals: List[Evaluation]) -> int:
     if not evals:
         return 0
+    evals_with_ile = [
+        eval for eval in evals if eval.result.outcome == Outcome.IDLENESS_LIMIT_EXCEEDED
+    ]
+    for eval in evals_with_ile:
+        # Try every way of estimating a ILE max timelimit.
+        if eval.log.metadata is None:
+            continue
+        if eval.log.metadata.limits is not None:
+            expanded_tl = eval.log.metadata.limits.get_expanded_tl()
+            if expanded_tl is not None:
+                return expanded_tl
+        if eval.log.metadata.timeLimit is not None:
+            return eval.log.metadata.timeLimit
     return max(int((eval.log.time or 0.0) * 1000) for eval in evals)


@@ -847,7 +870,10 @@ def get_capped_evals_formatted_time(
     pkg = package.find_problem_package_or_die()

     max_time = _get_evals_time_in_ms(evals)
-    has_tle = any(eval.result.outcome == Outcome.TIME_LIMIT_EXCEEDED for eval in evals)
+    has_tle = any(eval.result.outcome.is_slow() for eval in evals)
+    has_ile = any(
+        eval.result.outcome == Outcome.IDLENESS_LIMIT_EXCEEDED for eval in evals
+    )
     timelimits = [
         eval.log.metadata.limits.get_expanded_tl()
         for eval in evals
@@ -865,7 +891,7 @@ def get_capped_evals_formatted_time(
         # Using double TL for verification.
         tl = tl * 2

-    if has_tle and max_time >= tl:
+    if has_tle and max_time >= tl or has_ile:
         return f'>{tl} ms'
     return f'{max_time} ms'

@@ -950,8 +976,7 @@ def get_solution_outcome_report(
         ):
             no_tle_bad_verdicts.add(eval.result.no_tle_outcome)
         has_plain_tle = has_plain_tle or (
-            eval.result.outcome == Outcome.TIME_LIMIT_EXCEEDED
-            and eval.result.no_tle_outcome is None
+            eval.result.outcome.is_slow() and eval.result.no_tle_outcome is None
        )
        has_sanitizer_warnings = (
            has_sanitizer_warnings or eval.result.sanitizer_warnings
@@ -984,9 +1009,7 @@
         report_got_verdicts = {Outcome.ACCEPTED}

     evals_time = _get_evals_time_in_ms(evals)
-    expected_outcome_is_tle = solution.outcome.match(
-        Outcome.TIME_LIMIT_EXCEEDED
-    ) and not solution.outcome.match(Outcome.ACCEPTED)
+    expected_outcome_is_tle = solution.outcome.matches_tle_and_is_incorrect()
     if (
         # Running verification with double TL.
         verification.value >= VerificationLevel.FULL.value
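
Two behavioral details in the timing changes above are easy to miss. First, `_get_evals_time_in_ms` now short-circuits for idleness-limit-exceeded (ILE) runs, preferring the expanded time limit from the run metadata over the measured wall time. Second, the condition `has_tle and max_time >= tl or has_ile` relies on `and` binding tighter than `or`, so an ILE verdict always renders as `>{tl} ms` regardless of the measured time. A standalone sketch of that condition (names mirror the diff; the values are made up):

```python
# Sketch of the capped-time condition from get_capped_evals_formatted_time.
# `and` binds tighter than `or`, so this parses as
# (has_tle and max_time >= tl) or has_ile.
def format_capped_time(max_time: int, tl: int, has_tle: bool, has_ile: bool) -> str:
    if has_tle and max_time >= tl or has_ile:
        return f'>{tl} ms'
    return f'{max_time} ms'

assert format_capped_time(1500, 1000, has_tle=True, has_ile=False) == '>1000 ms'
assert format_capped_time(300, 1000, has_tle=False, has_ile=True) == '>1000 ms'
assert format_capped_time(300, 1000, has_tle=False, has_ile=False) == '300 ms'
```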
rbx/box/stresses.py CHANGED
@@ -8,7 +8,7 @@ import typer
 from pydantic import BaseModel

 from rbx import console
-from rbx.box import checkers, package, validators
+from rbx.box import checkers, generators, package, validators
 from rbx.box.code import SanitizationLevel, compile_item, run_item
 from rbx.box.generators import (
     GenerationMetadata,
@@ -51,10 +51,10 @@ def _compile_finder(finder: CodeItem) -> str:


 async def run_stress(
-    name: str,
     timeoutInSeconds: int,
+    name: Optional[str] = None,
     finder: Optional[str] = None,
-    args: Optional[str] = None,
+    generator_call: Optional[str] = None,
     findingsLimit: int = 1,
     verbose: bool = False,
     progress: Optional[StatusProgress] = None,
@@ -68,12 +68,23 @@
         raise typer.Exit(1)

     if finder:
+        if generator_call is None:
+            console.console.print(
+                '[error]Generator arguments are required for stress testing. Specify them through the [item]-g[/item] flag.[/error]'
+            )
+            raise typer.Exit(1)
+        generator = generators.get_call_from_string(generator_call)
         stress = Stress(
-            name=f'{name}',
-            generator=GeneratorCall(name=name, args=args or ''),
+            name=f'{generator.name}',
+            generator=generator,
             finder=finder,
         )
     else:
+        if name is None:
+            console.console.print(
+                '[error]Invalid stress test paramaters. Either provide a stress test name, or provide a finder expression (-f) and generator arguments (-g).[/error]'
+            )
+            raise typer.Exit(1)
         stress = package.get_stress(name)

     call = stress.generator
@@ -258,7 +269,7 @@

     if internal_error_results:
         console.console.print(
-            f'[error]Checkers failed during stress test [item]{name}[/item] with args [info]{expanded_generator_call.name} {expanded_generator_call.args}[/info][/error]'
+            f'[error]Checkers failed during stress test [item]{stress.name}[/item] with args [info]{expanded_generator_call.name} {expanded_generator_call.args}[/info][/error]'
         )
         for internal_error_result in internal_error_results:
             assert internal_error_result.checker is not None
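
With the signature change above, `run_stress` supports two mutually exclusive modes: pass `name` to run a stress test declared in the package, or pass both `finder` and `generator_call` (parsed via `generators.get_call_from_string`) for an ad-hoc run. A hypothetical invocation sketch; the generator call and finder values are placeholders, and a problem package is assumed in the working directory:

```python
# Hypothetical sketch of the ad-hoc mode added above. The generator call and
# finder values below are placeholders, not values taken from this diff.
import asyncio

from rbx.box.stresses import run_stress

asyncio.run(
    run_stress(
        timeoutInSeconds=60,
        finder='sols/wrong.cpp',  # placeholder finder expression
        generator_call='gen 1000',  # placeholder generator name + args
        findingsLimit=1,
    )
)
```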
rbx/box/ui/css/app.tcss CHANGED
@@ -1,10 +1,11 @@
 Screen {
     background: $background;
     color: $text;
+    align: center middle;
 }

-Screen {
-    align: center middle;
+ModalScreen {
+    background: $background 65%;
 }

 ListView {
@@ -21,6 +22,16 @@ OptionList {
     border: solid $accent;
 }

+DiffBox {
+    border: solid $accent;
+    height: 1fr;
+    width: 1fr;
+    RichLog {
+        height: 1fr;
+        width: 1fr;
+    }
+}
+
 Button {
     width: 1fr;
     background: $accent;
@@ -96,4 +107,27 @@ TestExplorerScreen, RunTestExplorerScreen {
         border: solid $accent;
         padding: 0 1;
     }
+}
+
+#rich-dialog {
+    max-width: 80;
+    height: auto;
+    max-height: 5;
+
+    RichLog {
+        border: solid $accent;
+        padding: 0 1;
+    }
+}
+
+#selector-dialog {
+    max-width: 80;
+    height: auto;
+}
+
+#run-tips {
+    border: solid $accent;
+    padding: 0 1;
+    height: auto;
+    text-align: center;
 }
rbx/box/ui/main.py CHANGED
@@ -1,3 +1,4 @@
+import pathlib
 from typing import Type

 from textual.app import App, ComposeResult
@@ -5,6 +6,8 @@ from textual.containers import Center
 from textual.screen import Screen
 from textual.widgets import Footer, Header, OptionList

+from rbx.box import remote
+from rbx.box.ui.screens.differ import DifferScreen
 from rbx.box.ui.screens.run_explorer import RunExplorerScreen
 from rbx.box.ui.screens.test_explorer import TestExplorerScreen

@@ -35,6 +38,27 @@ class rbxApp(App):
         self.push_screen(screen_cls())


+class rbxDifferApp(App):
+    TITLE = 'rbx differ'
+    CSS_PATH = 'css/app.tcss'
+    BINDINGS = [('q', 'quit', 'Quit')]
+
+    def __init__(self, path1: pathlib.Path, path2: pathlib.Path):
+        super().__init__()
+        self.path1 = path1
+        self.path2 = path2
+
+    def on_mount(self):
+        self.push_screen(DifferScreen(self.path1, self.path2))
+
+
 def start():
     app = rbxApp()
     app.run()
+
+
+def start_differ(path1: pathlib.Path, path2: pathlib.Path):
+    path1, path2 = remote.expand_files([path1, path2])
+
+    app = rbxDifferApp(path1, path2)
+    app.run()
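
`start_differ` runs both paths through `remote.expand_files` first, so either argument may be a remote specifier that is downloaded before the Textual app opens. A minimal launch sketch (the two file names are illustrative):

```python
# Illustrative sketch: launching the new differ UI directly. The paths are
# placeholders; remote specifiers would be resolved by remote.expand_files.
import pathlib

from rbx.box.ui.main import start_differ

start_differ(pathlib.Path('expected.out'), pathlib.Path('actual.out'))
```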
rbx/box/ui/screens/differ.py ADDED
@@ -0,0 +1,29 @@
+import pathlib
+
+from textual.app import ComposeResult
+from textual.containers import Vertical
+from textual.screen import Screen
+from textual.widgets import Footer, Header
+
+from rbx.box.ui.widgets.diff_box import DiffBox
+
+
+class DifferScreen(Screen):
+    BINDINGS = [
+        ('q', 'quit', 'Quit'),
+    ]
+
+    def __init__(self, path1: pathlib.Path, path2: pathlib.Path):
+        super().__init__()
+        self.path1 = path1
+        self.path2 = path2
+
+    def compose(self) -> ComposeResult:
+        yield Header()
+        yield Footer()
+        with Vertical():
+            yield DiffBox()
+
+    def on_mount(self):
+        diff = self.query_one(DiffBox)
+        diff.paths = (self.path1, self.path2)
rbx/box/ui/screens/rich_log_modal.py ADDED
@@ -0,0 +1,29 @@
+from typing import Optional
+
+from textual.app import ComposeResult
+from textual.containers import Container
+from textual.screen import ModalScreen
+
+from rbx.box.ui.widgets.rich_log_box import RichLogBox
+
+
+class RichLogModal(ModalScreen[None]):
+    BINDINGS = [
+        ('q', 'app.pop_screen', 'Close'),
+        ('g', 'app.pop_screen', 'Close'),
+    ]
+
+    def __init__(self, log: str, title: Optional[str] = None):
+        super().__init__()
+        self._log = log
+        self._title = title
+
+    def compose(self) -> ComposeResult:
+        with Container(id='rich-dialog'):
+            box = RichLogBox(markup=True)
+            if self._title:
+                box.border_title = self._title
+            yield box
+
+    async def on_mount(self):
+        self.query_one(RichLogBox).write(self._log)
rbx/box/ui/screens/run_explorer.py CHANGED
@@ -1,15 +1,19 @@
 from typing import Optional

 from textual.app import ComposeResult
+from textual.containers import Vertical
 from textual.reactive import reactive
 from textual.screen import Screen
 from textual.widgets import Footer, Header, Label, ListItem, ListView

+from rbx.box import package
+from rbx.box.schema import TaskType
 from rbx.box.solutions import SolutionReportSkeleton
 from rbx.box.ui.screens.error import ErrorScreen
 from rbx.box.ui.screens.run_test_explorer import RunTestExplorerScreen
 from rbx.box.ui.screens.selector import SelectorScreen
 from rbx.box.ui.utils.run_ui import get_skeleton, get_solution_markup, has_run
+from rbx.box.ui.widgets.rich_log_box import RichLogBox


 class RunExplorerScreen(Screen):
@@ -29,9 +33,22 @@ class RunExplorerScreen(Screen):
             Label(get_solution_markup(self.skeleton, sol), markup=True)
             for i, sol in enumerate(self.skeleton.solutions)
         ]
-        run_list = ListView(*[ListItem(item) for item in items], id='run-list')
-        run_list.border_title = 'Runs'
-        yield run_list
+        with Vertical():
+            run_list = ListView(*[ListItem(item) for item in items], id='run-list')
+            run_list.border_title = 'Runs'
+            yield run_list
+
+            tips = RichLogBox(id='run-tips')
+            tips.markup = True
+            tips.display = False
+            tips.border_title = 'Tips'
+            pkg = package.find_problem_package_or_die()
+            if pkg.type == TaskType.COMMUNICATION:
+                tips.display = True
+                tips.write(
+                    'This is an interactive problem.\nYou can use the [bold blue]rbx -d run[/bold blue] command to capture the interaction between the processes and see them here.'
+                )
+            yield tips

     def on_mount(self):
         if not has_run():
rbx/box/ui/screens/run_test_explorer.py CHANGED
@@ -13,7 +13,9 @@ from rbx.box.testcase_extractors import (
     GenerationTestcaseEntry,
     extract_generation_testcases,
 )
+from rbx.box.ui.screens.rich_log_modal import RichLogModal
 from rbx.box.ui.utils.run_ui import (
+    get_metadata_markup,
     get_run_testcase_markup,
     get_run_testcase_metadata_markup,
 )
@@ -30,6 +32,7 @@ class RunTestExplorerScreen(Screen):
         ('3', 'show_log', 'Show log'),
         ('m', 'toggle_metadata', 'Toggle metadata'),
         ('s', 'toggle_side_by_side', 'Toggle sxs'),
+        ('g', 'toggle_test_metadata', 'Toggle test metadata'),
     ]

     side_by_side: reactive[bool] = reactive(False)
@@ -62,6 +65,11 @@
         yield TwoSidedTestBoxWidget(id='test-output')

     async def on_mount(self):
+        self.title = str(self.solution.path)
+
+        if self.diff_solution is not None:
+            self.title = f'{self.title} vs. {self.diff_solution.path}'
+
         self.query_one('#test-list').border_title = 'Tests'
         self.query_one('#test-input').border_title = 'Input'
@@ -164,3 +172,15 @@
             return
         widget = self.query_one('#test-output', TwoSidedTestBoxWidget)
         widget.diff_with_data = diff_with_data
+
+    def action_toggle_test_metadata(self):
+        list_view = self.query_one('#test-list', ListView)
+        if list_view.index is None:
+            return
+        entry = self._entries[list_view.index]
+        self.app.push_screen(
+            RichLogModal(
+                get_metadata_markup(entry),
+                title='Testcase metadata',
+            )
+        )
rbx/box/ui/screens/selector.py CHANGED
@@ -1,6 +1,7 @@
 from typing import List, Optional

 from textual.app import ComposeResult
+from textual.containers import Container
 from textual.screen import ModalScreen
 from textual.widgets import ListItem, ListView

@@ -14,10 +15,11 @@ class SelectorScreen(ModalScreen[int]):
         self.title = title

     def compose(self) -> ComposeResult:
-        list_view = ListView(*self.options)
-        if self.title:
-            list_view.border_title = self.title
-        yield list_view
+        with Container(id='selector-dialog'):
+            list_view = ListView(*self.options)
+            if self.title:
+                list_view.border_title = self.title
+            yield list_view

     def on_list_view_selected(self, event: ListView.Selected):
         self.dismiss(event.list_view.index)
rbx/box/ui/screens/test_explorer.py CHANGED
@@ -11,6 +11,7 @@ from rbx.box.testcase_extractors import (
     GenerationTestcaseEntry,
     extract_generation_testcases_from_groups,
 )
+from rbx.box.ui.utils.run_ui import get_metadata_markup
 from rbx.box.ui.widgets.file_log import FileLog
 from rbx.box.ui.widgets.rich_log_box import RichLogBox
 from rbx.box.ui.widgets.test_output_box import TestBoxWidget, TestcaseRenderingData
@@ -78,19 +79,7 @@ class TestExplorerScreen(Screen):
         )

         metadata.clear()
-        metadata.write(
-            f'[bold]{entry.group_entry.group}[/bold] / [bold]{entry.group_entry.index}[/bold]'
-        )
-        if entry.metadata.copied_from is not None:
-            metadata.write(
-                f'[bold]Copied from:[/bold] {entry.metadata.copied_from.inputPath}'
-            )
-        if entry.metadata.generator_call is not None:
-            metadata.write(f'[bold]Gen. call:[/bold] {entry.metadata.generator_call}')
-        if entry.metadata.generator_script is not None:
-            metadata.write(
-                f'[bold]Gen. script:[/bold] {entry.metadata.generator_script}'
-            )
+        metadata.write(get_metadata_markup(entry))

     async def _update_tests(self):
         self.watch(
rbx/box/ui/utils/run_ui.py CHANGED
@@ -4,6 +4,7 @@ from typing import List, Optional
 from rbx import utils
 from rbx.box import package, solutions
 from rbx.box.solutions import SolutionReportSkeleton, SolutionSkeleton
+from rbx.box.testcase_extractors import GenerationTestcaseEntry
 from rbx.box.testcase_utils import TestcaseEntry
 from rbx.grading.steps import Evaluation

@@ -93,3 +94,15 @@ def get_run_testcase_metadata_markup(
     if checker_msg is not None:
         lines.append(f'[b]Checker:[/b] {checker_msg}')
     return '\n'.join(lines)
+
+
+def get_metadata_markup(entry: GenerationTestcaseEntry) -> str:
+    lines = []
+    lines.append(f'[b]{entry.group_entry.group}[/b] / [b]{entry.group_entry.index}[/b]')
+    if entry.metadata.copied_from is not None:
+        lines.append(f'[b]Copied from:[/b] {entry.metadata.copied_from.inputPath}')
+    if entry.metadata.generator_call is not None:
+        lines.append(f'[b]Gen. call:[/b] {entry.metadata.generator_call}')
+    if entry.metadata.generator_script is not None:
+        lines.append(f'[b]Gen. script:[/b] {entry.metadata.generator_script}')
+    return '\n'.join(lines)
rbx/box/ui/widgets/diff_box.py ADDED
@@ -0,0 +1,38 @@
+import difflib
+import pathlib
+from typing import Optional, Tuple
+
+from rich.markdown import Markdown
+from textual.app import ComposeResult
+from textual.reactive import reactive
+from textual.widget import Widget
+from textual.widgets import RichLog
+
+
+def compute_diff(file1: pathlib.Path, file2: pathlib.Path) -> str:
+    lines1 = file1.read_text().splitlines(keepends=True)
+    lines2 = file2.read_text().splitlines(keepends=True)
+    return ''.join(difflib.ndiff(lines1, lines2))
+
+
+class DiffBox(Widget, can_focus=False):
+    paths: reactive[Optional[Tuple[pathlib.Path, pathlib.Path]]] = reactive(None)
+
+    def __init__(self):
+        super().__init__()
+
+    def compose(self) -> ComposeResult:
+        md = RichLog()
+        md.border_title = 'Differ'
+        yield md
+
+    async def watch_paths(self, paths: Optional[Tuple[pathlib.Path, pathlib.Path]]):
+        log = self.query_one(RichLog)
+        log.clear()
+        if paths is None:
+            return
+        file1, file2 = paths
+        md = Markdown(
+            f'```diff\n{compute_diff(file1, file2)}\n```', code_theme='monokai'
+        )
+        log.write(md)
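
`compute_diff` delegates to `difflib.ndiff`, whose output already carries the `- `/`+ `/`? ` markers that the Markdown `diff` code fence then syntax-highlights. A self-contained stdlib example of the text `DiffBox` ends up rendering:

```python
# Standalone demonstration of the difflib.ndiff output rendered by DiffBox.
import difflib

lines1 = ['1 2 3\n', 'hello\n']
lines2 = ['1 2 4\n', 'hello\n']
print(''.join(difflib.ndiff(lines1, lines2)))
# - 1 2 3
# ?     ^
# + 1 2 4
# ?     ^
#   hello
```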
rbx/box/unit.py CHANGED
@@ -14,6 +14,7 @@ from rbx.box.schema import (
     ValidatorOutcome,
     ValidatorTest,
 )
+from rbx.grading.steps import Outcome
 from rbx.utils import StatusProgress

@@ -148,8 +149,6 @@ async def run_validator_unit_tests(progress: StatusProgress):

 async def run_checker_unit_tests(progress: StatusProgress):
     pkg = package.find_problem_package_or_die()
-    if not pkg.unitTests.checker:
-        return

     if not package.get_checker():
         console.console.print(
@@ -183,6 +182,39 @@ async def run_checker_unit_tests(progress: StatusProgress):
             skip_run_log=True,
         )

+        if test.answer is not None:
+            ans_result = await checkers.check(
+                compiled_digest,
+                run_log=None,
+                testcase=Testcase(
+                    inputPath=test.input or empty_file,
+                    outputPath=test.answer,
+                ),
+                program_output=test.answer,
+                skip_run_log=True,
+            )
+
+            if ans_result.outcome != Outcome.ACCEPTED:
+                console.console.print(
+                    f'[error]FAIL[/error] Unit test [item]#{i + 1}[/item] ({test.running_tests_formatted_string()})'
+                )
+                console.console.print(
+                    '[error]Error validating the [item].ans[/item] file.'
+                )
+                console.console.print(
+                    '[error]While checking your [item].ans[/item] against itself, the checker returned the following error:[/error]'
+                )
+                console.console.print(
+                    f' [status]Verdict[/status] {ans_result.outcome.name}'
+                )
+                console.console.print(
+                    f' [status]Message[/status] {ans_result.message}'
+                )
+                console.console.print(
+                    '[error]Please fix your [item].ans[/item] file and try again, or double-check that your checker is correct.[/error]'
+                )
+                continue
+
         markup = (
             '[success]OK[/success]'
             if test.outcome.match(result.outcome)