rbx.cp 0.5.39__py3-none-any.whl → 0.5.42__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. rbx/box/builder.py +6 -6
  2. rbx/box/checkers.py +105 -26
  3. rbx/box/cli.py +860 -0
  4. rbx/box/code.py +199 -84
  5. rbx/box/contest/statements.py +4 -2
  6. rbx/box/generators.py +55 -49
  7. rbx/box/generators_test.py +7 -7
  8. rbx/box/main.py +1 -852
  9. rbx/box/package.py +42 -1
  10. rbx/box/packaging/boca/packager.py +2 -1
  11. rbx/box/packaging/main.py +24 -7
  12. rbx/box/packaging/moj/packager.py +164 -0
  13. rbx/box/retries.py +5 -5
  14. rbx/box/schema.py +86 -4
  15. rbx/box/solutions.py +46 -108
  16. rbx/box/solutions_test.py +5 -6
  17. rbx/box/statements/build_statements.py +4 -2
  18. rbx/box/stresses.py +23 -12
  19. rbx/box/tasks.py +258 -0
  20. rbx/box/testcase_extractors.py +21 -21
  21. rbx/box/testcases/main.py +19 -14
  22. rbx/box/unit.py +116 -0
  23. rbx/box/validators.py +27 -18
  24. rbx/box/validators_test.py +3 -3
  25. rbx/grading/judge/sandbox.py +8 -0
  26. rbx/grading/judge/sandboxes/stupid_sandbox.py +12 -7
  27. rbx/grading/judge/sandboxes/timeit.py +8 -2
  28. rbx/grading/steps.py +76 -2
  29. rbx/grading/steps_with_caching.py +45 -3
  30. rbx/grading/steps_with_caching_run_test.py +51 -49
  31. rbx/resources/packagers/moj/scripts/compare.sh +101 -0
  32. rbx/test.py +6 -4
  33. rbx/testdata/interactive/checker.cpp +21 -0
  34. rbx/testdata/interactive/gen.cpp +11 -0
  35. rbx/testdata/interactive/interactor.cpp +63 -0
  36. rbx/testdata/interactive/problem.rbx.yml +40 -0
  37. rbx/testdata/interactive/sols/af_ac_pe.cpp +75 -0
  38. rbx/testdata/interactive/sols/af_ac_re.cpp +76 -0
  39. rbx/testdata/interactive/sols/af_ac_too_many_iter.cpp +72 -0
  40. rbx/testdata/interactive/sols/af_inf_cout_with_flush.cpp +79 -0
  41. rbx/testdata/interactive/sols/af_inf_cout_without_flush.cpp +78 -0
  42. rbx/testdata/interactive/sols/af_ml.cpp +78 -0
  43. rbx/testdata/interactive/sols/af_tl_after_ans.cpp +74 -0
  44. rbx/testdata/interactive/sols/af_wa.cpp +74 -0
  45. rbx/testdata/interactive/sols/interactive-binary-search_mm_naive_cin.cpp +17 -0
  46. rbx/testdata/interactive/sols/main.cpp +26 -0
  47. rbx/testdata/interactive/testplan.txt +6 -0
  48. rbx/testdata/interactive/validator.cpp +16 -0
  49. {rbx_cp-0.5.39.dist-info → rbx_cp-0.5.42.dist-info}/METADATA +2 -1
  50. {rbx_cp-0.5.39.dist-info → rbx_cp-0.5.42.dist-info}/RECORD +53 -32
  51. {rbx_cp-0.5.39.dist-info → rbx_cp-0.5.42.dist-info}/LICENSE +0 -0
  52. {rbx_cp-0.5.39.dist-info → rbx_cp-0.5.42.dist-info}/WHEEL +0 -0
  53. {rbx_cp-0.5.39.dist-info → rbx_cp-0.5.42.dist-info}/entry_points.txt +0 -0
rbx/box/code.py CHANGED
@@ -1,3 +1,4 @@
1
+ import dataclasses
1
2
  import pathlib
2
3
  import re
3
4
  import resource
@@ -14,6 +15,7 @@ from rbx import console
14
15
  from rbx.box import download, package, setter_config, state
15
16
  from rbx.box.environment import (
16
17
  ExecutionConfig,
18
+ FileMapping,
17
19
  get_compilation_config,
18
20
  get_execution_config,
19
21
  get_file_mapping,
@@ -26,7 +28,8 @@ from rbx.box.environment import (
26
28
  from rbx.box.formatting import get_formatted_memory
27
29
  from rbx.box.sanitizers import warning_stack
28
30
  from rbx.box.schema import CodeItem
29
- from rbx.grading import steps_with_caching
31
+ from rbx.grading import steps, steps_with_caching
32
+ from rbx.grading.judge.sandbox import SandboxParams
30
33
  from rbx.grading.steps import (
31
34
  DigestHolder,
32
35
  DigestOrDest,
@@ -202,6 +205,117 @@ function rbx() {{
202
205
  raise typer.Exit(1)
203
206
 
204
207
 
208
+ @dataclasses.dataclass
209
+ class PreparedRun:
210
+ command: str
211
+ sandbox_params: SandboxParams
212
+ artifacts: GradingArtifacts
213
+ sanitized: bool
214
+
215
+ file_mapping: FileMapping
216
+ metadata: RunLogMetadata
217
+
218
+
219
+ def _prepare_run(
220
+ code: CodeItem,
221
+ executable: DigestOrSource,
222
+ stdin: Optional[DigestOrSource] = None,
223
+ stdout: Optional[DigestOrDest] = None,
224
+ stderr: Optional[DigestOrDest] = None,
225
+ inputs: Optional[List[GradingFileInput]] = None,
226
+ outputs: Optional[List[GradingFileOutput]] = None,
227
+ extra_args: Optional[str] = None,
228
+ extra_config: Optional[ExecutionConfig] = None,
229
+ retry_index: Optional[int] = None,
230
+ ):
231
+ language = find_language_name(code)
232
+ execution_options = get_execution_config(language)
233
+ if extra_config is not None:
234
+ execution_options = merge_execution_configs([execution_options, extra_config])
235
+ file_mapping = get_file_mapping(language)
236
+ sandbox_params = get_sandbox_params_from_config(execution_options.sandbox)
237
+
238
+ # Sanitization parameters.
239
+ sanitized = False
240
+ if is_executable_sanitized(executable):
241
+ # Remove any memory constraints for a sanitized executable.
242
+ # Sanitizers are known to be memory-hungry.
243
+ sandbox_params.address_space = None
244
+
245
+ # Reset timeout configs since sanitizers are known to be time-hungry.
246
+ sandbox_params.timeout = None
247
+ sandbox_params.wallclock_timeout = None
248
+ sanitized = True
249
+
250
+ sandbox_params.set_stdall(
251
+ stdin=PosixPath(file_mapping.input) if stdin is not None else None,
252
+ stdout=PosixPath(file_mapping.output) if stdout is not None else None,
253
+ stderr=PosixPath(file_mapping.error)
254
+ if stderr is not None or sanitized
255
+ else None,
256
+ )
257
+
258
+ assert execution_options.command
259
+ command = get_mapped_command(execution_options.command, file_mapping)
260
+ command = substitute_commands([command], sanitized=sanitized)[0]
261
+
262
+ if extra_args is not None:
263
+ splitted_command = shlex.split(command)
264
+ splitted_command.extend(shlex.split(extra_args))
265
+ command = shlex.join(splitted_command)
266
+
267
+ artifacts = GradingArtifacts()
268
+ artifacts.inputs.append(
269
+ GradingFileInput(
270
+ **executable.expand(),
271
+ dest=PosixPath(file_mapping.executable),
272
+ executable=True,
273
+ )
274
+ )
275
+ if stdin is not None:
276
+ artifacts.inputs.append(
277
+ GradingFileInput(
278
+ **stdin.expand(),
279
+ dest=PosixPath(file_mapping.input),
280
+ )
281
+ )
282
+ if stdout is not None:
283
+ artifacts.outputs.append(
284
+ GradingFileOutput(
285
+ src=PosixPath(file_mapping.output),
286
+ **stdout.expand(),
287
+ touch=True,
288
+ )
289
+ )
290
+ if stderr is not None:
291
+ artifacts.outputs.append(
292
+ GradingFileOutput(
293
+ src=PosixPath(file_mapping.error),
294
+ **stderr.expand(),
295
+ touch=True,
296
+ )
297
+ )
298
+ if inputs:
299
+ artifacts.inputs.extend(inputs)
300
+ if outputs:
301
+ artifacts.outputs.extend(outputs)
302
+
303
+ return PreparedRun(
304
+ command=command,
305
+ sandbox_params=sandbox_params,
306
+ artifacts=artifacts,
307
+ sanitized=sanitized,
308
+ file_mapping=file_mapping,
309
+ metadata=RunLogMetadata(
310
+ language=code.language,
311
+ is_sanitized=sanitized,
312
+ timeLimit=sandbox_params.timeout,
313
+ memoryLimit=sandbox_params.address_space,
314
+ retryIndex=retry_index,
315
+ ),
316
+ )
317
+
318
+
205
319
  # Compile code item and return its digest in the storage.
206
320
  def compile_item(
207
321
  code: CodeItem,
@@ -310,7 +424,7 @@ def compile_item(
310
424
  return compiled_digest.value
311
425
 
312
426
 
313
- def run_item(
427
+ async def run_item(
314
428
  code: CodeItem,
315
429
  executable: DigestOrSource,
316
430
  stdin: Optional[DigestOrSource] = None,
@@ -324,99 +438,100 @@ def run_item(
324
438
  ) -> Optional[RunLog]:
325
439
  _check_stack_limit()
326
440
 
327
- language = find_language_name(code)
328
- execution_options = get_execution_config(language)
329
- if extra_config is not None:
330
- execution_options = merge_execution_configs([execution_options, extra_config])
331
- file_mapping = get_file_mapping(language)
332
441
  dependency_cache = package.get_dependency_cache()
333
- sandbox = package.get_singleton_sandbox()
334
- sandbox_params = get_sandbox_params_from_config(execution_options.sandbox)
335
-
336
- # Sanitization parameters.
337
- sanitized = False
338
- if is_executable_sanitized(executable):
339
- # Remove any memory constraints for a sanitized executable.
340
- # Sanitizers are known to be memory-hungry.
341
- sandbox_params.address_space = None
342
-
343
- # Reset timeout configs since sanitizers are known to be time-hungry.
344
- sandbox_params.timeout = None
345
- sandbox_params.wallclock_timeout = None
346
- sanitized = True
347
-
348
- sandbox_params.set_stdall(
349
- stdin=PosixPath(file_mapping.input) if stdin is not None else None,
350
- stdout=PosixPath(file_mapping.output) if stdout is not None else None,
351
- stderr=PosixPath(file_mapping.error)
352
- if stderr is not None or sanitized
353
- else None,
354
- )
355
442
 
356
- assert execution_options.command
357
- command = get_mapped_command(execution_options.command, file_mapping)
358
- command = substitute_commands([command], sanitized=sanitized)[0]
359
-
360
- if extra_args is not None:
361
- splitted_command = shlex.split(command)
362
- splitted_command.extend(shlex.split(extra_args))
363
- command = shlex.join(splitted_command)
364
-
365
- artifacts = GradingArtifacts()
366
- artifacts.inputs.append(
367
- GradingFileInput(
368
- **executable.expand(),
369
- dest=PosixPath(file_mapping.executable),
370
- executable=True,
371
- )
443
+ prepared = _prepare_run(
444
+ code,
445
+ executable,
446
+ stdin,
447
+ stdout,
448
+ stderr,
449
+ inputs,
450
+ outputs,
451
+ extra_args,
452
+ extra_config,
453
+ retry_index,
372
454
  )
373
- if stdin is not None:
374
- artifacts.inputs.append(
375
- GradingFileInput(
376
- **stdin.expand(),
377
- dest=PosixPath(file_mapping.input),
378
- )
379
- )
380
- if stdout is not None:
381
- artifacts.outputs.append(
382
- GradingFileOutput(
383
- src=PosixPath(file_mapping.output),
384
- **stdout.expand(),
385
- )
386
- )
387
- if stderr is not None:
388
- artifacts.outputs.append(
389
- GradingFileOutput(
390
- src=PosixPath(file_mapping.error),
391
- **stderr.expand(),
392
- )
393
- )
394
- if inputs:
395
- artifacts.inputs.extend(inputs)
396
- if outputs:
397
- artifacts.outputs.extend(outputs)
398
455
 
399
- run_log = steps_with_caching.run(
400
- command,
401
- params=sandbox_params,
402
- sandbox=sandbox,
403
- artifacts=artifacts,
456
+ run_log = await steps_with_caching.run(
457
+ prepared.command,
458
+ params=prepared.sandbox_params,
459
+ sandbox=package.get_singleton_sandbox(),
460
+ artifacts=prepared.artifacts,
404
461
  dependency_cache=dependency_cache,
405
- metadata=RunLogMetadata(
406
- language=code.language,
407
- is_sanitized=sanitized,
408
- timeLimit=sandbox_params.timeout,
409
- memoryLimit=sandbox_params.address_space,
410
- retryIndex=retry_index,
411
- ),
462
+ metadata=prepared.metadata,
412
463
  )
413
464
 
414
465
  # Find sanitizer logs.
415
466
  if run_log is not None and run_log.warnings:
416
- assert sandbox_params.stderr_file is not None
417
- stderr_output = artifacts.get_output_file_for_src(sandbox_params.stderr_file)
467
+ assert prepared.sandbox_params.stderr_file is not None
468
+ stderr_output = prepared.artifacts.get_output_file_for_src(
469
+ prepared.sandbox_params.stderr_file
470
+ )
418
471
  if stderr_output is not None:
419
472
  warning_stack.get_warning_stack().add_sanitizer_warning(
420
473
  package.get_cache_storage(), code, stderr_output
421
474
  )
422
475
  return run_log
476
+
477
+
478
+ @dataclasses.dataclass
479
+ class CommunicationItem:
480
+ code: CodeItem
481
+ executable: DigestOrSource
482
+ stderr: Optional[DigestOrDest] = None
483
+ inputs: Optional[List[GradingFileInput]] = None
484
+ outputs: Optional[List[GradingFileOutput]] = None
485
+ extra_args: Optional[str] = None
486
+ extra_config: Optional[ExecutionConfig] = None
487
+
488
+ def prepare(self) -> PreparedRun:
489
+ return _prepare_run(
490
+ self.code,
491
+ self.executable,
492
+ stderr=self.stderr,
493
+ inputs=self.inputs,
494
+ outputs=self.outputs,
495
+ extra_args=self.extra_args,
496
+ extra_config=self.extra_config,
497
+ )
498
+
499
+
500
+ async def run_communication(
501
+ interactor: CommunicationItem,
502
+ solution: CommunicationItem,
503
+ retry_index: Optional[int] = None,
504
+ ):
505
+ fifo_in, fifo_out = package.get_fifos()
506
+ interactor_prepared = interactor.prepare()
507
+ solution_prepared = solution.prepare()
508
+
509
+ # Prepare retry index.
510
+ interactor_prepared.metadata.retryIndex = retry_index
511
+ solution_prepared.metadata.retryIndex = retry_index
512
+
513
+ interactor_prepared.sandbox_params.set_stdio(stdin=fifo_out, stdout=fifo_in)
514
+ solution_prepared.sandbox_params.set_stdio(stdin=fifo_in, stdout=fifo_out)
515
+
516
+ solution_prepared.sandbox_params.reverse_io = True
517
+
518
+ interactor_run_params = steps.CoordinatedRunParams(
519
+ command=interactor_prepared.command,
520
+ params=interactor_prepared.sandbox_params,
521
+ sandbox=package.get_singleton_interactor_sandbox(),
522
+ artifacts=interactor_prepared.artifacts,
523
+ metadata=interactor_prepared.metadata,
524
+ )
525
+ solution_run_params = steps.CoordinatedRunParams(
526
+ command=solution_prepared.command,
527
+ params=solution_prepared.sandbox_params,
528
+ sandbox=package.get_singleton_sandbox(),
529
+ artifacts=solution_prepared.artifacts,
530
+ metadata=solution_prepared.metadata,
531
+ )
532
+
533
+ return await steps_with_caching.run_coordinated(
534
+ interactor_run_params,
535
+ solution_run_params,
536
+ dependency_cache=package.get_dependency_cache(),
537
+ )
@@ -1,5 +1,6 @@
1
1
  from typing import Annotated, List, Optional
2
2
 
3
+ import syncer
3
4
  import typer
4
5
 
5
6
  from rbx import annotations, console
@@ -16,7 +17,8 @@ app = typer.Typer(no_args_is_help=True, cls=annotations.AliasGroup)
16
17
 
17
18
  @app.command('build, b', help='Build statements.')
18
19
  @within_contest
19
- def build(
20
+ @syncer.sync
21
+ async def build(
20
22
  verification: environment.VerificationParam,
21
23
  languages: Annotated[
22
24
  Optional[List[str]],
@@ -53,7 +55,7 @@ def build(
53
55
  with cd.new_package_cd(problem.get_path()):
54
56
  package.clear_package_cache()
55
57
 
56
- if not builder.build(
58
+ if not await builder.build(
57
59
  verification=verification, groups=set(['samples']), output=None
58
60
  ):
59
61
  console.console.print(
rbx/box/generators.py CHANGED
@@ -1,5 +1,6 @@
1
1
  import pathlib
2
2
  import shutil
3
+ import tempfile
3
4
  from typing import Dict, List, Optional, Set
4
5
 
5
6
  import typer
@@ -7,15 +8,13 @@ import typer
7
8
  from rbx import console
8
9
  from rbx.box import checkers, package, testcase_utils, validators
9
10
  from rbx.box.code import SanitizationLevel, compile_item, run_item
10
- from rbx.box.environment import (
11
- EnvironmentSandbox,
12
- ExecutionConfig,
13
- )
14
11
  from rbx.box.schema import (
15
12
  CodeItem,
16
13
  GeneratorCall,
14
+ TaskType,
17
15
  Testcase,
18
16
  )
17
+ from rbx.box.tasks import run_solution_on_testcase
19
18
  from rbx.box.testcase_extractors import (
20
19
  GenerationMetadata,
21
20
  GenerationTestcaseEntry,
@@ -32,6 +31,8 @@ from rbx.grading.steps import (
32
31
  DigestHolder,
33
32
  DigestOrDest,
34
33
  DigestOrSource,
34
+ Evaluation,
35
+ Outcome,
35
36
  )
36
37
  from rbx.utils import StatusProgress
37
38
 
@@ -73,7 +74,7 @@ def get_call_from_string(call_str: str) -> GeneratorCall:
73
74
  return GeneratorCall(name=name, args=args)
74
75
 
75
76
 
76
- def _get_necessary_generators_for_groups(
77
+ async def _get_necessary_generators_for_groups(
77
78
  groups: Optional[Set[str]] = None,
78
79
  ) -> Set[str]:
79
80
  pkg = package.find_problem_package_or_die()
@@ -81,11 +82,11 @@ def _get_necessary_generators_for_groups(
81
82
  necessary_generators = set()
82
83
 
83
84
  class NecessaryGeneratorsVisitor(TestcaseGroupVisitor):
84
- def visit(self, entry: GenerationTestcaseEntry):
85
+ async def visit(self, entry: GenerationTestcaseEntry):
85
86
  if entry.metadata.generator_call is not None:
86
87
  necessary_generators.add(entry.metadata.generator_call.name)
87
88
 
88
- run_testcase_visitor(NecessaryGeneratorsVisitor(groups))
89
+ await run_testcase_visitor(NecessaryGeneratorsVisitor(groups))
89
90
 
90
91
  return existing_generators.intersection(necessary_generators)
91
92
 
@@ -126,7 +127,7 @@ def expand_generator_call(call: GeneratorCall) -> GeneratorCall:
126
127
  return call.model_copy(update={'args': generator_for_args.generate(parsed_args)})
127
128
 
128
129
 
129
- def generate_standalone(
130
+ async def generate_standalone(
130
131
  spec: GenerationMetadata,
131
132
  validate: bool = True,
132
133
  group_entry: Optional[TestcaseEntry] = None,
@@ -166,7 +167,7 @@ def generate_standalone(
166
167
  progress.update(
167
168
  f'Generating testcase [status]{generator.name} {call.args}[/status]...'
168
169
  )
169
- generation_log = run_item(
170
+ generation_log = await run_item(
170
171
  generator,
171
172
  DigestOrSource.create(generator_digest),
172
173
  stdout=DigestOrDest.create(spec.copied_to.inputPath),
@@ -200,7 +201,7 @@ def generate_standalone(
200
201
  _, validator_digest = validator_tp
201
202
  if progress:
202
203
  progress.update('Validating test...')
203
- validation_info = validators.validate_one_off(
204
+ validation_info = await validators.validate_one_off(
204
205
  spec.copied_to.inputPath,
205
206
  validator,
206
207
  validator_digest,
@@ -214,7 +215,7 @@ def generate_standalone(
214
215
  raise typer.Exit(1)
215
216
 
216
217
 
217
- def generate_testcases(
218
+ async def generate_testcases(
218
219
  progress: Optional[StatusProgress] = None, groups: Optional[Set[str]] = None
219
220
  ):
220
221
  def step():
@@ -223,7 +224,7 @@ def generate_testcases(
223
224
 
224
225
  compiled_generators = compile_generators(
225
226
  progress=progress,
226
- tracked_generators=_get_necessary_generators_for_groups(groups)
227
+ tracked_generators=await _get_necessary_generators_for_groups(groups)
227
228
  if groups is not None
228
229
  else None,
229
230
  )
@@ -231,7 +232,7 @@ def generate_testcases(
231
232
  testcase_utils.clear_built_testcases()
232
233
 
233
234
  class BuildTestcaseVisitor(TestcaseGroupVisitor):
234
- def visit(self, entry: GenerationTestcaseEntry):
235
+ async def visit(self, entry: GenerationTestcaseEntry):
235
236
  if entry.metadata.copied_from is not None:
236
237
  _copy_testcase_over(
237
238
  entry.metadata.copied_from,
@@ -239,7 +240,7 @@ def generate_testcases(
239
240
  )
240
241
 
241
242
  if entry.metadata.generator_call is not None:
242
- generate_standalone(
243
+ await generate_standalone(
243
244
  entry.metadata,
244
245
  group_entry=entry.group_entry,
245
246
  validate=False,
@@ -249,13 +250,14 @@ def generate_testcases(
249
250
  )
250
251
  step()
251
252
 
252
- run_testcase_visitor(BuildTestcaseVisitor(groups))
253
+ await run_testcase_visitor(BuildTestcaseVisitor(groups))
253
254
 
254
255
 
255
- def generate_output_for_testcase(
256
+ async def generate_output_for_testcase(
256
257
  main_solution_digest: str,
257
258
  testcase: Testcase,
258
259
  stderr_path: Optional[pathlib.Path] = None,
260
+ interactor_digest: Optional[str] = None,
259
261
  ):
260
262
  assert testcase.outputPath is not None
261
263
  testcase.inputPath.parent.mkdir(parents=True, exist_ok=True)
@@ -265,55 +267,51 @@ def generate_output_for_testcase(
265
267
  # Output file was already copied over from manual tests.
266
268
  return
267
269
 
268
- pkg = package.find_problem_package_or_die()
269
270
  main_solution = package.get_main_solution()
270
271
  if main_solution is None:
271
272
  return
272
273
 
273
- # Obey no limits when generating testcases.
274
- sandbox = EnvironmentSandbox()
275
- sandbox.fileSizeLimit = pkg.outputLimit
276
- extra_config = ExecutionConfig(sandbox=sandbox)
274
+ with tempfile.TemporaryDirectory() as dir:
275
+ output_dir = pathlib.Path(dir)
277
276
 
278
- try:
279
- run_log = run_item(
277
+ eval: Evaluation = await run_solution_on_testcase(
280
278
  main_solution,
281
- DigestOrSource.create(main_solution_digest),
282
- stdin=DigestOrSource.create(testcase.inputPath),
283
- stdout=DigestOrDest.create(testcase.outputPath),
284
- stderr=DigestOrDest.create(stderr_path)
285
- if stderr_path is not None
286
- else None,
287
- extra_config=extra_config,
288
- )
289
- except:
290
- console.console.print(
291
- '[error]Failed running main solution to generate testcase.[/error]'
279
+ main_solution_digest,
280
+ None,
281
+ testcase,
282
+ output_dir,
283
+ interactor_digest=interactor_digest,
284
+ use_retries=False,
285
+ use_timelimit=False,
292
286
  )
293
- raise
294
287
 
295
- if run_log is None or run_log.exitcode != 0:
296
- console.console.print(
297
- f'[error]Failed generating output for [item]{testcase.inputPath}[/item][/error]',
298
- )
299
- if run_log is not None:
300
- console.console.print(f'[error]Summary:[/error] {run_log.get_summary()}')
301
- checker_result = checkers.check_with_no_output(run_log)
288
+ if eval.log.stdout_absolute_path is not None:
289
+ shutil.copy(eval.log.stdout_absolute_path, testcase.outputPath)
290
+ if eval.log.stderr_absolute_path is not None and stderr_path is not None:
291
+ shutil.copy(eval.log.stderr_absolute_path, stderr_path)
292
+
293
+ if eval.result.outcome != Outcome.ACCEPTED:
294
+ console.console.print(
295
+ f'[error]Failed generating output for [item]{testcase.inputPath}[/item][/error]',
296
+ )
297
+ console.console.print(f'[error]Summary:[/error] {eval.log.get_summary()}')
302
298
  console.console.print(
303
- f'[warning]Verdict: [item]{checker_result.outcome.value}[/item][/warning]',
299
+ f'[warning]Verdict: [item]{eval.result.outcome.value}[/item][/warning]',
304
300
  )
305
301
  console.console.print(
306
- f'[warning]Message: [info]{checker_result.message}[/info][/warning]',
302
+ f'[warning]Message: [info]{eval.result.message}[/info][/warning]',
307
303
  )
308
304
  console.console.print(f'Input written at [item]{testcase.inputPath}[/item]')
309
305
  console.console.print(
310
306
  f'Output written at [item]{testcase.outputPath}[/item]'
311
307
  )
312
- console.console.print(f'Stderr written at [item]{stderr_path}[/item]')
313
- raise typer.Exit(1)
308
+ if stderr_path is not None:
309
+ console.console.print(f'Stderr written at [item]{stderr_path}[/item]')
310
+
311
+ raise typer.Exit(1)
314
312
 
315
313
 
316
- def generate_outputs_for_testcases(
314
+ async def generate_outputs_for_testcases(
317
315
  entries: List[TestcaseEntry],
318
316
  progress: Optional[StatusProgress] = None,
319
317
  ):
@@ -324,6 +322,13 @@ def generate_outputs_for_testcases(
324
322
  main_solution = package.get_main_solution()
325
323
  solution_digest: Optional[str] = None
326
324
 
325
+ pkg = package.find_problem_package_or_die()
326
+
327
+ if pkg.type == TaskType.COMMUNICATION:
328
+ interactor_digest = checkers.compile_interactor(progress)
329
+ else:
330
+ interactor_digest = None
331
+
327
332
  if main_solution is not None:
328
333
  if progress:
329
334
  progress.update('Compiling main solution...')
@@ -337,7 +342,7 @@ def generate_outputs_for_testcases(
337
342
  shutil.rmtree(str(gen_runs_dir), ignore_errors=True)
338
343
  gen_runs_dir.mkdir(parents=True, exist_ok=True)
339
344
 
340
- generation_entries = extract_generation_testcases(entries)
345
+ generation_entries = await extract_generation_testcases(entries)
341
346
 
342
347
  for entry in generation_entries:
343
348
  tc = entry.metadata.copied_to
@@ -354,9 +359,10 @@ def generate_outputs_for_testcases(
354
359
  raise typer.Exit(1)
355
360
 
356
361
  assert solution_digest is not None
357
- generate_output_for_testcase(
362
+ await generate_output_for_testcase(
358
363
  solution_digest,
359
364
  tc,
360
365
  gen_runs_dir / 'main.stderr',
366
+ interactor_digest=interactor_digest,
361
367
  )
362
368
  step()
@@ -12,12 +12,12 @@ from rbx.testing_utils import print_directory_tree
12
12
 
13
13
 
14
14
  @pytest.mark.test_pkg('box1')
15
- def test_generator_works(pkg_from_testdata: pathlib.Path):
16
- generate_testcases()
15
+ async def test_generator_works(pkg_from_testdata: pathlib.Path):
16
+ await generate_testcases()
17
17
  entries = [
18
- entry.group_entry for entry in extract_generation_testcases_from_groups()
18
+ entry.group_entry for entry in await extract_generation_testcases_from_groups()
19
19
  ]
20
- generate_outputs_for_testcases(entries)
20
+ await generate_outputs_for_testcases(entries)
21
21
 
22
22
  # Debug when fail.
23
23
  print_directory_tree(pkg_from_testdata)
@@ -37,11 +37,11 @@ def test_generator_works(pkg_from_testdata: pathlib.Path):
37
37
 
38
38
 
39
39
  @pytest.mark.test_pkg('box1')
40
- def test_generator_cache_works(
40
+ async def test_generator_cache_works(
41
41
  pkg_from_testdata: pathlib.Path,
42
42
  ):
43
43
  # Run the first time.
44
- generate_testcases()
44
+ await generate_testcases()
45
45
  assert (
46
46
  package.get_build_testgroup_path('gen1') / '1-gen-000.in'
47
47
  ).read_text() == '123\n'
@@ -54,7 +54,7 @@ def test_generator_cache_works(
54
54
  gen_path.write_text(gen_path.read_text().replace('123', '4567'))
55
55
 
56
56
  # Run the second time.
57
- generate_testcases()
57
+ await generate_testcases()
58
58
 
59
59
  # Debug when fail.
60
60
  print_directory_tree(pkg_from_testdata)