shrinkray 25.12.29.0__py3-none-any.whl → 26.2.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -12,7 +12,14 @@ from typing import Any, Protocol
  import trio
  from binaryornot.helpers import is_binary_string

+ from shrinkray.cli import InputType
+ from shrinkray.passes.clangdelta import C_FILE_EXTENSIONS, ClangDelta, find_clang_delta
  from shrinkray.problem import InvalidInitialExample
+ from shrinkray.state import (
+     OutputCaptureManager,
+     ShrinkRayDirectoryState,
+     ShrinkRayStateSingleFile,
+ )
  from shrinkray.subprocess.protocol import (
      PassStatsData,
      ProgressUpdate,
@@ -21,6 +28,7 @@ from shrinkray.subprocess.protocol import (
      deserialize,
      serialize,
  )
+ from shrinkray.work import Volume


  class InputStream(Protocol):
@@ -50,6 +58,7 @@ class ReducerWorker:
          self.problem = None
          self.state = None
          self._cancel_scope: trio.CancelScope | None = None
+         self._restart_requested = False
          # Parallelism tracking
          self._parallel_samples = 0
          self._parallel_total = 0
@@ -63,6 +72,16 @@ class ReducerWorker:
          self._last_sent_history_index: int = 0
          self._last_recorded_size: int = 0
          self._last_history_time: float = 0.0
+         # Original start time - preserved across restarts for consistent graphing
+         self._original_start_time: float | None = None
+         # Log file for stderr redirection (cleaned up on shutdown)
+         self._log_file: Any = None
+         # Accumulated stats across restarts - when a restart happens, the problem
+         # gets a fresh ReductionStats object, so we need to preserve the counts
+         self._accumulated_calls: int = 0
+         self._accumulated_reductions: int = 0
+         self._accumulated_interesting_calls: int = 0
+         self._accumulated_wasted_calls: int = 0

      async def emit(self, msg: Response | ProgressUpdate) -> None:
          """Write a message to the output stream."""
@@ -108,9 +127,10 @@ class ReducerWorker:
                      return
                  response = await self.handle_command(request)
                  await self.emit(response)
-             except Exception as e:
+             except Exception:
                  traceback.print_exc()
-                 await self.emit(Response(id="", error=str(e)))
+                 # Include full traceback in error message in case stderr isn't visible
+                 await self.emit(Response(id="", error=traceback.format_exc()))

      async def handle_command(self, request: Request) -> Response:
          """Handle a command request and return a response."""
@@ -127,6 +147,8 @@ class ReducerWorker:
                  return self._handle_enable_pass(request.id, request.params)
              case "skip_pass":
                  return self._handle_skip_pass(request.id)
+             case "restart_from":
+                 return await self._handle_restart_from(request.id, request.params)
              case _:
                  return Response(
                      id=request.id, error=f"Unknown command: {request.command}"
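
The new "restart_from" branch rides on the worker's existing command dispatch. A hypothetical exchange, sketched here as plain dictionaries (the actual Request/Response dataclasses and their serialization live in shrinkray.subprocess.protocol and are not shown in this diff; the envelope shape is an assumption), might look like:

    # Client -> worker: ask to restart from a recorded reduction. The
    # "reduction_number" field name is taken from the handler added below.
    request = {
        "id": "42",
        "command": "restart_from",
        "params": {"reduction_number": 17},
    }

    # Worker -> client on success: the new (larger) test-case size is reported.
    response_ok = {"id": "42", "result": {"status": "restarted", "size": 18342}}

    # Worker -> client on failure, e.g. an unknown reduction number.
    response_err = {"id": "42", "error": "Reduction 17 not found"}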
@@ -141,6 +163,7 @@ class ReducerWorker:
              await self._start_reduction(params)
              return Response(id=request_id, result={"status": "started"})
          except* InvalidInitialExample as excs:
+             traceback.print_exc()
              assert len(excs.exceptions) == 1
              (e,) = excs.exceptions
              # Build a detailed error message for invalid initial examples
@@ -148,26 +171,14 @@ class ReducerWorker:
                  error_message = await self.state.build_error_message(e)
              else:
                  error_message = str(e)
-         except* Exception as e:
+         except* Exception:
              traceback.print_exc()
-             error_message = str(e.exceptions[0])
+             # Include full traceback in error message in case stderr isn't visible
+             error_message = traceback.format_exc()
          return Response(id=request_id, error=error_message)

      async def _start_reduction(self, params: dict) -> None:
          """Initialize and start the reduction."""
-         from shrinkray.cli import InputType
-         from shrinkray.passes.clangdelta import (
-             C_FILE_EXTENSIONS,
-             ClangDelta,
-             find_clang_delta,
-         )
-         from shrinkray.state import (
-             OutputCaptureManager,
-             ShrinkRayDirectoryState,
-             ShrinkRayStateSingleFile,
-         )
-         from shrinkray.work import Volume
-
          filename = params["file_path"]
          test = params["test"]
          parallelism = params.get("parallelism", os.cpu_count() or 1)
@@ -181,6 +192,8 @@ class ReducerWorker:
          clang_delta_path = params.get("clang_delta", "")
          trivial_is_error = params.get("trivial_is_error", True)
          skip_validation = params.get("skip_validation", False)
+         history_enabled = params.get("history_enabled", True)
+         also_interesting_code = params.get("also_interesting_code")

          clang_delta_executable = None
          if os.path.splitext(filename)[1] in C_FILE_EXTENSIONS and not no_clang_delta:
@@ -202,6 +215,8 @@ class ReducerWorker:
              "seed": seed,
              "volume": volume,
              "clang_delta_executable": clang_delta_executable,
+             "history_enabled": history_enabled,
+             "also_interesting_code": also_interesting_code,
          }

          if os.path.isdir(filename):
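
The two new options are plumbed straight from the "start" command's params into the state constructor kwargs. A minimal, hypothetical params payload for that command (only keys that _start_reduction reads above; the file and test values are invented, and the defaults shown are the ones it falls back to when a key is omitted) could be:

    params = {
        "file_path": "crash.c",            # hypothetical target file
        "test": "./interesting.sh",        # hypothetical interestingness test
        "parallelism": 8,
        "history_enabled": True,           # defaults to True when omitted
        "also_interesting_code": None,     # defaults to None when omitted
    }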
@@ -220,6 +235,17 @@ class ReducerWorker:
          self._output_dir = tempfile.mkdtemp(prefix="shrinkray-output-")
          self.state.output_manager = OutputCaptureManager(output_dir=self._output_dir)

+         # Redirect stderr to the history directory if history is enabled
+         # This ensures errors are logged to the per-run directory
+         if self.state.history_manager is not None:
+             log_path = os.path.join(
+                 self.state.history_manager.history_dir, "shrinkray.log"
+             )
+             # Ensure directory exists (history_manager creates it, but be safe)
+             os.makedirs(os.path.dirname(log_path), exist_ok=True)
+             self._log_file = open(log_path, "a", encoding="utf-8")
+             sys.stderr = self._log_file
+
          self.problem = self.state.problem
          self.reducer = self.state.reducer

@@ -309,6 +335,99 @@ class ReducerWorker:
              return Response(id=request_id, result={"status": "skipped"})
          return Response(id=request_id, error="Reducer does not support pass control")

+     async def _handle_restart_from(self, request_id: str, params: dict) -> Response:
+         """Restart reduction from a specific history point.
+
+         This moves all reductions after the specified point to also-interesting,
+         resets the current test case to that point, and modifies the
+         interestingness test to reject previously reduced values.
+         """
+         reduction_number = params.get("reduction_number")
+         if reduction_number is None:
+             return Response(id=request_id, error="reduction_number is required")
+
+         if self.state is None or self.state.history_manager is None:
+             return Response(id=request_id, error="History not available")
+
+         # Restart only works with single-file reductions
+         if not isinstance(self.state, ShrinkRayStateSingleFile):
+             return Response(
+                 id=request_id,
+                 error="Restart from history not supported for directory reductions",
+             )
+
+         # First, try to get restart data - this validates the reduction exists
+         # Do this BEFORE cancelling the current reduction to avoid leaving
+         # things in an inconsistent state if the restart fails
+         try:
+             new_test_case, excluded_set = (
+                 self.state.history_manager.restart_from_reduction(reduction_number)
+             )
+         except FileNotFoundError:
+             return Response(
+                 id=request_id, error=f"Reduction {reduction_number} not found"
+             )
+         except Exception:
+             traceback.print_exc()
+             return Response(id=request_id, error=traceback.format_exc())
+
+         # Save current stats before restart - the new problem will have fresh stats
+         if self.problem is not None:
+             stats = self.problem.stats
+             self._accumulated_calls += stats.calls
+             self._accumulated_reductions += stats.reductions
+             self._accumulated_interesting_calls += stats.interesting_calls
+             self._accumulated_wasted_calls += stats.wasted_interesting_calls
+
+         # Set restart flag BEFORE cancelling the scope to avoid race condition.
+         # The run() loop checks this flag after run_reducer() returns - if we
+         # cancel first and the flag isn't set, the loop will exit instead of
+         # restarting.
+         self._restart_requested = True
+         self.running = False
+
+         # Now cancel current reduction
+         if self._cancel_scope is not None:
+             self._cancel_scope.cancel()
+
+         try:
+             # Clear old test output to avoid showing stale output from before restart
+             if self.state.output_manager is not None:
+                 self.state.output_manager.cleanup_all()
+
+             # Reset state with new initial and exclusions
+             self.state.reset_for_restart(new_test_case, excluded_set)
+
+             # Get fresh reducer BEFORE any await points to avoid race condition.
+             # After we cancel the scope, the main run() loop may loop back and
+             # call run_reducer() while we're still in an await. We need the new
+             # reducer to be set before that happens.
+             self.reducer = self.state.reducer
+             self.problem = self.reducer.target
+
+             # Record the upward jump in size at current runtime.
+             # Don't reset history - this preserves the graph continuity.
+             if self._size_history and self._original_start_time is not None:
+                 current_runtime = time.time() - self._original_start_time
+                 self._size_history.append((current_runtime, len(new_test_case)))
+                 self._last_recorded_size = len(new_test_case)
+                 self._last_history_time = current_runtime
+
+             # Write new test case to file (can happen after reducer is set up)
+             await self.state.write_test_case_to_file(self.state.filename, new_test_case)
+
+             # Ready to restart - running will be set to True by the run() loop
+             return Response(
+                 id=request_id,
+                 result={"status": "restarted", "size": len(new_test_case)},
+             )
+         except Exception:
+             traceback.print_exc()
+             # Reset restart flag - we can't restart, so don't try
+             self._restart_requested = False
+             # Include full traceback in error message in case stderr isn't visible
+             return Response(id=request_id, error=traceback.format_exc())
+
      def _get_test_output_preview(self) -> tuple[str, int | None, int | None]:
          """Get preview of current test output, test ID, and return code.

@@ -394,7 +513,12 @@ class ReducerWorker:
              return None

          stats = self.problem.stats
-         runtime = time.time() - stats.start_time
+
+         # Use original start time for consistent graphing across restarts.
+         # Capture it on first call.
+         if self._original_start_time is None:
+             self._original_start_time = stats.start_time
+         runtime = time.time() - self._original_start_time
          current_size = stats.current_test_case_size

          # Record size history when size changes or periodically
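
Because runtime is always measured from the original start time, the (runtime, size) pairs appended to _size_history stay on a single time axis even across restarts; a restart simply shows up as an upward step in size rather than a reset of the graph. A small illustrative sequence (invented numbers):

    # (runtime_seconds, size_bytes) entries as the reducer runs, is restarted
    # from an earlier history point, and then resumes shrinking
    size_history = [
        (0.0, 20000),    # initial test case
        (12.5, 9100),    # steady shrinking
        (30.2, 4800),
        (31.0, 15500),   # restart_from an earlier reduction: size jumps back up
        (55.7, 7300),    # shrinking resumes from the restored test case
    ]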
@@ -426,14 +550,22 @@ class ReducerWorker:
              self._parallel_samples += 1
              self._parallel_total += parallel_workers

+         # Calculate total stats including accumulated from previous runs (before restarts)
+         total_calls = stats.calls + self._accumulated_calls
+         total_reductions = stats.reductions + self._accumulated_reductions
+         total_interesting_calls = (
+             stats.interesting_calls + self._accumulated_interesting_calls
+         )
+         total_wasted_calls = (
+             stats.wasted_interesting_calls + self._accumulated_wasted_calls
+         )
+
          # Calculate parallelism stats
          average_parallelism = 0.0
          effective_parallelism = 0.0
          if self._parallel_samples > 0:
              average_parallelism = self._parallel_total / self._parallel_samples
-             wasteage = (
-                 stats.wasted_interesting_calls / stats.calls if stats.calls > 0 else 0.0
-             )
+             wasteage = total_wasted_calls / total_calls if total_calls > 0 else 0.0
              effective_parallelism = average_parallelism * (1.0 - wasteage)

          # Collect pass statistics in run order (only those with test evaluations)
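
The totals fold the counters accumulated before any restarts into the live stats, and effective parallelism then discounts average parallelism by the fraction of wasted calls. A quick worked example with invented numbers, following the formulas above:

    accumulated_calls, live_calls = 400, 600      # 1000 total calls
    accumulated_wasted, live_wasted = 30, 70      # 100 total wasted calls
    total_calls = accumulated_calls + live_calls
    total_wasted = accumulated_wasted + live_wasted
    wasteage = total_wasted / total_calls         # 0.1
    average_parallelism = 7.5                     # assumed sample average
    effective_parallelism = average_parallelism * (1.0 - wasteage)   # 6.75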
@@ -479,14 +611,21 @@ class ReducerWorker:
          new_entries = self._size_history[self._last_sent_history_index :]
          self._last_sent_history_index = len(self._size_history)

+         # Get history directory info for history explorer
+         history_dir: str | None = None
+         target_basename = ""
+         if self.state is not None and self.state.history_manager is not None:
+             history_dir = self.state.history_manager.history_dir
+             target_basename = self.state.history_manager.target_basename
+
          return ProgressUpdate(
              status=self.reducer.status if self.reducer else "",
              size=stats.current_test_case_size,
              original_size=stats.initial_test_case_size,
-             calls=stats.calls,
-             reductions=stats.reductions,
-             interesting_calls=stats.interesting_calls,
-             wasted_calls=stats.wasted_interesting_calls,
+             calls=total_calls,
+             reductions=total_reductions,
+             interesting_calls=total_interesting_calls,
+             wasted_calls=total_wasted_calls,
              runtime=runtime,
              parallel_workers=parallel_workers,
              average_parallelism=average_parallelism,
@@ -501,6 +640,8 @@ class ReducerWorker:
              active_test_id=active_test_id,
              last_test_return_code=last_return_code,
              new_size_history=new_entries,
+             history_dir=history_dir,
+             target_basename=target_basename,
          )

      async def emit_progress_updates(self) -> None:
@@ -510,8 +651,14 @@ class ReducerWorker:
          if update is not None:
              await self.emit(update)

-         while self.running:
+         while True:
              await trio.sleep(0.1)
+             # Keep running while reducer is active or restart is pending.
+             # During restart, running is temporarily False but we need to
+             # keep emitting updates until the restart completes and running
+             # is set back to True.
+             if not self.running and not self._restart_requested:
+                 break
              update = await self._build_progress_update()
              if update is not None:
                  await self.emit(update)
@@ -532,6 +679,7 @@ class ReducerWorker:
              if trivial_error:
                  await self.emit(Response(id="", error=trivial_error))
          except* InvalidInitialExample as excs:
+             traceback.print_exc()
              assert len(excs.exceptions) == 1
              (e,) = excs.exceptions
              # Build a detailed error message for invalid initial examples
@@ -540,10 +688,10 @@ class ReducerWorker:
              else:
                  error_message = str(e)
              await self.emit(Response(id="", error=error_message))
-         except* Exception as e:
-             # Catch any other exception during reduction and emit as error
+         except* Exception:
              traceback.print_exc()
-             await self.emit(Response(id="", error=str(e.exceptions[0])))
+             # Include full traceback in error message in case stderr isn't visible
+             await self.emit(Response(id="", error=traceback.format_exc()))
          finally:
              self._cancel_scope = None
              self.running = False
@@ -558,9 +706,20 @@ class ReducerWorker:
              while not self.running:
                  await trio.sleep(0.01)

-             # Start progress updates and reducer
+             # Start progress updates
              nursery.start_soon(self.emit_progress_updates)
-             await self.run_reducer()
+
+             # Run reducer, looping if restart is requested
+             while True:
+                 self._restart_requested = False
+                 await self.run_reducer()
+
+                 # Check if we should restart
+                 if not self._restart_requested:
+                     break
+
+                 # Set running=True here since run_reducer's finally block set it to False
+                 self.running = True

              # Emit final progress update before completion
              final_update = await self._build_progress_update()
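
The ordering here matters: _handle_restart_from sets _restart_requested before cancelling the scope, because this loop only restarts if it still sees the flag after run_reducer() returns. A minimal trio sketch of the same control flow (not shrinkray's actual worker, just the flag-before-cancel pattern under those assumptions):

    import trio

    state = {"restart_requested": False, "scope": None}

    async def worker():
        while True:
            state["restart_requested"] = False
            with trio.CancelScope() as scope:
                state["scope"] = scope
                await trio.sleep_forever()        # stands in for run_reducer()
            if not state["restart_requested"]:    # checked after the scope exits
                break                             # no restart requested: finish

    async def controller():
        await trio.sleep(0.1)
        state["restart_requested"] = True         # set the flag BEFORE cancelling
        state["scope"].cancel()                   # worker loops around and restarts
        await trio.sleep(0.1)
        state["scope"].cancel()                   # flag is False this time: worker exits

    async def main():
        async with trio.open_nursery() as nursery:
            nursery.start_soon(worker)
            nursery.start_soon(controller)

    trio.run(main)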
@@ -576,6 +735,12 @@ class ReducerWorker:
              self.state.output_manager.cleanup_all()
          if self._output_dir is not None and os.path.isdir(self._output_dir):
              shutil.rmtree(self._output_dir, ignore_errors=True)
+         # Close log file if we opened one
+         if self._log_file is not None:
+             try:
+                 self._log_file.close()
+             except Exception:
+                 pass


  def main() -> None:
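
The stderr redirection added in _start_reduction, together with the close in the cleanup path above, follows the usual swap-and-restore pattern. A standalone sketch under assumed paths (the worker keeps the file open for the life of the run and only closes it on shutdown, rather than restoring stderr as this demo does):

    import os
    import sys

    log_path = os.path.join("history-dir", "shrinkray.log")   # hypothetical location
    os.makedirs(os.path.dirname(log_path), exist_ok=True)

    log_file = open(log_path, "a", encoding="utf-8")
    original_stderr = sys.stderr
    sys.stderr = log_file                  # tracebacks now land in the log file
    try:
        print("this goes to shrinkray.log", file=sys.stderr)
    finally:
        sys.stderr = original_stderr       # restore for the demo only
        log_file.close()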