shrinkray 25.12.28.0__py3-none-any.whl → 26.1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -57,8 +57,17 @@ class ProgressUpdate:
     disabled_passes: list[str] = field(default_factory=list)
     # Test output preview (last 4KB of current/recent test output)
     test_output_preview: str = ""
-    # Currently running test ID (None if no test running)
+    # Test ID of the output being displayed (None if no output yet)
     active_test_id: int | None = None
+    # Return code of the displayed test (None if test is still running)
+    last_test_return_code: int | None = None
+    # New size history entries since last update: list of (runtime_seconds, size)
+    # Client should accumulate these over time
+    new_size_history: list[tuple[float, int]] = field(default_factory=list)
+    # History directory path (for browsing reductions/also-interesting)
+    history_dir: str | None = None
+    # Target file basename (for reading history files)
+    target_basename: str = ""
 
 
 @dataclass
@@ -120,6 +129,10 @@ def serialize(msg: Request | Response | ProgressUpdate) -> str:
                 "disabled_passes": msg.disabled_passes,
                 "test_output_preview": msg.test_output_preview,
                 "active_test_id": msg.active_test_id,
+                "last_test_return_code": msg.last_test_return_code,
+                "new_size_history": msg.new_size_history,
+                "history_dir": msg.history_dir,
+                "target_basename": msg.target_basename,
             },
         }
     else:
@@ -169,6 +182,10 @@ def deserialize(line: str) -> Request | Response | ProgressUpdate:
             disabled_passes=d.get("disabled_passes", []),
             test_output_preview=d.get("test_output_preview", ""),
             active_test_id=d.get("active_test_id"),
+            last_test_return_code=d.get("last_test_return_code"),
+            new_size_history=[tuple(x) for x in d.get("new_size_history", [])],
+            history_dir=d.get("history_dir"),
+            target_basename=d.get("target_basename", ""),
         )
 
     # Check for response (has "result" or "error" field)
@@ -12,7 +12,14 @@ from typing import Any, Protocol
 import trio
 from binaryornot.helpers import is_binary_string
 
+from shrinkray.cli import InputType
+from shrinkray.passes.clangdelta import C_FILE_EXTENSIONS, ClangDelta, find_clang_delta
 from shrinkray.problem import InvalidInitialExample
+from shrinkray.state import (
+    OutputCaptureManager,
+    ShrinkRayDirectoryState,
+    ShrinkRayStateSingleFile,
+)
 from shrinkray.subprocess.protocol import (
     PassStatsData,
     ProgressUpdate,
@@ -21,6 +28,7 @@ from shrinkray.subprocess.protocol import (
     deserialize,
     serialize,
 )
+from shrinkray.work import Volume
 
 
 class InputStream(Protocol):
@@ -50,6 +58,7 @@ class ReducerWorker:
         self.problem = None
         self.state = None
         self._cancel_scope: trio.CancelScope | None = None
+        self._restart_requested = False
         # Parallelism tracking
         self._parallel_samples = 0
         self._parallel_total = 0
@@ -58,6 +67,21 @@ class ReducerWorker:
         self._output_stream = output_stream
         # Output directory for test output capture (cleaned up on shutdown)
         self._output_dir: str | None = None
+        # Size history for graphing: list of (runtime_seconds, size) tuples
+        self._size_history: list[tuple[float, int]] = []
+        self._last_sent_history_index: int = 0
+        self._last_recorded_size: int = 0
+        self._last_history_time: float = 0.0
+        # Original start time - preserved across restarts for consistent graphing
+        self._original_start_time: float | None = None
+        # Log file for stderr redirection (cleaned up on shutdown)
+        self._log_file: Any = None
+        # Accumulated stats across restarts - when a restart happens, the problem
+        # gets a fresh ReductionStats object, so we need to preserve the counts
+        self._accumulated_calls: int = 0
+        self._accumulated_reductions: int = 0
+        self._accumulated_interesting_calls: int = 0
+        self._accumulated_wasted_calls: int = 0
 
     async def emit(self, msg: Response | ProgressUpdate) -> None:
         """Write a message to the output stream."""
@@ -103,9 +127,10 @@ class ReducerWorker:
                 return
             response = await self.handle_command(request)
             await self.emit(response)
-        except Exception as e:
+        except Exception:
             traceback.print_exc()
-            await self.emit(Response(id="", error=str(e)))
+            # Include full traceback in error message in case stderr isn't visible
+            await self.emit(Response(id="", error=traceback.format_exc()))
 
     async def handle_command(self, request: Request) -> Response:
         """Handle a command request and return a response."""
@@ -122,6 +147,8 @@ class ReducerWorker:
                 return self._handle_enable_pass(request.id, request.params)
             case "skip_pass":
                 return self._handle_skip_pass(request.id)
+            case "restart_from":
+                return await self._handle_restart_from(request.id, request.params)
             case _:
                 return Response(
                     id=request.id, error=f"Unknown command: {request.command}"
@@ -136,6 +163,7 @@ class ReducerWorker:
             await self._start_reduction(params)
             return Response(id=request_id, result={"status": "started"})
         except* InvalidInitialExample as excs:
+            traceback.print_exc()
             assert len(excs.exceptions) == 1
             (e,) = excs.exceptions
             # Build a detailed error message for invalid initial examples
@@ -143,26 +171,14 @@ class ReducerWorker:
                 error_message = await self.state.build_error_message(e)
             else:
                 error_message = str(e)
-        except* Exception as e:
+        except* Exception:
             traceback.print_exc()
-            error_message = str(e.exceptions[0])
+            # Include full traceback in error message in case stderr isn't visible
+            error_message = traceback.format_exc()
         return Response(id=request_id, error=error_message)
 
     async def _start_reduction(self, params: dict) -> None:
         """Initialize and start the reduction."""
-        from shrinkray.cli import InputType
-        from shrinkray.passes.clangdelta import (
-            C_FILE_EXTENSIONS,
-            ClangDelta,
-            find_clang_delta,
-        )
-        from shrinkray.state import (
-            ShrinkRayDirectoryState,
-            ShrinkRayStateSingleFile,
-            TestOutputManager,
-        )
-        from shrinkray.work import Volume
-
         filename = params["file_path"]
         test = params["test"]
         parallelism = params.get("parallelism", os.cpu_count() or 1)
@@ -176,6 +192,8 @@ class ReducerWorker:
         clang_delta_path = params.get("clang_delta", "")
         trivial_is_error = params.get("trivial_is_error", True)
         skip_validation = params.get("skip_validation", False)
+        history_enabled = params.get("history_enabled", True)
+        also_interesting_code = params.get("also_interesting_code")
 
         clang_delta_executable = None
         if os.path.splitext(filename)[1] in C_FILE_EXTENSIONS and not no_clang_delta:
@@ -197,6 +215,8 @@ class ReducerWorker:
             "seed": seed,
             "volume": volume,
             "clang_delta_executable": clang_delta_executable,
+            "history_enabled": history_enabled,
+            "also_interesting_code": also_interesting_code,
         }
 
         if os.path.isdir(filename):
@@ -213,7 +233,18 @@ class ReducerWorker:
 
         # Create output manager for test output capture (always enabled for TUI)
         self._output_dir = tempfile.mkdtemp(prefix="shrinkray-output-")
-        self.state.output_manager = TestOutputManager(output_dir=self._output_dir)
+        self.state.output_manager = OutputCaptureManager(output_dir=self._output_dir)
+
+        # Redirect stderr to the history directory if history is enabled
+        # This ensures errors are logged to the per-run directory
+        if self.state.history_manager is not None:
+            log_path = os.path.join(
+                self.state.history_manager.history_dir, "shrinkray.log"
+            )
+            # Ensure directory exists (history_manager creates it, but be safe)
+            os.makedirs(os.path.dirname(log_path), exist_ok=True)
+            self._log_file = open(log_path, "a", encoding="utf-8")
+            sys.stderr = self._log_file
 
         self.problem = self.state.problem
         self.reducer = self.state.reducer
@@ -304,17 +335,116 @@ class ReducerWorker:
             return Response(id=request_id, result={"status": "skipped"})
         return Response(id=request_id, error="Reducer does not support pass control")
 
-    def _get_test_output_preview(self) -> tuple[str, int | None]:
-        """Get preview of current test output and active test ID."""
+    async def _handle_restart_from(self, request_id: str, params: dict) -> Response:
+        """Restart reduction from a specific history point.
+
+        This moves all reductions after the specified point to also-interesting,
+        resets the current test case to that point, and modifies the
+        interestingness test to reject previously reduced values.
+        """
+        reduction_number = params.get("reduction_number")
+        if reduction_number is None:
+            return Response(id=request_id, error="reduction_number is required")
+
+        if self.state is None or self.state.history_manager is None:
+            return Response(id=request_id, error="History not available")
+
+        # Restart only works with single-file reductions
+        if not isinstance(self.state, ShrinkRayStateSingleFile):
+            return Response(
+                id=request_id,
+                error="Restart from history not supported for directory reductions",
+            )
+
+        # First, try to get restart data - this validates the reduction exists
+        # Do this BEFORE cancelling the current reduction to avoid leaving
+        # things in an inconsistent state if the restart fails
+        try:
+            new_test_case, excluded_set = (
+                self.state.history_manager.restart_from_reduction(reduction_number)
+            )
+        except FileNotFoundError:
+            return Response(
+                id=request_id, error=f"Reduction {reduction_number} not found"
+            )
+        except Exception:
+            traceback.print_exc()
+            return Response(id=request_id, error=traceback.format_exc())
+
+        # Save current stats before restart - the new problem will have fresh stats
+        if self.problem is not None:
+            stats = self.problem.stats
+            self._accumulated_calls += stats.calls
+            self._accumulated_reductions += stats.reductions
+            self._accumulated_interesting_calls += stats.interesting_calls
+            self._accumulated_wasted_calls += stats.wasted_interesting_calls
+
+        # Set restart flag BEFORE cancelling the scope to avoid race condition.
+        # The run() loop checks this flag after run_reducer() returns - if we
+        # cancel first and the flag isn't set, the loop will exit instead of
+        # restarting.
+        self._restart_requested = True
+        self.running = False
+
+        # Now cancel current reduction
+        if self._cancel_scope is not None:
+            self._cancel_scope.cancel()
+
+        try:
+            # Clear old test output to avoid showing stale output from before restart
+            if self.state.output_manager is not None:
+                self.state.output_manager.cleanup_all()
+
+            # Reset state with new initial and exclusions
+            self.state.reset_for_restart(new_test_case, excluded_set)
+
+            # Get fresh reducer BEFORE any await points to avoid race condition.
+            # After we cancel the scope, the main run() loop may loop back and
+            # call run_reducer() while we're still in an await. We need the new
+            # reducer to be set before that happens.
+            self.reducer = self.state.reducer
+            self.problem = self.reducer.target
+
+            # Record the upward jump in size at current runtime.
+            # Don't reset history - this preserves the graph continuity.
+            if self._size_history and self._original_start_time is not None:
+                current_runtime = time.time() - self._original_start_time
+                self._size_history.append((current_runtime, len(new_test_case)))
+                self._last_recorded_size = len(new_test_case)
+                self._last_history_time = current_runtime
+
+            # Write new test case to file (can happen after reducer is set up)
+            await self.state.write_test_case_to_file(self.state.filename, new_test_case)
+
+            # Ready to restart - running will be set to True by the run() loop
+            return Response(
+                id=request_id,
+                result={"status": "restarted", "size": len(new_test_case)},
+            )
+        except Exception:
+            traceback.print_exc()
+            # Reset restart flag - we can't restart, so don't try
+            self._restart_requested = False
+            # Include full traceback in error message in case stderr isn't visible
+            return Response(id=request_id, error=traceback.format_exc())
+
+    def _get_test_output_preview(self) -> tuple[str, int | None, int | None]:
+        """Get preview of current test output, test ID, and return code.
+
+        Returns (content, test_id, return_code) where:
+        - content: the last 4KB of the output file
+        - test_id: the test ID being displayed
+        - return_code: None if test is still running, otherwise the exit code
+        """
         if self.state is None or self.state.output_manager is None:
-            return "", None
+            return "", None, None
 
-        manager = self.state.output_manager
-        active_test_id = manager.get_active_test_id()
-        output_path = manager.get_current_output_path()
+        output_path, test_id, return_code = (
+            self.state.output_manager.get_current_output()
+        )
 
         if output_path is None:
-            return "", active_test_id
+            return "", None, None
 
         # Read last 4KB of file
         try:
@@ -326,9 +456,13 @@ class ReducerWorker:
                 else:
                     f.seek(0)
                 data = f.read()
-            return data.decode("utf-8", errors="replace"), active_test_id
+            return (
+                data.decode("utf-8", errors="replace"),
+                test_id,
+                return_code,
+            )
         except OSError:
-            return "", active_test_id
+            return "", test_id, return_code
 
     def _get_content_preview(self) -> tuple[str, bool]:
         """Get a preview of the current test case content."""
@@ -379,6 +513,34 @@ class ReducerWorker:
             return None
 
         stats = self.problem.stats
+
+        # Use original start time for consistent graphing across restarts.
+        # Capture it on first call.
+        if self._original_start_time is None:
+            self._original_start_time = stats.start_time
+        runtime = time.time() - self._original_start_time
+        current_size = stats.current_test_case_size
+
+        # Record size history when size changes or periodically
+        # Use 200ms interval for first 5 minutes, then 1s (ticks are at 1-minute intervals)
+        history_interval = 1.0 if runtime >= 300 else 0.2
+
+        if not self._size_history:
+            # First sample: record initial size at time 0
+            self._size_history.append((0.0, stats.initial_test_case_size))
+            self._last_recorded_size = stats.initial_test_case_size
+            self._last_history_time = 0.0
+
+        if current_size != self._last_recorded_size:
+            # Size changed - always record
+            self._size_history.append((runtime, current_size))
+            self._last_recorded_size = current_size
+            self._last_history_time = runtime
+        elif runtime - self._last_history_time >= history_interval:
+            # No size change but interval passed - record periodic update
+            self._size_history.append((runtime, current_size))
+            self._last_history_time = runtime
+
         content_preview, hex_mode = self._get_content_preview()
 
         # Get parallel workers count and track average
@@ -388,14 +550,22 @@ class ReducerWorker:
             self._parallel_samples += 1
             self._parallel_total += parallel_workers
 
+        # Calculate total stats including accumulated from previous runs (before restarts)
+        total_calls = stats.calls + self._accumulated_calls
+        total_reductions = stats.reductions + self._accumulated_reductions
+        total_interesting_calls = (
+            stats.interesting_calls + self._accumulated_interesting_calls
+        )
+        total_wasted_calls = (
+            stats.wasted_interesting_calls + self._accumulated_wasted_calls
+        )
+
         # Calculate parallelism stats
         average_parallelism = 0.0
         effective_parallelism = 0.0
         if self._parallel_samples > 0:
             average_parallelism = self._parallel_total / self._parallel_samples
-            wasteage = (
-                stats.wasted_interesting_calls / stats.calls if stats.calls > 0 else 0.0
-            )
+            wasteage = total_wasted_calls / total_calls if total_calls > 0 else 0.0
             effective_parallelism = average_parallelism * (1.0 - wasteage)
 
         # Collect pass statistics in run order (only those with test evaluations)
@@ -433,17 +603,30 @@ class ReducerWorker:
             disabled_passes = []
 
         # Get test output preview
-        test_output_preview, active_test_id = self._get_test_output_preview()
+        test_output_preview, active_test_id, last_return_code = (
+            self._get_test_output_preview()
+        )
+
+        # Get new size history entries since last update
+        new_entries = self._size_history[self._last_sent_history_index :]
+        self._last_sent_history_index = len(self._size_history)
+
+        # Get history directory info for history explorer
+        history_dir: str | None = None
+        target_basename = ""
+        if self.state is not None and self.state.history_manager is not None:
+            history_dir = self.state.history_manager.history_dir
+            target_basename = self.state.history_manager.target_basename
 
         return ProgressUpdate(
             status=self.reducer.status if self.reducer else "",
             size=stats.current_test_case_size,
             original_size=stats.initial_test_case_size,
-            calls=stats.calls,
-            reductions=stats.reductions,
-            interesting_calls=stats.interesting_calls,
-            wasted_calls=stats.wasted_interesting_calls,
-            runtime=time.time() - stats.start_time,
+            calls=total_calls,
+            reductions=total_reductions,
+            interesting_calls=total_interesting_calls,
+            wasted_calls=total_wasted_calls,
+            runtime=runtime,
             parallel_workers=parallel_workers,
             average_parallelism=average_parallelism,
             effective_parallelism=effective_parallelism,
@@ -455,6 +638,10 @@ class ReducerWorker:
             disabled_passes=disabled_passes,
             test_output_preview=test_output_preview,
             active_test_id=active_test_id,
+            last_test_return_code=last_return_code,
+            new_size_history=new_entries,
+            history_dir=history_dir,
+            target_basename=target_basename,
         )
 
     async def emit_progress_updates(self) -> None:
@@ -464,8 +651,14 @@ class ReducerWorker:
         if update is not None:
             await self.emit(update)
 
-        while self.running:
+        while True:
             await trio.sleep(0.1)
+            # Keep running while reducer is active or restart is pending.
+            # During restart, running is temporarily False but we need to
+            # keep emitting updates until the restart completes and running
+            # is set back to True.
+            if not self.running and not self._restart_requested:
+                break
             update = await self._build_progress_update()
             if update is not None:
                 await self.emit(update)
@@ -486,6 +679,7 @@ class ReducerWorker:
             if trivial_error:
                 await self.emit(Response(id="", error=trivial_error))
         except* InvalidInitialExample as excs:
+            traceback.print_exc()
             assert len(excs.exceptions) == 1
             (e,) = excs.exceptions
             # Build a detailed error message for invalid initial examples
@@ -494,10 +688,10 @@ class ReducerWorker:
             else:
                 error_message = str(e)
             await self.emit(Response(id="", error=error_message))
-        except* Exception as e:
-            # Catch any other exception during reduction and emit as error
+        except* Exception:
             traceback.print_exc()
-            await self.emit(Response(id="", error=str(e.exceptions[0])))
+            # Include full traceback in error message in case stderr isn't visible
+            await self.emit(Response(id="", error=traceback.format_exc()))
         finally:
             self._cancel_scope = None
             self.running = False
@@ -512,9 +706,20 @@ class ReducerWorker:
             while not self.running:
                 await trio.sleep(0.01)
 
-            # Start progress updates and reducer
+            # Start progress updates
             nursery.start_soon(self.emit_progress_updates)
-            await self.run_reducer()
+
+            # Run reducer, looping if restart is requested
+            while True:
+                self._restart_requested = False
+                await self.run_reducer()
+
+                # Check if we should restart
+                if not self._restart_requested:
+                    break
+
+                # Set running=True here since run_reducer's finally block set it to False
+                self.running = True
 
             # Emit final progress update before completion
             final_update = await self._build_progress_update()
@@ -530,6 +735,12 @@ class ReducerWorker:
             self.state.output_manager.cleanup_all()
         if self._output_dir is not None and os.path.isdir(self._output_dir):
             shutil.rmtree(self._output_dir, ignore_errors=True)
+        # Close log file if we opened one
+        if self._log_file is not None:
+            try:
+                self._log_file.close()
+            except Exception:
+                pass
 
 
 def main() -> None: