wafer-core 0.1.46__py3-none-any.whl → 0.1.47__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -722,7 +722,15 @@ class App:
         """Send message through update, execute resulting command."""
         if not self._running:
             return
+        old_model = self._model
         self._model, cmd = self._update_fn(self._model, msg)
+        _log(
+            "dispatch",
+            msg_type=type(msg).__name__,
+            msg_data=repr(msg)[:200],
+            cmd_kind=cmd._kind,
+            model_changed=self._model is not old_model,
+        )
         self._execute_cmd(cmd)

     def _execute_cmd(self, cmd: Cmd) -> None:
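The `model_changed` field relies on identity semantics: an Elm-style `update` returns either the very same frozen `Model` (nothing happened) or a new instance built with `dataclasses.replace`, so `is not` is a cheap change detector. A minimal sketch of that invariant, using a hypothetical one-field model rather than the app's real one:

    from dataclasses import dataclass, replace

    @dataclass(frozen=True)
    class Model:
        count: int = 0  # hypothetical field, for illustration only

    def update(model: Model, msg: str) -> Model:
        # Return the same object when nothing changes, a new one otherwise
        return replace(model, count=model.count + 1) if msg == "inc" else model

    m = Model()
    assert update(m, "noop") is m       # unchanged: same instance
    assert update(m, "inc") is not m    # changed: replace() allocated a new one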
@@ -0,0 +1,186 @@
+"""Viewport component for pytui - Elm-style scrollable text view.
+
+A frozen dataclass + pure functions approach matching pytui's architecture.
+Based on patterns from charmbracelet/bubbles viewport.
+
+Usage:
+    # In your Model
+    @dataclass(frozen=True)
+    class Model:
+        viewport: Viewport = Viewport()
+
+    # In update()
+    case KeyPress(key="j"):
+        new_vp = viewport_scroll_down(model.viewport, 1, visible_height)
+        return replace(model, viewport=new_vp), Cmd.none()
+
+    # In view()
+    visible = viewport_visible_lines(model.viewport, visible_height)
+    for line in visible:
+        lines.append(line)
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, replace
+
+
+def _clamp(value: int, min_val: int, max_val: int) -> int:
+    """Clamp value to [min_val, max_val]."""
+    if min_val > max_val:
+        min_val, max_val = max_val, min_val
+    return max(min_val, min(value, max_val))
+
+
+@dataclass(frozen=True)
+class Viewport:
+    """Immutable viewport state.
+
+    Attributes:
+        lines: Content as tuple of strings (one per line)
+        y_offset: Vertical scroll position (0 = top)
+        auto_follow: If True, scroll to bottom when new content added
+    """
+
+    lines: tuple[str, ...] = ()
+    y_offset: int = 0
+    auto_follow: bool = True
+
+
+# ── Query functions ──────────────────────────────────────────────────────────
+
+
+def viewport_max_offset(vp: Viewport, visible_height: int) -> int:
+    """Maximum valid y_offset for this viewport."""
+    return max(0, len(vp.lines) - visible_height)
+
+
+def viewport_at_top(vp: Viewport) -> bool:
+    """Check if viewport is scrolled to top."""
+    return vp.y_offset <= 0
+
+
+def viewport_at_bottom(vp: Viewport, visible_height: int) -> bool:
+    """Check if viewport is scrolled to bottom."""
+    return vp.y_offset >= viewport_max_offset(vp, visible_height)
+
+
+def viewport_scroll_percent(vp: Viewport, visible_height: int) -> float:
+    """Get scroll position as percentage (0.0 to 1.0)."""
+    max_off = viewport_max_offset(vp, visible_height)
+    if max_off == 0:
+        return 1.0
+    return vp.y_offset / max_off
+
+
+def viewport_visible_lines(vp: Viewport, visible_height: int) -> list[str]:
+    """Get the lines currently visible in the viewport."""
+    start = _clamp(vp.y_offset, 0, len(vp.lines))
+    end = _clamp(start + visible_height, start, len(vp.lines))
+    return list(vp.lines[start:end])
+
+
+# ── Scroll operations ────────────────────────────────────────────────────────
+
+
+def viewport_scroll_down(vp: Viewport, n: int, visible_height: int) -> Viewport:
+    """Scroll down by n lines. Returns unchanged viewport if already at bottom."""
+    if viewport_at_bottom(vp, visible_height) or n <= 0:
+        return vp
+    max_off = viewport_max_offset(vp, visible_height)
+    new_offset = _clamp(vp.y_offset + n, 0, max_off)
+    # Re-enable auto_follow if we hit bottom
+    auto_follow = new_offset >= max_off
+    return replace(vp, y_offset=new_offset, auto_follow=auto_follow)
+
+
+def viewport_scroll_up(vp: Viewport, n: int) -> Viewport:
+    """Scroll up by n lines. Returns unchanged viewport if already at top."""
+    if viewport_at_top(vp) or n <= 0:
+        return vp
+    new_offset = max(0, vp.y_offset - n)
+    # Disable auto_follow when scrolling up
+    return replace(vp, y_offset=new_offset, auto_follow=False)
+
+
+def viewport_page_down(vp: Viewport, visible_height: int) -> Viewport:
+    """Scroll down by one page."""
+    return viewport_scroll_down(vp, visible_height, visible_height)
+
+
+def viewport_page_up(vp: Viewport, visible_height: int) -> Viewport:
+    """Scroll up by one page."""
+    return viewport_scroll_up(vp, visible_height)
+
+
+def viewport_half_page_down(vp: Viewport, visible_height: int) -> Viewport:
+    """Scroll down by half a page (Ctrl+D style)."""
+    return viewport_scroll_down(vp, visible_height // 2, visible_height)
+
+
+def viewport_half_page_up(vp: Viewport, visible_height: int) -> Viewport:
+    """Scroll up by half a page (Ctrl+U style)."""
+    return viewport_scroll_up(vp, visible_height // 2)
+
+
+def viewport_goto_top(vp: Viewport) -> Viewport:
+    """Jump to top of content."""
+    if viewport_at_top(vp):
+        return vp
+    return replace(vp, y_offset=0, auto_follow=False)
+
+
+def viewport_goto_bottom(vp: Viewport, visible_height: int) -> Viewport:
+    """Jump to bottom of content, re-enable auto_follow."""
+    max_off = viewport_max_offset(vp, visible_height)
+    return replace(vp, y_offset=max_off, auto_follow=True)
+
+
+# ── Content operations ───────────────────────────────────────────────────────
+
+
+def viewport_set_content(vp: Viewport, lines: tuple[str, ...], visible_height: int) -> Viewport:
+    """Replace all content. Adjusts scroll if past new content end."""
+    new_vp = replace(vp, lines=lines)
+    max_off = viewport_max_offset(new_vp, visible_height)

+    # If scrolled past content, jump to bottom
+    if new_vp.y_offset > max_off:
+        return replace(new_vp, y_offset=max_off)
+
+    # If auto_follow enabled, stay at bottom
+    if new_vp.auto_follow:
+        return replace(new_vp, y_offset=max_off)
+
+    return new_vp
+
+
+def viewport_append_line(vp: Viewport, line: str, visible_height: int) -> Viewport:
+    """Append a single line. Auto-scrolls if auto_follow enabled."""
+    new_lines = vp.lines + (line,)
+    new_vp = replace(vp, lines=new_lines)
+
+    if vp.auto_follow:
+        max_off = viewport_max_offset(new_vp, visible_height)
+        return replace(new_vp, y_offset=max_off)
+
+    return new_vp
+
+
+def viewport_append_lines(vp: Viewport, lines: tuple[str, ...], visible_height: int) -> Viewport:
+    """Append multiple lines. Auto-scrolls if auto_follow enabled."""
+    if not lines:
+        return vp
+    new_lines = vp.lines + lines
+    new_vp = replace(vp, lines=new_lines)
+
+    if vp.auto_follow:
+        max_off = viewport_max_offset(new_vp, visible_height)
+        return replace(new_vp, y_offset=max_off)
+
+    return new_vp
+
+
+def viewport_clear(vp: Viewport) -> Viewport:
+    """Clear all content and reset scroll."""
+    return Viewport(lines=(), y_offset=0, auto_follow=True)
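Together, the scroll and append operations form a small follow/manual state machine: scrolling up detaches `auto_follow`, reaching the bottom (by scrolling or `viewport_goto_bottom`) re-attaches it, and appends only advance the offset while attached. A short sketch of that behavior, using only functions from this hunk (per the RECORD below, the module lands as wafer_core/rollouts/_pytui/viewport.py):

    vp = Viewport()
    for i in range(10):
        vp = viewport_append_line(vp, f"line {i}", visible_height=4)
    assert vp.auto_follow and viewport_at_bottom(vp, 4)   # tracking new content

    vp = viewport_scroll_up(vp, 3)                        # detaches auto_follow
    assert not vp.auto_follow and vp.y_offset == 3
    vp = viewport_append_line(vp, "line 10", visible_height=4)
    assert vp.y_offset == 3                               # stays put while detached

    vp = viewport_goto_bottom(vp, 4)                      # re-attaches
    assert vp.auto_follow and viewport_at_bottom(vp, 4)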
@@ -844,6 +844,18 @@ async def process_pending_tools(
            if "content" in tool_call.args:
                result_summary["content"] = tool_call.args["content"]

+        # For edit, capture file path and diff stats
+        elif tool_call.name == "edit":
+            if "file_path" in tool_call.args:
+                result_summary["path"] = tool_call.args["file_path"]
+            # Compute +/- lines from old_string and new_string
+            old_str = tool_call.args.get("old_string", "")
+            new_str = tool_call.args.get("new_string", "")
+            old_lines = old_str.count("\n") + (1 if old_str else 0)
+            new_lines = new_str.count("\n") + (1 if new_str else 0)
+            result_summary["lines_removed"] = old_lines
+            result_summary["lines_added"] = new_lines
+
         # Extract key metrics from details (e.g., compiled, correct for kernelbench)
         if tool_result.details:
             for k, v in tool_result.details.items():
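The +/- counts treat any non-empty string as at least one line (`count("\n") + 1`), so a replacement without a trailing newline still registers. Worked examples of the counting rule, as a standalone sketch rather than package code:

    def count_lines(s: str) -> int:
        # Mirrors the summary logic above
        return s.count("\n") + (1 if s else 0)

    assert count_lines("") == 0          # empty old_string: pure insertion
    assert count_lines("x = 1") == 1     # one line, no trailing newline
    assert count_lines("a\nb\nc") == 3   # three lines
    assert count_lines("a\n") == 2       # trailing newline counts an extra (empty) line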
@@ -19,6 +19,7 @@ from __future__ import annotations

 import json
 import os
+import re
 import signal
 import subprocess
 import sys
@@ -71,6 +72,57 @@ SAMPLE_EVENT_TYPES = frozenset({
 })


+# ── Utilities ────────────────────────────────────────────────────────────────
+
+# Regex for matching ANSI escape codes
+_ANSI_ESCAPE = re.compile(r"\x1b\[[0-9;]*m")
+
+
+def _clamp(value: int, min_val: int, max_val: int) -> int:
+    """Clamp value to [min_val, max_val]."""
+    assert min_val <= max_val, f"min_val ({min_val}) > max_val ({max_val})"
+    return max(min_val, min(value, max_val))
+
+
+def _visible_len(s: str) -> int:
+    """Return visible length of string (excluding ANSI codes)."""
+    return len(_ANSI_ESCAPE.sub("", s))
+
+
+def _ansi_slice(s: str, start: int) -> str:
+    """Slice a string with ANSI codes from visible position `start`.
+
+    Returns substring starting at visible character `start`, preserving
+    any ANSI codes that were active at that position.
+    """
+    if start <= 0:
+        return s
+
+    visible_pos = 0
+    i = 0
+    active_codes: list[str] = []  # Track active ANSI codes
+
+    while i < len(s) and visible_pos < start:
+        # Check for ANSI escape sequence
+        match = _ANSI_ESCAPE.match(s, i)
+        if match:
+            code = match.group()
+            if code == RESET:
+                active_codes.clear()
+            else:
+                active_codes.append(code)
+            i = match.end()
+        else:
+            visible_pos += 1
+            i += 1
+
+    # Return remaining string with any active codes prepended
+    result = s[i:]
+    if active_codes:
+        result = "".join(active_codes) + result
+    return result
+
+
 # ── State ────────────────────────────────────────────────────────────────────


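`_visible_len` and `_ansi_slice` let the marquee shift a styled row by visible columns instead of raw bytes, re-prepending whatever color codes were active at the cut. A quick sketch, assuming the module's `RESET` constant is the standard `\x1b[0m`:

    import re

    _ANSI_ESCAPE = re.compile(r"\x1b\[[0-9;]*m")
    RESET = "\x1b[0m"   # assumption about the module's constant
    RED = "\x1b[31m"    # illustrative color code

    s = f"plain {RED}red text{RESET} tail"
    assert len(_ANSI_ESCAPE.sub("", s)) == len("plain red text tail")

    # _ansi_slice(s, 8) cuts inside the red span, so the active code is
    # re-prepended: the result renders as RED + "d text" + RESET + " tail".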
@@ -196,9 +248,23 @@ def derive_state(events: list[dict[str, Any]]) -> RenderState:
     return state


-def _format_sample_row(sample: SampleState, width: int, selected: bool = False) -> str:
-    """Format a single sample row."""
-    name = sample.name[:25].ljust(25)
+def _format_sample_row(
+    sample: SampleState,
+    width: int,
+    selected: bool = False,
+    marquee_offset: int = 0,
+) -> tuple[str, int]:
+    """Format a single sample row.
+
+    Returns (formatted_row, overflow_chars) where overflow_chars is how many
+    characters extend beyond the visible width (for marquee animation).
+    """
+    # For selected rows, don't truncate name - show full content with marquee
+    if selected:
+        name = sample.name
+    else:
+        name = sample.name[:25].ljust(25)
+
     turn_info = f"T:{sample.turn}"

     if sample.status == "complete":
@@ -213,11 +279,21 @@ def _format_sample_row(sample: SampleState, width: int, selected: bool = False)
         phase = sample.phase or "running"
         row = f" {name} {turn_info:>4} {CYAN}{phase}...{RESET}"

+    # Calculate visible length and overflow
+    visible_len = _visible_len(row)
+    overflow = max(0, visible_len - width)
+
     if selected:
+        # Apply marquee offset for selected rows (ANSI-aware slicing)
+        if marquee_offset > 0 and overflow > 0:
+            offset = min(marquee_offset, overflow)
+            # Skip the leading indent (2 spaces), slice the rest, re-add indent
+            row = " " + _ansi_slice(row, 2 + offset)
+
         # Add selection highlight
         row = f"{BG_SELECTED}{row}{RESET}"

-    return row
+    return row, overflow


 # ── Viewport formatting ──────────────────────────────────────────────────────
@@ -251,10 +327,32 @@ def format_sample_event(
     elif msg == "tool_execution":
         tool = event.get("tool_name", "tool")
         duration = event.get("duration_ms", 0)
-        status = event.get("result_summary", "")
+        summary = event.get("result_summary") or {}
         is_error = event.get("is_error", False)
         color = RED if is_error else CYAN
-        return f"{color}→ {tool}{RESET} ({duration / 1000:.1f}s) {status}", None
+
+        # Format tool-specific summary
+        if tool == "edit" and isinstance(summary, dict):
+            path = summary.get("path", "")
+            # Show just filename, not full path
+            filename = path.split("/")[-1] if path else ""
+            added = summary.get("lines_added", 0)
+            removed = summary.get("lines_removed", 0)
+            diff_info = f"{filename} +{added}/-{removed}"
+            return f"{color}→ {tool}{RESET} ({duration / 1000:.1f}s) {diff_info}", None
+        elif tool == "bash" and isinstance(summary, dict):
+            cmd = summary.get("command", "")
+            # Truncate long commands
+            cmd_short = cmd[:50] + "…" if len(cmd) > 50 else cmd
+            return f"{color}→ {tool}{RESET} ({duration / 1000:.1f}s) {cmd_short}", None
+        elif tool == "write" and isinstance(summary, dict):
+            path = summary.get("path", "")
+            filename = path.split("/")[-1] if path else ""
+            return f"{color}→ {tool}{RESET} ({duration / 1000:.1f}s) {filename}", None
+        else:
+            # Fallback: show summary dict or empty
+            status = str(summary) if summary else ""
+            return f"{color}→ {tool}{RESET} ({duration / 1000:.1f}s) {status}", None

     elif msg == "llm_call":
         provider = event.get("provider", "")
@@ -314,49 +412,39 @@ def render_header(state: RenderState, width: int) -> str:
     return header[:width]


-def render(state: RenderState, width: int, height: int) -> list[str]:
-    """Render state to list of lines. Pure function."""
-    lines: list[str] = []
-
-    lines.append(render_header(state, width))
-
-    # Active samples, sorted by most recently updated
-    active = [s for s in state.samples.values() if s.status != "complete" and s.phase]
-    active.sort(key=lambda s: s.last_update, reverse=True)
-
-    max_samples = max(1, height - 4)
-    for sample in active[:max_samples]:
-        lines.append(_format_sample_row(sample, width))
-
-    total_in_flight = sum(1 for s in state.samples.values() if s.status != "complete")
-    hidden = total_in_flight - len(active[:max_samples])
-    if hidden > 0:
-        lines.append(f"{DIM} ... and {hidden} more in flight{RESET}")
-
-    if state.scores:
-        mean_score = sum(state.scores) / len(state.scores)
-        lines.append(f"{DIM}score: {mean_score:.1%}{RESET}")
-
-    return lines
-
-
 # ── Elm architecture (subprocess entry point) ────────────────────────────────


 @dataclass(frozen=True)
 class Model:
-    """Immutable model for the pytui App."""
+    """Immutable model for the pytui App.
+
+    Uses bubbles-style cursor tracking:
+    - selected_sample_id: stable selection by ID (not index)
+    - viewport_scroll: scroll offset for detail viewport
+
+    Note: list_scroll is computed dynamically in view() to keep selection visible.
+    """

     events: tuple[dict[str, Any], ...] = ()
     done: bool = False

-    # Viewport state
+    # Focus state
     focus: str = "list"  # "list" or "viewport"
-    selected_idx: int = 0  # Index into active samples list
+
+    # Sample list state (bubbles table pattern)
+    selected_sample_id: str | None = None  # Track by ID, not index
+
+    # Marquee state for selected row (scrolls long text horizontally)
+    marquee_offset: int = 0  # Current scroll offset for selected row text
+
+    # Viewport state
     viewport_lines: tuple[str, ...] = ()  # Formatted lines for viewport
     viewport_scroll: int = 0  # Scroll position in viewport
     viewport_auto_follow: bool = True  # Auto-scroll to bottom on new content
-    tailing_sample_id: str | None = None  # Which sample file we're tailing
+
+    # Viewport marquee state (for scrolling long lines when focused)
+    viewport_marquee_offset: int = 0  # Horizontal scroll for viewport lines

     # For streaming text accumulation
     streaming_text: str = ""
@@ -379,6 +467,20 @@ class SampleEvent:
     line: str


+@dataclass(frozen=True)
+class MarqueeTick:
+    """Periodic tick for marquee animation on selected sample row."""
+
+    pass
+
+
+@dataclass(frozen=True)
+class ViewportMarqueeTick:
+    """Periodic tick for marquee animation on viewport lines."""
+
+    pass
+
+
 def _parse_event(line: str) -> dict[str, Any] | None:
     try:
         return json.loads(line)
@@ -387,71 +489,178 @@ def _parse_event(line: str) -> dict[str, Any] | None:


 def _get_active_samples(events: tuple[dict[str, Any], ...]) -> list[SampleState]:
-    """Get list of active (non-complete) samples from events."""
+    """Get list of active (non-complete) samples from events.
+
+    Returns samples sorted by name (stable order) not last_update (jumping order).
+    """
     state = derive_state(list(events))
     active = [s for s in state.samples.values() if s.status != "complete" and s.phase]
-    active.sort(key=lambda s: s.last_update, reverse=True)
+    # Sort by sample name for stable order (not last_update which causes jumping)
+    active.sort(key=lambda s: s.name)
     return active


-def _get_selected_sample_id(events: tuple[dict[str, Any], ...], selected_idx: int) -> str | None:
-    """Get the sample_id of the currently selected sample."""
-    active = _get_active_samples(events)
-    if 0 <= selected_idx < len(active):
-        return active[selected_idx].id
+def _find_sample_index(active: list[SampleState], sample_id: str | None) -> int:
+    """Find index of sample_id in active list, or 0 if not found."""
+    if sample_id is None:
+        return 0
+    for i, s in enumerate(active):
+        if s.id == sample_id:
+            return i
+    return 0
+
+
+def _get_sample_at_index(active: list[SampleState], idx: int) -> SampleState | None:
+    """Get sample at index, or None if out of bounds."""
+    if 0 <= idx < len(active):
+        return active[idx]
     return None


+def _handle_marquee_tick(model: Model) -> tuple[Model, Any]:
+    """Handle marquee tick for selected row animation."""
+    from ._pytui import Cmd
+
+    if not model.selected_sample_id:
+        return model, Cmd.none()
+
+    # Get the selected sample to check if it has overflow
+    active = _get_active_samples(model.events)
+    selected_idx = _find_sample_index(active, model.selected_sample_id)
+    sample = _get_sample_at_index(active, selected_idx)
+    if not sample:
+        return model, Cmd.none()
+
+    # Check how much overflow this sample has (use typical terminal width)
+    _, overflow = _format_sample_row(sample, 80, selected=True, marquee_offset=0)
+    if overflow <= 0:
+        # No overflow, reset offset
+        if model.marquee_offset != 0:
+            return replace(model, marquee_offset=0), Cmd.none()
+        return model, Cmd.none()
+
+    # Advance marquee with pause at start and end
+    # offset < 0: pause at start (show beginning)
+    # offset 0..overflow: scrolling
+    # offset > overflow: pause at end (show end)
+    pause_ticks = 5  # Ticks to pause at start/end
+    new_offset = model.marquee_offset + 1
+    if new_offset > overflow + pause_ticks:
+        new_offset = -pause_ticks  # Snap back to start pause
+
+    return replace(model, marquee_offset=new_offset), Cmd.none()
+
+
+def _get_max_viewport_overflow(model: Model, width: int) -> int:
+    """Get the maximum overflow across all visible viewport lines."""
+    if not model.viewport_lines:
+        return 0
+
+    max_overflow = 0
+    for line in model.viewport_lines:
+        visible_len = _visible_len(line)
+        overflow = max(0, visible_len - width)
+        max_overflow = max(max_overflow, overflow)
+    return max_overflow
+
+
+def _handle_viewport_marquee_tick(model: Model) -> tuple[Model, Any]:
+    """Handle marquee tick for viewport lines animation."""
+    from ._pytui import Cmd
+
+    # Only animate when viewport is focused
+    if model.focus != "viewport":
+        if model.viewport_marquee_offset != 0:
+            return replace(model, viewport_marquee_offset=0), Cmd.none()
+        return model, Cmd.none()
+
+    # Check if any lines have overflow (use typical terminal width)
+    max_overflow = _get_max_viewport_overflow(model, 80)
+    if max_overflow <= 0:
+        if model.viewport_marquee_offset != 0:
+            return replace(model, viewport_marquee_offset=0), Cmd.none()
+        return model, Cmd.none()
+
+    # Advance marquee with pause at start and end
+    pause_ticks = 8  # Longer pause for viewport (more content to read)
+    new_offset = model.viewport_marquee_offset + 1
+    if new_offset > max_overflow + pause_ticks:
+        new_offset = -pause_ticks  # Snap back to start pause
+
+    return replace(model, viewport_marquee_offset=new_offset), Cmd.none()
+
+
 def update(model: Model, msg: object) -> tuple[Model, Any]:
     from ._pytui import Cmd, KeyPress

     match msg:
-        case KeyPress(key="q" | "\x03"):
+        case KeyPress(key="\x03"):  # Ctrl+C: cancel eval and quit
+            # Signal parent process to cancel the eval
+            os.kill(os.getppid(), signal.SIGINT)
+            return model, Cmd.quit()
+
+        case KeyPress(key="q"):  # q: detach (quit TUI, eval continues)
             return model, Cmd.quit()

         # Focus switching
         case KeyPress(key="\t"):  # Tab
             new_focus = "viewport" if model.focus == "list" else "list"
-            return replace(model, focus=new_focus), Cmd.none()
+            # Reset viewport marquee when switching focus
+            return replace(model, focus=new_focus, viewport_marquee_offset=-8), Cmd.none()

         # Navigation in list focus
         case KeyPress(key="j" | "\x1b[B") if model.focus == "list":  # Down
             active = _get_active_samples(model.events)
-            new_idx = min(model.selected_idx + 1, len(active) - 1) if active else 0
-            new_sample_id = _get_selected_sample_id(model.events, new_idx)
-            # Reset viewport when selection changes
-            if new_sample_id != model.tailing_sample_id:
+            if not active:
+                return model, Cmd.none()
+
+            # Find current index, move down
+            current_idx = _find_sample_index(active, model.selected_sample_id)
+            new_idx = _clamp(current_idx + 1, 0, len(active) - 1)
+            new_sample = active[new_idx]
+
+            # Reset viewport and marquee when selection changes
+            if new_sample.id != model.selected_sample_id:
                 return replace(
                     model,
-                    selected_idx=new_idx,
-                    tailing_sample_id=new_sample_id,
+                    selected_sample_id=new_sample.id,
+                    marquee_offset=-5,  # Start with pause at beginning
                     viewport_lines=(),
                     viewport_scroll=0,
                     viewport_auto_follow=True,
                     streaming_text="",
                     last_turn=None,
                 ), Cmd.none()
-            return replace(model, selected_idx=new_idx), Cmd.none()
+            return model, Cmd.none()

         case KeyPress(key="k" | "\x1b[A") if model.focus == "list":  # Up
-            new_idx = max(model.selected_idx - 1, 0)
-            new_sample_id = _get_selected_sample_id(model.events, new_idx)
-            if new_sample_id != model.tailing_sample_id:
+            active = _get_active_samples(model.events)
+            if not active:
+                return model, Cmd.none()
+
+            # Find current index, move up
+            current_idx = _find_sample_index(active, model.selected_sample_id)
+            new_idx = _clamp(current_idx - 1, 0, len(active) - 1)
+            new_sample = active[new_idx]
+
+            if new_sample.id != model.selected_sample_id:
                 return replace(
                     model,
-                    selected_idx=new_idx,
-                    tailing_sample_id=new_sample_id,
+                    selected_sample_id=new_sample.id,
+                    marquee_offset=-5,  # Start with pause at beginning
                     viewport_lines=(),
                     viewport_scroll=0,
                     viewport_auto_follow=True,
                     streaming_text="",
                     last_turn=None,
                 ), Cmd.none()
-            return replace(model, selected_idx=new_idx), Cmd.none()
+            return model, Cmd.none()

         # Navigation in viewport focus
         case KeyPress(key="j" | "\x1b[B") if model.focus == "viewport":  # Down
-            new_scroll = model.viewport_scroll + 1
+            # Clamp to content length to prevent phantom scroll past end
+            max_scroll = max(0, len(model.viewport_lines) - 1)
+            new_scroll = min(model.viewport_scroll + 1, max_scroll)
             return replace(
                 model, viewport_scroll=new_scroll, viewport_auto_follow=False
             ), Cmd.none()
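The marquee offset advances through three phases per tick: negative values pause at the start, 0..overflow scrolls one column at a time, and overflow..overflow+pause_ticks pauses at the end (rendering caps the slice at `min(offset, overflow)`) before snapping back. A standalone simulation of one full cycle, with an illustrative overflow of 3:

    def next_offset(offset: int, overflow: int, pause_ticks: int = 5) -> int:
        # Mirrors _handle_marquee_tick's advance-and-wrap rule
        offset += 1
        if offset > overflow + pause_ticks:
            offset = -pause_ticks  # snap back to the start pause
        return offset

    offset, overflow = -5, 3
    seen = []
    for _ in range(15):
        seen.append(offset)
        offset = next_offset(offset, overflow)
    # -5..-1 pause at start, 0..3 scroll, 4..8 pause at end, then wrap
    assert seen == [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -5]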
@@ -479,12 +688,13 @@ def update(model: Model, msg: object) -> tuple[Model, Any]:
            if event.get("message") == "eval_end":
                return replace(model, events=new_events, done=True), Cmd.quit()

-            # Auto-select first sample if none selected
             new_model = replace(model, events=new_events)
-            if model.tailing_sample_id is None:
-                sample_id = _get_selected_sample_id(new_events, model.selected_idx)
-                if sample_id:
-                    new_model = replace(new_model, tailing_sample_id=sample_id)
+
+            # Auto-select first sample if none selected
+            if model.selected_sample_id is None:
+                active = _get_active_samples(new_events)
+                if active:
+                    new_model = replace(new_model, selected_sample_id=active[0].id)

             return new_model, Cmd.none()

@@ -502,39 +712,103 @@ def update(model: Model, msg: object) -> tuple[Model, Any]:
                new_streaming = model.streaming_text + (formatted or "")
                return replace(model, streaming_text=new_streaming), Cmd.none()

-            # When we get a non-delta event, flush accumulated streaming text
-            new_lines = list(model.viewport_lines)
-            if model.streaming_text:
-                # Wrap streaming text to reasonable width and add
-                for line in model.streaming_text.split("\n"):
-                    if line:
-                        new_lines.append(line)
-                new_streaming = ""
-            else:
-                new_streaming = model.streaming_text
-
-            # Add the formatted event line
-            if formatted and msg_type != "text_delta":
-                new_lines.append(formatted)
+            # Non-delta event: flush streaming text and add formatted line
+            flushed_lines = [l for l in model.streaming_text.split("\n") if l]
+            event_line = [formatted] if formatted else []
+            new_lines = model.viewport_lines + tuple(flushed_lines) + tuple(event_line)

             # Auto-scroll if enabled
-            new_scroll = model.viewport_scroll
-            if model.viewport_auto_follow:
-                new_scroll = max(0, len(new_lines) - 1)
+            new_scroll = (
+                max(0, len(new_lines) - 1) if model.viewport_auto_follow else model.viewport_scroll
+            )

             return replace(
                 model,
-                viewport_lines=tuple(new_lines),
+                viewport_lines=new_lines,
                 viewport_scroll=new_scroll,
-                streaming_text=new_streaming,
+                streaming_text="",
                 last_turn=new_turn if new_turn is not None else model.last_turn,
             ), Cmd.none()

+        # Marquee tick for selected row animation
+        case MarqueeTick():
+            return _handle_marquee_tick(model)
+
+        # Marquee tick for viewport lines animation
+        case ViewportMarqueeTick():
+            return _handle_viewport_marquee_tick(model)
+
     return model, Cmd.none()


+def _render_viewport(model: Model, width: int, viewport_height: int, list_height: int) -> list[str]:
+    """Render the viewport section showing selected sample's output."""
+    lines: list[str] = []
+
+    if model.selected_sample_id:
+        # Include any streaming text as a "virtual" line at the end
+        display_lines = list(model.viewport_lines)
+        if model.streaming_text:
+            # Show streaming text (last 200 chars to keep it manageable)
+            stream_preview = model.streaming_text[-200:]
+            if len(model.streaming_text) > 200:
+                stream_preview = "..." + stream_preview
+            display_lines.append(f"{CYAN}{stream_preview}{RESET}")
+
+        if display_lines:
+            # Clamp scroll (bubbles pattern)
+            max_scroll = max(0, len(display_lines) - viewport_height)
+            scroll = _clamp(model.viewport_scroll, 0, max_scroll)
+
+            # Render visible lines with marquee for overflow
+            visible = display_lines[scroll : scroll + viewport_height]
+            marquee_offset = (
+                max(0, model.viewport_marquee_offset) if model.focus == "viewport" else 0
+            )
+
+            for line in visible:
+                visible_len = _visible_len(line)
+                overflow = visible_len - width
+
+                if overflow > 0 and marquee_offset > 0:
+                    # Apply marquee: slice from offset position (ANSI-aware)
+                    offset = min(marquee_offset, overflow)
+                    sliced = _ansi_slice(line, offset)
+                    # Truncate to width after slicing
+                    if _visible_len(sliced) > width:
+                        # Crude truncation - find approx position
+                        lines.append(sliced[: width + 20] + "…")
+                    else:
+                        lines.append(sliced)
+                elif overflow > 0:
+                    # No marquee active, just truncate with indicator
+                    lines.append(line[: width + 20] + "…")
+                else:
+                    lines.append(line)
+        else:
+            lines.append(f"{DIM} (waiting for events...){RESET}")
+
+        # Pad viewport
+        target_len = viewport_height
+    else:
+        # No sample selected
+        lines.append(f"{DIM} (select a sample to view its output){RESET}")
+        target_len = viewport_height
+
+    while len(lines) < target_len:
+        lines.append("")
+
+    return lines
+
+
 def view(model: Model, width: int, height: int) -> list[str]:
-    """Render the split view: sample list on top, viewport on bottom."""
+    """Render the split view: sample list on top, viewport on bottom.
+
+    Uses bubbles-style bounded rendering:
+    - Only render visible rows in each section
+    - Keep selected item in view
+    - Clamp scroll positions
+    """
     state = derive_state(list(model.events))
     lines: list[str] = []

@@ -546,30 +820,57 @@ def view(model: Model, width: int, height: int) -> list[str]:
     list_height = max(4, min(height // 3, 10))
     viewport_height = height - list_height - 3  # -3 for header, separator, footer

-    # Active samples with selection highlight
+    # Get active samples (stable sort by name)
     active = _get_active_samples(model.events)

-    # Render sample list
-    for i, sample in enumerate(active[: list_height - 1]):
-        selected = i == model.selected_idx
-        row = _format_sample_row(sample, width, selected=selected)
+    # Find selected index
+    selected_idx = _find_sample_index(active, model.selected_sample_id)
+
+    # Calculate visible window for sample list (bubbles pattern)
+    # Reserve last line for scroll indicator if there will be hidden items
+    needs_scroll_indicator = len(active) > list_height - 1
+    list_visible_height = list_height - 2 if needs_scroll_indicator else list_height - 1
+
+    # Compute list_scroll to keep selected item visible (derived, not stored in model)
+    # Start with scroll that centers selection, then clamp
+    list_scroll = max(0, selected_idx - list_visible_height // 2)
+    list_scroll = _clamp(list_scroll, 0, max(0, len(active) - list_visible_height))
+
+    # Render visible sample rows
+    visible_end = min(list_scroll + list_visible_height, len(active))
+    for i in range(list_scroll, visible_end):
+        sample = active[i]
+        selected = i == selected_idx
+        row, _overflow = _format_sample_row(
+            sample,
+            width,
+            selected=selected,
+            marquee_offset=model.marquee_offset if selected else 0,
+        )
         lines.append(row)

+    # Show scroll indicator if there are hidden items (on its own line)
+    hidden_above = list_scroll
+    hidden_below = max(0, len(active) - visible_end)
+    if hidden_above > 0 or hidden_below > 0:
+        indicator_parts = []
+        if hidden_above > 0:
+            indicator_parts.append(f"↑{hidden_above}")
+        if hidden_below > 0:
+            indicator_parts.append(f"↓{hidden_below}")
+        indicator = " ".join(indicator_parts)
+        lines.append(f"{DIM} ... {indicator} more{RESET}")
+
     # Pad list area
     while len(lines) < list_height:
         lines.append("")

-    # Show hidden count
-    if len(active) > list_height - 1:
-        hidden = len(active) - (list_height - 1)
-        lines[list_height - 1] = f"{DIM} ... and {hidden} more{RESET}"
-
     # Separator with focus indicator and selected sample name
     selected_name = ""
-    if model.tailing_sample_id:
-        active = _get_active_samples(model.events)
-        if 0 <= model.selected_idx < len(active):
-            selected_name = f" [{active[model.selected_idx].name[:20]}]"
+    if model.selected_sample_id:
+        sample = _get_sample_at_index(active, selected_idx)
+        if sample:
+            selected_name = f" [{sample.name[:20]}]"

     if model.focus == "list":
         label = "samples"
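Because list_scroll is derived each frame from the selection rather than stored, window size changes never leave a stale scroll in the model. The centering-and-clamping arithmetic, as a worked sketch:

    def derive_list_scroll(selected_idx: int, n_items: int, visible: int) -> int:
        # Center the selection, then clamp to the valid scroll range
        scroll = max(0, selected_idx - visible // 2)
        return max(0, min(scroll, max(0, n_items - visible)))

    assert derive_list_scroll(0, 20, 6) == 0    # near the top: no scroll
    assert derive_list_scroll(10, 20, 6) == 7   # centered: 10 - 3
    assert derive_list_scroll(19, 20, 6) == 14  # bottom: clamped to 20 - 6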
@@ -581,48 +882,14 @@ def view(model: Model, width: int, height: int) -> list[str]:
     lines.append(f"{DIM}{prefix}{fill}{RESET}")

     # Viewport
-    if model.tailing_sample_id:
-        # Include any streaming text as a "virtual" line at the end
-        display_lines = list(model.viewport_lines)
-        if model.streaming_text:
-            # Show streaming text (last 200 chars to keep it manageable)
-            stream_preview = model.streaming_text[-200:]
-            if len(model.streaming_text) > 200:
-                stream_preview = "..." + stream_preview
-            display_lines.append(f"{CYAN}{stream_preview}{RESET}")
-
-        if display_lines:
-            # Clamp scroll
-            max_scroll = max(0, len(display_lines) - viewport_height)
-            scroll = min(model.viewport_scroll, max_scroll)
-
-            # Render visible lines
-            visible = display_lines[scroll : scroll + viewport_height]
-            for line in visible:
-                # Truncate long lines (but preserve ANSI - crude truncation for now)
-                # TODO: proper ANSI-aware truncation
-                if len(line) > width + 20:  # Allow some slack for ANSI codes
-                    lines.append(line[: width + 10] + "...")
-                else:
-                    lines.append(line)
-        else:
-            lines.append(f"{DIM} (waiting for events...){RESET}")
-
-        # Pad viewport
-        while len(lines) < list_height + 1 + viewport_height:
-            lines.append("")
-    else:
-        # No sample selected
-        lines.append(f"{DIM} (select a sample to view its output){RESET}")
-        while len(lines) < list_height + 1 + viewport_height:
-            lines.append("")
+    lines.extend(_render_viewport(model, width, viewport_height, list_height))

     # Footer
     if model.focus == "list":
-        footer = f"{DIM}j/k: select Tab: focus viewport q: quit{RESET}"
+        footer = f"{DIM}j/k: select Tab: viewport q: detach ^C: cancel{RESET}"
     else:
         follow_status = "follow" if model.viewport_auto_follow else "manual"
-        footer = f"{DIM}j/k: scroll G: bottom Tab: focus list [{follow_status}] q: quit{RESET}"
+        footer = f"{DIM}j/k: scroll G: bottom Tab: list [{follow_status}] q: detach ^C: cancel{RESET}"

     # Pad to fill screen
     while len(lines) < height - 1:
@@ -639,6 +906,7 @@ def view(model: Model, width: int, height: int) -> list[str]:
 def progress_display(
     output_dir: Path | str,
     disable: bool = False,
+    debug_log: Path | str | None = None,
 ) -> Generator[Path, None, None]:
     """Context manager that spawns a pytui progress display in a subprocess.

@@ -648,6 +916,7 @@ def progress_display(
     Args:
         output_dir: Directory containing events.jsonl
        disable: If True, skip progress display (verbose mode)
+        debug_log: If set, write pytui debug events to this file
     """
     if disable:
         yield Path(output_dir)
@@ -657,18 +926,24 @@ def progress_display(
     output_dir.mkdir(parents=True, exist_ok=True)
     events_file = output_dir / "events.jsonl"

+    # Build command
+    cmd = [sys.executable, "-m", "wafer_core.rollouts.progress_app", str(events_file)]
+    if debug_log is None:
+        # Default: write debug log to output_dir
+        debug_log = output_dir / "pytui_debug.jsonl"
+    if debug_log:
+        cmd.extend(["--debug-log", str(debug_log)])
+
     proc = subprocess.Popen(
-        [sys.executable, "-m", "wafer_core.rollouts.progress_app", str(events_file)],
+        cmd,
         # Don't touch stdout/stderr — child inherits the terminal.
         # pytui opens /dev/tty directly for input, writes to stdout for rendering.
         #
-        # start_new_session: put subprocess in its own session so it doesn't
-        # receive SIGINT from Ctrl+C. The parent gets SIGINT and terminates
-        # the subprocess in the finally block. Without this, pytui's raw mode
-        # swallows Ctrl+C (terminal driver doesn't generate SIGINT in raw mode)
-        # and the parent never sees it.
+        # We need the subprocess to stay in the same session so it can access
+        # /dev/tty for keyboard input. SIGINT handling: pytui is in raw mode
+        # so Ctrl+C becomes a regular keypress (^C / \x03) rather than generating
+        # SIGINT. The progress app handles KeyPress("\x03") -> Cmd.quit().
         stdin=subprocess.DEVNULL,
-        start_new_session=True,
     )

     try:
@@ -695,13 +970,16 @@ def progress_display(
 # ── CLI entry point (subprocess runs this) ────────────────────────────────────

 if __name__ == "__main__":
-    if len(sys.argv) != 2:
-        print("Usage: python -m wafer_core.rollouts.progress_app <events.jsonl>", file=sys.stderr)
-        sys.exit(1)
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("events_file", help="Path to events.jsonl")
+    parser.add_argument("--debug-log", help="Path to debug log file")
+    args = parser.parse_args()

     from ._pytui import App, Cmd, Sub

-    events_file = sys.argv[1]
+    events_file = args.events_file
     output_dir = Path(events_file).parent
     samples_dir = output_dir / "samples"
@@ -712,11 +990,18 @@ if __name__ == "__main__":
        subs = [Sub.file_tail(events_file, lambda line: NewEvent(line=line))]

        # Tail the selected sample's file if we have one
-        if model.tailing_sample_id:
-            sample_file = samples_dir / f"{model.tailing_sample_id}.jsonl"
+        if model.selected_sample_id:
+            sample_file = samples_dir / f"{model.selected_sample_id}.jsonl"
             if sample_file.exists():
                 subs.append(Sub.file_tail(str(sample_file), lambda line: SampleEvent(line=line)))

+        # Marquee animation tick (scrolls selected row text)
+        subs.append(Sub.every(0.15, lambda: MarqueeTick()))
+
+        # Viewport marquee animation (scrolls long lines when viewport focused)
+        if model.focus == "viewport" and model.viewport_lines:
+            subs.append(Sub.every(0.12, lambda: ViewportMarqueeTick()))
+
        return Sub.batch(*subs)

     App(
@@ -725,4 +1010,5 @@ if __name__ == "__main__":
         view=view,
         subscriptions=_subscriptions,
         alternate_screen=False,
+        debug_log=args.debug_log,
     ).run()
@@ -155,18 +155,35 @@ async def score_with_llm_judge(
     client = anthropic.Anthropic(api_key=api_key)
     response = client.messages.create(
         model=model,
-        max_tokens=500,
+        max_tokens=8192,
         messages=[{"role": "user", "content": prompt}],
     )

     raw_response = response.content[0].text

-    # Parse JSON from response
+    # Parse JSON from response - extract JSON object from anywhere in text
     text = raw_response.strip()
     if text.startswith("```"):
         lines = text.split("\n")
         text = "\n".join(lines[1:-1] if lines[-1] == "```" else lines[1:])

+    # Try to find JSON object with "score" key anywhere in response
+    import re
+    # Find the first occurrence of the {"score" pattern and extract to the end
+    json_match = re.search(r'(\{"score":\s*\d+.*)', text, re.DOTALL)
+    if json_match:
+        # Find the matching closing brace
+        candidate = json_match.group(1)
+        # Try progressively shorter substrings until valid JSON
+        for i in range(len(candidate), 0, -1):
+            if candidate[i - 1] == '}':
+                try:
+                    json.loads(candidate[:i])
+                    text = candidate[:i]
+                    break
+                except json.JSONDecodeError:
+                    continue
+
     try:
         result = json.loads(text)
         return {
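The recovery loop anchors at the first `{"score"` match and shrinks the candidate from the right, one closing brace at a time, until `json.loads` succeeds, tolerating judge chatter on both sides of the object. A sketch against a made-up chatty response:

    import json
    import re

    text = 'Reasoning: solid kernel.\n{"score": 85, "notes": "compiles, correct"} Hope this helps!'

    match = re.search(r'(\{"score":\s*\d+.*)', text, re.DOTALL)
    assert match is not None
    candidate = match.group(1)
    for i in range(len(candidate), 0, -1):
        if candidate[i - 1] == "}":
            try:
                result = json.loads(candidate[:i])
                break
            except json.JSONDecodeError:
                continue
    assert result == {"score": 85, "notes": "compiles, correct"}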
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: wafer-core
-Version: 0.1.46
+Version: 0.1.47
 Summary: Core utilities and environments for Wafer GPU kernel optimization
 Requires-Python: >=3.10
 Requires-Dist: aiohttp>=3.9.0
@@ -350,7 +350,7 @@ wafer_core/rollouts/__init__.py,sha256=7RjEk9MCHY_aFe9Yl02uTNG-gZJdlU8LTkw70Ip67
 wafer_core/rollouts/__main__.py,sha256=aSZKLG1SRhTMRGeGi4nA2tUHonJwxA6y0SGYfb-VfnM,127
 wafer_core/rollouts/_rate_limit.py,sha256=pNToYI5OJL41wPYNjAAYkYca3rmshlNgf1KnVBLiWY0,5629
 wafer_core/rollouts/_retry.py,sha256=Ay3EelWoZRs9DvsXLQkg31kGwVb6DWBh0V9IwdQZkpE,9990
-wafer_core/rollouts/agents.py,sha256=Uv1kjYogUfdPl18YfkVxVqFTbmWfuJQrxem_iHTUgdw,44742
+wafer_core/rollouts/agents.py,sha256=LZcW2ydzEspHUu6I3itvAMakmo9fNoD05NPvureuQ8o,45421
 wafer_core/rollouts/cli.py,sha256=2NqgegKdlmxD0eJzGOMB5o_1Hb5t7O5JpP_32uvF2BE,80117
 wafer_core/rollouts/cli_agents.py,sha256=e4qqqYBzWLsbw8FsNnddGApWp_on9Cvzrfd1amiAyvI,20641
 wafer_core/rollouts/deploy.py,sha256=3t88fM_BMyAPkxIl8pS4r5ogHJvrlqWQDuIaltDZBRc,40924
@@ -366,11 +366,11 @@ wafer_core/rollouts/models.py,sha256=BrMnUpTA9_HnOghpedzdLUm9Di3FyJouzOkK4PPBg_k
 wafer_core/rollouts/paths.py,sha256=9XtrA9ylhb5LttMFe2DE7X0IHeUMjuGUerII9OscYec,3436
 wafer_core/rollouts/pipeline.py,sha256=vlJTYE3ZX2XScpF9pmtv91K8Q0g8uLmcbI5jn6b5Hzg,15319
 wafer_core/rollouts/progress.py,sha256=szA9cvWT2xUxGVhF9BaAqJMmKDqMAUlxImxcOpcnqbY,29228
-wafer_core/rollouts/progress_app.py,sha256=JSx4FKn_H8Thi2IK19mr2RvpH15BZuUi3nC0h6qMcD8,26218
+wafer_core/rollouts/progress_app.py,sha256=dWuMj3KQphiVt-D-5jH7mPYtVybpTtliXRJXOaLWOAU,36638
 wafer_core/rollouts/prompt.py,sha256=EDmGb0rhWwke7tokIcO8dukc3q5c8x0n5Omi5CpAQmA,11022
 wafer_core/rollouts/providers.py,sha256=dcGJh1p30hstVbCDDtJ902lyafkg81DKjcOzb0uuKS0,1400
 wafer_core/rollouts/remote.py,sha256=cAYpRCONlsTeRxzLiegAUfjZWGtqBNwZTHehMhk5ldA,8816
-wafer_core/rollouts/scoring.py,sha256=qeIT8Z7pK51XRDmN2sGdg_hIPRabWqoQIYKsuytlvRo,8838
+wafer_core/rollouts/scoring.py,sha256=WNEDZltad_4T4Pw4DHUGrXqGZU1DqFm9Ph4Ibho40H0,9623
 wafer_core/rollouts/search.py,sha256=5BEDuw9FVbQhei3nvUXEVwBU5ouwgJE6ONhEqvU5Ldc,14696
 wafer_core/rollouts/skills.py,sha256=ATYoG02Cc6_VrtE415TnseBFJrKOMq27z-5YgBgPpZQ,5081
 wafer_core/rollouts/slice.py,sha256=darOZO53BuSPfvv_KjOSzulGVSWbL4OuoE3k6xXpBFg,20195
@@ -383,13 +383,14 @@ wafer_core/rollouts/_logging/json_formatter.py,sha256=jJIa2IZCsu2C_Y1HXQi7hbI33x
 wafer_core/rollouts/_logging/logging_config.py,sha256=JbHCBcKHaospwF3BlKfp1HzhZTUElhtOGghKHF21bvc,10581
 wafer_core/rollouts/_logging/sample_handler.py,sha256=XhS5bVs6VKjABP7PeAp8CuEppqXUOm5rPCi-uEbt6QU,2122
 wafer_core/rollouts/_pytui/__init__.py,sha256=Q4TwOoefOqDkudvaWJ7TQF6Ak32Nqs09h7jl8luPuo8,2368
-wafer_core/rollouts/_pytui/app.py,sha256=NGNZ97nPq8ZZitMrY2tUgSEz_K3m5FqlNQpdy3geoEk,26163
+wafer_core/rollouts/_pytui/app.py,sha256=I_0sFuMCu5LQIbezp9B0zNIDNaDsKjBnYY8b57gPpvU,26410
 wafer_core/rollouts/_pytui/console.py,sha256=0TBZ9MPfLls6wPQvI9KA_0k1348e0VckAvPwn2y6D9Q,10804
 wafer_core/rollouts/_pytui/renderer.py,sha256=kFJoEsURAowj8KmKujcDJF00VDwiFhW4AP0fFD7hsrA,6429
 wafer_core/rollouts/_pytui/spinner.py,sha256=ilIOIubZQz5C77m-Y6te55z8c1Kz8njy66CVsxMu9xw,2256
 wafer_core/rollouts/_pytui/terminal.py,sha256=mCSZYAAH10Dl53fuCxQ2ZegkOmL1gFI5wEkTxQyOoKI,17736
 wafer_core/rollouts/_pytui/text.py,sha256=KeGJAM6MJQQLSk9y2R-HnYgLqCcc8Qk4QfukctBuYA8,14107
 wafer_core/rollouts/_pytui/theme.py,sha256=FdYrvQz66b7uMNbB2WCp8FPUvGSJ6vUHDaYnR6SSeSY,8067
+wafer_core/rollouts/_pytui/viewport.py,sha256=8Y3adY57dXSn9H3bCtQ7FnhA5iq2miZKkiAv09aSgSI,6425
 wafer_core/rollouts/agent_presets/README.md,sha256=gGyE0P_2Cp5s5ekHVY3cYcz19g91Wb-wboFakcvBriQ,7584
 wafer_core/rollouts/agent_presets/__init__.py,sha256=BLZOzAyC-cThQQQpAROeo4eEMOlMCrVEgnKYqzSsyS4,454
 wafer_core/rollouts/agent_presets/base_preset.py,sha256=vDzDiVe_DHN2fiQrtGTW309EJgt3t1jaPAgkQCgDycw,2550
@@ -727,6 +728,6 @@ wafer_core/utils/modal_execution/modal_app.py,sha256=VfS2cX8gHtnlPXemmMcEwDPeQdh
 wafer_core/utils/modal_execution/modal_config.py,sha256=7cGX9TGqilQ3qxI3OFGXV5orjtyRU-PEDOJ4vP2oxno,4421
 wafer_core/utils/modal_execution/modal_execution.py,sha256=gChjnV6jqA3A7IRP3DfvV5cSfm_MN0X4f7JZufXgdZE,24594
 wafer_core/utils/modal_execution/test_modal.py,sha256=_jqou_hrLs1Daf1590Pnb0a_lXMMa2rczAPpW9HpoNQ,8153
-wafer_core-0.1.46.dist-info/METADATA,sha256=rgFOq_IA8Z0JLFzshlHqGDDZPo50owAGlqWuFhiu_HY,1477
-wafer_core-0.1.46.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-wafer_core-0.1.46.dist-info/RECORD,,
+wafer_core-0.1.47.dist-info/METADATA,sha256=OLmPs__QV_sZBZ1_-VNFx9SoHtnaRj-CnrgJvf1b2BY,1477
+wafer_core-0.1.47.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+wafer_core-0.1.47.dist-info/RECORD,,