glaip-sdk 0.0.20__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. glaip_sdk/_version.py +1 -3
  2. glaip_sdk/branding.py +2 -6
  3. glaip_sdk/cli/agent_config.py +2 -6
  4. glaip_sdk/cli/auth.py +11 -30
  5. glaip_sdk/cli/commands/agents.py +64 -107
  6. glaip_sdk/cli/commands/configure.py +12 -36
  7. glaip_sdk/cli/commands/mcps.py +25 -63
  8. glaip_sdk/cli/commands/models.py +2 -4
  9. glaip_sdk/cli/commands/tools.py +22 -35
  10. glaip_sdk/cli/commands/update.py +3 -8
  11. glaip_sdk/cli/config.py +1 -3
  12. glaip_sdk/cli/display.py +10 -13
  13. glaip_sdk/cli/io.py +8 -14
  14. glaip_sdk/cli/main.py +10 -30
  15. glaip_sdk/cli/mcp_validators.py +5 -15
  16. glaip_sdk/cli/pager.py +3 -9
  17. glaip_sdk/cli/parsers/json_input.py +11 -22
  18. glaip_sdk/cli/resolution.py +3 -9
  19. glaip_sdk/cli/rich_helpers.py +1 -3
  20. glaip_sdk/cli/slash/agent_session.py +5 -10
  21. glaip_sdk/cli/slash/prompt.py +3 -10
  22. glaip_sdk/cli/slash/session.py +46 -98
  23. glaip_sdk/cli/transcript/cache.py +6 -19
  24. glaip_sdk/cli/transcript/capture.py +45 -20
  25. glaip_sdk/cli/transcript/launcher.py +1 -3
  26. glaip_sdk/cli/transcript/viewer.py +224 -47
  27. glaip_sdk/cli/update_notifier.py +165 -21
  28. glaip_sdk/cli/utils.py +33 -91
  29. glaip_sdk/cli/validators.py +11 -12
  30. glaip_sdk/client/_agent_payloads.py +10 -30
  31. glaip_sdk/client/agents.py +33 -63
  32. glaip_sdk/client/base.py +77 -35
  33. glaip_sdk/client/mcps.py +1 -3
  34. glaip_sdk/client/run_rendering.py +121 -26
  35. glaip_sdk/client/tools.py +8 -24
  36. glaip_sdk/client/validators.py +20 -48
  37. glaip_sdk/exceptions.py +1 -3
  38. glaip_sdk/icons.py +9 -3
  39. glaip_sdk/models.py +14 -33
  40. glaip_sdk/payload_schemas/agent.py +1 -3
  41. glaip_sdk/utils/agent_config.py +4 -14
  42. glaip_sdk/utils/client_utils.py +7 -21
  43. glaip_sdk/utils/display.py +2 -6
  44. glaip_sdk/utils/general.py +1 -3
  45. glaip_sdk/utils/import_export.py +3 -9
  46. glaip_sdk/utils/rendering/formatting.py +52 -12
  47. glaip_sdk/utils/rendering/models.py +17 -8
  48. glaip_sdk/utils/rendering/renderer/__init__.py +1 -5
  49. glaip_sdk/utils/rendering/renderer/base.py +1181 -328
  50. glaip_sdk/utils/rendering/renderer/config.py +4 -10
  51. glaip_sdk/utils/rendering/renderer/debug.py +4 -14
  52. glaip_sdk/utils/rendering/renderer/panels.py +1 -3
  53. glaip_sdk/utils/rendering/renderer/progress.py +3 -11
  54. glaip_sdk/utils/rendering/renderer/stream.py +9 -42
  55. glaip_sdk/utils/rendering/renderer/summary_window.py +79 -0
  56. glaip_sdk/utils/rendering/renderer/toggle.py +182 -0
  57. glaip_sdk/utils/rendering/step_tree_state.py +100 -0
  58. glaip_sdk/utils/rendering/steps.py +899 -25
  59. glaip_sdk/utils/resource_refs.py +4 -13
  60. glaip_sdk/utils/serialization.py +14 -46
  61. glaip_sdk/utils/validation.py +4 -4
  62. {glaip_sdk-0.0.20.dist-info → glaip_sdk-0.1.3.dist-info}/METADATA +12 -1
  63. glaip_sdk-0.1.3.dist-info/RECORD +83 -0
  64. glaip_sdk-0.0.20.dist-info/RECORD +0 -80
  65. {glaip_sdk-0.0.20.dist-info → glaip_sdk-0.1.3.dist-info}/WHEEL +0 -0
  66. {glaip_sdk-0.0.20.dist-info → glaip_sdk-0.1.3.dist-info}/entry_points.txt +0 -0
@@ -13,21 +13,15 @@ from dataclasses import dataclass
 class RendererConfig:
     """Configuration for the RichStreamRenderer."""
 
-    # Style and layout
-    theme: str = "dark"  # dark|light
-    style: str = "pretty"  # pretty|debug|minimal
-
     # Performance
-    think_threshold: float = 0.7
     refresh_debounce: float = 0.25
     render_thinking: bool = True
     live: bool = True
     persist_live: bool = True
-
-    # Debug visibility toggles
-    show_delegate_tool_panels: bool = False
+    summary_display_window: int = 20
 
     # Scrollback/append options
+    summary_max_steps: int = 0
     append_finished_snapshots: bool = False
-    snapshot_max_chars: int = 12000
-    snapshot_max_lines: int = 200
+    snapshot_max_chars: int = 0
+    snapshot_max_lines: int = 0
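
The removed style/theme fields and think_threshold give way to windowing knobs on RendererConfig (glaip_sdk/utils/rendering/renderer/config.py in the file list). A minimal sketch of opting into a wider rolling window, assuming RendererConfig is a dataclass that accepts these fields as keyword arguments and is passed to the renderer unchanged; the zero-means-unlimited reading of the snapshot limits is an assumption, not something stated in this diff:

    from glaip_sdk.utils.rendering.renderer.config import RendererConfig

    # Keep live rendering, but show the latest 40 steps instead of the default 20.
    config = RendererConfig(
        summary_display_window=40,
        summary_max_steps=0,      # assumption: 0 means "do not cap retained summary steps"
        snapshot_max_chars=0,     # assumption: 0 disables character truncation of snapshots
        snapshot_max_lines=0,     # assumption: 0 disables line truncation of snapshots
    )
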
@@ -34,16 +34,10 @@ def _coerce_datetime(value: Any) -> datetime | None:
     return None
 
 
-def _parse_event_timestamp(
-    event: dict[str, Any], received_ts: datetime | None = None
-) -> datetime | None:
+def _parse_event_timestamp(event: dict[str, Any], received_ts: datetime | None = None) -> datetime | None:
     """Resolve the most accurate timestamp available for the event."""
     if received_ts is not None:
-        return (
-            received_ts
-            if received_ts.tzinfo
-            else received_ts.replace(tzinfo=timezone.utc)
-        )
+        return received_ts if received_ts.tzinfo else received_ts.replace(tzinfo=timezone.utc)
 
     ts_value = event.get("timestamp") or (event.get("metadata") or {}).get("timestamp")
     return _coerce_datetime(ts_value)
@@ -86,9 +80,7 @@ def _get_event_metadata(event: dict[str, Any]) -> tuple[str, str | None]:
     return sse_kind, status_str
 
 
-def _build_debug_title(
-    sse_kind: str, status_str: str | None, ts_ms: str, rel: float
-) -> str:
+def _build_debug_title(sse_kind: str, status_str: str | None, ts_ms: str, rel: float) -> str:
     """Build the debug event title."""
     if status_str:
         return f"SSE: {sse_kind} — {status_str} @ {ts_ms} (+{rel:.2f}s)"
@@ -104,9 +96,7 @@ def _dejson_value(obj: Any) -> Any:
         return [_dejson_value(x) for x in obj]
     if isinstance(obj, str):
         s = obj.strip()
-        if (s.startswith("{") and s.endswith("}")) or (
-            s.startswith("[") and s.endswith("]")
-        ):
+        if (s.startswith("{") and s.endswith("}")) or (s.startswith("[") and s.endswith("]")):
             try:
                 return _dejson_value(json.loads(s))
             except Exception:
@@ -128,9 +128,7 @@ def create_context_panel(
     )
 
 
-def create_final_panel(
-    content: str, title: str = "Final Result", theme: str = "dark"
-) -> AIPPanel:
+def create_final_panel(content: str, title: str = "Final Result", theme: str = "dark") -> AIPPanel:
     """Create a final result panel.
 
     Args:
@@ -48,15 +48,11 @@ def format_working_indicator(
     """Format a working indicator with elapsed time."""
     base_message = "Working..."
 
-    if started_at is None and (
-        server_elapsed_time is None or streaming_started_at is None
-    ):
+    if started_at is None and (server_elapsed_time is None or streaming_started_at is None):
         return base_message
 
     spinner_chip = f"{get_spinner_char()} {base_message}"
-    elapsed = _resolve_elapsed_time(
-        started_at, server_elapsed_time, streaming_started_at
-    )
+    elapsed = _resolve_elapsed_time(started_at, server_elapsed_time, streaming_started_at)
     if elapsed is None:
         return spinner_chip
 
@@ -93,11 +89,7 @@ def is_delegation_tool(tool_name: str) -> bool:
     Returns:
         True if this is a delegation tool
     """
-    return (
-        tool_name.startswith("delegate_to_")
-        or tool_name.startswith("delegate_")
-        or "sub_agent" in tool_name.lower()
-    )
+    return tool_name.startswith("delegate_to_") or tool_name.startswith("delegate_") or "sub_agent" in tool_name.lower()
 
 
 def _delegation_tool_title(tool_name: str) -> str | None:
@@ -38,27 +38,25 @@ class StreamProcessor:
         Returns:
             Dictionary with extracted metadata
         """
-        metadata = event.get("metadata", {})
+        metadata = event.get("metadata") or {}
         # Update server elapsed timing if backend provides it
         try:
             t = metadata.get("time")
-            if isinstance(t, int | float):
+            if isinstance(t, (int, float)):
                 self.server_elapsed_time = float(t)
         except Exception:
             pass
 
         return {
             "kind": metadata.get("kind") if metadata else event.get("kind"),
-            "task_id": event.get("task_id"),
-            "context_id": event.get("context_id"),
+            "task_id": metadata.get("task_id") or event.get("task_id"),
+            "context_id": metadata.get("context_id") or event.get("context_id"),
             "content": event.get("content", ""),
             "status": metadata.get("status") if metadata else event.get("status"),
             "metadata": metadata,
         }
 
-    def _extract_metadata_tool_calls(
-        self, metadata: dict[str, Any]
-    ) -> tuple[str | None, dict, Any, list]:
+    def _extract_metadata_tool_calls(self, metadata: dict[str, Any]) -> tuple[str | None, dict, Any, list]:
         """Extract tool calls from metadata."""
         tool_calls = metadata.get("tool_calls", [])
         if not tool_calls:
@@ -84,9 +82,7 @@ class StreamProcessor:
 
         return tool_name, tool_args, tool_out, tool_calls_info
 
-    def _extract_tool_info_calls(
-        self, tool_info: dict[str, Any]
-    ) -> tuple[str | None, dict, Any, list]:
+    def _extract_tool_info_calls(self, tool_info: dict[str, Any]) -> tuple[str | None, dict, Any, list]:
        """Extract tool calls from tool_info structure."""
        tool_calls_info = []
        tool_name = None
@@ -98,9 +94,7 @@ class StreamProcessor:
         if isinstance(ti_calls, list) and ti_calls:
             for call in ti_calls:
                 if isinstance(call, dict) and call.get("name"):
-                    tool_calls_info.append(
-                        (call.get("name"), call.get("args", {}), call.get("output"))
-                    )
+                    tool_calls_info.append((call.get("name"), call.get("args", {}), call.get("output")))
             if tool_calls_info:
                 tool_name, tool_args, tool_out = tool_calls_info[0]
             return tool_name, tool_args, tool_out, tool_calls_info
@@ -114,9 +108,7 @@ class StreamProcessor:
 
         return tool_name, tool_args, tool_out, tool_calls_info
 
-    def _extract_tool_calls_from_metadata(
-        self, metadata: dict[str, Any]
-    ) -> tuple[str | None, dict, Any, list]:
+    def _extract_tool_calls_from_metadata(self, metadata: dict[str, Any]) -> tuple[str | None, dict, Any, list]:
         """Extract tool calls from metadata structure."""
         tool_info = metadata.get("tool_info", {}) or {}
 
@@ -125,9 +117,7 @@ class StreamProcessor:
             return self._extract_tool_info_calls(tool_info)
 
         return None, {}, None, []
-    def parse_tool_calls(
-        self, event: dict[str, Any]
-    ) -> tuple[str | None, Any, Any, list[tuple[str, Any, Any]]]:
+    def parse_tool_calls(self, event: dict[str, Any]) -> tuple[str | None, Any, Any, list[tuple[str, Any, Any]]]:
         """Parse tool call information from an event.
 
         Args:
@@ -166,29 +156,6 @@ class StreamProcessor:
         if context_id:
             self.last_event_time_by_ctx[context_id] = monotonic()
 
-    def should_insert_thinking_gap(
-        self, task_id: str | None, context_id: str | None, think_threshold: float
-    ) -> bool:
-        """Determine if a thinking gap should be inserted.
-
-        Args:
-            task_id: Task identifier
-            context_id: Context identifier
-            think_threshold: Threshold for thinking gap
-
-        Returns:
-            True if thinking gap should be inserted
-        """
-        if not task_id or not context_id:
-            return False
-
-        last_time = self.last_event_time_by_ctx.get(context_id)
-        if last_time is None:
-            return True
-
-        elapsed = monotonic() - last_time
-        return elapsed >= think_threshold
-
     def track_tools_and_agents(
         self,
         tool_name: str | None,
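
Functionally, the first StreamProcessor hunk makes the metadata-extraction method tolerate a null metadata value and prefer metadata-level task_id/context_id over the top-level event keys, falling back when metadata lacks them. A small illustration of that precedence with plain dicts (no StreamProcessor instance, since its constructor is outside this diff):

    event = {
        "task_id": "top-level-task",
        "context_id": None,
        "metadata": {"task_id": "metadata-task", "time": 1.5},
    }

    metadata = event.get("metadata") or {}                                # {} when metadata is None
    task_id = metadata.get("task_id") or event.get("task_id")             # -> "metadata-task"
    context_id = metadata.get("context_id") or event.get("context_id")    # -> None (absent in both)
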
glaip_sdk/utils/rendering/renderer/summary_window.py (new file)
@@ -0,0 +1,79 @@
+"""Helpers for clamping the steps summary view to a rolling window.
+
+Authors:
+    Raymond Christopher (raymond.christopher@gdplabs.id)
+"""
+
+from __future__ import annotations
+
+from collections.abc import Callable
+
+from rich.text import Text
+
+Node = tuple[str, tuple[bool, ...]]
+LabelFn = Callable[[str], str]
+ParentFn = Callable[[str], str | None]
+
+
+def clamp_step_nodes(
+    nodes: list[Node],
+    *,
+    window: int,
+    get_label: LabelFn,
+    get_parent: ParentFn,
+) -> tuple[list[Node], Text | None, Text | None]:
+    """Return a windowed slice of nodes plus optional header/footer notices."""
+    if window <= 0 or len(nodes) <= window:
+        return nodes, None, None
+
+    start_index = len(nodes) - window
+    first_visible_step_id = nodes[start_index][0]
+    header = _build_header(first_visible_step_id, window, len(nodes), get_label, get_parent)
+    footer = _build_footer(len(nodes) - window)
+    return nodes[start_index:], header, footer
+
+
+def _build_header(
+    step_id: str,
+    window: int,
+    total: int,
+    get_label: LabelFn,
+    get_parent: ParentFn,
+) -> Text:
+    """Construct the leading notice for a truncated window."""
+    parts = [f"… (latest {window} of {total} steps shown"]
+    path = _collect_path_labels(step_id, get_label, get_parent)
+    if path:
+        parts.append("; continuing with ")
+        parts.append(" / ".join(path))
+    parts.append(")")
+    return Text("".join(parts), style="dim")
+
+
+def _build_footer(hidden_count: int) -> Text:
+    """Construct the footer notice indicating hidden steps."""
+    noun = "step" if hidden_count == 1 else "steps"
+    message = f"{hidden_count} earlier {noun} hidden. Press Ctrl+T to inspect the full transcript."
+    return Text(message, style="dim")
+
+
+def _collect_path_labels(
+    step_id: str,
+    get_label: LabelFn,
+    get_parent: ParentFn,
+) -> list[str]:
+    """Collect labels for the ancestry of the provided step."""
+    labels: list[str] = []
+    seen: set[str] = set()
+    current = step_id
+    while current and current not in seen:
+        seen.add(current)
+        label = get_label(current)
+        if label:
+            labels.append(label)
+        parent = get_parent(current)
+        if not parent:
+            break
+        current = parent
+    labels.reverse()
+    return labels
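
Since summary_window.py is self-contained, a quick usage sketch may help; the step ids, labels, and parent links below are purely illustrative:

    from rich.console import Console

    from glaip_sdk.utils.rendering.renderer.summary_window import clamp_step_nodes

    # Illustrative tree: three roots, the last with one child.
    labels = {"s1": "plan", "s2": "search", "s3": "delegate", "s3.1": "sub-agent run"}
    parents = {"s1": None, "s2": None, "s3": None, "s3.1": "s3"}
    nodes = [("s1", ()), ("s2", ()), ("s3", ()), ("s3.1", (True,))]

    visible, header, footer = clamp_step_nodes(
        nodes,
        window=2,  # keep only the two most recent nodes
        get_label=lambda step_id: labels.get(step_id, ""),
        get_parent=lambda step_id: parents.get(step_id),
    )

    console = Console()
    if header is not None:
        console.print(header)  # "… (latest 2 of 4 steps shown; continuing with delegate)"
    for step_id, _branch in visible:
        console.print(labels[step_id])
    if footer is not None:
        console.print(footer)  # "2 earlier steps hidden. Press Ctrl+T to inspect the full transcript."

In the SDK the nodes presumably come from StepTreeState.iter_visible_tree (added in this same release, see below), since the Node alias matches its yield type, but that wiring is not shown in this hunk.
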
glaip_sdk/utils/rendering/renderer/toggle.py (new file)
@@ -0,0 +1,182 @@
+"""Keyboard-driven transcript toggling support for the live renderer.
+
+Authors:
+    Raymond Christopher (raymond.christopher@gdplabs.id)
+"""
+
+from __future__ import annotations
+
+import os
+import sys
+import threading
+import time
+from typing import Any
+
+try:  # pragma: no cover - Windows-specific dependencies
+    import msvcrt  # type: ignore[import]
+except ImportError:  # pragma: no cover - POSIX fallback
+    msvcrt = None  # type: ignore[assignment]
+
+if os.name != "nt":  # pragma: no cover - POSIX-only imports
+    import select
+    import termios
+    import tty
+
+
+CTRL_T = "\x14"
+
+
+class TranscriptToggleController:
+    """Manage mid-run transcript toggling for RichStreamRenderer instances."""
+
+    def __init__(self, *, enabled: bool) -> None:
+        """Initialise controller.
+
+        Args:
+            enabled: Whether toggling should be active (usually gated by TTY checks).
+        """
+        self._enabled = enabled and bool(sys.stdin) and sys.stdin.isatty()
+        self._lock = threading.Lock()
+        self._posix_fd: int | None = None
+        self._posix_attrs: list[int] | None = None
+        self._active = False
+        self._stop_event = threading.Event()
+        self._poll_thread: threading.Thread | None = None
+
+    @property
+    def enabled(self) -> bool:
+        """Return True when controller is able to process keypresses."""
+        return self._enabled
+
+    def on_stream_start(self, renderer: Any) -> None:
+        """Prepare terminal state before streaming begins."""
+        if not self._enabled:
+            return
+
+        if os.name == "nt":  # pragma: no cover - Windows behaviour not in CI
+            self._active = True
+            self._start_polling_thread(renderer)
+            return
+
+        fd = sys.stdin.fileno()
+        try:
+            attrs = termios.tcgetattr(fd)
+        except Exception:
+            self._enabled = False
+            return
+
+        try:
+            tty.setcbreak(fd)
+        except Exception:
+            try:
+                termios.tcsetattr(fd, termios.TCSADRAIN, attrs)
+            except Exception:
+                pass
+            self._enabled = False
+            return
+
+        with self._lock:
+            self._posix_fd = fd
+            self._posix_attrs = attrs
+            self._active = True
+
+        self._start_polling_thread(renderer)
+
+    def on_stream_complete(self) -> None:
+        """Restore terminal state when streaming ends."""
+        if not self._active:
+            return
+
+        self._stop_polling_thread()
+
+        if os.name == "nt":  # pragma: no cover - Windows behaviour not in CI
+            self._active = False
+            return
+
+        with self._lock:
+            fd = self._posix_fd
+            attrs = self._posix_attrs
+            self._posix_fd = None
+            self._posix_attrs = None
+            self._active = False
+
+        if fd is None or attrs is None:
+            return
+
+        try:
+            termios.tcsetattr(fd, termios.TCSADRAIN, attrs)
+        except Exception:
+            pass
+
+    def poll(self, renderer: Any) -> None:
+        """Poll for toggle keypresses and update renderer if needed."""
+        if not self._active:
+            return
+
+        if os.name == "nt":  # pragma: no cover - Windows behaviour not in CI
+            self._poll_windows(renderer)
+        else:
+            self._poll_posix(renderer)
+
+    # ------------------------------------------------------------------
+    # Platform-specific polling
+    # ------------------------------------------------------------------
+    def _poll_windows(self, renderer: Any) -> None:
+        if not msvcrt:  # pragma: no cover - safety guard
+            return
+
+        while msvcrt.kbhit():
+            ch = msvcrt.getwch()
+            if ch == CTRL_T:
+                renderer.toggle_transcript_mode()
+
+    def _poll_posix(self, renderer: Any) -> None:  # pragma: no cover - requires TTY
+        fd = self._posix_fd
+        if fd is None:
+            return
+
+        while True:
+            readable, _, _ = select.select([fd], [], [], 0)
+            if not readable:
+                return
+
+            try:
+                data = os.read(fd, 1)
+            except Exception:
+                return
+
+            if not data:
+                return
+
+            ch = data.decode(errors="ignore")
+            if ch == CTRL_T:
+                renderer.toggle_transcript_mode()
+
+    def _start_polling_thread(self, renderer: Any) -> None:
+        if self._poll_thread and self._poll_thread.is_alive():
+            return
+        if not self._active:
+            return
+
+        self._stop_event.clear()
+        self._poll_thread = threading.Thread(target=self._poll_loop, args=(renderer,), daemon=True)
+        self._poll_thread.start()
+
+    def _stop_polling_thread(self) -> None:
+        self._stop_event.set()
+        thread = self._poll_thread
+        if thread and thread.is_alive():
+            thread.join(timeout=0.2)
+        self._poll_thread = None
+
+    def _poll_loop(self, renderer: Any) -> None:
+        while self._active and not self._stop_event.is_set():
+            try:
+                if os.name == "nt":
+                    self._poll_windows(renderer)
+                else:
+                    self._poll_posix(renderer)
+            except Exception:
+                # Never let background polling disrupt the main stream
+                pass
+            time.sleep(0.05)
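
The controller only needs an object exposing a toggle_transcript_mode() method; how RichStreamRenderer implements that lives in renderer/base.py and is not part of this hunk. A rough lifecycle sketch with a stand-in renderer:

    import time

    from glaip_sdk.utils.rendering.renderer.toggle import TranscriptToggleController


    class FakeRenderer:
        """Stand-in for RichStreamRenderer; only the toggle hook is exercised."""

        def toggle_transcript_mode(self) -> None:
            print("transcript view toggled")


    renderer = FakeRenderer()
    controller = TranscriptToggleController(enabled=True)  # silently no-ops when stdin is not a TTY

    controller.on_stream_start(renderer)   # cbreak mode on POSIX, plus a background poll thread
    try:
        for _ in range(100):               # stand-in for the event streaming loop
            controller.poll(renderer)      # optional; the daemon thread already polls every 50 ms
            time.sleep(0.1)
    finally:
        controller.on_stream_complete()    # always restores the saved terminal attributes
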
glaip_sdk/utils/rendering/step_tree_state.py (new file)
@@ -0,0 +1,100 @@
+"""State container for hierarchical renderer steps.
+
+Authors:
+    Raymond Christopher (raymond.christopher@gdplabs.id)
+"""
+
+from __future__ import annotations
+
+from collections.abc import Iterator
+from dataclasses import dataclass, field
+
+from glaip_sdk.utils.rendering.models import Step
+
+
+@dataclass(slots=True)
+class StepTreeState:
+    """Track hierarchical ordering, buffers, and pruning metadata."""
+
+    max_steps: int = 200
+    root_order: list[str] = field(default_factory=list)
+    child_map: dict[str, list[str]] = field(default_factory=dict)
+    buffered_children: dict[str, list[str]] = field(default_factory=dict)
+    running_by_context: dict[tuple[str | None, str | None], set[str]] = field(default_factory=dict)
+    retained_ids: set[str] = field(default_factory=set)
+    step_index: dict[str, Step] = field(default_factory=dict)
+    pending_branch_failures: set[str] = field(default_factory=set)
+
+    def link_root(self, step_id: str) -> None:
+        """Ensure a step id is present in the root ordering."""
+        if step_id not in self.root_order:
+            self.root_order.append(step_id)
+
+    def unlink_root(self, step_id: str) -> None:
+        """Remove a step id from the root ordering if present."""
+        if step_id in self.root_order:
+            self.root_order.remove(step_id)
+
+    def link_child(self, parent_id: str, child_id: str) -> None:
+        """Attach a child step to a parent."""
+        children = self.child_map.setdefault(parent_id, [])
+        if child_id not in children:
+            children.append(child_id)
+
+    def unlink_child(self, parent_id: str, child_id: str) -> None:
+        """Detach a child from a parent."""
+        children = self.child_map.get(parent_id)
+        if not children:
+            return
+
+        if child_id in children:
+            children.remove(child_id)
+        # Clean up if the list is now empty
+        if len(children) == 0:
+            self.child_map.pop(parent_id, None)
+
+    def buffer_child(self, parent_id: str, child_id: str) -> None:
+        """Track a child that is waiting for its parent to appear."""
+        queue = self.buffered_children.setdefault(parent_id, [])
+        if child_id not in queue:
+            queue.append(child_id)
+
+    def pop_buffered_children(self, parent_id: str) -> list[str]:
+        """Return any buffered children for a parent."""
+        return self.buffered_children.pop(parent_id, [])
+
+    def discard_running(self, step_id: str) -> None:
+        """Remove a step from running context tracking."""
+        for key, running in tuple(self.running_by_context.items()):
+            if step_id in running:
+                running.discard(step_id)
+                if not running:
+                    self.running_by_context.pop(key, None)
+
+    def iter_visible_tree(self) -> Iterator[tuple[str, tuple[bool, ...]]]:
+        """Yield step ids in depth-first order alongside branch metadata.
+
+        Returns:
+            Iterator of (step_id, branch_state) tuples where branch_state
+            captures whether each ancestor was the last child. This data
+            is later used by rendering helpers to draw connectors such as
+            `│`, `├─`, and `└─` consistently.
+        """
+        roots = tuple(self.root_order)
+        total_roots = len(roots)
+        for index, root_id in enumerate(roots):
+            yield root_id, ()
+            ancestor_state = (index == total_roots - 1,)
+            yield from self._walk_children(root_id, ancestor_state)
+
+    def _walk_children(
+        self, parent_id: str, ancestor_state: tuple[bool, ...]
+    ) -> Iterator[tuple[str, tuple[bool, ...]]]:
+        """Depth-first traversal helper yielding children with ancestry info."""
+        children = self.child_map.get(parent_id, [])
+        total_children = len(children)
+        for idx, child_id in enumerate(children):
+            is_last = idx == total_children - 1
+            branch_state = ancestor_state + (is_last,)
+            yield child_id, branch_state
+            yield from self._walk_children(child_id, branch_state)
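
Finally, a short sketch of the new StepTreeState container; the step ids are illustrative, and in practice the renderer presumably populates step_index with Step models from glaip_sdk.utils.rendering.models rather than bare strings:

    from glaip_sdk.utils.rendering.step_tree_state import StepTreeState

    state = StepTreeState()

    # Two root steps; the second delegates to two children.
    state.link_root("run")
    state.link_root("delegate")
    state.link_child("delegate", "sub-plan")
    state.link_child("delegate", "sub-answer")

    for step_id, branch_state in state.iter_visible_tree():
        # branch_state holds one flag per ancestor level: True means "last child at that level",
        # which is what the tree connectors (│, ├─, └─) are derived from.
        print("  " * len(branch_state) + step_id, branch_state)
    # run ()
    # delegate ()
    #     sub-plan (True, False)
    #     sub-answer (True, True)
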