glaip-sdk 0.0.5__py3-none-any.whl → 0.0.6a0__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that public registry.
Files changed (42)
  1. glaip_sdk/__init__.py +1 -1
  2. glaip_sdk/branding.py +3 -2
  3. glaip_sdk/cli/commands/__init__.py +1 -1
  4. glaip_sdk/cli/commands/agents.py +444 -268
  5. glaip_sdk/cli/commands/configure.py +12 -11
  6. glaip_sdk/cli/commands/mcps.py +28 -16
  7. glaip_sdk/cli/commands/models.py +5 -3
  8. glaip_sdk/cli/commands/tools.py +109 -102
  9. glaip_sdk/cli/display.py +38 -16
  10. glaip_sdk/cli/io.py +1 -1
  11. glaip_sdk/cli/main.py +26 -5
  12. glaip_sdk/cli/resolution.py +5 -4
  13. glaip_sdk/cli/utils.py +376 -157
  14. glaip_sdk/cli/validators.py +7 -2
  15. glaip_sdk/client/agents.py +184 -89
  16. glaip_sdk/client/base.py +24 -13
  17. glaip_sdk/client/validators.py +154 -94
  18. glaip_sdk/config/constants.py +0 -2
  19. glaip_sdk/models.py +4 -4
  20. glaip_sdk/utils/__init__.py +7 -7
  21. glaip_sdk/utils/client_utils.py +144 -78
  22. glaip_sdk/utils/display.py +4 -2
  23. glaip_sdk/utils/general.py +8 -6
  24. glaip_sdk/utils/import_export.py +55 -24
  25. glaip_sdk/utils/rendering/formatting.py +12 -6
  26. glaip_sdk/utils/rendering/models.py +1 -1
  27. glaip_sdk/utils/rendering/renderer/base.py +412 -248
  28. glaip_sdk/utils/rendering/renderer/console.py +6 -5
  29. glaip_sdk/utils/rendering/renderer/debug.py +94 -52
  30. glaip_sdk/utils/rendering/renderer/stream.py +93 -48
  31. glaip_sdk/utils/rendering/steps.py +103 -39
  32. glaip_sdk/utils/rich_utils.py +1 -1
  33. glaip_sdk/utils/run_renderer.py +1 -1
  34. glaip_sdk/utils/serialization.py +3 -1
  35. glaip_sdk/utils/validation.py +2 -2
  36. glaip_sdk-0.0.6a0.dist-info/METADATA +183 -0
  37. glaip_sdk-0.0.6a0.dist-info/RECORD +55 -0
  38. {glaip_sdk-0.0.5.dist-info → glaip_sdk-0.0.6a0.dist-info}/WHEEL +1 -1
  39. glaip_sdk-0.0.6a0.dist-info/entry_points.txt +3 -0
  40. glaip_sdk-0.0.5.dist-info/METADATA +0 -645
  41. glaip_sdk-0.0.5.dist-info/RECORD +0 -55
  42. glaip_sdk-0.0.5.dist-info/entry_points.txt +0 -2
@@ -7,6 +7,7 @@ Authors:
 from __future__ import annotations
 
 import io
+from typing import Any
 
 from rich.console import Console as RichConsole
 
@@ -14,7 +15,7 @@ from rich.console import Console as RichConsole
 class CapturingConsole:
     """Console wrapper that captures all output for saving."""
 
-    def __init__(self, original_console, capture=False):
+    def __init__(self, original_console: RichConsole, capture: bool = False) -> None:
         """Initialize the capturing console.
 
         Args:
@@ -23,9 +24,9 @@ class CapturingConsole:
         """
         self.original_console = original_console
         self.capture = capture
-        self.captured_output = []
+        self.captured_output: list[str] = []
 
-    def print(self, *args, **kwargs):
+    def print(self, *args: Any, **kwargs: Any) -> None:
         """Print to both original console and capture buffer if capturing."""
         # Always print to original console
         self.original_console.print(*args, **kwargs)
@@ -43,12 +44,12 @@
             temp_console.print(*args, **kwargs)
             self.captured_output.append(temp_output.getvalue())
 
-    def get_captured_output(self):
+    def get_captured_output(self) -> str:
         """Get the captured output as plain text."""
         if self.capture:
             return "".join(self.captured_output)
         return ""
 
-    def __getattr__(self, name):
+    def __getattr__(self, name: str) -> Any:
         """Delegate all other attributes to the original console."""
         return getattr(self.original_console, name)
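The `CapturingConsole` changes above are purely type annotations; the behavior (mirror every `print` into a plain-text buffer) is unchanged. A minimal standalone sketch of that capture pattern, using only Rich primitives and illustrative names rather than the SDK class itself:

```python
# Sketch of the capture pattern CapturingConsole relies on: render the same
# content through a second Console backed by an in-memory buffer so a plain
# text transcript can be saved later. Illustrative only.
import io

from rich.console import Console

console = Console()  # normal terminal output
buffer = io.StringIO()
capture_console = Console(file=buffer, force_terminal=False)

message = "[bold green]run finished[/bold green]"
console.print(message)          # user-visible, styled output
capture_console.print(message)  # same content, captured as plain text

transcript = buffer.getvalue()  # e.g. "run finished\n"
```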
@@ -15,6 +15,81 @@ from rich.markdown import Markdown
 from glaip_sdk.rich_components import AIPPanel
 
 
+def _calculate_relative_time(started_ts: float | None) -> tuple[float, str]:
+    """Calculate relative time since start."""
+    now_mono = monotonic()
+    rel = 0.0
+    if started_ts is not None:
+        rel = max(0.0, now_mono - started_ts)
+
+    ts_full = datetime.now().strftime("%H:%M:%S.%f")
+    ts_ms = ts_full[:-3]  # trim to milliseconds
+
+    return rel, ts_ms
+
+
+def _get_event_metadata(event: dict[str, Any]) -> tuple[str, str | None]:
+    """Extract event kind and status."""
+    sse_kind = (event.get("metadata") or {}).get("kind") or "event"
+    status_str = event.get("status") or (event.get("metadata") or {}).get("status")
+    return sse_kind, status_str
+
+
+def _build_debug_title(
+    sse_kind: str, status_str: str | None, ts_ms: str, rel: float
+) -> str:
+    """Build the debug event title."""
+    if status_str:
+        return f"SSE: {sse_kind} — {status_str} @ {ts_ms} (+{rel:.2f}s)"
+    else:
+        return f"SSE: {sse_kind} @ {ts_ms} (+{rel:.2f}s)"
+
+
+def _dejson_value(obj: Any) -> Any:
+    """Deep-parse JSON strings in nested objects."""
+    if isinstance(obj, dict):
+        return {k: _dejson_value(v) for k, v in obj.items()}
+    if isinstance(obj, list):
+        return [_dejson_value(x) for x in obj]
+    if isinstance(obj, str):
+        s = obj.strip()
+        if (s.startswith("{") and s.endswith("}")) or (
+            s.startswith("[") and s.endswith("]")
+        ):
+            try:
+                return _dejson_value(json.loads(s))
+            except Exception:
+                return obj
+        return obj
+    return obj
+
+
+def _format_event_json(event: dict[str, Any]) -> str:
+    """Format event as JSON with deep parsing."""
+    try:
+        return json.dumps(_dejson_value(event), indent=2, ensure_ascii=False)
+    except Exception:
+        return str(event)
+
+
+def _get_border_color(sse_kind: str) -> str:
+    """Get border color for event type."""
+    border_map = {
+        "agent_step": "blue",
+        "content": "green",
+        "final_response": "green",
+        "status": "yellow",
+        "artifact": "grey42",
+    }
+    return border_map.get(sse_kind, "grey42")
+
+
+def _create_debug_panel(title: str, event_json: str, border: str) -> AIPPanel:
+    """Create the debug panel."""
+    md = Markdown(f"```json\n{event_json}\n```", code_theme="monokai")
+    return AIPPanel(md, title=title, border_style=border)
+
+
 def render_debug_event(
     event: dict[str, Any], console: Console, started_ts: float | None = None
 ) -> None:
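Of the helpers introduced above, `_dejson_value` is the one that changes what the debug panels show: JSON payloads that arrive as embedded strings are re-parsed so they render as structured JSON rather than escaped text. A small self-contained illustration of the same idea (the sample event is invented):

```python
# Re-implementation of the _dejson_value idea for illustration; the sample
# event below is made up and only its shape matters.
import json
from typing import Any


def dejson(obj: Any) -> Any:
    """Recursively parse values that look like serialized JSON."""
    if isinstance(obj, dict):
        return {k: dejson(v) for k, v in obj.items()}
    if isinstance(obj, list):
        return [dejson(x) for x in obj]
    if isinstance(obj, str):
        s = obj.strip()
        if (s.startswith("{") and s.endswith("}")) or (
            s.startswith("[") and s.endswith("]")
        ):
            try:
                return dejson(json.loads(s))
            except Exception:
                return obj
    return obj


event = {"metadata": {"kind": "content"}, "content": '{"answer": 42}'}
print(json.dumps(dejson(event), indent=2))
# "content" now prints as a nested object instead of an escaped string
```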
@@ -26,58 +101,25 @@ def render_debug_event(
         started_ts: Monotonic timestamp when streaming started
     """
     try:
-        # Add relative time since first meaningful event and wall-clock stamp
-        now_mono = monotonic()
-        rel = 0.0
-        if started_ts is not None:
-            rel = max(0.0, now_mono - started_ts)
-        ts_full = datetime.now().strftime("%H:%M:%S.%f")
-        ts_ms = ts_full[:-3]  # trim to milliseconds
-
-        # Compose a descriptive title with kind/status
-        sse_kind = (event.get("metadata") or {}).get("kind") or "event"
-        status_str = event.get("status") or (event.get("metadata") or {}).get("status")
-        title = (
-            f"SSE: {sse_kind} {status_str} @ {ts_ms} (+{rel:.2f}s)"
-            if status_str
-            else f"SSE: {sse_kind} @ {ts_ms} (+{rel:.2f}s)"
-        )
-
-        # Deep-pretty the event by parsing nested JSON strings
-        def _dejson(obj):
-            if isinstance(obj, dict):
-                return {k: _dejson(v) for k, v in obj.items()}
-            if isinstance(obj, list):
-                return [_dejson(x) for x in obj]
-            if isinstance(obj, str):
-                s = obj.strip()
-                if (s.startswith("{") and s.endswith("}")) or (
-                    s.startswith("[") and s.endswith("]")
-                ):
-                    try:
-                        return _dejson(json.loads(s))
-                    except Exception:
-                        return obj
-                return obj
-            return obj
-
-        try:
-            event_json = json.dumps(_dejson(event), indent=2, ensure_ascii=False)
-        except Exception:
-            event_json = str(event)
-
-        # Choose border color by kind for readability
-        border = {
-            "agent_step": "blue",
-            "content": "green",
-            "final_response": "green",
-            "status": "yellow",
-            "artifact": "grey42",
-        }.get(sse_kind, "grey42")
-
-        # Render using Markdown with JSON code block (consistent with tool panels)
-        md = Markdown(f"```json\n{event_json}\n```", code_theme="monokai")
-        console.print(AIPPanel(md, title=title, border_style=border))
+        # Calculate timing information
+        rel, ts_ms = _calculate_relative_time(started_ts)
+
+        # Extract event metadata
+        sse_kind, status_str = _get_event_metadata(event)
+
+        # Build title
+        title = _build_debug_title(sse_kind, status_str, ts_ms, rel)
+
+        # Format event JSON
+        event_json = _format_event_json(event)
+
+        # Get border color
+        border = _get_border_color(sse_kind)
+
+        # Create and print panel
+        panel = _create_debug_panel(title, event_json, border)
+        console.print(panel)
+
     except Exception as e:
         # Debug helpers must not break streaming
         print(f"Debug error: {e}")  # Fallback debug output
@@ -6,6 +6,7 @@ Authors:
 
 from __future__ import annotations
 
+from collections.abc import Callable
 from time import monotonic
 from typing import Any
 
@@ -13,7 +14,7 @@ from typing import Any
 class StreamProcessor:
     """Handles event routing and parsing for streaming agent execution."""
 
-    def __init__(self):
+    def __init__(self) -> None:
         """Initialize the stream processor."""
        self.streaming_started_at: float | None = None
        self.server_elapsed_time: float | None = None
@@ -22,7 +23,7 @@ class StreamProcessor:
         self.current_event_finished_panels: set[str] = set()
         self.last_event_time_by_ctx: dict[str, float] = {}
 
-    def reset_event_tracking(self):
+    def reset_event_tracking(self) -> None:
         """Reset tracking for the current event."""
         self.current_event_tools.clear()
         self.current_event_sub_agents.clear()
@@ -47,7 +48,7 @@
             pass
 
         return {
-            "kind": metadata.get("kind") if metadata else None,
+            "kind": metadata.get("kind") if metadata else event.get("kind"),
             "task_id": event.get("task_id"),
             "context_id": event.get("context_id"),
             "content": event.get("content", ""),
@@ -55,9 +56,78 @@
             "status": event.get("status"),
             "metadata": metadata,
         }
 
+    def _extract_metadata_tool_calls(
+        self, metadata: dict[str, Any]
+    ) -> tuple[str | None, dict, Any, list]:
+        """Extract tool calls from metadata."""
+        tool_calls = metadata.get("tool_calls", [])
+        if not tool_calls:
+            return None, {}, None, []
+
+        # Take the first tool call if multiple exist
+        first_call = tool_calls[0] if isinstance(tool_calls, list) else tool_calls
+        tool_name = first_call.get("name")
+        tool_args = first_call.get("arguments", {})
+        tool_out = first_call.get("output")
+
+        # Collect info for all tool calls
+        tool_calls_info = []
+        for call in tool_calls if isinstance(tool_calls, list) else [tool_calls]:
+            if isinstance(call, dict) and "name" in call:
+                tool_calls_info.append(
+                    (
+                        call.get("name", ""),
+                        call.get("arguments", {}),
+                        call.get("output"),
+                    )
+                )
+
+        return tool_name, tool_args, tool_out, tool_calls_info
+
+    def _extract_tool_info_calls(
+        self, tool_info: dict[str, Any]
+    ) -> tuple[str | None, dict, Any, list]:
+        """Extract tool calls from tool_info structure."""
+        tool_calls_info = []
+        tool_name = None
+        tool_args = {}
+        tool_out = None
+
+        # Case 1: tool_info.tool_calls
+        ti_calls = tool_info.get("tool_calls")
+        if isinstance(ti_calls, list) and ti_calls:
+            for call in ti_calls:
+                if isinstance(call, dict) and call.get("name"):
+                    tool_calls_info.append(
+                        (call.get("name"), call.get("args", {}), call.get("output"))
+                    )
+            if tool_calls_info:
+                tool_name, tool_args, tool_out = tool_calls_info[0]
+            return tool_name, tool_args, tool_out, tool_calls_info
+
+        # Case 2: single tool_info name/args/output
+        if tool_info.get("name"):
+            tool_name = tool_info.get("name")
+            tool_args = tool_info.get("args", {})
+            tool_out = tool_info.get("output")
+            tool_calls_info.append((tool_name, tool_args, tool_out))
+
+        return tool_name, tool_args, tool_out, tool_calls_info
+
+    def _extract_tool_calls_from_metadata(
+        self, metadata: dict[str, Any]
+    ) -> tuple[str | None, dict, Any, list]:
+        """Extract tool calls from metadata structure."""
+        tool_info = metadata.get("tool_info", {}) or {}
+
+        if tool_info:
+            return self._extract_tool_info_calls(tool_info)
+
+        return None, {}, None, []
+
     def parse_tool_calls(
         self, event: dict[str, Any]
-    ) -> tuple[str | None, Any, Any, list]:
+    ) -> tuple[str | None, Any, Any, list[tuple[str, Any, Any]]]:
         """Parse tool call information from an event.
 
         Args:
@@ -66,56 +136,28 @@ class StreamProcessor:
         Returns:
             Tuple of (tool_name, tool_args, tool_output, tool_calls_info)
         """
-        tool_name = None
-        tool_args = {}
-        tool_out = None
-        tool_calls_info = []
-
-        # Extract tool information from metadata
         metadata = event.get("metadata", {})
-        tool_calls = metadata.get("tool_calls", [])
 
-        if tool_calls:
-            # Take the first tool call if multiple exist
-            first_call = tool_calls[0] if isinstance(tool_calls, list) else tool_calls
-            tool_name = first_call.get("name")
-            tool_args = first_call.get("arguments", {})
-            tool_out = first_call.get("output")
-
-            # Collect info for all tool calls
-            for call in tool_calls if isinstance(tool_calls, list) else [tool_calls]:
-                if isinstance(call, dict) and "name" in call:
-                    tool_calls_info.append(
-                        (
-                            call.get("name", ""),
-                            call.get("arguments", {}),
-                            call.get("output"),
-                        )
-                    )
+        # Try primary extraction method
+        (
+            tool_name,
+            tool_args,
+            tool_out,
+            tool_calls_info,
+        ) = self._extract_metadata_tool_calls(metadata)
 
         # Fallback to nested metadata.tool_info (newer schema)
         if not tool_calls_info:
-            tool_info = metadata.get("tool_info", {}) or {}
-            # Case 1: tool_info.tool_calls
-            ti_calls = tool_info.get("tool_calls")
-            if isinstance(ti_calls, list) and ti_calls:
-                for call in ti_calls:
-                    if isinstance(call, dict) and call.get("name"):
-                        tool_calls_info.append(
-                            (call.get("name"), call.get("args", {}), call.get("output"))
-                        )
-                if tool_calls_info and not tool_name:
-                    tool_name, tool_args, tool_out = tool_calls_info[0]
-            # Case 2: single tool_info name/args/output
-            if tool_info.get("name") and not tool_name:
-                tool_name = tool_info.get("name")
-                tool_args = tool_info.get("args", {})
-                tool_out = tool_info.get("output")
-                tool_calls_info.append((tool_name, tool_args, tool_out))
+            (
+                tool_name,
+                tool_args,
+                tool_out,
+                tool_calls_info,
+            ) = self._extract_tool_calls_from_metadata(metadata)
 
         return tool_name, tool_args, tool_out, tool_calls_info
 
-    def update_timing(self, context_id: str | None):
+    def update_timing(self, context_id: str | None) -> None:
         """Update timing information for the given context.
 
         Args:
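The `parse_tool_calls` refactor above delegates to the new helpers but keeps both event schemas: the older `metadata.tool_calls` (with an `arguments` key) and the newer nested `metadata.tool_info` (with an `args` key). A hedged sketch of the two shapes and the tuple the parser is expected to return; tool names and values are invented:

```python
# Illustrative payloads only; field values are made up.
older_schema_event = {
    "metadata": {
        "tool_calls": [
            {"name": "web_search", "arguments": {"query": "weather"}, "output": "sunny"}
        ]
    }
}

newer_schema_event = {
    "metadata": {
        "tool_info": {
            "tool_calls": [
                {"name": "web_search", "args": {"query": "weather"}, "output": "sunny"}
            ]
        }
    }
}

# For either shape, parse_tool_calls should yield roughly:
# ("web_search", {"query": "weather"}, "sunny",
#  [("web_search", {"query": "weather"}, "sunny")])
```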
@@ -148,8 +190,11 @@
         return elapsed >= think_threshold
 
     def track_tools_and_agents(
-        self, tool_name: str | None, tool_calls_info: list, is_delegation_tool_func
-    ):
+        self,
+        tool_name: str | None,
+        tool_calls_info: list[tuple[str, Any, Any]],
+        is_delegation_tool_func: Callable[[str], bool],
+    ) -> None:
         """Track tools and sub-agents mentioned in the current event.
 
         Args:
@@ -12,7 +12,7 @@ from glaip_sdk.utils.rendering.models import Step
 
 
 class StepManager:
-    def __init__(self, max_steps: int = 200):
+    def __init__(self, max_steps: int = 200) -> None:
         self.by_id: dict[str, Step] = {}
         self.order: list[str] = []
         self.children: dict[str, list[str]] = {}
@@ -21,19 +21,46 @@
         self.max_steps = max_steps
         self._last_running: dict[tuple, str] = {}
 
-    def _alloc_slot(self, task_id, context_id, kind, name) -> int:
+    def _alloc_slot(
+        self,
+        task_id: str | None,
+        context_id: str | None,
+        kind: str,
+        name: str,
+    ) -> int:
         k = (task_id, context_id, kind, name)
         self.slot_counter[k] = self.slot_counter.get(k, 0) + 1
         return self.slot_counter[k]
 
-    def _key(self, task_id, context_id, kind, name, slot) -> tuple:
+    def _key(
+        self,
+        task_id: str | None,
+        context_id: str | None,
+        kind: str,
+        name: str,
+        slot: int,
+    ) -> tuple[str | None, str | None, str, str, int]:
         return (task_id, context_id, kind, name, slot)
 
-    def _make_id(self, task_id, context_id, kind, name, slot) -> str:
+    def _make_id(
+        self,
+        task_id: str | None,
+        context_id: str | None,
+        kind: str,
+        name: str,
+        slot: int,
+    ) -> str:
         return f"{task_id or 't'}::{context_id or 'c'}::{kind}::{name}::{slot}"
 
     def start_or_get(
-        self, *, task_id, context_id, kind, name, parent_id=None, args=None
+        self,
+        *,
+        task_id: str | None,
+        context_id: str | None,
+        kind: str,
+        name: str,
+        parent_id: str | None = None,
+        args: dict[str, object] | None = None,
     ) -> Step:
         existing = self.find_running(
             task_id=task_id, context_id=context_id, kind=kind, name=name
@@ -64,44 +91,74 @@
         self._last_running[(task_id, context_id, kind, name)] = step_id
         return st
 
-    def _prune_steps(self):
-        total = len(self.order) + sum(len(v) for v in self.children.values())
-        if total <= self.max_steps:
+    def _calculate_total_steps(self) -> int:
+        """Calculate total number of steps."""
+        return len(self.order) + sum(len(v) for v in self.children.values())
+
+    def _get_subtree_size(self, root_id: str) -> int:
+        """Get the size of a subtree (including root)."""
+        subtree = [root_id]
+        stack = list(self.children.get(root_id, []))
+        while stack:
+            x = stack.pop()
+            subtree.append(x)
+            stack.extend(self.children.get(x, []))
+        return len(subtree)
+
+    def _remove_subtree(self, root_id: str) -> None:
+        """Remove a complete subtree from all data structures."""
+        stack = [root_id]
+        to_remove = []
+        while stack:
+            sid = stack.pop()
+            to_remove.append(sid)
+            stack.extend(self.children.pop(sid, []))
+
+        for sid in to_remove:
+            st = self.by_id.pop(sid, None)
+            if st:
+                key = (st.task_id, st.context_id, st.kind, st.name)
+                self._last_running.pop(key, None)
+            for _parent, kids in list(self.children.items()):
+                if sid in kids:
+                    kids.remove(sid)
+            if sid in self.order:
+                self.order.remove(sid)
+
+    def _should_prune_steps(self, total: int) -> bool:
+        """Check if steps should be pruned."""
+        return total > self.max_steps
+
+    def _get_oldest_step_id(self) -> str | None:
+        """Get the oldest step ID for pruning."""
+        return self.order[0] if self.order else None
+
+    def _prune_steps(self) -> None:
+        """Prune steps when total exceeds maximum."""
+        total = self._calculate_total_steps()
+        if not self._should_prune_steps(total):
             return
 
-        def remove_subtree(root_id: str):
-            stack = [root_id]
-            to_remove = []
-            while stack:
-                sid = stack.pop()
-                to_remove.append(sid)
-                stack.extend(self.children.pop(sid, []))
-            for sid in to_remove:
-                st = self.by_id.pop(sid, None)
-                if st:
-                    key = (st.task_id, st.context_id, st.kind, st.name)
-                    self._last_running.pop(key, None)
-                for _parent, kids in list(self.children.items()):
-                    if sid in kids:
-                        kids.remove(sid)
-                if sid in self.order:
-                    self.order.remove(sid)
-
-        while total > self.max_steps and self.order:
-            sid = self.order[0]
-            subtree = [sid]
-            stack = list(self.children.get(sid, []))
-            while stack:
-                x = stack.pop()
-                subtree.append(x)
-                stack.extend(self.children.get(x, []))
-            total -= len(subtree)
-            remove_subtree(sid)
+        while self._should_prune_steps(total) and self.order:
+            sid = self._get_oldest_step_id()
+            if not sid:
+                break
+
+            subtree_size = self._get_subtree_size(sid)
+            self._remove_subtree(sid)
+            total -= subtree_size
 
     def get_child_count(self, step_id: str) -> int:
         return len(self.children.get(step_id, []))
 
-    def find_running(self, *, task_id, context_id, kind, name) -> Step | None:
+    def find_running(
+        self,
+        *,
+        task_id: str | None,
+        context_id: str | None,
+        kind: str,
+        name: str,
+    ) -> Step | None:
         key = (task_id, context_id, kind, name)
         step_id = self._last_running.get(key)
         if step_id:
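The pruning refactor above extracts the subtree walk into `_get_subtree_size` and `_remove_subtree`; the traversal itself (an iterative DFS over the `children` map) is unchanged. A toy standalone sketch of that walk, with invented ids:

```python
# Toy parent -> children map; ids are invented for illustration.
children: dict[str, list[str]] = {"root": ["a", "b"], "a": ["a1"], "b": []}


def subtree_ids(root: str) -> list[str]:
    """Iterative DFS, the same shape as _get_subtree_size/_remove_subtree."""
    collected = [root]
    stack = list(children.get(root, []))
    while stack:
        node = stack.pop()
        collected.append(node)
        stack.extend(children.get(node, []))
    return collected


print(subtree_ids("root"))  # ['root', 'b', 'a', 'a1'], so the subtree size is 4
```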
@@ -125,8 +182,15 @@
         return None
 
     def finish(
-        self, *, task_id, context_id, kind, name, output=None, duration_raw=None
-    ):
+        self,
+        *,
+        task_id: str | None,
+        context_id: str | None,
+        kind: str,
+        name: str,
+        output: object | None = None,
+        duration_raw: float | None = None,
+    ) -> Step:
         st = self.find_running(
             task_id=task_id, context_id=context_id, kind=kind, name=name
         )
@@ -5,7 +5,7 @@ Authors:
 """
 
 
-def _check_rich_available():
+def _check_rich_available() -> bool:
     """Check if Rich is available by attempting imports."""
     try:
         import importlib.util
@@ -35,7 +35,7 @@ __all__ = [
     "CapturingConsole",
     "RendererConfig",
     "RichStreamRenderer",
-    "render_debug_event",
     "RunStats",
     "StepManager",
+    "render_debug_event",
 ]
@@ -92,7 +92,9 @@ def write_yaml(file_path: Path, data: dict[str, Any]) -> None:
     class LiteralString(str):
         pass
 
-    def literal_string_representer(dumper, data):
+    def literal_string_representer(
+        dumper: yaml.Dumper, data: "LiteralString"
+    ) -> yaml.nodes.Node:
         # Use literal block scalar (|) for multiline strings to preserve formatting
         if "\n" in data:
             return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
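For reference, the representer above follows the standard PyYAML pattern: a `str` subclass marks values that should be emitted as literal block scalars (`|`) when they span multiple lines. A minimal sketch outside the SDK, with illustrative names:

```python
# Minimal PyYAML example of the literal-block-scalar technique used above.
import yaml


class LiteralString(str):
    pass


def literal_representer(dumper: yaml.Dumper, data: "LiteralString") -> yaml.nodes.Node:
    style = "|" if "\n" in data else None
    return dumper.represent_scalar("tag:yaml.org,2002:str", data, style=style)


yaml.add_representer(LiteralString, literal_representer)

doc = {"instruction": LiteralString("line one\nline two\n")}
print(yaml.dump(doc, sort_keys=False))
# instruction: |
#   line one
#   line two
```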
@@ -55,8 +55,8 @@ def validate_agent_instruction(instruction: str) -> str:
 
     cleaned_instruction = instruction.strip()
 
-    if len(cleaned_instruction) > 10000:
-        raise ValueError("Agent instruction cannot be longer than 10,000 characters")
+    if len(cleaned_instruction) > 100000:
+        raise ValueError("Agent instruction cannot be longer than 100,000 characters")
 
     return cleaned_instruction
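The only behavioral change in that last hunk is the limit: agent instructions may now be up to 100,000 characters after stripping whitespace, up from 10,000. Roughly, the check behaves like this standalone sketch (not the SDK function, which may validate more):

```python
# Standalone sketch of the length check; the real validator lives in glaip_sdk.
def check_instruction_length(instruction: str, max_chars: int = 100_000) -> str:
    cleaned = instruction.strip()
    if len(cleaned) > max_chars:
        raise ValueError(
            f"Agent instruction cannot be longer than {max_chars:,} characters"
        )
    return cleaned


check_instruction_length("  summarise the report  ")  # returns "summarise the report"
check_instruction_length("x" * 100_001)               # raises ValueError
```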