deepagents-cli 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of deepagents-cli has been flagged as potentially problematic; see the registry's advisory page for details.
- deepagents_cli/__init__.py +5 -0
- deepagents_cli/__main__.py +6 -0
- deepagents_cli/agent.py +278 -0
- deepagents_cli/cli.py +13 -0
- deepagents_cli/commands.py +89 -0
- deepagents_cli/config.py +138 -0
- deepagents_cli/execution.py +644 -0
- deepagents_cli/file_ops.py +347 -0
- deepagents_cli/input.py +249 -0
- deepagents_cli/main.py +226 -0
- deepagents_cli/py.typed +0 -0
- deepagents_cli/token_utils.py +63 -0
- deepagents_cli/tools.py +140 -0
- deepagents_cli/ui.py +489 -0
- deepagents_cli-0.0.5.dist-info/METADATA +18 -0
- deepagents_cli-0.0.5.dist-info/RECORD +19 -0
- deepagents_cli-0.0.5.dist-info/entry_points.txt +3 -0
- deepagents_cli-0.0.5.dist-info/top_level.txt +1 -0
- deepagents/__init__.py +0 -7
- deepagents/cli.py +0 -567
- deepagents/default_agent_prompt.md +0 -64
- deepagents/graph.py +0 -144
- deepagents/memory/__init__.py +0 -17
- deepagents/memory/backends/__init__.py +0 -15
- deepagents/memory/backends/composite.py +0 -250
- deepagents/memory/backends/filesystem.py +0 -330
- deepagents/memory/backends/state.py +0 -206
- deepagents/memory/backends/store.py +0 -351
- deepagents/memory/backends/utils.py +0 -319
- deepagents/memory/protocol.py +0 -164
- deepagents/middleware/__init__.py +0 -13
- deepagents/middleware/agent_memory.py +0 -207
- deepagents/middleware/filesystem.py +0 -615
- deepagents/middleware/patch_tool_calls.py +0 -44
- deepagents/middleware/subagents.py +0 -481
- deepagents/pretty_cli.py +0 -289
- deepagents_cli-0.0.3.dist-info/METADATA +0 -551
- deepagents_cli-0.0.3.dist-info/RECORD +0 -24
- deepagents_cli-0.0.3.dist-info/entry_points.txt +0 -2
- deepagents_cli-0.0.3.dist-info/licenses/LICENSE +0 -21
- deepagents_cli-0.0.3.dist-info/top_level.txt +0 -1
- {deepagents_cli-0.0.3.dist-info → deepagents_cli-0.0.5.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,644 @@
|
|
|
1
|
+
"""Task execution and streaming logic for the CLI."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import sys
|
|
5
|
+
import termios
|
|
6
|
+
import threading
|
|
7
|
+
import tty
|
|
8
|
+
|
|
9
|
+
from langchain_core.messages import HumanMessage, ToolMessage
|
|
10
|
+
from langgraph.types import Command
|
|
11
|
+
from rich import box
|
|
12
|
+
from rich.markdown import Markdown
|
|
13
|
+
from rich.panel import Panel
|
|
14
|
+
|
|
15
|
+
from .config import COLORS, console
|
|
16
|
+
from .file_ops import FileOpTracker, build_approval_preview
|
|
17
|
+
from .input import parse_file_mentions
|
|
18
|
+
from .ui import (
|
|
19
|
+
TokenTracker,
|
|
20
|
+
format_tool_display,
|
|
21
|
+
format_tool_message_content,
|
|
22
|
+
render_diff_block,
|
|
23
|
+
render_file_operation,
|
|
24
|
+
render_summary_panel,
|
|
25
|
+
render_todo_list,
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def is_summary_message(content: str) -> bool:
    """Detect if a message is from SummarizationMiddleware.

    Matches the known prefixes and phrases that SummarizationMiddleware
    emits; returns False for any non-string input.
    """
    if not isinstance(content, str):
        return False
    # Prefix patterns are matched case-sensitively against the raw text.
    if content.startswith(("Summary:", "Conversation summary:")):
        return True
    # Phrase patterns are matched case-insensitively anywhere in the text.
    lowered = content.lower()
    markers = (
        "conversation summary",
        "previous conversation",
        "summarized the conversation",
    )
    return any(marker in lowered for marker in markers)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def _extract_tool_args(action_request: dict) -> dict | None:
|
|
45
|
+
"""Best-effort extraction of tool call arguments from an action request."""
|
|
46
|
+
if "tool_call" in action_request and isinstance(action_request["tool_call"], dict):
|
|
47
|
+
args = action_request["tool_call"].get("args")
|
|
48
|
+
if isinstance(args, dict):
|
|
49
|
+
return args
|
|
50
|
+
args = action_request.get("args")
|
|
51
|
+
if isinstance(args, dict):
|
|
52
|
+
return args
|
|
53
|
+
return None
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def prompt_for_tool_approval(action_request: dict, assistant_id: str | None) -> dict:
    """Prompt user to approve/reject a tool action with arrow key navigation.

    Renders an approval panel (and an optional diff preview), then runs a
    raw-mode terminal menu: Up/Down arrows move the selection, Enter
    confirms, 'a'/'r' are shortcuts, and Ctrl+C raises KeyboardInterrupt.
    When raw-mode setup fails (no termios, or stdin is not a tty) it falls
    back to a plain input() prompt defaulting to Approve.

    Args:
        action_request: HITL action payload; "description", "name"/"tool",
            and tool-call args are read on a best-effort basis.
        assistant_id: Optional assistant identifier forwarded to
            build_approval_preview.

    Returns:
        {"type": "approve"} or {"type": "reject", "message": ...} — the
        decision shape consumed by the HITL resume path.
    """
    description = action_request.get("description", "No description available")
    # The tool name may live under either key depending on the request source.
    tool_name = action_request.get("name") or action_request.get("tool")
    tool_args = _extract_tool_args(action_request)
    preview = build_approval_preview(tool_name, tool_args, assistant_id) if tool_name else None

    # Assemble the panel body: preview details first, then the description
    # (only when it adds information beyond the placeholder default).
    body_lines = []
    if preview:
        body_lines.append(f"[bold]{preview.title}[/bold]")
        body_lines.extend(preview.details)
        if preview.error:
            body_lines.append(f"[red]{preview.error}[/red]")
        if description and description != "No description available":
            body_lines.append("")
            body_lines.append(description)
    else:
        body_lines.append(description)

    # Display action info first
    console.print()
    console.print(
        Panel(
            "[bold yellow]⚠️ Tool Action Requires Approval[/bold yellow]\n\n"
            + "\n".join(body_lines),
            border_style="yellow",
            box=box.ROUNDED,
            padding=(0, 1),
        )
    )
    # Show the diff preview only when it rendered without error.
    if preview and preview.diff and not preview.error:
        console.print()
        render_diff_block(preview.diff, preview.diff_title or preview.title)
        console.print()

    options = ["approve", "reject"]
    selected = 0  # Start with approve selected

    try:
        fd = sys.stdin.fileno()
        # Save terminal settings so they can be restored in the finally block.
        old_settings = termios.tcgetattr(fd)

        try:
            tty.setraw(fd)

            # Initial render flag
            first_render = True

            while True:
                if not first_render:
                    # Move cursor back to start of menu (up 2 lines, then to start of line)
                    sys.stdout.write("\033[2A\r")

                first_render = False

                # Display options vertically with ANSI color codes
                for i, option in enumerate(options):
                    sys.stdout.write("\r\033[K")  # Clear line from cursor to end

                    if i == selected:
                        if option == "approve":
                            # Green bold with filled checkbox
                            sys.stdout.write("\033[1;32m☑ Approve\033[0m\n")
                        else:
                            # Red bold with filled checkbox
                            sys.stdout.write("\033[1;31m☑ Reject\033[0m\n")
                    elif option == "approve":
                        # Dim with empty checkbox
                        sys.stdout.write("\033[2m☐ Approve\033[0m\n")
                    else:
                        # Dim with empty checkbox
                        sys.stdout.write("\033[2m☐ Reject\033[0m\n")

                sys.stdout.flush()

                # Read key
                char = sys.stdin.read(1)

                if char == "\x1b":  # ESC sequence (arrow keys)
                    # NOTE(review): a bare ESC press blocks here waiting for two
                    # more bytes — acceptable for this menu, but worth confirming.
                    next1 = sys.stdin.read(1)
                    next2 = sys.stdin.read(1)
                    if next1 == "[":
                        if next2 == "B":  # Down arrow
                            selected = (selected + 1) % len(options)
                        elif next2 == "A":  # Up arrow
                            selected = (selected - 1) % len(options)
                elif char == "\r" or char == "\n":  # Enter
                    sys.stdout.write("\033[1B\n")  # Move down past the menu
                    break
                elif char == "\x03":  # Ctrl+C
                    sys.stdout.write("\033[1B\n")  # Move down past the menu
                    raise KeyboardInterrupt
                elif char.lower() == "a":
                    selected = 0
                    sys.stdout.write("\033[1B\n")  # Move down past the menu
                    break
                elif char.lower() == "r":
                    selected = 1
                    sys.stdout.write("\033[1B\n")  # Move down past the menu
                    break

        finally:
            # Always restore the terminal to its previous (cooked) mode.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)

    except (termios.error, AttributeError):
        # Fallback for non-Unix systems
        console.print(" ☐ (A)pprove (default)")
        console.print(" ☐ (R)eject")
        choice = input("\nChoice (A/R, default=Approve): ").strip().lower()
        if choice == "r" or choice == "reject":
            selected = 1
        else:
            selected = 0

    console.print()

    # Return decision based on selection
    if selected == 0:
        return {"type": "approve"}
    return {"type": "reject", "message": "User rejected the command"}
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def execute_task(
    user_input: str,
    agent,
    assistant_id: str | None,
    session_state,
    token_tracker: TokenTracker | None = None,
):
    """Execute any task by passing it directly to the AI agent.

    Streams the agent's response and renders, as they arrive: markdown text
    segments, tool-call lines, todo lists, file-operation records, and
    summarization panels. Human-in-the-loop interrupts are handled inline
    (auto-approve or interactive approval), and the graph is resumed via
    Command(resume=...). Ctrl+C cleans up and notifies the agent in a
    background thread.

    Args:
        user_input: Raw prompt text; file mentions in it are expanded and
            their contents inlined (each file truncated to 50k chars).
        agent: Agent exposing .stream(), .invoke(), and .update_state()
            (LangGraph-style interface).
        assistant_id: Optional assistant id recorded in run metadata and
            forwarded to file-op tracking/approval previews.
        session_state: Session flags; only .auto_approve is read here.
        token_tracker: Optional tracker that accumulates token usage totals
            after the run completes.
    """
    console.print()

    # Parse file mentions and inject content if any
    prompt_text, mentioned_files = parse_file_mentions(user_input)

    if mentioned_files:
        context_parts = [prompt_text, "\n\n## Referenced Files\n"]
        for file_path in mentioned_files:
            try:
                content = file_path.read_text()
                # Limit file content to reasonable size
                if len(content) > 50000:
                    content = content[:50000] + "\n... (file truncated)"
                context_parts.append(
                    f"\n### {file_path.name}\nPath: `{file_path}`\n```\n{content}\n```"
                )
            except Exception as e:
                # Unreadable files are reported inline rather than aborting the task.
                context_parts.append(f"\n### {file_path.name}\n[Error reading file: {e}]")

        final_input = "\n".join(context_parts)
    else:
        final_input = prompt_text

    # Single persistent thread id; assistant_id is attached as metadata only.
    config = {
        "configurable": {"thread_id": "main"},
        "metadata": {"assistant_id": assistant_id} if assistant_id else {},
    }

    has_responded = False
    captured_input_tokens = 0
    captured_output_tokens = 0
    current_todos = None  # Track current todo list state

    status = console.status(f"[bold {COLORS['thinking']}]Agent is thinking...", spinner="dots")
    status.start()
    # spinner_active mirrors whether `status` is currently running; it must be
    # stopped before any console.print to avoid corrupting the live display.
    spinner_active = True

    tool_icons = {
        "read_file": "📖",
        "write_file": "✏️",
        "edit_file": "✂️",
        "ls": "📁",
        "glob": "🔍",
        "grep": "🔎",
        "shell": "⚡",
        "web_search": "🌐",
        "http_request": "🌍",
        "task": "🤖",
        "write_todos": "📋",
    }

    file_op_tracker = FileOpTracker(assistant_id=assistant_id)

    # Track which tool calls we've displayed to avoid duplicates
    displayed_tool_ids = set()
    # Buffer partial tool-call chunks keyed by streaming index
    tool_call_buffers: dict[str | int, dict] = {}
    # Buffer assistant text so we can render complete markdown segments
    pending_text = ""
    # Track if we're buffering a summary message
    summary_mode = False
    summary_buffer = ""

    def flush_text_buffer(*, final: bool = False) -> None:
        """Flush accumulated assistant text as rendered markdown when appropriate."""
        nonlocal pending_text, spinner_active, has_responded
        # Only flush complete segments (final=True) with non-blank content.
        if not final or not pending_text.strip():
            return
        if spinner_active:
            status.stop()
            spinner_active = False
        if not has_responded:
            # Leading bullet marks the start of the agent's visible response.
            console.print("●", style=COLORS["agent"], markup=False)
            has_responded = True
        markdown = Markdown(pending_text.rstrip())
        console.print(markdown, style=COLORS["agent"])
        pending_text = ""

    def flush_summary_buffer() -> None:
        """Render any buffered summary panel output."""
        nonlocal summary_mode, summary_buffer, spinner_active, has_responded
        # Reset state even when there is nothing worth rendering.
        if not summary_mode or not summary_buffer.strip():
            summary_mode = False
            summary_buffer = ""
            return
        if spinner_active:
            status.stop()
            spinner_active = False
        if not has_responded:
            console.print("●", style=COLORS["agent"], markup=False)
            has_responded = True
        console.print()
        render_summary_panel(summary_buffer.strip())
        console.print()
        summary_mode = False
        summary_buffer = ""

    # Stream input - may need to loop if there are interrupts
    stream_input = {"messages": [{"role": "user", "content": final_input}]}

    try:
        while True:
            interrupt_occurred = False
            hitl_response = None
            suppress_resumed_output = False

            for chunk in agent.stream(
                stream_input,
                stream_mode=["messages", "updates"],  # Dual-mode for HITL support
                subgraphs=True,
                config=config,
                durability="exit",
            ):
                # Unpack chunk - with subgraphs=True and dual-mode, it's (namespace, stream_mode, data)
                if not isinstance(chunk, tuple) or len(chunk) != 3:
                    continue

                namespace, current_stream_mode, data = chunk

                # Handle UPDATES stream - for interrupts and todos
                if current_stream_mode == "updates":
                    if not isinstance(data, dict):
                        continue

                    # Check for interrupts
                    if "__interrupt__" in data:
                        interrupt_data = data["__interrupt__"]
                        if interrupt_data:
                            # Interrupts may arrive as a tuple of Interrupt objects
                            # or a single object; normalize to the payload value.
                            interrupt_obj = (
                                interrupt_data[0]
                                if isinstance(interrupt_data, tuple)
                                else interrupt_data
                            )
                            hitl_request = (
                                interrupt_obj.value
                                if hasattr(interrupt_obj, "value")
                                else interrupt_obj
                            )

                            # Check if auto-approve is enabled
                            if session_state.auto_approve:
                                # Auto-approve all commands without prompting
                                decisions = []
                                for action_request in hitl_request.get("action_requests", []):
                                    # Show what's being auto-approved (brief, dim message)
                                    if spinner_active:
                                        status.stop()
                                        spinner_active = False

                                    description = action_request.get("description", "tool action")
                                    console.print()
                                    console.print(f" [dim]⚡ {description}[/dim]")

                                    decisions.append({"type": "approve"})

                                hitl_response = {"decisions": decisions}
                                interrupt_occurred = True

                                # Restart spinner for continuation
                                if not spinner_active:
                                    status.start()
                                    spinner_active = True

                                break
                            # Normal HITL flow - stop spinner and prompt user
                            if spinner_active:
                                status.stop()
                                spinner_active = False

                            # Handle human-in-the-loop approval
                            decisions = []
                            for action_request in hitl_request.get("action_requests", []):
                                decision = prompt_for_tool_approval(action_request, assistant_id)
                                decisions.append(decision)

                            # Any rejection suppresses the resumed stream's output
                            # (handled after the for-loop exits).
                            suppress_resumed_output = any(
                                decision.get("type") == "reject" for decision in decisions
                            )
                            hitl_response = {"decisions": decisions}
                            interrupt_occurred = True
                            break

                    # Extract chunk_data from updates for todo checking
                    chunk_data = list(data.values())[0] if data else None
                    if chunk_data and isinstance(chunk_data, dict):
                        # Check for todo updates
                        if "todos" in chunk_data:
                            new_todos = chunk_data["todos"]
                            # Re-render only when the todo list actually changed.
                            if new_todos != current_todos:
                                current_todos = new_todos
                                # Stop spinner before rendering todos
                                if spinner_active:
                                    status.stop()
                                    spinner_active = False
                                console.print()
                                render_todo_list(new_todos)
                                console.print()

                # Handle MESSAGES stream - for content and tool calls
                elif current_stream_mode == "messages":
                    # Messages stream returns (message, metadata) tuples
                    if not isinstance(data, tuple) or len(data) != 2:
                        continue

                    message, metadata = data

                    if isinstance(message, ToolMessage):
                        # Tool results are sent to the agent, not displayed to users
                        # Exception: show shell command errors to help with debugging
                        tool_name = getattr(message, "name", "")
                        tool_status = getattr(message, "status", "success")
                        tool_content = format_tool_message_content(message.content)
                        record = file_op_tracker.complete_with_message(message)

                        if tool_name == "shell" and tool_status != "success":
                            flush_summary_buffer()
                            flush_text_buffer(final=True)
                            if tool_content:
                                if spinner_active:
                                    status.stop()
                                    spinner_active = False
                                console.print()
                                console.print(tool_content, style="red", markup=False)
                                console.print()
                        elif tool_content and isinstance(tool_content, str):
                            # Heuristic: surface any tool output that looks like
                            # an error message, regardless of reported status.
                            stripped = tool_content.lstrip()
                            if stripped.lower().startswith("error"):
                                flush_summary_buffer()
                                flush_text_buffer(final=True)
                                if spinner_active:
                                    status.stop()
                                    spinner_active = False
                                console.print()
                                console.print(tool_content, style="red", markup=False)
                                console.print()

                        if record:
                            # A completed file operation gets its own rendered block.
                            flush_summary_buffer()
                            flush_text_buffer(final=True)
                            if spinner_active:
                                status.stop()
                                spinner_active = False
                            console.print()
                            render_file_operation(record)
                            console.print()
                            if not spinner_active:
                                status.start()
                                spinner_active = True

                        # For all other tools (web_search, http_request, etc.),
                        # results are hidden from user - agent will process and respond
                        continue

                    # Check if this is an AIMessageChunk
                    if not hasattr(message, "content_blocks"):
                        # Fallback for messages without content_blocks
                        continue

                    # Extract token usage if available
                    if token_tracker and hasattr(message, "usage_metadata"):
                        usage = message.usage_metadata
                        if usage:
                            input_toks = usage.get("input_tokens", 0)
                            output_toks = usage.get("output_tokens", 0)
                            if input_toks or output_toks:
                                # Keep the per-run maximum; chunks may repeat
                                # cumulative totals. TODO confirm against provider.
                                captured_input_tokens = max(captured_input_tokens, input_toks)
                                captured_output_tokens = max(captured_output_tokens, output_toks)

                    # Process content blocks (this is the key fix!)
                    for block in message.content_blocks:
                        block_type = block.get("type")

                        # Handle text blocks
                        if block_type == "text":
                            text = block.get("text", "")
                            if text:
                                if summary_mode:
                                    summary_buffer += text
                                    continue

                                # Detect summarization output either from this
                                # chunk alone or from the accumulated text.
                                if is_summary_message(text) or is_summary_message(
                                    pending_text + text
                                ):
                                    if pending_text:
                                        summary_buffer += pending_text
                                        pending_text = ""
                                    summary_mode = True
                                    summary_buffer += text
                                    continue

                                pending_text += text

                        # Handle reasoning blocks
                        elif block_type == "reasoning":
                            flush_summary_buffer()
                            flush_text_buffer(final=True)
                            reasoning = block.get("reasoning", "")
                            if reasoning:
                                if spinner_active:
                                    status.stop()
                                    spinner_active = False
                                # Could display reasoning differently if desired
                                # For now, skip it or handle minimally

                        # Handle tool call chunks
                        elif block_type == "tool_call_chunk":
                            chunk_name = block.get("name")
                            chunk_args = block.get("args")
                            chunk_id = block.get("id")
                            chunk_index = block.get("index")

                            # Use index as stable buffer key; fall back to id if needed
                            buffer_key: str | int
                            if chunk_index is not None:
                                buffer_key = chunk_index
                            elif chunk_id is not None:
                                buffer_key = chunk_id
                            else:
                                buffer_key = f"unknown-{len(tool_call_buffers)}"

                            buffer = tool_call_buffers.setdefault(
                                buffer_key,
                                {"name": None, "id": None, "args": None, "args_parts": []},
                            )

                            if chunk_name:
                                buffer["name"] = chunk_name
                            if chunk_id:
                                buffer["id"] = chunk_id

                            if isinstance(chunk_args, dict):
                                # Fully-formed args; discard any partial strings.
                                buffer["args"] = chunk_args
                                buffer["args_parts"] = []
                            elif isinstance(chunk_args, str):
                                if chunk_args:
                                    # Accumulate JSON fragments, skipping an
                                    # immediately repeated identical fragment.
                                    parts: list[str] = buffer.setdefault("args_parts", [])
                                    if not parts or chunk_args != parts[-1]:
                                        parts.append(chunk_args)
                                    buffer["args"] = "".join(parts)
                            elif chunk_args is not None:
                                buffer["args"] = chunk_args

                            buffer_name = buffer.get("name")
                            buffer_id = buffer.get("id")
                            if buffer_name is None:
                                continue
                            if buffer_id is not None and buffer_id in displayed_tool_ids:
                                continue

                            parsed_args = buffer.get("args")
                            if isinstance(parsed_args, str):
                                if not parsed_args:
                                    continue
                                try:
                                    parsed_args = json.loads(parsed_args)
                                except json.JSONDecodeError:
                                    # Wait for more chunks to form valid JSON
                                    continue
                            elif parsed_args is None:
                                continue

                            # Ensure args are in dict form for formatter
                            if not isinstance(parsed_args, dict):
                                parsed_args = {"value": parsed_args}

                            flush_summary_buffer()
                            flush_text_buffer(final=True)
                            if buffer_id is not None:
                                displayed_tool_ids.add(buffer_id)
                            file_op_tracker.start_operation(buffer_name, parsed_args, buffer_id)
                            tool_call_buffers.pop(buffer_key, None)
                            icon = tool_icons.get(buffer_name, "🔧")

                            if spinner_active:
                                status.stop()

                            if has_responded:
                                console.print()

                            display_str = format_tool_display(buffer_name, parsed_args)
                            console.print(
                                f" {icon} {display_str}",
                                style=f"dim {COLORS['tool']}",
                                markup=False,
                            )

                            if not spinner_active:
                                status.start()
                                spinner_active = True

                    # End of a streamed message: flush whatever is buffered.
                    if getattr(message, "chunk_position", None) == "last":
                        flush_summary_buffer()
                        flush_text_buffer(final=True)

            # After streaming loop - handle interrupt if it occurred
            flush_summary_buffer()
            flush_text_buffer(final=True)
            if interrupt_occurred and hitl_response:
                if suppress_resumed_output:
                    if spinner_active:
                        status.stop()
                        spinner_active = False

                    console.print("\nCommand rejected. Returning to prompt.\n", style=COLORS["dim"])

                    # Resume agent in background thread to properly update graph state
                    # without blocking the user
                    def resume_after_rejection():
                        try:
                            agent.invoke(Command(resume=hitl_response), config=config)
                        except Exception:
                            pass  # Silently ignore errors

                    threading.Thread(target=resume_after_rejection, daemon=True).start()
                    return

                # Resume the agent with the human decision
                stream_input = Command(resume=hitl_response)
                # Continue the while loop to restream
            else:
                # No interrupt, break out of while loop
                break

    except KeyboardInterrupt:
        # User pressed Ctrl+C - clean up and exit gracefully
        if spinner_active:
            status.stop()
        console.print("\n[yellow]Interrupted by user[/yellow]\n")

        # Inform the agent in background thread (non-blocking)
        def notify_agent():
            try:
                agent.update_state(
                    config=config,
                    values={
                        "messages": [
                            HumanMessage(
                                content="[User interrupted the previous request with Ctrl+C]"
                            )
                        ]
                    },
                )
            except Exception:
                pass

        threading.Thread(target=notify_agent, daemon=True).start()
        return

    if spinner_active:
        status.stop()

    if has_responded:
        console.print()

    # Track token usage (display only via /tokens command)
    if token_tracker and (captured_input_tokens or captured_output_tokens):
        token_tracker.add(captured_input_tokens, captured_output_tokens)

    console.print()
|