agentcrew-ai: agentcrew_ai-0.8.4-py3-none-any.whl → agentcrew_ai-0.8.6-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- AgentCrew/__init__.py +1 -1
- AgentCrew/modules/a2a/task_manager.py +154 -30
- AgentCrew/modules/agents/local_agent.py +9 -9
- AgentCrew/modules/browser_automation/element_extractor.py +1 -1
- AgentCrew/modules/browser_automation/service.py +17 -7
- AgentCrew/modules/chat/message/command_processor.py +4 -2
- AgentCrew/modules/chat/message/conversation.py +1 -0
- AgentCrew/modules/chat/message/handler.py +3 -6
- AgentCrew/modules/command_execution/constants.py +2 -2
- AgentCrew/modules/command_execution/service.py +37 -83
- AgentCrew/modules/command_execution/tool.py +5 -7
- AgentCrew/modules/command_execution/types.py +3 -4
- AgentCrew/modules/console/command_handlers.py +2 -2
- AgentCrew/modules/console/confirmation_handler.py +83 -38
- AgentCrew/modules/console/console_ui.py +27 -23
- AgentCrew/modules/console/diff_display.py +203 -0
- AgentCrew/modules/console/display_handlers.py +3 -0
- AgentCrew/modules/console/input_handler.py +3 -4
- AgentCrew/modules/console/tool_display.py +35 -4
- AgentCrew/modules/console/ui_effects.py +30 -14
- AgentCrew/modules/custom_llm/deepinfra_service.py +20 -19
- AgentCrew/modules/custom_llm/github_copilot_service.py +157 -2
- AgentCrew/modules/custom_llm/service.py +1 -9
- AgentCrew/modules/llm/constants.py +24 -3
- {agentcrew_ai-0.8.4.dist-info → agentcrew_ai-0.8.6.dist-info}/METADATA +2 -2
- {agentcrew_ai-0.8.4.dist-info → agentcrew_ai-0.8.6.dist-info}/RECORD +30 -30
- AgentCrew/modules/command_execution/metric.py +0 -55
- {agentcrew_ai-0.8.4.dist-info → agentcrew_ai-0.8.6.dist-info}/WHEEL +0 -0
- {agentcrew_ai-0.8.4.dist-info → agentcrew_ai-0.8.6.dist-info}/entry_points.txt +0 -0
- {agentcrew_ai-0.8.4.dist-info → agentcrew_ai-0.8.6.dist-info}/licenses/LICENSE +0 -0
- {agentcrew_ai-0.8.4.dist-info → agentcrew_ai-0.8.6.dist-info}/top_level.txt +0 -0

--- a/AgentCrew/modules/command_execution/service.py
+++ b/AgentCrew/modules/command_execution/service.py
@@ -2,7 +2,6 @@ import os
 import sys
 import time
 import uuid
-import queue
 import threading
 import subprocess
 import re
@@ -10,12 +9,11 @@ import atexit
 import hashlib
 from typing import Dict, Any, Optional, Tuple, List
 from datetime import datetime
-from .metric import CommandMetrics
 from .types import CommandState, CommandProcess
 from .constants import (
     MAX_CONCURRENT_COMMANDS,
     MAX_COMMAND_LIFETIME,
-
+    MAX_OUTPUT_LINES,
     MAX_COMMANDS_PER_MINUTE,
     MAX_INPUT_SIZE,
     BLOCKED_PATTERNS,
@@ -68,9 +66,6 @@ class CommandExecutionService:
         # Rate limiting (application-wide)
         self._rate_limiter: List[float] = []
 
-        # Metrics
-        self.metrics = CommandMetrics()
-
         # Register cleanup on shutdown
         atexit.register(self.shutdown)
 
@@ -242,40 +237,40 @@ class CommandExecutionService:
     def _reader_thread(
         self,
         stream,
-
+        output_list: list,
+        output_lock: threading.Lock,
         stop_event: threading.Event,
-
+        max_lines: int,
     ):
         """
-        Read stream line by line into
+        Read stream line by line into persistent list with rolling buffer.
 
-
-        - ('data', line): Normal output line
-        - ('eof', None): End of stream
-        - ('error', msg): Error occurred
-        - ('size_limit', None): Output size limit reached
-        """
-        total_bytes = 0
+        When output exceeds max_lines, old lines are removed to keep recent output.
 
+        Args:
+            stream: Process stdout or stderr stream
+            output_list: Persistent list to append output lines
+            output_lock: Threading lock for thread-safe list access
+            stop_event: Event to signal thread stop
+            max_lines: Maximum number of lines to keep (rolling buffer)
+        """
         try:
             for line in iter(stream.readline, b""):
                 if stop_event.is_set():
                     break
 
-                total_bytes += len(line)
-                if total_bytes > max_size:
-                    output_queue.put(("size_limit", None))
-                    logger.warning(f"Output size limit ({max_size} bytes) exceeded")
-                    break
-
                 decoded = line.decode("utf-8", errors="replace")
-
+
+                with output_lock:
+                    output_list.append(decoded)
+
+                    # Keep only recent lines using slice
+                    if len(output_list) > max_lines:
+                        output_list[:] = output_list[-max_lines:]
 
         except Exception as e:
             logger.error(f"Reader thread error: {e}")
-            output_queue.put(("error", str(e)))
         finally:
-            output_queue.put(("eof", None))
             stream.close()
 
     def execute_command(
@@ -371,9 +366,10 @@ class CommandExecutionService:
             target=self._reader_thread,
             args=(
                 process.stdout,
-                cmd_process.
+                cmd_process.stdout_lines,
+                cmd_process.output_lock,
                 cmd_process.stop_event,
-
+                MAX_OUTPUT_LINES,
             ),
             daemon=True,
             name=f"stdout-reader-{command_id}",
@@ -383,9 +379,10 @@ class CommandExecutionService:
             target=self._reader_thread,
             args=(
                 process.stderr,
-                cmd_process.
+                cmd_process.stderr_lines,
+                cmd_process.output_lock,
                 cmd_process.stop_event,
-
+                MAX_OUTPUT_LINES,
            ),
            daemon=True,
            name=f"stderr-reader-{command_id}",
@@ -405,21 +402,10 @@ class CommandExecutionService:
             cmd_process.exit_code = process.returncode
             cmd_process.transition_to(CommandState.COMPLETING)
 
-
-
-
-
-                msg_type, data = cmd_process.output_queue.get()
-                if msg_type == "data":
-                    output_lines.append(data)
-
-            while not cmd_process.error_queue.empty():
-                msg_type, data = cmd_process.error_queue.get()
-                if msg_type == "data":
-                    error_lines.append(data)
-
-            output = "".join(output_lines)
-            error_output = "".join(error_lines)
+            # Get output from persistent storage (thread-safe)
+            with cmd_process.output_lock:
+                output = "".join(cmd_process.stdout_lines)
+                error_output = "".join(cmd_process.stderr_lines)
 
             duration = time.time() - start_time
 
@@ -433,7 +419,6 @@ class CommandExecutionService:
                 len(output) + len(error_output),
             )
 
-            self.metrics.record_execution(command, duration, "completed")
             self._cleanup_command_internal(command_id)
 
             result = {
@@ -466,22 +451,20 @@ class CommandExecutionService:
             logger.error(f"Command execution error: {e}")
 
             self._audit_log(command, "error", command_id)
-            self.metrics.record_execution(command, time.time() - start_time, "error")
 
             if command_id in self._instances:
                 self._cleanup_command_internal(command_id)
 
             return {"status": "error", "error": f"Execution failed: {str(e)}"}
 
-    def get_command_status(
-        self, command_id: str, consume_output: bool = True
-    ) -> Dict[str, Any]:
+    def get_command_status(self, command_id: str) -> Dict[str, Any]:
         """
         Check status of running command.
 
+        Output is persistent and will be returned in full on every call.
+
         Args:
             command_id: Command identifier
-            consume_output: If True, drain and return queued output
 
        Returns:
            Dict with status, output, exit_code, elapsed_time
@@ -494,30 +477,11 @@ class CommandExecutionService:
 
         exit_code = cmd_process.process.poll()
 
-
-
-
-
-        while not cmd_process.output_queue.empty():
-            try:
-                msg_type, data = cmd_process.output_queue.get_nowait()
-                if msg_type == "data":
-                    output_lines.append(data)
-                elif msg_type == "size_limit":
-                    output_lines.append("\n[OUTPUT SIZE LIMIT REACHED]\n")
-            except queue.Empty:
-                break
-
-        while not cmd_process.error_queue.empty():
-            try:
-                msg_type, data = cmd_process.error_queue.get_nowait()
-                if msg_type == "data":
-                    error_lines.append(data)
-            except queue.Empty:
-                break
+        # Get output from persistent storage (thread-safe)
+        with cmd_process.output_lock:
+            output = "".join(cmd_process.stdout_lines)
+            error_output = "".join(cmd_process.stderr_lines)
 
-        output = "".join(output_lines)
-        error_output = "".join(error_lines)
         elapsed = time.time() - cmd_process.start_time
 
         if elapsed > MAX_COMMAND_LIFETIME:
@@ -546,7 +510,6 @@ class CommandExecutionService:
                 duration,
                 len(output) + len(error_output),
             )
-            self.metrics.record_execution(cmd_process.command, duration, "completed")
             self._cleanup_command_internal(command_id)
 
             return {
@@ -690,11 +653,6 @@ class CommandExecutionService:
                 pass
 
             cmd_process.transition_to(CommandState.KILLED)
-            self.metrics.record_execution(
-                cmd_process.command,
-                time.time() - cmd_process.start_time,
-                "killed",
-            )
 
         except Exception as e:
             logger.error(f"Process termination error: {e}")
@@ -767,10 +725,6 @@ class CommandExecutionService:
         """
         return self.cleanup_command(command_id)
 
-    def get_metrics(self) -> Dict[str, Any]:
-        """Get command execution metrics"""
-        return self.metrics.get_report()
-
     def shutdown(self):
         """Shutdown service and cleanup all running commands"""
         logger.info("Shutting down CommandExecutionService")
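
The central change in service.py is the move from queue draining (the removed output_queue / error_queue plumbing) to persistent, lock-protected line buffers: each reader thread appends decoded lines to a list stored on the CommandProcess and trims it to the newest MAX_OUTPUT_LINES, so execute_command and get_command_status can join the same buffers repeatedly. Below is a minimal, self-contained sketch of that rolling-buffer pattern, with illustrative names rather than the actual AgentCrew wiring.

import subprocess
import sys
import threading

MAX_OUTPUT_LINES = 1000  # stand-in for the cap imported from constants.py


def reader_thread(stream, output_list, output_lock, stop_event, max_lines):
    """Append decoded lines to a shared list, keeping only the newest max_lines."""
    try:
        for line in iter(stream.readline, b""):
            if stop_event.is_set():
                break
            with output_lock:
                output_list.append(line.decode("utf-8", errors="replace"))
                if len(output_list) > max_lines:
                    output_list[:] = output_list[-max_lines:]  # rolling buffer
    finally:
        stream.close()


# Output can be joined repeatedly and is never consumed, unlike the old queue draining.
proc = subprocess.Popen([sys.executable, "-c", "print('hello')"], stdout=subprocess.PIPE)
lines: list = []
lock, stop = threading.Lock(), threading.Event()
t = threading.Thread(target=reader_thread,
                     args=(proc.stdout, lines, lock, stop, MAX_OUTPUT_LINES),
                     daemon=True)
t.start()
proc.wait()
t.join()
with lock:
    print("".join(lines))  # -> "hello\n"

Because nothing is consumed, repeated status checks return the same (capped) output, which is what the new get_command_status docstring promises.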

--- a/AgentCrew/modules/command_execution/tool.py
+++ b/AgentCrew/modules/command_execution/tool.py
@@ -6,6 +6,7 @@ Tool definitions and handlers for secure shell command execution.
 
 from typing import Dict, Any, Callable
 from .service import CommandExecutionService
+import os
 
 
 def get_run_command_tool_definition(provider="claude") -> Dict[str, Any]:
@@ -34,8 +35,7 @@ def get_run_command_tool_definition(provider="claude") -> Dict[str, Any]:
         },
         "working_dir": {
             "type": "string",
-            "
-            "description": "Working directory.",
+            "description": f"Working directory. Current working directory is {os.getcwd()}. Use ./ for current dir.",
         },
         "env_vars": {
             "type": "object",
@@ -50,7 +50,7 @@ def get_run_command_tool_definition(provider="claude") -> Dict[str, Any]:
             "input_schema": {
                 "type": "object",
                 "properties": args,
-                "required": ["command"],
+                "required": ["command", "working_dir"],
             },
         }
     else:
@@ -62,7 +62,7 @@ def get_run_command_tool_definition(provider="claude") -> Dict[str, Any]:
             "parameters": {
                 "type": "object",
                 "properties": args,
-                "required": ["command"],
+                "required": ["command", "working_dir"],
             },
         },
     }
@@ -258,9 +258,7 @@ def get_check_command_status_tool_handler(
         if not command_id:
             raise ValueError("Missing required parameter: command_id")
 
-        result = command_service.get_command_status(
-            command_id=command_id, consume_output=True
-        )
+        result = command_service.get_command_status(command_id=command_id)
 
         if result["status"] == "completed":
             response = f"Command completed.\nExit Code: {result['exit_code']}\nDuration: {result['duration_seconds']}s\n\n"
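
With the tool.py changes, working_dir becomes a required parameter in both provider schema shapes, and its description now embeds the server's current working directory. The sketch below shows the resulting definition shape; the tool name, the command property, and the key layout outside the changed lines are assumptions for illustration only.

import os


def run_command_tool_definition_sketch(provider: str = "claude") -> dict:
    """Approximate shape of the run_command tool definition after this change (abridged)."""
    args = {
        # "command" and its description are placeholders; only working_dir is taken from the diff.
        "command": {"type": "string", "description": "Shell command to run."},
        "working_dir": {
            "type": "string",
            "description": f"Working directory. Current working directory is {os.getcwd()}. Use ./ for current dir.",
        },
    }
    if provider == "claude":
        return {
            "name": "run_command",  # placeholder name
            "input_schema": {
                "type": "object",
                "properties": args,
                "required": ["command", "working_dir"],  # working_dir is now required
            },
        }
    return {
        "type": "function",
        "function": {
            "name": "run_command",  # placeholder name
            "parameters": {
                "type": "object",
                "properties": args,
                "required": ["command", "working_dir"],  # working_dir is now required
            },
        },
    }


print(run_command_tool_definition_sketch("openai")["function"]["parameters"]["required"])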

--- a/AgentCrew/modules/command_execution/types.py
+++ b/AgentCrew/modules/command_execution/types.py
@@ -1,4 +1,3 @@
-import queue
 import threading
 import subprocess
 from enum import Enum
@@ -31,13 +30,13 @@ class CommandProcess:
     process: subprocess.Popen
     platform: str
     start_time: float
-
-
+    stdout_lines: List[str] = field(default_factory=list)
+    stderr_lines: List[str] = field(default_factory=list)
+    output_lock: threading.Lock = field(default_factory=threading.Lock)
     state: CommandState = CommandState.QUEUED
     exit_code: Optional[int] = None
     reader_threads: List[threading.Thread] = field(default_factory=list)
     stop_event: threading.Event = field(default_factory=threading.Event)
-    total_output_size: int = 0
     working_dir: Optional[str] = None
 
     def transition_to(self, new_state: CommandState):
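
The CommandProcess dataclass now carries the output buffers and their lock directly. A condensed sketch of the fields visible in this hunk follows; other fields of the real dataclass (such as the command string and its id) are omitted, and the CommandState values shown are illustrative.

import subprocess
import threading
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional


class CommandState(Enum):
    # Members referenced in the diff; the string values here are illustrative.
    QUEUED = "queued"
    COMPLETING = "completing"
    KILLED = "killed"


@dataclass
class CommandProcess:
    # Fields outside this hunk are omitted.
    process: subprocess.Popen
    platform: str
    start_time: float
    stdout_lines: List[str] = field(default_factory=list)    # replaces the old output_queue
    stderr_lines: List[str] = field(default_factory=list)    # replaces the old error_queue
    output_lock: threading.Lock = field(default_factory=threading.Lock)
    state: CommandState = CommandState.QUEUED
    exit_code: Optional[int] = None
    reader_threads: List[threading.Thread] = field(default_factory=list)
    stop_event: threading.Event = field(default_factory=threading.Event)
    working_dir: Optional[str] = None    # total_output_size was dropped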

--- a/AgentCrew/modules/console/command_handlers.py
+++ b/AgentCrew/modules/console/command_handlers.py
@@ -346,8 +346,8 @@ class CommandHandlers:
             for behavior_id, behavior_text in project_behaviors.items():
                 project_table.add_row(behavior_id, behavior_text)
 
-
-
+            self.console.print(project_table)
+            self.console.print()
 
     def handle_update_behavior_command(
         self, behavior_id: str, behavior_text: str, scope: str = "global"

--- a/AgentCrew/modules/console/confirmation_handler.py
+++ b/AgentCrew/modules/console/confirmation_handler.py
@@ -11,6 +11,8 @@ from rich.box import HORIZONTALS
 from rich.console import Group
 import time
 
+from .diff_display import DiffDisplay
+
 from .constants import (
     RICH_STYLE_BLUE,
     RICH_STYLE_BLUE_BOLD,
@@ -45,6 +47,20 @@ class ConfirmationHandler:
             self._handle_ask_tool(tool_use, confirmation_id, message_handler)
             return
 
+        # Special handling for 'write_or_edit_file' tool with search/replace blocks
+        if tool_use["name"] == "write_or_edit_file":
+            file_path = tool_use["input"].get("file_path", "")
+            text_or_blocks = tool_use["input"].get("text_or_search_replace_blocks", "")
+
+            if DiffDisplay.has_search_replace_blocks(text_or_blocks):
+                self._display_write_or_edit_file_diff(
+                    tool_use, file_path, text_or_blocks
+                )
+            self._get_and_handle_tool_response(
+                tool_use, confirmation_id, message_handler
+            )
+            return
+
         tool_texts_group = []
         header = Text("🔧 Tool ", style=RICH_STYLE_YELLOW)
         header.append(tool_use["name"], style=RICH_STYLE_BLUE_BOLD)
@@ -72,8 +88,71 @@ class ConfirmationHandler:
             )
         )
 
-
+        self._get_and_handle_tool_response(tool_use, confirmation_id, message_handler)
+
+    def _handle_ask_tool(self, tool_use, confirmation_id, message_handler):
+        """Handle the ask tool - display question and guided answers."""
+        question = tool_use["input"].get("question", "")
+        guided_answers = tool_use["input"].get("guided_answers", [])
+        if isinstance(guided_answers, str):
+            guided_answers = guided_answers.strip("\n ").splitlines()
+
+        guided_answers.append("Custom your answer")
+
+        self.input_handler._stop_input_thread()
+        # Display the question
+        self.console.print(
+            Text("\n❓ Agent is asking for clarification:", style=RICH_STYLE_BLUE_BOLD)
+        )
+        response = self.input_handler.get_choice_input(f"{question}", guided_answers)
+
+        if response == "Custom your answer":
+            custom_answer = self.input_handler.get_prompt_input("Input your answer:")
+            message_handler.resolve_tool_confirmation(
+                confirmation_id, {"action": "answer", "answer": custom_answer}
+            )
+        elif response:
+            message_handler.resolve_tool_confirmation(
+                confirmation_id, {"action": "answer", "answer": response}
+            )
+
+        else:
+            message_handler.resolve_tool_confirmation(
+                confirmation_id, {"action": "answer", "answer": "Cancelled by user"}
+            )
+
+        self._ui.start_loading_animation()
+
+        self.input_handler._start_input_thread()
+
+    def _display_write_or_edit_file_diff(self, tool_use, file_path, blocks_text):
+        """Display split diff view for write_or_edit_file tool."""
+        header = Text("📝 File Edit ", style=RICH_STYLE_YELLOW)
+        header.append(file_path, style=RICH_STYLE_BLUE_BOLD)
+        header.append(" - Search/Replace Blocks", style=RICH_STYLE_YELLOW)
+
+        self.console.print(
+            Panel(header, box=HORIZONTALS, border_style=RICH_STYLE_YELLOW)
+        )
+
+        blocks = DiffDisplay.parse_search_replace_blocks(blocks_text)
+
+        if not blocks:
+            self.console.print(
+                Text("No valid search/replace blocks found", style=RICH_STYLE_RED)
+            )
+            return
+
+        for block in blocks:
+            diff_table = DiffDisplay.create_split_diff_table(
+                block["search"], block["replace"], max_width=self.console.width - 4
+            )
+            self.console.print(diff_table)
+
+    def _get_and_handle_tool_response(self, tool_use, confirmation_id, message_handler):
+        """Get user response for tool confirmation and handle it."""
         self.input_handler._stop_input_thread()
+
         choices = [
             "yes",
             "no",
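
The new diff_display.py module (added in this release, +203 lines) is not itself shown in this diff, so the exact block syntax DiffDisplay accepts is not visible here. The sketch below is a hypothetical parser assuming the common <<<<<<< SEARCH / ======= / >>>>>>> REPLACE marker convention; it only illustrates the kind of output parse_search_replace_blocks must produce for the loop above, which reads block["search"] and block["replace"].

import re
from typing import Dict, List

# Hypothetical marker format; the real DiffDisplay may recognise something different.
_BLOCK_RE = re.compile(
    r"<<<<<<< SEARCH\n(?P<search>.*?)\n=======\n(?P<replace>.*?)\n>>>>>>> REPLACE",
    re.DOTALL,
)


def has_search_replace_blocks(text: str) -> bool:
    return bool(_BLOCK_RE.search(text))


def parse_search_replace_blocks(text: str) -> List[Dict[str, str]]:
    """Return [{"search": ..., "replace": ...}, ...] for each marker block found."""
    return [m.groupdict() for m in _BLOCK_RE.finditer(text)]


sample = "<<<<<<< SEARCH\nold line\n=======\nnew line\n>>>>>>> REPLACE"
print(parse_search_replace_blocks(sample))  # [{'search': 'old line', 'replace': 'new line'}]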
@@ -91,11 +170,11 @@ class ConfirmationHandler:
                 confirmation_id, {"action": "approve"}
             )
         elif response == choices[1]:
-
+            deny_reason = self.input_handler.get_prompt_input(
                 "Please tell me why you are denying this tool: "
             )
             message_handler.resolve_tool_confirmation(
-                confirmation_id, {"action": "deny", "reason":
+                confirmation_id, {"action": "deny", "reason": deny_reason}
             )
         elif response == choices[2]:
             message_handler.resolve_tool_confirmation(
@@ -120,44 +199,10 @@ class ConfirmationHandler:
                 style=RICH_STYLE_YELLOW,
             )
             self.console.print(saved_text)
-            self._ui.start_loading_animation()
-            self.input_handler._start_input_thread()
-            time.sleep(0.2)  # Small delay to between tool calls
-
-    def _handle_ask_tool(self, tool_use, confirmation_id, message_handler):
-        """Handle the ask tool - display question and guided answers."""
-        question = tool_use["input"].get("question", "")
-        guided_answers = tool_use["input"].get("guided_answers", [])
-        if isinstance(guided_answers, str):
-            guided_answers = guided_answers.strip("\n ").splitlines()
-
-        guided_answers.append("Custom your answer")
-
-        self.input_handler._stop_input_thread()
-        # Display the question
-        self.console.print(
-            Text("\n❓ Agent is asking for clarification:", style=RICH_STYLE_BLUE_BOLD)
-        )
-        response = self.input_handler.get_choice_input(f"{question}", guided_answers)
-
-        if response == "Custom your answer":
-            custom_answer = self.input_handler.get_prompt_input("Input your answer:")
-            message_handler.resolve_tool_confirmation(
-                confirmation_id, {"action": "answer", "answer": custom_answer}
-            )
-        elif response:
-            message_handler.resolve_tool_confirmation(
-                confirmation_id, {"action": "answer", "answer": response}
-            )
-
-        else:
-            message_handler.resolve_tool_confirmation(
-                confirmation_id, {"action": "answer", "answer": "Cancelled by user"}
-            )
 
         self._ui.start_loading_animation()
-
         self.input_handler._start_input_thread()
+        time.sleep(0.2)
 
     def display_mcp_prompt_confirmation(self, prompt_data, input_queue):
         """Display MCP prompt confirmation request and get user response."""
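
Across these hunks the confirmation flow resolves with a small payload dict: {"action": "approve"}, {"action": "deny", "reason": ...} (now carrying the captured deny_reason), or {"action": "answer", "answer": ...} from the ask tool. A tiny illustrative consumer of that contract follows; it is not the actual MessageHandler logic.

from typing import Any, Dict


def apply_confirmation(payload: Dict[str, Any]) -> str:
    """Illustrative consumer of the payloads passed to resolve_tool_confirmation."""
    action = payload.get("action")
    if action == "approve":
        return "run the tool"
    if action == "deny":
        return f"skip the tool, reason: {payload.get('reason', '')}"
    if action == "answer":  # produced by the ask-tool path
        return f"feed the user's answer back: {payload.get('answer', '')}"
    return "unknown action"


print(apply_confirmation({"action": "deny", "reason": "wrong file"}))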

--- a/AgentCrew/modules/console/console_ui.py
+++ b/AgentCrew/modules/console/console_ui.py
@@ -20,7 +20,6 @@ from .constants import (
     RICH_STYLE_GREEN,
     RICH_STYLE_BLUE,
     RICH_STYLE_YELLOW,
-    RICH_STYLE_GREEN_BOLD,
     RICH_STYLE_YELLOW_BOLD,
     PROMPT_CHAR,
 )
@@ -83,11 +82,17 @@ class ConsoleUI(Observer):
 
         if event == "thinking_started":
             self.ui_effects.stop_loading_animation()  # Stop loading on first chunk
-            self.display_handlers.display_thinking_started(data)  # data is agent_name
+            # self.display_handlers.display_thinking_started(data)  # data is agent_name
         elif event == "thinking_chunk":
-            self.
-
-
+            self.ui_effects.update_live_display(data, is_thinking=True)
+            # self.display_handlers.display_thinking_chunk(
+            #     data
+            # )  # data is the thinking chunk
+            #
+        elif event == "thinking_completed":
+            self.ui_effects.finish_response(
+                self.ui_effects.updated_text, is_thinking=True
+            )
         elif event == "user_message_created":
             pass
         elif event == "response_chunk":
@@ -178,10 +183,6 @@ class ConsoleUI(Observer):
                 f"{data['agent_name'] if 'agent_name' in data else 'other'} agent"
             )
             self.display_handlers.display_message(transfer_text)
-        elif event == "agent_continue":
-            self.display_handlers.display_message(
-                Text(f"\n🤖 {data.upper()}:", style=RICH_STYLE_GREEN_BOLD)
-            )
         elif event == "jump_performed":
             jump_text = Text(
                 f"🕰️ Jumping to turn {data['turn_number']}...\n",
@@ -189,12 +190,11 @@ class ConsoleUI(Observer):
             )
             preview_text = Text("Conversation rewound to: ", style=RICH_STYLE_YELLOW)
             preview_text.append(data["preview"])
+            self._clean_and_reprint_chat()
 
             self.display_handlers.display_message(jump_text)
             self.display_handlers.display_message(preview_text)
             self.input_handler.set_current_buffer(data["message"])
-        elif event == "thinking_completed":
-            self.display_handlers.display_divider()
         elif event == "file_processing":
             self.ui_effects.stop_loading_animation()  # Stop loading on first chunk
             self.display_handlers.add_file(data["file_path"])
@@ -286,25 +286,14 @@ class ConsoleUI(Observer):
         Signal handler for SIGWINCH.
         This function is called when the terminal window is resized.
         """
-        import os
         import time
 
         if self.input_handler.is_message_processing or self._is_resizing:
             return  # Ignore resize during message processing
         self._is_resizing = True
         time.sleep(0.5)  # brief pause to allow resize to complete
-
-        self.display_handlers.display_loaded_conversation(
-            self.message_handler.streamline_messages, self.message_handler.agent.name
-        )
-        self.display_handlers.print_prompt_prefix(
-            self.message_handler.agent.name,
-            self.message_handler.agent.get_model(),
-            self.message_handler.tool_manager.get_effective_yolo_mode(),
-        )
-
+        self._clean_and_reprint_chat()
         self.display_handlers.print_divider("👤 YOU: ", with_time=True)
-
         prompt = Text(
             PROMPT_CHAR,
             style=RICH_STYLE_BLUE,
@@ -318,6 +307,21 @@ class ConsoleUI(Observer):
         self.console.print(prompt, end="")
         self._is_resizing = False
 
+    def _clean_and_reprint_chat(self):
+        """Clear and reprint the chat display."""
+
+        import os
+
+        os.system("cls" if os.name == "nt" else "printf '\033c'")
+        self.display_handlers.display_loaded_conversation(
+            self.message_handler.streamline_messages, self.message_handler.agent.name
+        )
+        self.display_handlers.print_prompt_prefix(
+            self.message_handler.agent.name,
+            self.message_handler.agent.get_model(),
+            self.message_handler.tool_manager.get_effective_yolo_mode(),
+        )
+
     def start_streaming_response(self, agent_name: str):
         """Start streaming the assistant's response."""
         self.ui_effects.start_streaming_response(agent_name)