agentcrew-ai 0.8.5__py3-none-any.whl → 0.8.7__py3-none-any.whl

This diff shows the content changes between publicly available package versions released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the differences between package versions as they appear in their respective public registries.
Files changed (32)
  1. AgentCrew/__init__.py +1 -1
  2. AgentCrew/app.py +1 -1
  3. AgentCrew/modules/a2a/registry.py +1 -1
  4. AgentCrew/modules/a2a/task_manager.py +159 -33
  5. AgentCrew/modules/agents/local_agent.py +9 -9
  6. AgentCrew/modules/code_analysis/service.py +1 -3
  7. AgentCrew/modules/command_execution/constants.py +2 -2
  8. AgentCrew/modules/command_execution/service.py +37 -83
  9. AgentCrew/modules/command_execution/tool.py +5 -7
  10. AgentCrew/modules/command_execution/types.py +3 -4
  11. AgentCrew/modules/console/diff_display.py +16 -12
  12. AgentCrew/modules/console/input_handler.py +2 -3
  13. AgentCrew/modules/custom_llm/service.py +0 -1
  14. AgentCrew/modules/gui/components/tool_handlers.py +167 -4
  15. AgentCrew/modules/gui/themes/atom_light.py +20 -0
  16. AgentCrew/modules/gui/themes/catppuccin.py +20 -0
  17. AgentCrew/modules/gui/themes/dracula.py +20 -0
  18. AgentCrew/modules/gui/themes/nord.py +20 -0
  19. AgentCrew/modules/gui/themes/saigontech.py +20 -0
  20. AgentCrew/modules/gui/themes/style_provider.py +25 -0
  21. AgentCrew/modules/gui/themes/unicorn.py +20 -0
  22. AgentCrew/modules/gui/widgets/__init__.py +3 -0
  23. AgentCrew/modules/gui/widgets/diff_widget.py +532 -0
  24. AgentCrew/modules/gui/widgets/tool_widget.py +42 -7
  25. AgentCrew/modules/llm/constants.py +23 -3
  26. {agentcrew_ai-0.8.5.dist-info → agentcrew_ai-0.8.7.dist-info}/METADATA +1 -1
  27. {agentcrew_ai-0.8.5.dist-info → agentcrew_ai-0.8.7.dist-info}/RECORD +31 -31
  28. AgentCrew/modules/command_execution/metric.py +0 -55
  29. {agentcrew_ai-0.8.5.dist-info → agentcrew_ai-0.8.7.dist-info}/WHEEL +0 -0
  30. {agentcrew_ai-0.8.5.dist-info → agentcrew_ai-0.8.7.dist-info}/entry_points.txt +0 -0
  31. {agentcrew_ai-0.8.5.dist-info → agentcrew_ai-0.8.7.dist-info}/licenses/LICENSE +0 -0
  32. {agentcrew_ai-0.8.5.dist-info → agentcrew_ai-0.8.7.dist-info}/top_level.txt +0 -0
AgentCrew/__init__.py CHANGED
@@ -1 +1 @@
1
- __version__ = "0.8.5"
1
+ __version__ = "0.8.7"
AgentCrew/app.py CHANGED
@@ -584,7 +584,7 @@ tools = ["memory", "browser", "web_search", "code_analysis"]
584
584
  "No LLM API key found. Please set either ANTHROPIC_API_KEY, GEMINI_API_KEY, OPENAI_API_KEY, GROQ_API_KEY, or DEEPINFRA_API_KEY"
585
585
  )
586
586
 
587
- services = self.setup_services(provider, memory_llm)
587
+ services = self.setup_services(provider, memory_llm, need_memory=False)
588
588
 
589
589
  if mcp_config:
590
590
  os.environ["MCP_CONFIG_PATH"] = mcp_config
@@ -4,10 +4,10 @@ from typing import TYPE_CHECKING
4
4
  from pydantic import BaseModel
5
5
  from .agent_cards import create_agent_card
6
6
  from AgentCrew.modules.agents import LocalAgent
7
+ from typing import Any, Dict, List, Optional
7
8
 
8
9
 
9
10
  if TYPE_CHECKING:
10
- from typing import Any, Dict, List, Optional
11
11
  from AgentCrew.modules.agents import AgentManager
12
12
  from a2a.types import AgentCard
13
13
 
@@ -27,6 +27,11 @@ from a2a.types import (
27
27
  TaskState,
28
28
  TaskStatusUpdateEvent,
29
29
  TaskArtifactUpdateEvent,
30
+ Part,
31
+ TextPart,
32
+ DataPart,
33
+ Role,
34
+ Message,
30
35
  )
31
36
 
32
37
  from AgentCrew.modules.agents import LocalAgent
@@ -58,6 +63,7 @@ class AgentTaskManager(TaskManager):
58
63
  """Manages tasks for a specific agent"""
59
64
 
60
65
  TERMINAL_STATES = {TaskState.completed, TaskState.canceled, TaskState.failed}
66
+ INPUT_REQUIRED_STATES = {TaskState.input_required}
61
67
 
62
68
  def __init__(self, agent_name: str, agent_manager: AgentManager):
63
69
  self.agent_name = agent_name
@@ -72,16 +78,32 @@ class AgentTaskManager(TaskManager):
72
78
  ] = defaultdict(list)
73
79
  self.streaming_enabled_tasks: set[str] = set()
74
80
 
81
+ self.pending_ask_responses: Dict[str, asyncio.Event] = {}
82
+ self.ask_responses: Dict[str, str] = {}
83
+
75
84
  self.agent = self.agent_manager.get_agent(self.agent_name)
76
85
  if self.agent is None or not isinstance(self.agent, LocalAgent):
77
86
  raise ValueError(f"Agent {agent_name} not found or is not a LocalAgent")
78
87
 
79
- self.memory_service = self.agent.services["memory"]
88
+ self.memory_service = self.agent.services.get("memory", None)
80
89
 
81
90
  def _is_terminal_state(self, state: TaskState) -> bool:
82
91
  """Check if a state is terminal."""
83
92
  return state in self.TERMINAL_STATES
84
93
 
94
+ def _extract_text_from_message(self, message: Dict[str, Any]) -> str:
95
+ """Extract text content from a message."""
96
+ content = message.get("content", [])
97
+ if isinstance(content, str):
98
+ return content
99
+ text_parts = []
100
+ for part in content:
101
+ if isinstance(part, str):
102
+ text_parts.append(part)
103
+ elif isinstance(part, dict) and part.get("type") == "text":
104
+ text_parts.append(part.get("text", ""))
105
+ return " ".join(text_parts)
106
+
85
107
  def _validate_task_not_terminal(
86
108
  self, task: Task, operation: str
87
109
  ) -> Optional[TaskNotCancelableError]:
@@ -121,7 +143,6 @@ class AgentTaskManager(TaskManager):
121
143
  )
122
144
  )
123
145
 
124
- # Generate task ID from message
125
146
  task_id = (
126
147
  request.params.message.task_id
127
148
  or f"task_{request.params.message.message_id}"
@@ -135,8 +156,19 @@ class AgentTaskManager(TaskManager):
135
156
  root=JSONRPCErrorResponse(id=request.id, error=error)
136
157
  )
137
158
 
159
+ if existing_task.status.state == TaskState.input_required:
160
+ message = convert_a2a_message_to_agent(request.params.message)
161
+ user_response = self._extract_text_from_message(message)
162
+
163
+ if task_id in self.pending_ask_responses:
164
+ self.ask_responses[task_id] = user_response
165
+ self.pending_ask_responses[task_id].set()
166
+
167
+ return SendMessageResponse(
168
+ root=SendMessageSuccessResponse(id=request.id, result=existing_task)
169
+ )
170
+
138
171
  if task_id not in self.tasks:
139
- # Create task with initial state
140
172
  task = Task(
141
173
  id=task_id,
142
174
  context_id=request.params.message.context_id or f"ctx_{task_id}",
@@ -147,8 +179,8 @@ class AgentTaskManager(TaskManager):
147
179
  self.tasks[task.id] = task
148
180
 
149
181
  task = self.tasks[task_id]
150
- if task_id not in self.task_history:
151
- self.task_history[task_id] = []
182
+ if task.context_id not in self.task_history:
183
+ self.task_history[task.context_id] = []
152
184
 
153
185
  # Convert A2A message to SwissKnife format
154
186
  message = convert_a2a_message_to_agent(request.params.message)
@@ -185,9 +217,8 @@ class AgentTaskManager(TaskManager):
185
217
 
186
218
  message["content"] = new_parts
187
219
 
188
- self.task_history[task_id].append(message)
220
+ self.task_history[task.context_id].append(message)
189
221
 
190
- # Process with agent (non-blocking)
191
222
  asyncio.create_task(self._process_agent_task(self.agent, task))
192
223
 
193
224
  # Return initial task state
@@ -241,8 +272,38 @@ class AgentTaskManager(TaskManager):
241
272
 
242
273
  finally:
243
274
  # Clean up
275
+ self.tasks.pop(task_id, None)
244
276
  self.streaming_tasks.pop(task_id, None)
245
277
 
278
+ def _create_ask_tool_message(
279
+ self, question: str, guided_answers: list[str]
280
+ ) -> Message:
281
+ """
282
+ Create an A2A message for the ask tool's input-required state.
283
+
284
+ Args:
285
+ question: The question to ask the user
286
+ guided_answers: List of suggested answers
287
+
288
+ Returns:
289
+ A2A Message with the question and guided answers
290
+ """
291
+ ask_data = {
292
+ "type": "ask",
293
+ "question": question,
294
+ "guided_answers": guided_answers,
295
+ "instruction": "Please respond with one of the guided answers or provide a custom response.",
296
+ }
297
+
298
+ return Message(
299
+ message_id=f"ask_{hash(question)}",
300
+ role=Role.agent,
301
+ parts=[
302
+ Part(root=TextPart(text=f"❓ {question}")),
303
+ Part(root=DataPart(data=ask_data)),
304
+ ],
305
+ )
306
+
246
307
  def _record_and_emit_event(
247
308
  self, task_id: str, event: Union[TaskStatusUpdateEvent, TaskArtifactUpdateEvent]
248
309
  ):
@@ -281,7 +342,7 @@ class AgentTaskManager(TaskManager):
281
342
 
282
343
  try:
283
344
  artifacts = []
284
- if task.id not in self.task_history:
345
+ if task.context_id not in self.task_history:
285
346
  raise ValueError("Task history is not existed")
286
347
 
287
348
  input_tokens = 0
@@ -308,7 +369,7 @@ class AgentTaskManager(TaskManager):
308
369
  chunk_text,
309
370
  thinking_chunk,
310
371
  ) in agent.process_messages(
311
- self.task_history[task.id], callback=process_result
372
+ self.task_history[task.context_id], callback=process_result
312
373
  ):
313
374
  # Update current response
314
375
  if response_message:
@@ -388,9 +449,8 @@ class AgentTaskManager(TaskManager):
388
449
  MessageType.Thinking, {"thinking": thinking_data}
389
450
  )
390
451
  if thinking_message:
391
- self.task_history[task.id].append(thinking_message)
452
+ self.task_history[task.context_id].append(thinking_message)
392
453
 
393
- # Format assistant message with the response and tool uses
394
454
  assistant_message = agent.format_message(
395
455
  MessageType.Assistant,
396
456
  {
@@ -401,34 +461,99 @@ class AgentTaskManager(TaskManager):
401
461
  },
402
462
  )
403
463
  if assistant_message:
404
- self.task_history[task.id].append(assistant_message)
464
+ self.task_history[task.context_id].append(assistant_message)
405
465
 
406
- # Process each tool use
407
466
  for tool_use in tool_uses:
408
- try:
409
- tool_result = await agent.execute_tool_call(
410
- tool_use["name"],
411
- tool_use["input"],
467
+ tool_name = tool_use["name"]
468
+
469
+ if tool_name == "ask":
470
+ question = tool_use["input"].get("question", "")
471
+ guided_answers = tool_use["input"].get("guided_answers", [])
472
+
473
+ task.status.state = TaskState.input_required
474
+ task.status.timestamp = datetime.now().isoformat()
475
+ task.status.message = self._create_ask_tool_message(
476
+ question, guided_answers
477
+ )
478
+
479
+ self._record_and_emit_event(
480
+ task.id,
481
+ TaskStatusUpdateEvent(
482
+ task_id=task.id,
483
+ context_id=task.context_id,
484
+ status=task.status,
485
+ final=False,
486
+ ),
412
487
  )
413
488
 
489
+ wait_event = asyncio.Event()
490
+ self.pending_ask_responses[task.id] = wait_event
491
+
492
+ try:
493
+ await asyncio.wait_for(wait_event.wait(), timeout=300)
494
+ user_answer = self.ask_responses.get(
495
+ task.id, "No response received"
496
+ )
497
+ except asyncio.TimeoutError:
498
+ user_answer = "User did not respond in time."
499
+ finally:
500
+ self.pending_ask_responses.pop(task.id, None)
501
+ self.ask_responses.pop(task.id, None)
502
+
503
+ tool_result = f"User's answer: {user_answer}"
504
+
505
+ task.status.state = TaskState.working
506
+ task.status.timestamp = datetime.now().isoformat()
507
+ task.status.message = None
508
+
414
509
  tool_result_message = agent.format_message(
415
510
  MessageType.ToolResult,
416
511
  {"tool_use": tool_use, "tool_result": tool_result},
417
512
  )
418
513
  if tool_result_message:
419
- self.task_history[task.id].append(tool_result_message)
514
+ self.task_history[task.context_id].append(
515
+ tool_result_message
516
+ )
420
517
 
421
- except Exception as e:
422
- error_message = agent.format_message(
423
- MessageType.ToolResult,
424
- {
425
- "tool_use": tool_use,
426
- "tool_result": str(e),
427
- "is_error": True,
428
- },
518
+ self._record_and_emit_event(
519
+ task.id,
520
+ TaskStatusUpdateEvent(
521
+ task_id=task.id,
522
+ context_id=task.context_id,
523
+ status=task.status,
524
+ final=False,
525
+ ),
429
526
  )
430
- if error_message:
431
- self.task_history[task.id].append(error_message)
527
+
528
+ else:
529
+ try:
530
+ tool_result = await agent.execute_tool_call(
531
+ tool_name,
532
+ tool_use["input"],
533
+ )
534
+
535
+ tool_result_message = agent.format_message(
536
+ MessageType.ToolResult,
537
+ {"tool_use": tool_use, "tool_result": tool_result},
538
+ )
539
+ if tool_result_message:
540
+ self.task_history[task.context_id].append(
541
+ tool_result_message
542
+ )
543
+
544
+ except Exception as e:
545
+ error_message = agent.format_message(
546
+ MessageType.ToolResult,
547
+ {
548
+ "tool_use": tool_use,
549
+ "tool_result": str(e),
550
+ "is_error": True,
551
+ },
552
+ )
553
+ if error_message:
554
+ self.task_history[task.context_id].append(
555
+ error_message
556
+ )
432
557
 
433
558
  return await _process_task()
434
559
  return current_response
@@ -442,15 +567,16 @@ class AgentTaskManager(TaskManager):
442
567
  },
443
568
  )
444
569
  if assistant_message:
445
- self.task_history[task.id].append(assistant_message)
570
+ self.task_history[task.context_id].append(assistant_message)
446
571
  user_message = (
447
- self.task_history[task.id][0]
572
+ self.task_history[task.context_id][0]
448
573
  .get("content", [{}])[0]
449
574
  .get("text", "")
450
575
  )
451
- self.memory_service.store_conversation(
452
- user_message, current_response, self.agent_name
453
- )
576
+ if self.memory_service:
577
+ self.memory_service.store_conversation(
578
+ user_message, current_response, self.agent_name
579
+ )
454
580
 
455
581
  # Create artifact from final response
456
582
  artifact = convert_agent_response_to_a2a_artifact(
@@ -111,25 +111,25 @@ class LocalAgent(BaseAgent):
111
111
  # self.tool_prompts.append(
112
112
  # delegate_tool_prompt(self.services["agent_manager"])
113
113
  # )
114
+ from AgentCrew.modules.agents.tools.ask import (
115
+ register as register_ask,
116
+ ask_tool_prompt,
117
+ )
118
+
119
+ register_ask(self)
120
+ self.tool_prompts.append(ask_tool_prompt())
121
+
114
122
  if not self.is_remoting_mode:
115
123
  from AgentCrew.modules.agents.tools.transfer import (
116
124
  register as register_transfer,
117
125
  transfer_tool_prompt,
118
126
  )
119
- from AgentCrew.modules.agents.tools.ask import (
120
- register as register_ask,
121
- ask_tool_prompt,
122
- )
123
127
 
124
128
  register_transfer(self.services["agent_manager"], self)
125
129
  self.tool_prompts.append(
126
130
  transfer_tool_prompt(self.services["agent_manager"])
127
131
  )
128
132
 
129
- # Register the ask tool (always available)
130
- register_ask(self)
131
- self.tool_prompts.append(ask_tool_prompt())
132
-
133
133
  for tool_name in self.tools:
134
134
  if self.services and tool_name in self.services:
135
135
  service = self.services[tool_name]
@@ -634,7 +634,7 @@ Check if `when` condition in <Global_Behavior> or <Project_Behavior> matches, up
634
634
  - Skip agent evaluation if user request is when...,[action]... related to adaptive behaviors call `adapt` tool instead.""",
635
635
  },
636
636
  )
637
- if self.services.get("memory"):
637
+ if not self.is_remoting_mode and self.services.get("memory"):
638
638
  memory_headers = self.services["memory"].list_memory_headers(
639
639
  agent_name=self.name
640
640
  )
@@ -1290,9 +1290,7 @@ class CodeAnalysisService:
1290
1290
  f"start_line {start_line} exceeds file length ({total_lines} lines)"
1291
1291
  )
1292
1292
  if end_line > total_lines:
1293
- raise ValueError(
1294
- f"end_line {end_line} exceeds file length ({total_lines} lines)"
1295
- )
1293
+ end_line = total_lines
1296
1294
 
1297
1295
  # Extract the line range (convert to 0-indexed)
1298
1296
  selected_lines = lines[start_line - 1 : end_line]
@@ -15,8 +15,8 @@ MAX_CONCURRENT_COMMANDS = 3
15
15
  # Maximum lifetime for a single command execution (seconds)
16
16
  MAX_COMMAND_LIFETIME = 600
17
17
 
18
- # Maximum output size per command (bytes)
19
- MAX_OUTPUT_SIZE = 1 * 1024 * 1024 # 1MB
18
+ # Maximum output lines to keep in rolling buffer per stream (stdout/stderr)
19
+ MAX_OUTPUT_LINES = 300
20
20
 
21
21
  # Maximum number of commands allowed per minute (application-wide rate limit)
22
22
  MAX_COMMANDS_PER_MINUTE = 10
@@ -2,7 +2,6 @@ import os
2
2
  import sys
3
3
  import time
4
4
  import uuid
5
- import queue
6
5
  import threading
7
6
  import subprocess
8
7
  import re
@@ -10,12 +9,11 @@ import atexit
10
9
  import hashlib
11
10
  from typing import Dict, Any, Optional, Tuple, List
12
11
  from datetime import datetime
13
- from .metric import CommandMetrics
14
12
  from .types import CommandState, CommandProcess
15
13
  from .constants import (
16
14
  MAX_CONCURRENT_COMMANDS,
17
15
  MAX_COMMAND_LIFETIME,
18
- MAX_OUTPUT_SIZE,
16
+ MAX_OUTPUT_LINES,
19
17
  MAX_COMMANDS_PER_MINUTE,
20
18
  MAX_INPUT_SIZE,
21
19
  BLOCKED_PATTERNS,
@@ -68,9 +66,6 @@ class CommandExecutionService:
68
66
  # Rate limiting (application-wide)
69
67
  self._rate_limiter: List[float] = []
70
68
 
71
- # Metrics
72
- self.metrics = CommandMetrics()
73
-
74
69
  # Register cleanup on shutdown
75
70
  atexit.register(self.shutdown)
76
71
 
@@ -242,40 +237,40 @@ class CommandExecutionService:
242
237
  def _reader_thread(
243
238
  self,
244
239
  stream,
245
- output_queue: queue.Queue,
240
+ output_list: list,
241
+ output_lock: threading.Lock,
246
242
  stop_event: threading.Event,
247
- max_size: int,
243
+ max_lines: int,
248
244
  ):
249
245
  """
250
- Read stream line by line into queue with size enforcement.
246
+ Read stream line by line into persistent list with rolling buffer.
251
247
 
252
- Uses sentinel values:
253
- - ('data', line): Normal output line
254
- - ('eof', None): End of stream
255
- - ('error', msg): Error occurred
256
- - ('size_limit', None): Output size limit reached
257
- """
258
- total_bytes = 0
248
+ When output exceeds max_lines, old lines are removed to keep recent output.
259
249
 
250
+ Args:
251
+ stream: Process stdout or stderr stream
252
+ output_list: Persistent list to append output lines
253
+ output_lock: Threading lock for thread-safe list access
254
+ stop_event: Event to signal thread stop
255
+ max_lines: Maximum number of lines to keep (rolling buffer)
256
+ """
260
257
  try:
261
258
  for line in iter(stream.readline, b""):
262
259
  if stop_event.is_set():
263
260
  break
264
261
 
265
- total_bytes += len(line)
266
- if total_bytes > max_size:
267
- output_queue.put(("size_limit", None))
268
- logger.warning(f"Output size limit ({max_size} bytes) exceeded")
269
- break
270
-
271
262
  decoded = line.decode("utf-8", errors="replace")
272
- output_queue.put(("data", decoded))
263
+
264
+ with output_lock:
265
+ output_list.append(decoded)
266
+
267
+ # Keep only recent lines using slice
268
+ if len(output_list) > max_lines:
269
+ output_list[:] = output_list[-max_lines:]
273
270
 
274
271
  except Exception as e:
275
272
  logger.error(f"Reader thread error: {e}")
276
- output_queue.put(("error", str(e)))
277
273
  finally:
278
- output_queue.put(("eof", None))
279
274
  stream.close()
280
275
 
281
276
  def execute_command(
@@ -371,9 +366,10 @@ class CommandExecutionService:
371
366
  target=self._reader_thread,
372
367
  args=(
373
368
  process.stdout,
374
- cmd_process.output_queue,
369
+ cmd_process.stdout_lines,
370
+ cmd_process.output_lock,
375
371
  cmd_process.stop_event,
376
- MAX_OUTPUT_SIZE,
372
+ MAX_OUTPUT_LINES,
377
373
  ),
378
374
  daemon=True,
379
375
  name=f"stdout-reader-{command_id}",
@@ -383,9 +379,10 @@ class CommandExecutionService:
383
379
  target=self._reader_thread,
384
380
  args=(
385
381
  process.stderr,
386
- cmd_process.error_queue,
382
+ cmd_process.stderr_lines,
383
+ cmd_process.output_lock,
387
384
  cmd_process.stop_event,
388
- MAX_OUTPUT_SIZE,
385
+ MAX_OUTPUT_LINES,
389
386
  ),
390
387
  daemon=True,
391
388
  name=f"stderr-reader-{command_id}",
@@ -405,21 +402,10 @@ class CommandExecutionService:
405
402
  cmd_process.exit_code = process.returncode
406
403
  cmd_process.transition_to(CommandState.COMPLETING)
407
404
 
408
- output_lines = []
409
- error_lines = []
410
-
411
- while not cmd_process.output_queue.empty():
412
- msg_type, data = cmd_process.output_queue.get()
413
- if msg_type == "data":
414
- output_lines.append(data)
415
-
416
- while not cmd_process.error_queue.empty():
417
- msg_type, data = cmd_process.error_queue.get()
418
- if msg_type == "data":
419
- error_lines.append(data)
420
-
421
- output = "".join(output_lines)
422
- error_output = "".join(error_lines)
405
+ # Get output from persistent storage (thread-safe)
406
+ with cmd_process.output_lock:
407
+ output = "".join(cmd_process.stdout_lines)
408
+ error_output = "".join(cmd_process.stderr_lines)
423
409
 
424
410
  duration = time.time() - start_time
425
411
 
@@ -433,7 +419,6 @@ class CommandExecutionService:
433
419
  len(output) + len(error_output),
434
420
  )
435
421
 
436
- self.metrics.record_execution(command, duration, "completed")
437
422
  self._cleanup_command_internal(command_id)
438
423
 
439
424
  result = {
@@ -466,22 +451,20 @@ class CommandExecutionService:
466
451
  logger.error(f"Command execution error: {e}")
467
452
 
468
453
  self._audit_log(command, "error", command_id)
469
- self.metrics.record_execution(command, time.time() - start_time, "error")
470
454
 
471
455
  if command_id in self._instances:
472
456
  self._cleanup_command_internal(command_id)
473
457
 
474
458
  return {"status": "error", "error": f"Execution failed: {str(e)}"}
475
459
 
476
- def get_command_status(
477
- self, command_id: str, consume_output: bool = True
478
- ) -> Dict[str, Any]:
460
+ def get_command_status(self, command_id: str) -> Dict[str, Any]:
479
461
  """
480
462
  Check status of running command.
481
463
 
464
+ Output is persistent and will be returned in full on every call.
465
+
482
466
  Args:
483
467
  command_id: Command identifier
484
- consume_output: If True, drain and return queued output
485
468
 
486
469
  Returns:
487
470
  Dict with status, output, exit_code, elapsed_time
@@ -494,30 +477,11 @@ class CommandExecutionService:
494
477
 
495
478
  exit_code = cmd_process.process.poll()
496
479
 
497
- output_lines = []
498
- error_lines = []
499
-
500
- if consume_output:
501
- while not cmd_process.output_queue.empty():
502
- try:
503
- msg_type, data = cmd_process.output_queue.get_nowait()
504
- if msg_type == "data":
505
- output_lines.append(data)
506
- elif msg_type == "size_limit":
507
- output_lines.append("\n[OUTPUT SIZE LIMIT REACHED]\n")
508
- except queue.Empty:
509
- break
510
-
511
- while not cmd_process.error_queue.empty():
512
- try:
513
- msg_type, data = cmd_process.error_queue.get_nowait()
514
- if msg_type == "data":
515
- error_lines.append(data)
516
- except queue.Empty:
517
- break
480
+ # Get output from persistent storage (thread-safe)
481
+ with cmd_process.output_lock:
482
+ output = "".join(cmd_process.stdout_lines)
483
+ error_output = "".join(cmd_process.stderr_lines)
518
484
 
519
- output = "".join(output_lines)
520
- error_output = "".join(error_lines)
521
485
  elapsed = time.time() - cmd_process.start_time
522
486
 
523
487
  if elapsed > MAX_COMMAND_LIFETIME:
@@ -546,7 +510,6 @@ class CommandExecutionService:
546
510
  duration,
547
511
  len(output) + len(error_output),
548
512
  )
549
- self.metrics.record_execution(cmd_process.command, duration, "completed")
550
513
  self._cleanup_command_internal(command_id)
551
514
 
552
515
  return {
@@ -690,11 +653,6 @@ class CommandExecutionService:
690
653
  pass
691
654
 
692
655
  cmd_process.transition_to(CommandState.KILLED)
693
- self.metrics.record_execution(
694
- cmd_process.command,
695
- time.time() - cmd_process.start_time,
696
- "killed",
697
- )
698
656
 
699
657
  except Exception as e:
700
658
  logger.error(f"Process termination error: {e}")
@@ -767,10 +725,6 @@ class CommandExecutionService:
767
725
  """
768
726
  return self.cleanup_command(command_id)
769
727
 
770
- def get_metrics(self) -> Dict[str, Any]:
771
- """Get command execution metrics"""
772
- return self.metrics.get_report()
773
-
774
728
  def shutdown(self):
775
729
  """Shutdown service and cleanup all running commands"""
776
730
  logger.info("Shutting down CommandExecutionService")