autobyteus-1.1.7-py3-none-any.whl → autobyteus-1.1.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. autobyteus/agent/bootstrap_steps/system_prompt_processing_step.py +6 -2
  2. autobyteus/agent/handlers/inter_agent_message_event_handler.py +17 -19
  3. autobyteus/agent/handlers/llm_complete_response_received_event_handler.py +6 -3
  4. autobyteus/agent/handlers/tool_result_event_handler.py +86 -23
  5. autobyteus/agent/handlers/user_input_message_event_handler.py +19 -10
  6. autobyteus/agent/hooks/base_phase_hook.py +17 -0
  7. autobyteus/agent/hooks/hook_registry.py +15 -27
  8. autobyteus/agent/input_processor/base_user_input_processor.py +17 -1
  9. autobyteus/agent/input_processor/processor_registry.py +15 -27
  10. autobyteus/agent/llm_response_processor/base_processor.py +17 -1
  11. autobyteus/agent/llm_response_processor/processor_registry.py +15 -24
  12. autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py +14 -0
  13. autobyteus/agent/message/agent_input_user_message.py +15 -2
  14. autobyteus/agent/message/send_message_to.py +1 -1
  15. autobyteus/agent/processor_option.py +17 -0
  16. autobyteus/agent/sender_type.py +1 -0
  17. autobyteus/agent/system_prompt_processor/base_processor.py +17 -1
  18. autobyteus/agent/system_prompt_processor/processor_registry.py +15 -27
  19. autobyteus/agent/system_prompt_processor/tool_manifest_injector_processor.py +10 -0
  20. autobyteus/agent/tool_execution_result_processor/base_processor.py +17 -1
  21. autobyteus/agent/tool_execution_result_processor/processor_registry.py +15 -1
  22. autobyteus/agent/workspace/base_workspace.py +1 -1
  23. autobyteus/agent/workspace/workspace_definition.py +1 -1
  24. autobyteus/agent_team/bootstrap_steps/team_context_initialization_step.py +1 -1
  25. autobyteus/agent_team/streaming/agent_team_stream_event_payloads.py +2 -2
  26. autobyteus/agent_team/task_notification/__init__.py +4 -0
  27. autobyteus/agent_team/task_notification/activation_policy.py +70 -0
  28. autobyteus/agent_team/task_notification/system_event_driven_agent_task_notifier.py +56 -122
  29. autobyteus/agent_team/task_notification/task_activator.py +66 -0
  30. autobyteus/cli/agent_team_tui/state.py +17 -20
  31. autobyteus/cli/agent_team_tui/widgets/focus_pane.py +1 -1
  32. autobyteus/cli/agent_team_tui/widgets/task_board_panel.py +1 -1
  33. autobyteus/events/event_types.py +2 -2
  34. autobyteus/llm/api/gemini_llm.py +45 -54
  35. autobyteus/llm/api/qwen_llm.py +25 -0
  36. autobyteus/llm/autobyteus_provider.py +8 -2
  37. autobyteus/llm/llm_factory.py +16 -0
  38. autobyteus/multimedia/audio/api/autobyteus_audio_client.py +4 -1
  39. autobyteus/multimedia/audio/api/gemini_audio_client.py +84 -153
  40. autobyteus/multimedia/audio/audio_client_factory.py +47 -22
  41. autobyteus/multimedia/audio/audio_model.py +13 -6
  42. autobyteus/multimedia/audio/autobyteus_audio_provider.py +8 -2
  43. autobyteus/multimedia/audio/base_audio_client.py +3 -1
  44. autobyteus/multimedia/image/api/autobyteus_image_client.py +12 -5
  45. autobyteus/multimedia/image/api/gemini_image_client.py +72 -130
  46. autobyteus/multimedia/image/api/openai_image_client.py +4 -2
  47. autobyteus/multimedia/image/autobyteus_image_provider.py +8 -2
  48. autobyteus/multimedia/image/base_image_client.py +6 -2
  49. autobyteus/multimedia/image/image_client_factory.py +20 -19
  50. autobyteus/multimedia/image/image_model.py +13 -6
  51. autobyteus/multimedia/providers.py +1 -0
  52. autobyteus/task_management/__init__.py +9 -10
  53. autobyteus/task_management/base_task_board.py +14 -6
  54. autobyteus/task_management/converters/__init__.py +0 -2
  55. autobyteus/task_management/converters/task_board_converter.py +7 -16
  56. autobyteus/task_management/events.py +6 -6
  57. autobyteus/task_management/in_memory_task_board.py +48 -38
  58. autobyteus/task_management/schemas/__init__.py +2 -2
  59. autobyteus/task_management/schemas/{plan_definition.py → task_definition.py} +5 -6
  60. autobyteus/task_management/schemas/task_status_report.py +0 -1
  61. autobyteus/task_management/task.py +60 -0
  62. autobyteus/task_management/tools/__init__.py +4 -2
  63. autobyteus/task_management/tools/get_my_tasks.py +80 -0
  64. autobyteus/task_management/tools/get_task_board_status.py +3 -3
  65. autobyteus/task_management/tools/publish_task.py +77 -0
  66. autobyteus/task_management/tools/publish_tasks.py +74 -0
  67. autobyteus/task_management/tools/update_task_status.py +5 -5
  68. autobyteus/tools/__init__.py +3 -1
  69. autobyteus/tools/base_tool.py +4 -4
  70. autobyteus/tools/browser/session_aware/browser_session_aware_navigate_to.py +1 -1
  71. autobyteus/tools/browser/session_aware/browser_session_aware_web_element_trigger.py +1 -1
  72. autobyteus/tools/browser/session_aware/browser_session_aware_webpage_reader.py +1 -1
  73. autobyteus/tools/browser/session_aware/browser_session_aware_webpage_screenshot_taker.py +1 -1
  74. autobyteus/tools/browser/standalone/navigate_to.py +1 -1
  75. autobyteus/tools/browser/standalone/web_page_pdf_generator.py +1 -1
  76. autobyteus/tools/browser/standalone/webpage_image_downloader.py +1 -1
  77. autobyteus/tools/browser/standalone/webpage_reader.py +1 -1
  78. autobyteus/tools/browser/standalone/webpage_screenshot_taker.py +1 -1
  79. autobyteus/tools/functional_tool.py +1 -1
  80. autobyteus/tools/google_search.py +1 -1
  81. autobyteus/tools/image_downloader.py +1 -1
  82. autobyteus/tools/mcp/factory.py +1 -1
  83. autobyteus/tools/mcp/schema_mapper.py +1 -1
  84. autobyteus/tools/mcp/tool.py +1 -1
  85. autobyteus/tools/multimedia/__init__.py +2 -0
  86. autobyteus/tools/multimedia/audio_tools.py +10 -20
  87. autobyteus/tools/multimedia/image_tools.py +21 -22
  88. autobyteus/tools/multimedia/media_reader_tool.py +117 -0
  89. autobyteus/tools/pydantic_schema_converter.py +1 -1
  90. autobyteus/tools/registry/tool_definition.py +1 -1
  91. autobyteus/tools/timer.py +1 -1
  92. autobyteus/tools/tool_meta.py +1 -1
  93. autobyteus/tools/usage/formatters/default_json_example_formatter.py +1 -1
  94. autobyteus/tools/usage/formatters/default_xml_example_formatter.py +1 -1
  95. autobyteus/tools/usage/formatters/default_xml_schema_formatter.py +59 -3
  96. autobyteus/tools/usage/formatters/gemini_json_example_formatter.py +1 -1
  97. autobyteus/tools/usage/formatters/google_json_example_formatter.py +1 -1
  98. autobyteus/tools/usage/formatters/openai_json_example_formatter.py +1 -1
  99. autobyteus/{tools → utils}/parameter_schema.py +1 -1
  100. {autobyteus-1.1.7.dist-info → autobyteus-1.1.9.dist-info}/METADATA +2 -2
  101. {autobyteus-1.1.7.dist-info → autobyteus-1.1.9.dist-info}/RECORD +105 -99
  102. examples/run_poem_writer.py +1 -1
  103. autobyteus/task_management/converters/task_plan_converter.py +0 -48
  104. autobyteus/task_management/task_plan.py +0 -110
  105. autobyteus/task_management/tools/publish_task_plan.py +0 -101
  106. {autobyteus-1.1.7.dist-info → autobyteus-1.1.9.dist-info}/WHEEL +0 -0
  107. {autobyteus-1.1.7.dist-info → autobyteus-1.1.9.dist-info}/licenses/LICENSE +0 -0
  108. {autobyteus-1.1.7.dist-info → autobyteus-1.1.9.dist-info}/top_level.txt +0 -0

autobyteus/agent/bootstrap_steps/system_prompt_processing_step.py
@@ -45,8 +45,12 @@ class SystemPromptProcessingStep(BaseBootstrapStep):
         if not processor_instances:
             logger.debug(f"Agent '{agent_id}': No system prompt processors configured. Using system prompt as is.")
         else:
-            logger.debug(f"Agent '{agent_id}': Found {len(processor_instances)} configured system prompt processors. Applying sequentially.")
-            for processor_instance in processor_instances:
+            # Sort processors by their order attribute
+            sorted_processors = sorted(processor_instances, key=lambda p: p.get_order())
+            processor_names = [p.get_name() for p in sorted_processors]
+            logger.debug(f"Agent '{agent_id}': Found {len(sorted_processors)} configured system prompt processors. Applying sequentially in order: {processor_names}")
+
+            for processor_instance in sorted_processors:
                 if not isinstance(processor_instance, BaseSystemPromptProcessor):
                     error_message = f"Agent '{agent_id}': Invalid system prompt processor configuration type: {type(processor_instance)}. Expected BaseSystemPromptProcessor."
                     logger.error(error_message)
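
The same pattern recurs in several handlers in this release: each processor class exposes a class-level get_order() (defaulting to 500 in the base classes shown later in this diff), and the handler sorts the configured instances before applying them. A minimal, self-contained sketch of that ordering behavior, using throwaway stand-in classes rather than the real autobyteus processors:

    # Stand-in processors illustrating the get_order() sorting used above.
    class EarlyProcessor:
        @classmethod
        def get_order(cls) -> int:
            return 100  # lower numbers run earlier

        @classmethod
        def get_name(cls) -> str:
            return cls.__name__

    class DefaultProcessor:
        @classmethod
        def get_order(cls) -> int:
            return 500  # the default priority used by the base classes

        @classmethod
        def get_name(cls) -> str:
            return cls.__name__

    configured = [DefaultProcessor(), EarlyProcessor()]
    ordered = sorted(configured, key=lambda p: p.get_order())
    print([p.get_name() for p in ordered])  # ['EarlyProcessor', 'DefaultProcessor']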

autobyteus/agent/handlers/inter_agent_message_event_handler.py
@@ -3,10 +3,10 @@ import logging
 from typing import TYPE_CHECKING

 from autobyteus.agent.handlers.base_event_handler import AgentEventHandler
-from autobyteus.agent.events import InterAgentMessageReceivedEvent, LLMUserMessageReadyEvent
+from autobyteus.agent.events import InterAgentMessageReceivedEvent, UserMessageReceivedEvent
 from autobyteus.agent.message.inter_agent_message import InterAgentMessage
-from autobyteus.llm.user_message import LLMUserMessage
-from autobyteus.agent.sender_type import TASK_NOTIFIER_SENDER_ID # New import
+from autobyteus.agent.message.agent_input_user_message import AgentInputUserMessage
+from autobyteus.agent.sender_type import SenderType

 if TYPE_CHECKING:
     from autobyteus.agent.context import AgentContext
@@ -17,7 +17,8 @@ logger = logging.getLogger(__name__)
 class InterAgentMessageReceivedEventHandler(AgentEventHandler):
     """
     Handles InterAgentMessageReceivedEvents by formatting the InterAgentMessage
-    into an LLMUserMessage and enqueuing an LLMUserMessageReadyEvent for LLM processing.
+    into an AgentInputUserMessage and enqueuing a UserMessageReceivedEvent to route
+    it through the main input processing pipeline.
     """

     def __init__(self):
@@ -47,10 +48,6 @@ class InterAgentMessageReceivedEventHandler(AgentEventHandler):
             f"'{inter_agent_msg.sender_agent_id}', type '{inter_agent_msg.message_type.value}'. "
             f"Content: '{inter_agent_msg.content}'"
         )
-
-        # This handler now only deals with messages from other agents, not the system notifier.
-        # The logic for system task notifications has been moved to UserInputMessageEventHandler
-        # by checking the message metadata.

         content_for_llm = (
             f"You have received a message from another agent.\n"
@@ -63,19 +60,20 @@
             f"Please process this information and act accordingly."
         )

-        context.state.add_message_to_history({
-            "role": "user",
-            "content": content_for_llm,
-            "sender_agent_id": inter_agent_msg.sender_agent_id,
-            "original_message_type": inter_agent_msg.message_type.value
-        })
-
-        llm_user_message = LLMUserMessage(content=content_for_llm)
+        # --- REFACTORED: Route through the main input pipeline ---
+        agent_input_user_message = AgentInputUserMessage(
+            content=content_for_llm,
+            sender_type=SenderType.AGENT,
+            metadata={
+                "sender_agent_id": inter_agent_msg.sender_agent_id,
+                "original_message_type": inter_agent_msg.message_type.value
+            }
+        )

-        llm_user_message_ready_event = LLMUserMessageReadyEvent(llm_user_message=llm_user_message)
-        await context.input_event_queues.enqueue_internal_system_event(llm_user_message_ready_event)
+        user_message_received_event = UserMessageReceivedEvent(agent_input_user_message=agent_input_user_message)
+        await context.input_event_queues.enqueue_user_message(user_message_received_event)

         logger.info(
             f"Agent '{context.agent_id}' processed InterAgentMessage from sender '{inter_agent_msg.sender_agent_id}' "
-            f"and enqueued LLMUserMessageReadyEvent."
+            f"and enqueued UserMessageReceivedEvent to route through input pipeline."
         )
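
For readers following the data flow, the message the handler now builds can be mocked up with stand-in types. Only the fields visible in this hunk (content, sender_type, metadata) are modeled; the enum members, their string values, and the example IDs below are assumptions for illustration, not the real autobyteus definitions:

    from dataclasses import dataclass, field
    from enum import Enum
    from typing import Any, Dict

    class SenderType(Enum):  # stand-in; the real enum lives in autobyteus.agent.sender_type
        USER = "user"        # assumed member and value
        AGENT = "agent"      # assumed value; AGENT itself is referenced in the hunk
        SYSTEM = "system"    # assumed value; SYSTEM is referenced later in this diff

    @dataclass
    class AgentInputUserMessage:  # stand-in mirroring only the fields used in the hunk
        content: str
        sender_type: SenderType = SenderType.USER
        metadata: Dict[str, Any] = field(default_factory=dict)

    # An inter-agent message is wrapped the way the handler does above:
    msg = AgentInputUserMessage(
        content="You have received a message from another agent. ...",
        sender_type=SenderType.AGENT,
        metadata={"sender_agent_id": "coordinator_1",           # hypothetical ID
                  "original_message_type": "TASK_ASSIGNMENT"},  # hypothetical type
    )
    print(msg.sender_type.value, msg.metadata["sender_agent_id"])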

autobyteus/agent/handlers/llm_complete_response_received_event_handler.py
@@ -63,9 +63,12 @@ class LLMCompleteResponseReceivedEventHandler(AgentEventHandler):
                 f"Proceeding to treat LLM response as output for this leg."
             )
         else:
-            processor_names = [p.get_name() for p in processor_instances_to_try]
-            logger.debug(f"Agent '{agent_id}': Attempting LLM response processing with: {processor_names}")
-            for processor_instance in processor_instances_to_try:
+            # Sort processors by their order attribute
+            sorted_processors = sorted(processor_instances_to_try, key=lambda p: p.get_order())
+            processor_names = [p.get_name() for p in sorted_processors]
+            logger.debug(f"Agent '{agent_id}': Attempting LLM response processing in order: {processor_names}")
+
+            for processor_instance in sorted_processors:
                 processor_name_for_log: str = "unknown"
                 try:
                     if not isinstance(processor_instance, BaseLLMResponseProcessor):

autobyteus/agent/handlers/tool_result_event_handler.py
@@ -4,9 +4,11 @@ import json
 from typing import TYPE_CHECKING, Optional, List

 from autobyteus.agent.handlers.base_event_handler import AgentEventHandler
-from autobyteus.agent.events import ToolResultEvent, LLMUserMessageReadyEvent
-from autobyteus.llm.user_message import LLMUserMessage
+from autobyteus.agent.events import ToolResultEvent, UserMessageReceivedEvent
 from autobyteus.agent.tool_execution_result_processor import BaseToolExecutionResultProcessor
+from autobyteus.agent.message.context_file import ContextFile
+from autobyteus.agent.message import AgentInputUserMessage
+from autobyteus.agent.sender_type import SenderType

 if TYPE_CHECKING:
     from autobyteus.agent.context import AgentContext
@@ -16,34 +18,68 @@ logger = logging.getLogger(__name__)

 class ToolResultEventHandler(AgentEventHandler):
     """
-    Handles ToolResultEvents. It immediately processes and notifies for each
-    individual tool result. If a multi-tool call turn is active, it accumulates
-    these processed results until the turn is complete, then sends a single
-    aggregated message to the LLM.
+    Handles ToolResultEvents. It processes and notifies for each individual tool
+    result as it arrives. If a multi-tool call turn is active, it accumulates
+    these results until the turn is complete, re-orders them to match the original
+    invocation sequence, and then sends a single aggregated message to the LLM
+    by enqueuing a UserMessageReceivedEvent.
+
+    This handler is now "media-aware": if a tool's result is a `ContextFile`
+    object, it will be added as multimodal context to the next LLM call rather
+    than as plain text.
     """
     def __init__(self):
         logger.info("ToolResultEventHandler initialized.")

-    async def _dispatch_aggregated_results_to_llm(self,
+    async def _dispatch_results_to_input_pipeline(self,
                                                   processed_events: List[ToolResultEvent],
                                                   context: 'AgentContext'):
         """
-        Aggregates a list of PRE-PROCESSED tool results into a single message and
-        dispatches it to the LLM.
+        Aggregates a list of PRE-PROCESSED and ORDERED tool results into a single
+        AgentInputUserMessage and dispatches it into the main input processing pipeline
+        by enqueuing a UserMessageReceivedEvent.
         """
         agent_id = context.agent_id

-        # --- Aggregate results into a single message ---
-        aggregated_content_parts = []
+        # --- NEW: Separate text results from media context results ---
+        aggregated_content_parts: List[str] = []
+        media_context_files: List[ContextFile] = []
+
         for p_event in processed_events:
             tool_invocation_id = p_event.tool_invocation_id if p_event.tool_invocation_id else 'N/A'
-            content_part: str
+
+            # Check if the result is a ContextFile or a list of them
+            result_is_media = False
+            if isinstance(p_event.result, ContextFile):
+                media_context_files.append(p_event.result)
+                aggregated_content_parts.append(
+                    f"Tool: {p_event.tool_name} (ID: {tool_invocation_id})\n"
+                    f"Status: Success\n"
+                    f"Result: The file '{p_event.result.file_name}' has been loaded into the context for you to view."
+                )
+                result_is_media = True
+            elif isinstance(p_event.result, list) and all(isinstance(item, ContextFile) for item in p_event.result):
+                media_context_files.extend(p_event.result)
+                file_names = [cf.file_name for cf in p_event.result if cf.file_name]
+                aggregated_content_parts.append(
+                    f"Tool: {p_event.tool_name} (ID: {tool_invocation_id})\n"
+                    f"Status: Success\n"
+                    f"Result: The following files have been loaded into the context for you to view: {file_names}"
+                )
+                result_is_media = True
+
+            if result_is_media:
+                continue
+
+            # Handle errors
             if p_event.error:
                 content_part = (
                     f"Tool: {p_event.tool_name} (ID: {tool_invocation_id})\n"
                     f"Status: Error\n"
                     f"Details: {p_event.error}"
                 )
+                aggregated_content_parts.append(content_part)
+            # Handle standard text/JSON results
             else:
                 try:
                     result_str = json.dumps(p_event.result, indent=2) if not isinstance(p_event.result, str) else p_event.result
@@ -54,20 +90,26 @@ class ToolResultEventHandler(AgentEventHandler):
                     f"Status: Success\n"
                     f"Result:\n{result_str}"
                 )
-            aggregated_content_parts.append(content_part)
+                aggregated_content_parts.append(content_part)

         final_content_for_llm = (
             "The following tool executions have completed. Please analyze their results and decide the next course of action.\n\n"
             + "\n\n---\n\n".join(aggregated_content_parts)
         )

-        logger.debug(f"Agent '{agent_id}' preparing aggregated message for LLM:\n---\n{final_content_for_llm}\n---")
-        llm_user_message = LLMUserMessage(content=final_content_for_llm)
+        logger.debug(f"Agent '{agent_id}' preparing aggregated message from tool results for input pipeline:\n---\n{final_content_for_llm}\n---")
+
+        # --- REFACTORED: Create an AgentInputUserMessage and route it through the standard input pipeline ---
+        agent_input_user_message = AgentInputUserMessage(
+            content=final_content_for_llm,
+            sender_type=SenderType.TOOL,
+            context_files=media_context_files
+        )

-        next_event = LLMUserMessageReadyEvent(llm_user_message=llm_user_message)
-        await context.input_event_queues.enqueue_internal_system_event(next_event)
+        next_event = UserMessageReceivedEvent(agent_input_user_message=agent_input_user_message)
+        await context.input_event_queues.enqueue_user_message(next_event)

-        logger.info(f"Agent '{agent_id}' enqueued LLMUserMessageReadyEvent with aggregated results from {len(processed_events)} tool(s).")
+        logger.info(f"Agent '{agent_id}' enqueued UserMessageReceivedEvent with aggregated results from {len(processed_events)} tool(s) and {len(media_context_files)} media file(s).")


     async def handle(self,
@@ -84,7 +126,9 @@ class ToolResultEventHandler(AgentEventHandler):
         processed_event = event
         processor_instances = context.config.tool_execution_result_processors
         if processor_instances:
-            for processor_instance in processor_instances:
+            # Sort processors by their order attribute
+            sorted_processors = sorted(processor_instances, key=lambda p: p.get_order())
+            for processor_instance in sorted_processors:
                 if not isinstance(processor_instance, BaseToolExecutionResultProcessor):
                     logger.error(f"Agent '{agent_id}': Invalid tool result processor type: {type(processor_instance)}. Skipping.")
                     continue
@@ -119,7 +163,7 @@ class ToolResultEventHandler(AgentEventHandler):
         # Case 1: Not a multi-tool call turn, dispatch to LLM immediately.
         if not active_turn:
             logger.info(f"Agent '{agent_id}' handling single ToolResultEvent from tool: '{processed_event.tool_name}'.")
-            await self._dispatch_aggregated_results_to_llm([processed_event], context)
+            await self._dispatch_results_to_input_pipeline([processed_event], context)
             return

         # Case 2: Multi-tool call turn is active, accumulate results.
@@ -133,9 +177,28 @@ class ToolResultEventHandler(AgentEventHandler):
         if not active_turn.is_complete():
             return

-        # If all results are in, dispatch them to the LLM and clean up the turn state.
-        logger.info(f"Agent '{agent_id}': All tool results for the turn collected. Aggregating for LLM.")
-        await self._dispatch_aggregated_results_to_llm(active_turn.results, context)
+        # If all results are in, re-order them and then dispatch to the LLM.
+        logger.info(f"Agent '{agent_id}': All tool results for the turn collected. Re-ordering to match invocation sequence.")
+
+        # --- NEW RE-ORDERING LOGIC ---
+        results_by_id = {res.tool_invocation_id: res for res in active_turn.results}
+        sorted_results: List[ToolResultEvent] = []
+        for original_invocation in active_turn.invocations:
+            result = results_by_id.get(original_invocation.id)
+            if result:
+                sorted_results.append(result)
+            else:
+                # This should not happen if the logic is correct, but it's a good safeguard.
+                logger.error(f"Agent '{agent_id}': Missing result for invocation ID '{original_invocation.id}' during re-ordering.")
+                # Add a synthetic error result to maintain sequence length.
+                sorted_results.append(ToolResultEvent(
+                    tool_name=original_invocation.name,
+                    result=None,
+                    error=f"Critical Error: Result for this tool call was lost.",
+                    tool_invocation_id=original_invocation.id
+                ))
+
+        await self._dispatch_results_to_input_pipeline(sorted_results, context)

         context.state.active_multi_tool_call_turn = None
         logger.info(f"Agent '{agent_id}': Multi-tool call turn state has been cleared.")

autobyteus/agent/handlers/user_input_message_event_handler.py
@@ -1,5 +1,6 @@
 # file: autobyteus/autobyteus/agent/handlers/user_input_message_event_handler.py
 import logging
+import copy
 from typing import TYPE_CHECKING

 from autobyteus.agent.handlers.base_event_handler import AgentEventHandler
@@ -7,6 +8,7 @@ from autobyteus.agent.events import UserMessageReceivedEvent, LLMUserMessageRead
 from autobyteus.agent.message.agent_input_user_message import AgentInputUserMessage
 from autobyteus.agent.input_processor import BaseAgentUserInputMessageProcessor
 from autobyteus.agent.message.multimodal_message_builder import build_llm_user_message
+from autobyteus.agent.sender_type import SenderType


 if TYPE_CHECKING:
@@ -35,27 +37,34 @@ class UserInputMessageEventHandler(AgentEventHandler):

         original_agent_input_user_msg: AgentInputUserMessage = event.agent_input_user_message

-        # --- NEW LOGIC: Check metadata for system-generated tasks and notify TUI ---
-        if original_agent_input_user_msg.metadata.get('source') == 'system_task_notifier':
+        # --- UPDATED LOGIC: Check sender_type for system-generated tasks and notify TUI ---
+        if original_agent_input_user_msg.sender_type == SenderType.SYSTEM:
             if context.phase_manager:
                 notifier: 'AgentExternalEventNotifier' = context.phase_manager.notifier
                 notification_data = {
-                    "sender_id": "system.task_notifier",
+                    "sender_id": original_agent_input_user_msg.metadata.get("sender_id", "system"),
                     "content": original_agent_input_user_msg.content,
                 }
                 notifier.notify_agent_data_system_task_notification_received(notification_data)
-                logger.info(f"Agent '{context.agent_id}' emitted system task notification for TUI.")
-        # --- END NEW LOGIC ---
+                logger.info(f"Agent '{context.agent_id}' emitted system task notification for TUI based on SYSTEM sender_type.")
+        # --- END UPDATED LOGIC ---

-        processed_agent_input_user_msg: AgentInputUserMessage = original_agent_input_user_msg
+        # Create a deep copy of the message to pass through the processor chain.
+        # This prevents in-place mutation of the original event's message object,
+        # ensuring that processors like UserInputPersistenceProcessor can access
+        # the true original content via the triggering_event.
+        processed_agent_input_user_msg = copy.deepcopy(original_agent_input_user_msg)

-        logger.info(f"Agent '{context.agent_id}' handling UserMessageReceivedEvent: '{original_agent_input_user_msg.content}'")
+        logger.info(f"Agent '{context.agent_id}' handling UserMessageReceivedEvent (type: {original_agent_input_user_msg.sender_type.value}): '{original_agent_input_user_msg.content}'")

         processor_instances = context.config.input_processors
         if processor_instances:
-            processor_names = [p.get_name() for p in processor_instances]
-            logger.debug(f"Agent '{context.agent_id}': Applying input processors: {processor_names}")
-            for processor_instance in processor_instances:
+            # Sort processors by their order attribute
+            sorted_processors = sorted(processor_instances, key=lambda p: p.get_order())
+            processor_names = [p.get_name() for p in sorted_processors]
+            logger.debug(f"Agent '{context.agent_id}': Applying input processors in order: {processor_names}")
+
+            for processor_instance in sorted_processors:
                 processor_name_for_log = "unknown"
                 try:
                     if not isinstance(processor_instance, BaseAgentUserInputMessageProcessor):
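
The switch to copy.deepcopy matters because input processors may mutate the message they are given. A small, self-contained illustration (a plain dataclass, not the real AgentInputUserMessage) of why the original event's message stays intact:

    import copy
    from dataclasses import dataclass, field
    from typing import Any, Dict

    @dataclass
    class Message:  # toy stand-in for the message object passed through processors
        content: str
        metadata: Dict[str, Any] = field(default_factory=dict)

    original = Message(content="raw user input")

    # Without a copy, a processor that rewrites `content` would also change
    # the message stored on the triggering event.
    processed = copy.deepcopy(original)
    processed.content = "[preprocessed] raw user input"
    processed.metadata["normalized"] = True

    print(original.content)   # 'raw user input' -- untouched
    print(processed.content)  # '[preprocessed] raw user input'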

autobyteus/agent/hooks/base_phase_hook.py
@@ -29,6 +29,23 @@ class BasePhaseHook(ABC, metaclass=PhaseHookMeta):
         """
         return cls.__name__

+    @classmethod
+    def get_order(cls) -> int:
+        """
+        Returns the execution order for this hook if multiple hooks are triggered
+        on the same transition. Lower numbers execute earlier.
+        Defaults to 500 (normal priority).
+        """
+        return 500
+
+    @classmethod
+    def is_mandatory(cls) -> bool:
+        """
+        Returns True if this hook is mandatory for the agent to function correctly.
+        Defaults to False (optional).
+        """
+        return False
+
     @property
     @abstractmethod
     def source_phase(self) -> AgentOperationalPhase:
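
Because hooks default to the same order of 500 and Python's sorted() is stable, hooks that keep the default retain their configured order relative to each other; only hooks that override get_order() move. A quick illustration with throwaway classes:

    class HookA:
        @classmethod
        def get_order(cls) -> int:
            return 500   # default priority

    class HookB:
        @classmethod
        def get_order(cls) -> int:
            return 500   # default priority

    class UrgentHook:
        @classmethod
        def get_order(cls) -> int:
            return 10    # overridden to run first

    configured = [HookA(), HookB(), UrgentHook()]
    ordered = sorted(configured, key=lambda h: h.get_order())
    # UrgentHook jumps ahead; HookA and HookB keep their original relative order.
    print([type(h).__name__ for h in ordered])  # ['UrgentHook', 'HookA', 'HookB']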

autobyteus/agent/hooks/hook_registry.py
@@ -3,6 +3,7 @@ import logging
 from typing import TYPE_CHECKING, Dict, List, Optional

 from autobyteus.utils.singleton import SingletonMeta
+from autobyteus.agent.processor_option import HookOption
 from .hook_definition import PhaseHookDefinition

 if TYPE_CHECKING:
@@ -24,14 +25,6 @@ class PhaseHookRegistry(metaclass=SingletonMeta):
     def register_hook(self, definition: PhaseHookDefinition) -> None:
         """
         Registers a phase hook definition.
-        If a definition with the same name already exists, it will be overwritten,
-        and a warning will be logged.
-
-        Args:
-            definition: The PhaseHookDefinition object to register.
-
-        Raises:
-            TypeError: If the definition is not an instance of PhaseHookDefinition.
         """
         if not isinstance(definition, PhaseHookDefinition):
             raise TypeError(f"Expected PhaseHookDefinition instance, got {type(definition).__name__}.")
@@ -46,12 +39,6 @@ class PhaseHookRegistry(metaclass=SingletonMeta):
     def get_hook_definition(self, name: str) -> Optional[PhaseHookDefinition]:
         """
         Retrieves a phase hook definition by its name.
-
-        Args:
-            name: The name of the phase hook definition to retrieve.
-
-        Returns:
-            The PhaseHookDefinition object if found, otherwise None.
         """
         if not isinstance(name, str):
             logger.warning(f"Attempted to retrieve hook definition with non-string name: {type(name).__name__}.")
@@ -64,12 +51,6 @@ class PhaseHookRegistry(metaclass=SingletonMeta):
     def get_hook(self, name: str) -> Optional['BasePhaseHook']:
         """
         Retrieves an instance of a phase hook by its name.
-
-        Args:
-            name: The name of the phase hook to retrieve.
-
-        Returns:
-            An instance of the BasePhaseHook if found and instantiable, otherwise None.
         """
         definition = self.get_hook_definition(name)
         if definition:
@@ -82,19 +63,26 @@ class PhaseHookRegistry(metaclass=SingletonMeta):

     def list_hook_names(self) -> List[str]:
         """
-        Returns a list of names of all registered phase hook definitions.
-
-        Returns:
-            A list of strings, where each string is a registered hook name.
+        Returns an unordered list of names of all registered phase hook definitions.
         """
         return list(self._definitions.keys())

+    def get_ordered_hook_options(self) -> List[HookOption]:
+        """
+        Returns a list of HookOption objects, sorted by their execution order.
+        """
+        definitions = list(self._definitions.values())
+        sorted_definitions = sorted(definitions, key=lambda d: d.hook_class.get_order())
+        return [
+            HookOption(
+                name=d.name,
+                is_mandatory=d.hook_class.is_mandatory()
+            ) for d in sorted_definitions
+        ]
+
     def get_all_definitions(self) -> Dict[str, PhaseHookDefinition]:
         """
         Returns a shallow copy of the dictionary containing all registered phase hook definitions.
-
-        Returns:
-            A dictionary where keys are hook names and values are PhaseHookDefinition objects.
         """
         return dict(self._definitions)
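
The new get_ordered_hook_options() pairs each registered definition with its class-level order and mandatory flag. A compact, stand-alone sketch of that pattern; HookOption and the definition object here are simplified stand-ins for the real classes referenced above, and the hook names are hypothetical:

    from dataclasses import dataclass
    from typing import List

    @dataclass
    class HookOption:       # simplified stand-in for autobyteus.agent.processor_option.HookOption
        name: str
        is_mandatory: bool

    @dataclass
    class HookDefinition:   # simplified stand-in for PhaseHookDefinition
        name: str
        hook_class: type

    class CleanupHook:
        @classmethod
        def get_order(cls) -> int:
            return 900

        @classmethod
        def is_mandatory(cls) -> bool:
            return False

    class SafetyHook:
        @classmethod
        def get_order(cls) -> int:
            return 100

        @classmethod
        def is_mandatory(cls) -> bool:
            return True

    definitions = [HookDefinition("cleanup", CleanupHook), HookDefinition("safety", SafetyHook)]
    sorted_definitions = sorted(definitions, key=lambda d: d.hook_class.get_order())
    options: List[HookOption] = [
        HookOption(name=d.name, is_mandatory=d.hook_class.is_mandatory())
        for d in sorted_definitions
    ]
    print([(o.name, o.is_mandatory) for o in options])  # [('safety', True), ('cleanup', False)]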
 

autobyteus/agent/input_processor/base_user_input_processor.py
@@ -28,6 +28,22 @@ class BaseAgentUserInputMessageProcessor(ABC, metaclass=AgentUserInputMessagePro
         """
         return cls.__name__

+    @classmethod
+    def get_order(cls) -> int:
+        """
+        Returns the execution order for this processor. Lower numbers execute earlier.
+        Defaults to 500 (normal priority).
+        """
+        return 500
+
+    @classmethod
+    def is_mandatory(cls) -> bool:
+        """
+        Returns True if this processor is mandatory for the agent to function correctly.
+        Defaults to False (optional).
+        """
+        return False
+
     @abstractmethod
     async def process(self,
                       message: 'AgentInputUserMessage',
@@ -51,4 +67,4 @@ class BaseAgentUserInputMessageProcessor(ABC, metaclass=AgentUserInputMessagePro
         raise NotImplementedError("Subclasses must implement the 'process' method.")

     def __repr__(self) -> str:
-        return f"<{self.__class__.__name__}>"
+        return f"&lt;{self.__class__.__name__}&gt;"

autobyteus/agent/input_processor/processor_registry.py
@@ -3,6 +3,7 @@ import logging
 from typing import TYPE_CHECKING, Dict, List, Optional, Type

 from autobyteus.utils.singleton import SingletonMeta
+from autobyteus.agent.processor_option import ProcessorOption
 from .processor_definition import AgentUserInputMessageProcessorDefinition
 if TYPE_CHECKING:
     from .base_user_input_processor import BaseAgentUserInputMessageProcessor
@@ -23,14 +24,6 @@ class AgentUserInputMessageProcessorRegistry(metaclass=SingletonMeta):
     def register_processor(self, definition: AgentUserInputMessageProcessorDefinition) -> None:
         """
         Registers an input processor definition.
-        If a definition with the same name already exists, it will be overwritten,
-        and a warning will be logged.
-
-        Args:
-            definition: The AgentUserInputMessageProcessorDefinition object to register.
-
-        Raises:
-            ValueError: If the definition is not an instance of AgentUserInputMessageProcessorDefinition.
         """
         if not isinstance(definition, AgentUserInputMessageProcessorDefinition):
             raise TypeError(f"Expected AgentUserInputMessageProcessorDefinition instance, got {type(definition).__name__}.")
@@ -45,12 +38,6 @@ class AgentUserInputMessageProcessorRegistry(metaclass=SingletonMeta):
     def get_processor_definition(self, name: str) -> Optional[AgentUserInputMessageProcessorDefinition]:
         """
         Retrieves an input processor definition by its name.
-
-        Args:
-            name: The name of the input processor definition to retrieve.
-
-        Returns:
-            The AgentUserInputMessageProcessorDefinition object if found, otherwise None.
         """
         if not isinstance(name, str):
             logger.warning(f"Attempted to retrieve input processor definition with non-string name: {type(name).__name__}.")
@@ -63,12 +50,6 @@ class AgentUserInputMessageProcessorRegistry(metaclass=SingletonMeta):
     def get_processor(self, name: str) -> Optional['BaseAgentUserInputMessageProcessor']:
         """
         Retrieves an instance of an input processor by its name.
-
-        Args:
-            name: The name of the input processor to retrieve.
-
-        Returns:
-            An instance of the BaseAgentUserInputMessageProcessor if found and instantiable, otherwise None.
         """
         definition = self.get_processor_definition(name)
         if definition:
@@ -81,19 +62,26 @@ class AgentUserInputMessageProcessorRegistry(metaclass=SingletonMeta):

     def list_processor_names(self) -> List[str]:
         """
-        Returns a list of names of all registered input processor definitions.
-
-        Returns:
-            A list of strings, where each string is a registered processor name.
+        Returns an unordered list of names of all registered input processor definitions.
         """
         return list(self._definitions.keys())

+    def get_ordered_processor_options(self) -> List[ProcessorOption]:
+        """
+        Returns a list of ProcessorOption objects, sorted by their execution order.
+        """
+        definitions = list(self._definitions.values())
+        sorted_definitions = sorted(definitions, key=lambda d: d.processor_class.get_order())
+        return [
+            ProcessorOption(
+                name=d.name,
+                is_mandatory=d.processor_class.is_mandatory()
+            ) for d in sorted_definitions
+        ]
+
     def get_all_definitions(self) -> Dict[str, AgentUserInputMessageProcessorDefinition]:
         """
         Returns a shallow copy of the dictionary containing all registered input processor definitions.
-
-        Returns:
-            A dictionary where keys are processor names and values are AgentUserInputMessageProcessorDefinition objects.
         """
         return dict(self._definitions)

autobyteus/agent/llm_response_processor/base_processor.py
@@ -30,6 +30,22 @@ class BaseLLMResponseProcessor(ABC, metaclass=LLMResponseProcessorMeta):
         """
         return cls.__name__

+    @classmethod
+    def get_order(cls) -> int:
+        """
+        Returns the execution order for this processor. Lower numbers execute earlier.
+        Defaults to 500 (normal priority).
+        """
+        return 500
+
+    @classmethod
+    def is_mandatory(cls) -> bool:
+        """
+        Returns True if this processor is mandatory for the agent to function correctly.
+        Defaults to False (optional).
+        """
+        return False
+
     @abstractmethod
     async def process_response(self, response: 'CompleteResponse', context: 'AgentContext', triggering_event: 'LLMCompleteResponseReceivedEvent') -> bool:
         """
@@ -50,4 +66,4 @@ class BaseLLMResponseProcessor(ABC, metaclass=LLMResponseProcessorMeta):
         raise NotImplementedError("Subclasses must implement the 'process_response' method.")

     def __repr__(self) -> str:
-        return f"<{self.__class__.__name__}>"
+        return f"&lt;{self.__class__.__name__}&gt;"
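
Taken together, a concrete processor opts into earlier execution or mandatory status by overriding the new classmethods. The sketch below mimics the base-class pattern with a local stand-in and a hypothetical subclass, rather than importing the real BaseLLMResponseProcessor, since only part of its interface appears in this diff:

    from abc import ABC, abstractmethod

    class BaseProcessor(ABC):  # local stand-in echoing the pattern added in this release
        @classmethod
        def get_name(cls) -> str:
            return cls.__name__

        @classmethod
        def get_order(cls) -> int:
            return 500       # normal priority, as in the real base classes

        @classmethod
        def is_mandatory(cls) -> bool:
            return False     # optional by default

        @abstractmethod
        async def process_response(self, response, context, triggering_event) -> bool:
            ...

    class ExampleEarlyProcessor(BaseProcessor):  # hypothetical subclass
        @classmethod
        def get_order(cls) -> int:
            return 100       # run before processors that keep the default of 500

        @classmethod
        def is_mandatory(cls) -> bool:
            return True      # surfaced as mandatory in ordered option listings

        async def process_response(self, response, context, triggering_event) -> bool:
            return False     # placeholder: this sketch handles nothing

    print(ExampleEarlyProcessor.get_name(),
          ExampleEarlyProcessor.get_order(),
          ExampleEarlyProcessor.is_mandatory())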