autobyteus 1.1.6__py3-none-any.whl → 1.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. autobyteus/agent/context/agent_runtime_state.py +7 -1
  2. autobyteus/agent/handlers/tool_result_event_handler.py +121 -89
  3. autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py +7 -1
  4. autobyteus/agent/tool_invocation.py +25 -1
  5. autobyteus/agent_team/agent_team_builder.py +22 -1
  6. autobyteus/agent_team/context/agent_team_runtime_state.py +0 -2
  7. autobyteus/llm/llm_factory.py +25 -57
  8. autobyteus/llm/ollama_provider_resolver.py +1 -0
  9. autobyteus/llm/providers.py +1 -0
  10. autobyteus/llm/token_counter/token_counter_factory.py +2 -0
  11. autobyteus/multimedia/audio/audio_model.py +2 -1
  12. autobyteus/multimedia/image/image_model.py +2 -1
  13. autobyteus/task_management/tools/publish_task_plan.py +4 -16
  14. autobyteus/task_management/tools/update_task_status.py +4 -19
  15. autobyteus/tools/__init__.py +2 -4
  16. autobyteus/tools/base_tool.py +98 -29
  17. autobyteus/tools/browser/standalone/__init__.py +0 -1
  18. autobyteus/tools/google_search.py +149 -0
  19. autobyteus/tools/mcp/schema_mapper.py +29 -71
  20. autobyteus/tools/multimedia/audio_tools.py +3 -3
  21. autobyteus/tools/multimedia/image_tools.py +5 -5
  22. autobyteus/tools/parameter_schema.py +82 -89
  23. autobyteus/tools/pydantic_schema_converter.py +81 -0
  24. autobyteus/tools/usage/formatters/default_json_example_formatter.py +89 -20
  25. autobyteus/tools/usage/formatters/default_xml_example_formatter.py +115 -41
  26. autobyteus/tools/usage/formatters/default_xml_schema_formatter.py +50 -20
  27. autobyteus/tools/usage/formatters/gemini_json_example_formatter.py +55 -22
  28. autobyteus/tools/usage/formatters/google_json_example_formatter.py +54 -21
  29. autobyteus/tools/usage/formatters/openai_json_example_formatter.py +53 -23
  30. autobyteus/tools/usage/parsers/default_xml_tool_usage_parser.py +270 -94
  31. autobyteus/tools/usage/providers/tool_manifest_provider.py +39 -14
  32. autobyteus-1.1.8.dist-info/METADATA +204 -0
  33. {autobyteus-1.1.6.dist-info → autobyteus-1.1.8.dist-info}/RECORD +39 -40
  34. examples/run_google_slides_agent.py +2 -2
  35. examples/run_mcp_google_slides_client.py +1 -1
  36. examples/run_sqlite_agent.py +1 -1
  37. autobyteus/tools/ask_user_input.py +0 -40
  38. autobyteus/tools/browser/standalone/factory/google_search_factory.py +0 -25
  39. autobyteus/tools/browser/standalone/google_search_ui.py +0 -126
  40. autobyteus-1.1.6.dist-info/METADATA +0 -161
  41. {autobyteus-1.1.6.dist-info → autobyteus-1.1.8.dist-info}/WHEEL +0 -0
  42. {autobyteus-1.1.6.dist-info → autobyteus-1.1.8.dist-info}/licenses/LICENSE +0 -0
  43. {autobyteus-1.1.6.dist-info → autobyteus-1.1.8.dist-info}/top_level.txt +0 -0
--- a/autobyteus/agent/context/agent_runtime_state.py
+++ b/autobyteus/agent/context/agent_runtime_state.py
@@ -16,6 +16,7 @@ from autobyteus.agent.tool_invocation import ToolInvocation
 if TYPE_CHECKING:
     from autobyteus.agent.phases import AgentPhaseManager
     from autobyteus.tools.base_tool import BaseTool
+    from autobyteus.agent.tool_invocation import ToolInvocationTurn
 
 logger = logging.getLogger(__name__)
 
@@ -48,6 +49,9 @@ class AgentRuntimeState:
         self.pending_tool_approvals: Dict[str, ToolInvocation] = {}
         self.custom_data: Dict[str, Any] = custom_data or {}
 
+        # NEW: State for multi-tool call invocation turns, with a very explicit name.
+        self.active_multi_tool_call_turn: Optional['ToolInvocationTurn'] = None
+
         self.processed_system_prompt: Optional[str] = None
         # self.final_llm_config_for_creation removed
 
@@ -83,7 +87,9 @@ class AgentRuntimeState:
         tools_status = f"{len(self.tool_instances)} Initialized" if self.tool_instances is not None else "Not Initialized"
         input_queues_status = "Initialized" if self.input_event_queues else "Not Initialized"
         # REMOVED output_queues_status from repr
+        active_turn_status = "Active" if self.active_multi_tool_call_turn else "Inactive"
         return (f"AgentRuntimeState(agent_id='{self.agent_id}', current_phase='{phase_repr}', "
                 f"llm_status='{llm_status}', tools_status='{tools_status}', "
                 f"input_queues_status='{input_queues_status}', "
-                f"pending_approvals={len(self.pending_tool_approvals)}, history_len={len(self.conversation_history)})")
+                f"pending_approvals={len(self.pending_tool_approvals)}, history_len={len(self.conversation_history)}, "
+                f"multi_tool_call_turn='{active_turn_status}')")
--- a/autobyteus/agent/handlers/tool_result_event_handler.py
+++ b/autobyteus/agent/handlers/tool_result_event_handler.py
@@ -1,7 +1,7 @@
 # file: autobyteus/autobyteus/agent/handlers/tool_result_event_handler.py
 import logging
 import json
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional, List
 
 from autobyteus.agent.handlers.base_event_handler import AgentEventHandler
 from autobyteus.agent.events import ToolResultEvent, LLMUserMessageReadyEvent
@@ -16,13 +16,60 @@ logger = logging.getLogger(__name__)
 
 class ToolResultEventHandler(AgentEventHandler):
     """
-    Handles ToolResultEvents by formatting the tool's output (or error)
-    as a new LLMUserMessage, emitting AGENT_DATA_TOOL_LOG event for this outcome,
-    and enqueuing an LLMUserMessageReadyEvent for further LLM processing.
+    Handles ToolResultEvents. It processes and notifies for each individual tool
+    result as it arrives. If a multi-tool call turn is active, it accumulates
+    these results until the turn is complete, re-orders them to match the original
+    invocation sequence, and then sends a single aggregated message to the LLM.
     """
     def __init__(self):
         logger.info("ToolResultEventHandler initialized.")
 
+    async def _dispatch_aggregated_results_to_llm(self,
+                                                  processed_events: List[ToolResultEvent],
+                                                  context: 'AgentContext'):
+        """
+        Aggregates a list of PRE-PROCESSED and ORDERED tool results into a single
+        message and dispatches it to the LLM.
+        """
+        agent_id = context.agent_id
+
+        # --- Aggregate results into a single message ---
+        aggregated_content_parts = []
+        for p_event in processed_events:
+            tool_invocation_id = p_event.tool_invocation_id if p_event.tool_invocation_id else 'N/A'
+            content_part: str
+            if p_event.error:
+                content_part = (
+                    f"Tool: {p_event.tool_name} (ID: {tool_invocation_id})\n"
+                    f"Status: Error\n"
+                    f"Details: {p_event.error}"
+                )
+            else:
+                try:
+                    result_str = json.dumps(p_event.result, indent=2) if not isinstance(p_event.result, str) else p_event.result
+                except TypeError: # pragma: no cover
+                    result_str = str(p_event.result)
+                content_part = (
+                    f"Tool: {p_event.tool_name} (ID: {tool_invocation_id})\n"
+                    f"Status: Success\n"
+                    f"Result:\n{result_str}"
+                )
+            aggregated_content_parts.append(content_part)
+
+        final_content_for_llm = (
+            "The following tool executions have completed. Please analyze their results and decide the next course of action.\n\n"
+            + "\n\n---\n\n".join(aggregated_content_parts)
+        )
+
+        logger.debug(f"Agent '{agent_id}' preparing aggregated message for LLM:\n---\n{final_content_for_llm}\n---")
+        llm_user_message = LLMUserMessage(content=final_content_for_llm)
+
+        next_event = LLMUserMessageReadyEvent(llm_user_message=llm_user_message)
+        await context.input_event_queues.enqueue_internal_system_event(next_event)
+
+        logger.info(f"Agent '{agent_id}' enqueued LLMUserMessageReadyEvent with aggregated results from {len(processed_events)} tool(s).")
+
+
     async def handle(self,
                      event: ToolResultEvent,
                      context: 'AgentContext') -> None:
@@ -31,99 +78,84 @@ class ToolResultEventHandler(AgentEventHandler):
             return
 
         agent_id = context.agent_id
-        processed_event = event
+        notifier: Optional['AgentExternalEventNotifier'] = context.phase_manager.notifier if context.phase_manager else None
 
-        # --- New: Apply Tool Execution Result Processors ---
+        # --- Step 1: Immediately process the incoming event ---
+        processed_event = event
         processor_instances = context.config.tool_execution_result_processors
         if processor_instances:
-            processor_names = [p.get_name() for p in processor_instances]
-            logger.debug(f"Agent '{agent_id}': Applying tool execution result processors: {processor_names}")
             for processor_instance in processor_instances:
-                processor_name_for_log = "unknown"
+                if not isinstance(processor_instance, BaseToolExecutionResultProcessor):
+                    logger.error(f"Agent '{agent_id}': Invalid tool result processor type: {type(processor_instance)}. Skipping.")
+                    continue
                 try:
-                    if not isinstance(processor_instance, BaseToolExecutionResultProcessor):
-                        logger.error(f"Agent '{agent_id}': Invalid tool result processor type: {type(processor_instance)}. Skipping.")
-                        continue
-
-                    processor_name_for_log = processor_instance.get_name()
-                    logger.debug(f"Agent '{agent_id}': Applying tool result processor '{processor_name_for_log}'.")
-
-                    event_before_proc = processed_event
-                    processed_event = await processor_instance.process(event_before_proc, context)
-                    logger.info(f"Agent '{agent_id}': Tool result processor '{processor_name_for_log}' applied successfully.")
-
+                    processed_event = await processor_instance.process(processed_event, context)
                 except Exception as e:
-                    logger.error(f"Agent '{agent_id}': Error applying tool result processor '{processor_name_for_log}': {e}. "
-                                 f"Skipping and continuing with result from before this processor.", exc_info=True)
-                    processed_event = event_before_proc
-        # --- End New ---
-
+                    logger.error(f"Agent '{agent_id}': Error applying tool result processor '{processor_instance.get_name()}': {e}", exc_info=True)
+
+        # --- Step 2: Immediately notify the result of this single tool call ---
         tool_invocation_id = processed_event.tool_invocation_id if processed_event.tool_invocation_id else 'N/A'
+        if notifier:
+            log_message = ""
+            if processed_event.error:
+                log_message = f"[TOOL_RESULT_ERROR_PROCESSED] Agent_ID: {agent_id}, Tool: {processed_event.tool_name}, Invocation_ID: {tool_invocation_id}, Error: {processed_event.error}"
+            else:
+                log_message = f"[TOOL_RESULT_SUCCESS_PROCESSED] Agent_ID: {agent_id}, Tool: {processed_event.tool_name}, Invocation_ID: {tool_invocation_id}, Result: {str(processed_event.result)}"
+
+            try:
+                log_data = {
+                    "log_entry": log_message,
+                    "tool_invocation_id": tool_invocation_id,
+                    "tool_name": processed_event.tool_name,
+                }
+                notifier.notify_agent_data_tool_log(log_data)
+                logger.debug(f"Agent '{agent_id}': Notified individual tool result for '{processed_event.tool_name}'.")
+            except Exception as e_notify:
+                logger.error(f"Agent '{agent_id}': Error notifying tool result log: {e_notify}", exc_info=True)
 
-        logger.info(f"Agent '{agent_id}' handling processed ToolResultEvent from tool: '{processed_event.tool_name}' (Invocation ID: {tool_invocation_id}). Error: {processed_event.error is not None}")
-
-        notifier: Optional['AgentExternalEventNotifier'] = None
-        if context.phase_manager:
-            notifier = context.phase_manager.notifier
-
-        if not notifier: # pragma: no cover
-            logger.error(f"Agent '{agent_id}': Notifier not available in ToolResultEventHandler. Tool result processing logs will not be emitted.")
+        # --- Step 3: Manage the multi-tool call turn state ---
+        active_turn = context.state.active_multi_tool_call_turn
 
-        if processed_event.error:
-            logger.debug(f"Agent '{agent_id}' tool '{processed_event.tool_name}' (ID: {tool_invocation_id}) raw error details: {processed_event.error}")
-        else:
-            try:
-                raw_result_str_for_debug_log = json.dumps(processed_event.result, indent=2)
-            except TypeError: # pragma: no cover
-                raw_result_str_for_debug_log = str(processed_event.result)
-            logger.debug(f"Agent '{agent_id}' tool '{processed_event.tool_name}' (ID: {tool_invocation_id}) raw result:\n---\n{raw_result_str_for_debug_log}\n---")
-
-
-        content_for_llm: str
-        if processed_event.error:
-            content_for_llm = (
-                f"The tool '{processed_event.tool_name}' (invocation ID: {tool_invocation_id}) encountered an error.\n"
-                f"Error details: {processed_event.error}\n"
-                f"Please analyze this error and decide the next course of action."
-            )
-            log_msg_error_processed = f"[TOOL_RESULT_ERROR_PROCESSED] Agent_ID: {agent_id}, Tool: {processed_event.tool_name}, Invocation_ID: {tool_invocation_id}, Error: {processed_event.error}"
-            if notifier:
-                try:
-                    log_data = {
-                        "log_entry": log_msg_error_processed,
-                        "tool_invocation_id": tool_invocation_id,
-                        "tool_name": processed_event.tool_name,
-                    }
-                    notifier.notify_agent_data_tool_log(log_data)
-                except Exception as e_notify:
-                    logger.error(f"Agent '{agent_id}': Error notifying tool result error log: {e_notify}", exc_info=True)
-        else:
-            try:
-                result_str_for_llm = json.dumps(processed_event.result, indent=2) if not isinstance(processed_event.result, str) else processed_event.result
-            except TypeError: # pragma: no cover
-                result_str_for_llm = str(processed_event.result)
-
-            content_for_llm = (
-                f"The tool '{processed_event.tool_name}' (invocation ID: {tool_invocation_id}) has executed.\n"
-                f"Result:\n{result_str_for_llm}\n"
-                f"Based on this result, what is the next step or final answer?"
-            )
-            log_msg_success_processed = f"[TOOL_RESULT_SUCCESS_PROCESSED] Agent_ID: {agent_id}, Tool: {processed_event.tool_name}, Invocation_ID: {tool_invocation_id}, Result: {str(processed_event.result)}"
-            if notifier:
-                try:
-                    log_data = {
-                        "log_entry": log_msg_success_processed,
-                        "tool_invocation_id": tool_invocation_id,
-                        "tool_name": processed_event.tool_name,
-                    }
-                    notifier.notify_agent_data_tool_log(log_data)
-                except Exception as e_notify:
-                    logger.error(f"Agent '{agent_id}': Error notifying tool result success log: {e_notify}", exc_info=True)
-
-        logger.debug(f"Agent '{agent_id}' preparing message for LLM based on tool '{processed_event.tool_name}' (ID: {tool_invocation_id}) result:\n---\n{content_for_llm}\n---")
-        llm_user_message = LLMUserMessage(content=content_for_llm)
+        # Case 1: Not a multi-tool call turn, dispatch to LLM immediately.
+        if not active_turn:
+            logger.info(f"Agent '{agent_id}' handling single ToolResultEvent from tool: '{processed_event.tool_name}'.")
+            await self._dispatch_aggregated_results_to_llm([processed_event], context)
+            return
+
+        # Case 2: Multi-tool call turn is active, accumulate results.
+        active_turn.results.append(processed_event)
+        num_results = len(active_turn.results)
+        num_expected = len(active_turn.invocations)
+        logger.info(f"Agent '{agent_id}' handling ToolResultEvent for multi-tool call turn. "
+                    f"Collected {num_results}/{num_expected} results.")
+
+        # If not all results are in, just wait for the next ToolResultEvent.
+        if not active_turn.is_complete():
+            return
+
+        # If all results are in, re-order them and then dispatch to the LLM.
+        logger.info(f"Agent '{agent_id}': All tool results for the turn collected. Re-ordering to match invocation sequence.")
 
-        next_event = LLMUserMessageReadyEvent(llm_user_message=llm_user_message)
-        await context.input_event_queues.enqueue_internal_system_event(next_event)
+        # --- NEW RE-ORDERING LOGIC ---
+        results_by_id = {res.tool_invocation_id: res for res in active_turn.results}
+        sorted_results: List[ToolResultEvent] = []
+        for original_invocation in active_turn.invocations:
+            result = results_by_id.get(original_invocation.id)
+            if result:
+                sorted_results.append(result)
+            else:
+                # This should not happen if the logic is correct, but it's a good safeguard.
+                logger.error(f"Agent '{agent_id}': Missing result for invocation ID '{original_invocation.id}' during re-ordering.")
+                # Add a synthetic error result to maintain sequence length.
+                sorted_results.append(ToolResultEvent(
+                    tool_name=original_invocation.name,
+                    result=None,
+                    error=f"Critical Error: Result for this tool call was lost.",
+                    tool_invocation_id=original_invocation.id
+                ))
+
+        await self._dispatch_aggregated_results_to_llm(sorted_results, context)
 
-        logger.info(f"Agent '{agent_id}' enqueued LLMUserMessageReadyEvent for LLM based on tool '{processed_event.tool_name}' (ID: {tool_invocation_id}) result summary.")
+        context.state.active_multi_tool_call_turn = None
+        logger.info(f"Agent '{agent_id}': Multi-tool call turn state has been cleared.")
+
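Note on the flow above: the handler now sends the LLM one aggregated message per turn rather than one message per tool result. Below is a minimal standalone sketch of the message format built by `_dispatch_aggregated_results_to_llm`; the tool names, IDs, and results are hypothetical, and plain dicts stand in for `ToolResultEvent` objects.

```python
# Sketch only: reproduces the aggregation format from the hunk above
# with plain dicts instead of ToolResultEvent objects.
results = [
    {"tool_name": "google_search", "id": "inv-1", "result": "42 hits", "error": None},
    {"tool_name": "read_file", "id": "inv-2", "result": None, "error": "File not found"},
]

parts = []
for r in results:
    if r["error"]:
        parts.append(f"Tool: {r['tool_name']} (ID: {r['id']})\nStatus: Error\nDetails: {r['error']}")
    else:
        parts.append(f"Tool: {r['tool_name']} (ID: {r['id']})\nStatus: Success\nResult:\n{r['result']}")

message = (
    "The following tool executions have completed. Please analyze their results "
    "and decide the next course of action.\n\n"
    + "\n\n---\n\n".join(parts)  # each tool's block is separated by "---"
)
print(message)
```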
--- a/autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py
+++ b/autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py
@@ -4,7 +4,7 @@ from typing import TYPE_CHECKING, List
 
 from .base_processor import BaseLLMResponseProcessor
 from autobyteus.agent.events import PendingToolInvocationEvent
-from autobyteus.agent.tool_invocation import ToolInvocation
+from autobyteus.agent.tool_invocation import ToolInvocation, ToolInvocationTurn
 from autobyteus.tools.usage.parsers import ProviderAwareToolUsageParser
 from autobyteus.tools.usage.parsers.exceptions import ToolUsageParseException
 
@@ -72,6 +72,12 @@ class ProviderAwareToolUsageProcessor(BaseLLMResponseProcessor):
                 processed_invocations.append(invocation)
 
             # --- END NEW LOGIC ---
+
+            # --- NEW: Initialize the multi-tool turn state ---
+            if len(processed_invocations) > 0:
+                logger.info(f"Agent '{context.agent_id}': Initializing multi-tool call turn with {len(processed_invocations)} invocations.")
+                context.state.active_multi_tool_call_turn = ToolInvocationTurn(invocations=processed_invocations)
+            # --- END NEW ---
 
             logger.info(f"Agent '{context.agent_id}': Parsed {len(processed_invocations)} tool invocations. Enqueuing events with unique IDs.")
             for invocation in processed_invocations:
--- a/autobyteus/agent/tool_invocation.py
+++ b/autobyteus/agent/tool_invocation.py
@@ -2,7 +2,14 @@
 import uuid
 import hashlib
 import json
-from typing import Optional, Dict, Any
+import logging
+from typing import Optional, Dict, Any, List, TYPE_CHECKING
+from dataclasses import dataclass, field
+
+if TYPE_CHECKING:
+    from autobyteus.agent.events import ToolResultEvent
+
+logger = logging.getLogger(__name__)
 
 class ToolInvocation:
     def __init__(self, name: Optional[str] = None, arguments: Optional[Dict[str, Any]] = None, id: Optional[str] = None):
@@ -39,6 +46,9 @@ class ToolInvocation:
         # Create a string to hash
         hash_string = f"{name}:{canonical_args}"
 
+        # --- ADDED LOGGING ---
+        logger.debug(f"Generating tool invocation ID from hash_string: '{hash_string}'")
+
         # Use SHA256 for a robust hash
         sha256_hash = hashlib.sha256(hash_string.encode('utf-8')).hexdigest()
 
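The added debug line surfaces the pre-hash string. For context, here is a sketch of the deterministic ID scheme; it assumes `canonical_args` is the arguments dict serialized as sorted-keys JSON (the helper name is illustrative, not from the source).

```python
import hashlib
import json

def deterministic_invocation_id(name: str, arguments: dict) -> str:
    # Assumption: canonical_args is a sorted-keys JSON dump, so the same
    # call always produces the same hash_string and therefore the same ID.
    canonical_args = json.dumps(arguments, sort_keys=True)
    hash_string = f"{name}:{canonical_args}"
    return hashlib.sha256(hash_string.encode("utf-8")).hexdigest()

# Argument order does not change the ID.
a = deterministic_invocation_id("google_search", {"query": "python", "num": 5})
b = deterministic_invocation_id("google_search", {"num": 5, "query": "python"})
assert a == b
```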
@@ -55,3 +65,17 @@
     def __repr__(self) -> str:
         return (f"ToolInvocation(id='{self.id}', name='{self.name}', "
                 f"arguments={self.arguments})")
+
+
+@dataclass
+class ToolInvocationTurn:
+    """
+    A data class to encapsulate the state of a multi-tool invocation turn.
+    Its existence in the agent's state signifies that a multi-tool turn is active.
+    """
+    invocations: List[ToolInvocation]
+    results: List['ToolResultEvent'] = field(default_factory=list)
+
+    def is_complete(self) -> bool:
+        """Checks if all expected tool results have been collected."""
+        return len(self.results) >= len(self.invocations)
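A sketch of the turn lifecycle shared by the processor and the result handler, using the classes from the hunks above; it assumes `ToolResultEvent` accepts the keyword arguments shown in the re-ordering logic.

```python
from autobyteus.agent.tool_invocation import ToolInvocation, ToolInvocationTurn
from autobyteus.agent.events import ToolResultEvent

invocations = [
    ToolInvocation(name="google_search", arguments={"query": "autobyteus"}),
    ToolInvocation(name="read_file", arguments={"path": "README.md"}),
]
turn = ToolInvocationTurn(invocations=invocations)

# Results may arrive out of order; the handler appends them as they come.
turn.results.append(ToolResultEvent(
    tool_name="read_file", result="...", error=None,
    tool_invocation_id=invocations[1].id,
))
assert not turn.is_complete()

turn.results.append(ToolResultEvent(
    tool_name="google_search", result="42 hits", error=None,
    tool_invocation_id=invocations[0].id,
))
assert turn.is_complete()  # handler re-orders by invocation ID, then dispatches
```

Since `is_complete()` only compares counts, the re-ordering step in the handler is what restores the original invocation sequence before the aggregated message is built.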
--- a/autobyteus/agent_team/agent_team_builder.py
+++ b/autobyteus/agent_team/agent_team_builder.py
@@ -45,6 +45,7 @@ class AgentTeamBuilder:
         self._coordinator_config: Optional[AgentConfig] = None
         self._added_node_names: Set[str] = set()
         self._task_notification_mode: TaskNotificationMode = TaskNotificationMode.AGENT_MANUAL_NOTIFICATION
+        self._use_xml_tool_format: Optional[bool] = None
         logger.info(f"AgentTeamBuilder initialized for team: '{self._name}'.")
 
     def add_agent_node(self, agent_config: AgentConfig, dependencies: Optional[List[NodeDefinition]] = None) -> 'AgentTeamBuilder':
@@ -142,6 +143,25 @@ class AgentTeamBuilder:
         logger.debug(f"Task notification mode set to '{mode.value}'.")
         return self
 
+    def set_use_xml_tool_format(self, use_xml: bool) -> 'AgentTeamBuilder':
+        """
+        Sets the team-level override for using XML tool format.
+
+        If set, this will override the setting on all individual agents within the team.
+
+        Args:
+            use_xml: If True, forces the team to use XML format for tool
+                     definitions and parsing.
+
+        Returns:
+            The builder instance for fluent chaining.
+        """
+        if not isinstance(use_xml, bool):
+            raise TypeError("use_xml must be a boolean.")
+        self._use_xml_tool_format = use_xml
+        logger.debug(f"Team-level XML tool format override set to '{use_xml}'.")
+        return self
+
     def build(self) -> AgentTeam:
         """
         Constructs and returns the final AgentTeam instance using the
@@ -175,7 +195,8 @@ class AgentTeamBuilder:
             role=self._role,
             nodes=tuple(final_nodes),
             coordinator_node=coordinator_node_instance,
-            task_notification_mode=self._task_notification_mode
+            task_notification_mode=self._task_notification_mode,
+            use_xml_tool_format=self._use_xml_tool_format
         )
 
         logger.info(f"AgentTeamConfig created successfully. Name: '{team_config.name}'. Total nodes: {len(final_nodes)}. Coordinator: '{coordinator_node_instance.name}'.")
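For illustration, the new override in a fluent chain. The `AgentTeamBuilder` constructor arguments and the agent config are assumptions; only `add_agent_node`, `set_use_xml_tool_format`, and `build` come from this diff.

```python
from autobyteus.agent_team.agent_team_builder import AgentTeamBuilder

# researcher_cfg: a previously built AgentConfig (placeholder for this sketch).
team = (
    AgentTeamBuilder(name="demo-team")   # constructor signature assumed
    .add_agent_node(researcher_cfg)
    .set_use_xml_tool_format(True)       # overrides every agent's own setting
    .build()
)

# Non-boolean input fails fast:
#   builder.set_use_xml_tool_format("yes")  ->  TypeError("use_xml must be a boolean.")
```

Leaving the override unset keeps each agent's own setting, which is why the field defaults to `Optional[bool] = None` rather than `False`.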
--- a/autobyteus/agent_team/context/agent_team_runtime_state.py
+++ b/autobyteus/agent_team/context/agent_team_runtime_state.py
@@ -14,7 +14,6 @@ if TYPE_CHECKING:
     from autobyteus.agent_team.context.team_manager import TeamManager
     from autobyteus.agent_team.streaming.agent_event_multiplexer import AgentEventMultiplexer
     from autobyteus.task_management.base_task_board import BaseTaskBoard
-    from autobyteus.task_management.artifacts.artifact_manifest import ArtifactManifest
     from autobyteus.agent_team.task_notification.system_event_driven_agent_task_notifier import SystemEventDrivenAgentTaskNotifier
 
 logger = logging.getLogger(__name__)
@@ -40,7 +39,6 @@ class AgentTeamRuntimeState:
 
     # Dynamic planning and artifact state
     task_board: Optional['BaseTaskBoard'] = None
-    artifact_registry: Dict[str, 'ArtifactManifest'] = field(default_factory=dict)
 
     def __post_init__(self):
         if not self.team_id or not isinstance(self.team_id, str):
--- a/autobyteus/llm/llm_factory.py
+++ b/autobyteus/llm/llm_factory.py
@@ -14,6 +14,7 @@ from autobyteus.llm.api.bedrock_llm import BedrockLLM
 from autobyteus.llm.api.mistral_llm import MistralLLM
 from autobyteus.llm.api.openai_llm import OpenAILLM
 from autobyteus.llm.api.deepseek_llm import DeepSeekLLM
+from autobyteus.llm.api.gemini_llm import GeminiLLM
 from autobyteus.llm.api.grok_llm import GrokLLM
 from autobyteus.llm.api.kimi_llm import KimiLLM
 from autobyteus.llm.ollama_provider import OllamaModelProvider
@@ -266,106 +267,73 @@ class LLMFactory(metaclass=SingletonMeta):
                 name="gemini-2.5-pro",
                 value="gemini-2.5-pro",
                 provider=LLMProvider.GEMINI,
-                llm_class=OpenAILLM,
+                llm_class=GeminiLLM,
                 canonical_name="gemini-2.5-pro",
                 default_config=LLMConfig(
-                    pricing_config=TokenPricingConfig(2.50, 10.00)
+                    pricing_config=TokenPricingConfig(2.50, 15.00)
                 )
             ),
             LLMModel(
                 name="gemini-2.5-flash",
                 value="gemini-2.5-flash",
                 provider=LLMProvider.GEMINI,
-                llm_class=OpenAILLM,
+                llm_class=GeminiLLM,
                 canonical_name="gemini-2.5-flash",
                 default_config=LLMConfig(
-                    pricing_config=TokenPricingConfig(0.15, 0.60)
-                )
-            ),
-            LLMModel(
-                name="gemini-2.0-flash",
-                value="gemini-2.0-flash",
-                provider=LLMProvider.GEMINI,
-                llm_class=OpenAILLM,
-                canonical_name="gemini-2.0-flash",
-                default_config=LLMConfig(
-                    pricing_config=TokenPricingConfig(0.1, 0.40)
+                    pricing_config=TokenPricingConfig(0.30, 2.50)
                 )
             ),
             LLMModel(
-                name="gemini-2.0-flash-lite",
-                value="gemini-2.0-flash-lite",
+                name="gemini-2.5-flash-lite",
+                value="gemini-2.5-flash-lite",
                 provider=LLMProvider.GEMINI,
-                llm_class=OpenAILLM,
-                canonical_name="gemini-2.0-flash-lite",
+                llm_class=GeminiLLM,
+                canonical_name="gemini-2.5-flash-lite",
                 default_config=LLMConfig(
-                    pricing_config=TokenPricingConfig(0.075, 0.30)
-                )
-            ),
-            # GROK Provider Models
-            LLMModel(
-                name="grok-2-1212",
-                value="grok-2-1212",
-                provider=LLMProvider.GROK,
-                llm_class=GrokLLM,
-                canonical_name="grok-2",
-                default_config=LLMConfig(
-                    rate_limit=60,
-                    token_limit=8000,
-                    pricing_config=TokenPricingConfig(2.0, 6.0)
+                    pricing_config=TokenPricingConfig(0.10, 0.40)
                 )
             ),
             # KIMI Provider Models
             LLMModel(
-                name="kimi-latest",
-                value="kimi-latest",
+                name="kimi-k2-0711-preview",
+                value="kimi-k2-0711-preview",
                 provider=LLMProvider.KIMI,
                 llm_class=KimiLLM,
-                canonical_name="kimi-latest",
+                canonical_name="kimi-k2-0711-preview",
                 default_config=LLMConfig(
-                    pricing_config=TokenPricingConfig(1.38, 4.14)
+                    pricing_config=TokenPricingConfig(0.55, 2.21)
                 )
             ),
             LLMModel(
-                name="moonshot-v1-8k",
-                value="moonshot-v1-8k",
+                name="kimi-k2-0905-preview",
+                value="kimi-k2-0905-preview",
                 provider=LLMProvider.KIMI,
                 llm_class=KimiLLM,
-                canonical_name="moonshot-v1-8k",
+                canonical_name="kimi-k2-0905-preview",
                 default_config=LLMConfig(
-                    pricing_config=TokenPricingConfig(0.28, 1.38)
+                    pricing_config=TokenPricingConfig(0.55, 2.21)
                 )
             ),
             LLMModel(
-                name="moonshot-v1-32k",
-                value="moonshot-v1-32k",
+                name="kimi-k2-turbo-preview",
+                value="kimi-k2-turbo-preview",
                 provider=LLMProvider.KIMI,
                 llm_class=KimiLLM,
-                canonical_name="moonshot-v1-32k",
+                canonical_name="kimi-k2-turbo-preview",
                 default_config=LLMConfig(
-                    pricing_config=TokenPricingConfig(0.69, 2.76)
+                    pricing_config=TokenPricingConfig(2.76, 2.76)
                 )
             ),
             LLMModel(
-                name="moonshot-v1-128k",
-                value="moonshot-v1-128k",
+                name="kimi-latest",
+                value="kimi-latest",
                 provider=LLMProvider.KIMI,
                 llm_class=KimiLLM,
-                canonical_name="moonshot-v1-128k",
+                canonical_name="kimi-latest",
                 default_config=LLMConfig(
                     pricing_config=TokenPricingConfig(1.38, 4.14)
                 )
             ),
-            LLMModel(
-                name="kimi-k2-0711-preview",
-                value="kimi-k2-0711-preview",
-                provider=LLMProvider.KIMI,
-                llm_class=KimiLLM,
-                canonical_name="kimi-k2-0711-preview",
-                default_config=LLMConfig(
-                    pricing_config=TokenPricingConfig(0.55, 2.21)
-                )
-            ),
             LLMModel(
                 name="kimi-thinking-preview",
                 value="kimi-thinking-preview",
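The updated Gemini and Kimi rows change both the backing `llm_class` and the pricing. A back-of-envelope cost check, assuming `TokenPricingConfig(input, output)` is denominated in USD per million tokens (a common provider convention; the diff itself does not state the unit):

```python
# gemini-2.5-pro row above: TokenPricingConfig(2.50, 15.00)
input_price, output_price = 2.50, 15.00      # assumed USD per 1M tokens
prompt_tokens, completion_tokens = 12_000, 3_000

cost = (prompt_tokens / 1_000_000) * input_price + \
       (completion_tokens / 1_000_000) * output_price
print(f"${cost:.4f}")  # -> $0.0750
```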
--- a/autobyteus/llm/ollama_provider_resolver.py
+++ b/autobyteus/llm/ollama_provider_resolver.py
@@ -18,6 +18,7 @@ class OllamaProviderResolver:
         (['llama'], LLMProvider.GROQ),
         (['mistral'], LLMProvider.MISTRAL),
         (['deepseek'], LLMProvider.DEEPSEEK),
+        (['qwen'], LLMProvider.QWEN),
     ]
 
     @staticmethod
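A small self-contained sketch of the resolver's keyword matching for local Ollama model names; the mapping table mirrors the hunk above, while the function name and the fallback are illustrative assumptions.

```python
from autobyteus.llm.providers import LLMProvider

KEYWORD_MAP = [
    (['llama'], LLMProvider.GROQ),
    (['mistral'], LLMProvider.MISTRAL),
    (['deepseek'], LLMProvider.DEEPSEEK),
    (['qwen'], LLMProvider.QWEN),   # new in 1.1.8
]

def resolve_provider(model_name: str) -> LLMProvider:
    # Match the first keyword found in the lowercased model name.
    lowered = model_name.lower()
    for keywords, provider in KEYWORD_MAP:
        if any(keyword in lowered for keyword in keywords):
            return provider
    return LLMProvider.OLLAMA  # assumed fallback for unmatched names

print(resolve_provider("qwen2.5:7b"))  # LLMProvider.QWEN
```

Together with the `QWEN` branch added to `get_token_counter` below, Qwen models served via Ollama get OpenAI-compatible token counting.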
--- a/autobyteus/llm/providers.py
+++ b/autobyteus/llm/providers.py
@@ -13,4 +13,5 @@ class LLMProvider(Enum):
     GROK = "GROK"
     AUTOBYTEUS = "AUTOBYTEUS"
     KIMI = "KIMI"
+    QWEN = "QWEN"
     LMSTUDIO = "LMSTUDIO"
--- a/autobyteus/llm/token_counter/token_counter_factory.py
+++ b/autobyteus/llm/token_counter/token_counter_factory.py
@@ -34,6 +34,8 @@ def get_token_counter(model: LLMModel, llm: 'BaseLLM') -> BaseTokenCounter:
         return DeepSeekTokenCounter(model, llm)
     elif model.provider == LLMProvider.KIMI:
         return KimiTokenCounter(model, llm)
+    elif model.provider == LLMProvider.QWEN:
+        return OpenAITokenCounter(model, llm)
     elif model.provider == LLMProvider.OLLAMA:
         return OpenAITokenCounter(model, llm)
     elif model.provider == LLMProvider.LMSTUDIO:
--- a/autobyteus/multimedia/audio/audio_model.py
+++ b/autobyteus/multimedia/audio/audio_model.py
@@ -28,7 +28,8 @@ class AudioModelMeta(type):
         model = AudioClientFactory._models_by_identifier.get(name_or_identifier)
         if model:
             return model
-        raise KeyError(f"Audio model '{name_or_identifier}' not found.")
+        available_models = list(AudioClientFactory._models_by_identifier.keys())
+        raise KeyError(f"Audio model '{name_or_identifier}' not found. Available models: {available_models}")
 
     def __len__(cls) -> int:
         from autobyteus.multimedia.audio.audio_client_factory import AudioClientFactory
--- a/autobyteus/multimedia/image/image_model.py
+++ b/autobyteus/multimedia/image/image_model.py
@@ -28,7 +28,8 @@ class ImageModelMeta(type):
         model = ImageClientFactory._models_by_identifier.get(name_or_identifier)
         if model:
             return model
-        raise KeyError(f"Image model '{name_or_identifier}' not found.")
+        available_models = list(ImageClientFactory._models_by_identifier.keys())
+        raise KeyError(f"Image model '{name_or_identifier}' not found. Available models: {available_models}")
 
     def __len__(cls) -> int:
         from autobyteus.multimedia.image.image_client_factory import ImageClientFactory
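Both metaclass changes make failed lookups self-diagnosing by listing what is actually registered. A hypothetical lookup (the `ImageModel` class name and the model identifier are assumptions for illustration):

```python
from autobyteus.multimedia.image.image_model import ImageModel  # class name assumed

try:
    ImageModel["definitely-not-a-model"]
except KeyError as exc:
    # e.g. "Image model 'definitely-not-a-model' not found. Available models: [...]"
    print(exc)
```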