autobyteus 1.1.6__py3-none-any.whl → 1.1.8__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package as they appear in the supported public registries. It is provided for informational purposes only.
- autobyteus/agent/context/agent_runtime_state.py +7 -1
- autobyteus/agent/handlers/tool_result_event_handler.py +121 -89
- autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py +7 -1
- autobyteus/agent/tool_invocation.py +25 -1
- autobyteus/agent_team/agent_team_builder.py +22 -1
- autobyteus/agent_team/context/agent_team_runtime_state.py +0 -2
- autobyteus/llm/llm_factory.py +25 -57
- autobyteus/llm/ollama_provider_resolver.py +1 -0
- autobyteus/llm/providers.py +1 -0
- autobyteus/llm/token_counter/token_counter_factory.py +2 -0
- autobyteus/multimedia/audio/audio_model.py +2 -1
- autobyteus/multimedia/image/image_model.py +2 -1
- autobyteus/task_management/tools/publish_task_plan.py +4 -16
- autobyteus/task_management/tools/update_task_status.py +4 -19
- autobyteus/tools/__init__.py +2 -4
- autobyteus/tools/base_tool.py +98 -29
- autobyteus/tools/browser/standalone/__init__.py +0 -1
- autobyteus/tools/google_search.py +149 -0
- autobyteus/tools/mcp/schema_mapper.py +29 -71
- autobyteus/tools/multimedia/audio_tools.py +3 -3
- autobyteus/tools/multimedia/image_tools.py +5 -5
- autobyteus/tools/parameter_schema.py +82 -89
- autobyteus/tools/pydantic_schema_converter.py +81 -0
- autobyteus/tools/usage/formatters/default_json_example_formatter.py +89 -20
- autobyteus/tools/usage/formatters/default_xml_example_formatter.py +115 -41
- autobyteus/tools/usage/formatters/default_xml_schema_formatter.py +50 -20
- autobyteus/tools/usage/formatters/gemini_json_example_formatter.py +55 -22
- autobyteus/tools/usage/formatters/google_json_example_formatter.py +54 -21
- autobyteus/tools/usage/formatters/openai_json_example_formatter.py +53 -23
- autobyteus/tools/usage/parsers/default_xml_tool_usage_parser.py +270 -94
- autobyteus/tools/usage/providers/tool_manifest_provider.py +39 -14
- autobyteus-1.1.8.dist-info/METADATA +204 -0
- {autobyteus-1.1.6.dist-info → autobyteus-1.1.8.dist-info}/RECORD +39 -40
- examples/run_google_slides_agent.py +2 -2
- examples/run_mcp_google_slides_client.py +1 -1
- examples/run_sqlite_agent.py +1 -1
- autobyteus/tools/ask_user_input.py +0 -40
- autobyteus/tools/browser/standalone/factory/google_search_factory.py +0 -25
- autobyteus/tools/browser/standalone/google_search_ui.py +0 -126
- autobyteus-1.1.6.dist-info/METADATA +0 -161
- {autobyteus-1.1.6.dist-info → autobyteus-1.1.8.dist-info}/WHEEL +0 -0
- {autobyteus-1.1.6.dist-info → autobyteus-1.1.8.dist-info}/licenses/LICENSE +0 -0
- {autobyteus-1.1.6.dist-info → autobyteus-1.1.8.dist-info}/top_level.txt +0 -0
autobyteus/task_management/tools/publish_task_plan.py
CHANGED

@@ -4,12 +4,11 @@ import logging
 from typing import TYPE_CHECKING, Optional, Dict, Any

 from pydantic import ValidationError
-# No longer need GenerateJsonSchema from pydantic.json_schema
-# from pydantic.json_schema import GenerateJsonSchema

 from autobyteus.tools.base_tool import BaseTool
 from autobyteus.tools.tool_category import ToolCategory
 from autobyteus.tools.parameter_schema import ParameterSchema, ParameterDefinition, ParameterType
+from autobyteus.tools.pydantic_schema_converter import pydantic_to_parameter_schema
 from autobyteus.task_management.schemas import TaskPlanDefinitionSchema
 from autobyteus.task_management.converters import TaskPlanConverter, TaskBoardConverter

@@ -24,8 +23,6 @@ class PublishTaskPlan(BaseTool):

     CATEGORY = ToolCategory.TASK_MANAGEMENT

-    # The failing custom InlineSchemaGenerator has been removed.
-
     @classmethod
     def get_name(cls) -> str:
         return "PublishTaskPlan"
@@ -44,11 +41,8 @@ class PublishTaskPlan(BaseTool):
     def get_argument_schema(cls) -> Optional[ParameterSchema]:
         schema = ParameterSchema()

-        #
-
-        # JSON schema with $refs, which the framework handles correctly.
-        # This completely avoids the TypeError caused by the unsupported 'ref_strategy' argument.
-        object_json_schema = TaskPlanDefinitionSchema.model_json_schema()
+        # Convert the Pydantic model to our native ParameterSchema for the nested object
+        plan_object_schema = pydantic_to_parameter_schema(TaskPlanDefinitionSchema)

         schema.add_parameter(ParameterDefinition(
             name="plan",
@@ -59,7 +53,7 @@ class PublishTaskPlan(BaseTool):
                 "Each task must have a unique name within the plan."
             ),
             required=True,
-            object_schema=object_json_schema
+            object_schema=plan_object_schema
         ))
         return schema

@@ -83,12 +77,8 @@ class PublishTaskPlan(BaseTool):
             return error_msg

         try:
-            # Step 1: The input is now a dictionary, so we can directly validate it.
             plan_definition_schema = TaskPlanDefinitionSchema(**plan)
-
-            # Step 2: Use the dedicated converter to create the internal TaskPlan object.
             final_plan = TaskPlanConverter.from_schema(plan_definition_schema)
-
         except (ValidationError, ValueError) as e:
             error_msg = f"Invalid or inconsistent task plan provided: {e}"
             logger.warning(f"Agent '{context.agent_id}' provided an invalid plan for PublishTaskPlan: {error_msg}")
@@ -100,12 +90,10 @@ class PublishTaskPlan(BaseTool):

         if task_board.load_task_plan(final_plan):
             logger.info(f"Agent '{context.agent_id}': Task plan published successfully. Returning new board status.")
-            # Convert the new state of the board back to an LLM-friendly schema and return it.
             status_report_schema = TaskBoardConverter.to_schema(task_board)
             if status_report_schema:
                 return status_report_schema.model_dump_json(indent=2)
             else:
-                # This is a fallback case, shouldn't happen right after a successful load.
                 return "Task plan published successfully, but could not generate status report."
         else:
             error_msg = "Failed to load task plan onto the board. This can happen if the board implementation rejects the plan."
autobyteus/task_management/tools/update_task_status.py
CHANGED

@@ -6,6 +6,7 @@ from pydantic import ValidationError
 from autobyteus.tools.base_tool import BaseTool
 from autobyteus.tools.tool_category import ToolCategory
 from autobyteus.tools.parameter_schema import ParameterSchema, ParameterDefinition, ParameterType
+from autobyteus.tools.pydantic_schema_converter import pydantic_to_parameter_schema
 from autobyteus.task_management.base_task_board import TaskStatus
 from autobyteus.task_management.deliverable import FileDeliverable
 from autobyteus.task_management.schemas import FileDeliverableSchema
@@ -53,14 +54,11 @@ class UpdateTaskStatus(BaseTool):
             param_type=ParameterType.ARRAY,
             description="Optional. A list of file deliverables to submit for this task, typically when status is 'completed'. Each deliverable must include a file_path and a summary.",
             required=False,
-            array_item_schema=FileDeliverableSchema
+            array_item_schema=pydantic_to_parameter_schema(FileDeliverableSchema)
         ))
         return schema

     async def _execute(self, context: 'AgentContext', task_name: str, status: str, deliverables: Optional[List[Dict[str, Any]]] = None) -> str:
-        """
-        Executes the tool to update a task's status and optionally submit deliverables.
-        """
         agent_name = context.config.name
         log_msg = f"Agent '{agent_name}' is executing UpdateTaskStatus for task '{task_name}' to status '{status}'"
         if deliverables:
@@ -84,12 +82,7 @@ class UpdateTaskStatus(BaseTool):
             logger.warning(f"Agent '{agent_name}' tried to update task status, but no plan is loaded.")
             return error_msg

-
-        target_task = None
-        for task in task_board.current_plan.tasks:
-            if task.task_name == task_name:
-                target_task = task
-                break
+        target_task = next((t for t in task_board.current_plan.tasks if t.task_name == task_name), None)

         if not target_task:
             error_msg = f"Failed to update status for task '{task_name}'. The task name does not exist on the current plan."
@@ -103,17 +96,11 @@ class UpdateTaskStatus(BaseTool):
             logger.warning(f"Agent '{agent_name}' provided invalid status for UpdateTaskStatus: {status}")
             return f"Error: {error_msg}"

-        # --- Process Deliverables FIRST --- (CORRECTED ORDER)
         if deliverables:
             try:
                 for d_data in deliverables:
-                    # Validate and create the internal deliverable object
                     deliverable_schema = FileDeliverableSchema(**d_data)
-                    full_deliverable = FileDeliverable(
-                        **deliverable_schema.model_dump(),
-                        author_agent_name=agent_name
-                    )
-                    # Append to the task object
+                    full_deliverable = FileDeliverable(**deliverable_schema.model_dump(), author_agent_name=agent_name)
                     target_task.file_deliverables.append(full_deliverable)
                 logger.info(f"Agent '{agent_name}' successfully processed and added {len(deliverables)} deliverables to task '{task_name}'.")
             except (ValidationError, TypeError) as e:
@@ -121,8 +108,6 @@ class UpdateTaskStatus(BaseTool):
             logger.warning(f"Agent '{agent_name}': {error_msg}")
             return f"Error: {error_msg}"

-        # --- Update Status SECOND --- (CORRECTED ORDER)
-        # This will now emit an event with the deliverables already attached to the task.
         if not task_board.update_task_status(target_task.task_id, status_enum, agent_name):
             error_msg = f"Failed to update status for task '{task_name}'. An unexpected error occurred on the task board."
             logger.error(f"Agent '{agent_name}': {error_msg}")
autobyteus/tools/__init__.py
CHANGED
@@ -16,19 +16,18 @@ from .tool_category import ToolCategory
 # --- Re-export specific tools for easier access ---

 # Functional tools (decorated functions are now instances)
-from .ask_user_input import ask_user_input
 from .pdf_downloader import pdf_downloader
 from .bash.bash_executor import bash_executor
 from .file.file_reader import file_reader
 from .file.file_writer import file_writer

 # General Class-based tools
+from .google_search import GoogleSearch
 from .image_downloader import ImageDownloader
 from .timer import Timer
 from .multimedia.image_tools import GenerateImageTool, EditImageTool

 # Standalone Browser tools
-from .browser.standalone.google_search_ui import GoogleSearch
 from .browser.standalone.navigate_to import NavigateTo as StandaloneNavigateTo # Alias to avoid name clash
 from .browser.standalone.webpage_reader import WebPageReader as StandaloneWebPageReader # Alias
 from .browser.standalone.webpage_screenshot_taker import WebPageScreenshotTaker as StandaloneWebPageScreenshotTaker # Alias
@@ -54,20 +53,19 @@ __all__ = [
     "ToolCategory",

     # Re-exported functional tool instances
-    "ask_user_input",
     "pdf_downloader",
     "bash_executor",
     "file_reader",
     "file_writer",

     # Re-exported general class-based tools
+    "GoogleSearch",
     "ImageDownloader",
     "Timer",
     "GenerateImageTool",
     "EditImageTool",

     # Re-exported Standalone Browser tools
-    "GoogleSearch",
     "StandaloneNavigateTo",
     "StandaloneWebPageReader",
     "StandaloneWebPageScreenshotTaker",
autobyteus/tools/base_tool.py
CHANGED

@@ -2,18 +2,17 @@

 import logging
 from abc import ABC, abstractmethod
-from typing import Optional, Any, TYPE_CHECKING, List as TypingList, Dict
-import xml.sax.saxutils
+from typing import Optional, Any, TYPE_CHECKING, List as TypingList, Dict, Union

 from autobyteus.events.event_emitter import EventEmitter
-from autobyteus.
+from autobyteus.tools.parameter_schema import ParameterType

 from .tool_meta import ToolMeta
 from .tool_state import ToolState

 if TYPE_CHECKING:
     from autobyteus.agent.context import AgentContext
-    from autobyteus.tools.parameter_schema import ParameterSchema
+    from autobyteus.tools.parameter_schema import ParameterSchema, ParameterDefinition
     from autobyteus.tools.tool_config import ToolConfig
     from .tool_state import ToolState
     from autobyteus.tools.registry import ToolDefinition
@@ -27,41 +26,115 @@ class BaseTool(ABC, EventEmitter, metaclass=ToolMeta):
     def __init__(self, config: Optional['ToolConfig'] = None):
         super().__init__()
         self.agent_id: Optional[str] = None
-        self.definition: Optional['ToolDefinition'] = None
-        # The config is stored primarily for potential use by subclasses or future base features.
+        self.definition: Optional['ToolDefinition'] = None
         self._config = config
-        # Add a dedicated state dictionary for the tool instance
-        # CHANGED: Use ToolState class for explicit state management.
         self.tool_state: 'ToolState' = ToolState()
         logger.debug(f"BaseTool instance initializing for potential class {self.__class__.__name__}. tool_state initialized.")

     @classmethod
     def get_name(cls) -> str:
-        """Returns the registered name of the tool."""
         return cls.__name__

     @classmethod
     @abstractmethod
     def get_description(cls) -> str:
-        """Returns the description of the tool."""
         raise NotImplementedError("Subclasses must implement get_description().")

     @classmethod
     @abstractmethod
     def get_argument_schema(cls) -> Optional['ParameterSchema']:
-        """
-        Return a ParameterSchema defining the arguments this tool accepts for execution.
-        Return None if the tool accepts no arguments.
-        """
         raise NotImplementedError("Subclasses must implement get_argument_schema().")

     @classmethod
     def get_config_schema(cls) -> Optional['ParameterSchema']:
+        return None
+
+    def _coerce_argument_types(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
         """
-
-
+        Coerces argument values from the parser (often strings) to their proper
+        Python types based on the tool's argument schema.
+        This method is fully recursive to handle nested objects and arrays.
         """
-
+        arg_schema = self.get_argument_schema()
+        if not arg_schema:
+            return kwargs
+
+        return self._coerce_object_recursively(kwargs, arg_schema)
+
+    def _coerce_object_recursively(self, data: Dict[str, Any], schema: 'ParameterSchema') -> Dict[str, Any]:
+        """ Helper to recursively coerce values in an object based on a ParameterSchema. """
+        coerced_data = data.copy()
+        for name, value in data.items():
+            param_def = schema.get_parameter(name)
+            if param_def:
+                coerced_data[name] = self._coerce_value_recursively(value, param_def)
+        return coerced_data
+
+    def _coerce_value_recursively(self, value: Any, param_def: 'ParameterDefinition') -> Any:
+        """ Coerces a single value based on its ParameterDefinition, recursing into complex types. """
+        if value is None:
+            return None
+
+        # 1. Coerce empty string to empty list for ARRAY types. This is a common parser artifact.
+        if param_def.param_type == ParameterType.ARRAY and value == "":
+            return []
+
+        # 2. Recurse into objects
+        if param_def.param_type == ParameterType.OBJECT and param_def.object_schema and isinstance(value, dict):
+            return self._coerce_object_recursively(value, param_def.object_schema)
+
+        # 3. Recurse into arrays.
+        if param_def.param_type == ParameterType.ARRAY and isinstance(value, list):
+            item_schema_dict = param_def.array_item_schema
+            # If items are objects described by a schema, coerce each one.
+            if item_schema_dict and isinstance(item_schema_dict, dict) and item_schema_dict.get("type") == "object":
+                # Create a temporary ParameterSchema for the item type to enable recursion.
+                # This is a simplified conversion for coercion purposes only.
+                from .parameter_schema import ParameterSchema as TempSchema
+                from .parameter_schema import ParameterDefinition as TempDef
+
+                item_param_schema = TempSchema()
+                props = item_schema_dict.get("properties", {})
+                reqs = item_schema_dict.get("required", [])
+                for prop_name, prop_details in props.items():
+                    # This is a simplified conversion and might not capture all details,
+                    # but it's sufficient for recursive coercion.
+                    prop_type_str = prop_details.get("type", "string")
+                    try:
+                        prop_type = ParameterType(prop_type_str)
+                    except ValueError:
+                        prop_type = ParameterType.STRING
+
+                    item_param_schema.add_parameter(TempDef(
+                        name=prop_name,
+                        param_type=prop_type,
+                        description=prop_details.get("description", ""),
+                        required=prop_name in reqs,
+                        array_item_schema=prop_details.get("items") # Pass down nested array schemas
+                    ))
+
+                return [self._coerce_object_recursively(item, item_param_schema) for item in value if isinstance(item, dict)]
+
+            return value # Return list of primitives as is
+
+        # 4. Coerce primitives if they are passed as strings
+        if isinstance(value, str):
+            try:
+                if param_def.param_type == ParameterType.INTEGER:
+                    return int(value)
+                elif param_def.param_type == ParameterType.FLOAT:
+                    return float(value)
+                elif param_def.param_type == ParameterType.BOOLEAN:
+                    lower_val = value.lower()
+                    if lower_val in ["true", "1", "yes"]:
+                        return True
+                    elif lower_val in ["false", "0", "no"]:
+                        return False
+            except (ValueError, TypeError):
+                logger.warning(f"Could not coerce argument '{param_def.name}' with value '{value}' to type {param_def.param_type}. "
+                               f"Passing string value to tool.")
+
+        return value

     def set_agent_id(self, agent_id: str):
         if not isinstance(agent_id, str) or not agent_id:
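The net effect of these new helpers is that parser output, which frequently arrives as strings (for example from the XML tool-usage parser), is converted to typed values before schema validation. A minimal sketch of the behaviour with a hypothetical tool subclass (`EchoTool` is not part of the package; the sketch assumes that implementing the abstract methods shown above is enough to define and instantiate a tool, and it calls the protected helper directly only for illustration, since `execute()` normally does this):

```python
from typing import Optional

from autobyteus.tools.base_tool import BaseTool
from autobyteus.tools.parameter_schema import ParameterSchema, ParameterDefinition, ParameterType

class EchoTool(BaseTool):
    """Hypothetical tool used only to illustrate argument coercion."""

    @classmethod
    def get_description(cls) -> str:
        return "Echoes its arguments back."

    @classmethod
    def get_argument_schema(cls) -> Optional[ParameterSchema]:
        schema = ParameterSchema()
        schema.add_parameter(ParameterDefinition(
            name="count", param_type=ParameterType.INTEGER,
            description="How many times to echo.", required=True))
        schema.add_parameter(ParameterDefinition(
            name="verbose", param_type=ParameterType.BOOLEAN,
            description="Whether to log verbosely.", required=False))
        return schema

    async def _execute(self, context, count: int, verbose: bool = False) -> str:
        return f"count={count} verbose={verbose}"

tool = EchoTool()
# A parser typically hands over strings; the base class now coerces them before
# validation, e.g. "3" -> 3, "true" -> True (and "" -> [] for ARRAY parameters).
coerced = tool._coerce_argument_types({"count": "3", "verbose": "true"})
# -> {"count": 3, "verbose": True}
```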
@@ -71,30 +144,26 @@ class BaseTool(ABC, EventEmitter, metaclass=ToolMeta):
         logger.debug(f"Agent ID '{agent_id}' set for tool instance '{self.__class__.get_name()}'")

     async def execute(self, context: 'AgentContext', **kwargs):
-        # In this context, self.get_name() will call the instance-specific method if it exists.
         tool_name = self.get_name()
         if self.agent_id is None:
             self.set_agent_id(context.agent_id)
-
-
-
-                f"calling context's agent_id '{context.agent_id}'. Updating tool's agent_id."
-            )
-            self.set_agent_id(context.agent_id)
+
+        # Coerce types before validation and execution
+        coerced_kwargs = self._coerce_argument_types(kwargs)

         arg_schema = self.get_argument_schema()
         if arg_schema:
-            is_valid, errors = arg_schema.validate_config(kwargs)
+            is_valid, errors = arg_schema.validate_config(coerced_kwargs)
             if not is_valid:
                 error_message = f"Invalid arguments for tool '{tool_name}': {'; '.join(errors)}"
                 logger.error(error_message)
                 raise ValueError(error_message)
-        elif kwargs:
-            logger.warning(f"Tool '{tool_name}' does not define an argument schema but received arguments: {kwargs}. These will be passed to _execute.")
+        elif coerced_kwargs:
+            logger.warning(f"Tool '{tool_name}' does not define an argument schema but received arguments: {coerced_kwargs}. These will be passed to _execute.")

-        logger.info(f"Executing tool '{tool_name}' for agent '{self.agent_id}' with args: {kwargs}")
+        logger.info(f"Executing tool '{tool_name}' for agent '{self.agent_id}' with args: {coerced_kwargs}")
         try:
-            result = await self._execute(context=context, **kwargs)
+            result = await self._execute(context=context, **coerced_kwargs)
             logger.info(f"Tool '{tool_name}' execution completed successfully for agent '{self.agent_id}'.")
             return result
         except Exception as e:
autobyteus/tools/google_search.py
ADDED

@@ -0,0 +1,149 @@
+import os
+import json
+import logging
+import aiohttp
+from typing import Optional, TYPE_CHECKING, Any, Dict, List
+
+from autobyteus.tools.base_tool import BaseTool
+from autobyteus.tools.tool_config import ToolConfig
+from autobyteus.tools.parameter_schema import ParameterSchema, ParameterDefinition, ParameterType
+from autobyteus.tools.tool_category import ToolCategory
+
+if TYPE_CHECKING:
+    from autobyteus.agent.context import AgentContext
+
+logger = logging.getLogger(__name__)
+
+class GoogleSearch(BaseTool):
+    """
+    Performs a Google search using the Serper.dev API and returns a structured summary of the results.
+    This tool requires a Serper API key, which should be set in the SERPER_API_KEY environment variable.
+    """
+    CATEGORY = ToolCategory.WEB
+    API_URL = "https://google.serper.dev/search"
+
+    def __init__(self, config: Optional[ToolConfig] = None):
+        super().__init__(config=config)
+        self.api_key: Optional[str] = None
+
+        if config:
+            self.api_key = config.get('api_key')
+
+        if not self.api_key:
+            self.api_key = os.getenv("SERPER_API_KEY")
+
+        if not self.api_key:
+            raise ValueError(
+                "GoogleSearch tool requires a Serper API key. "
+                "Please provide it via the 'api_key' config parameter or set the 'SERPER_API_KEY' environment variable."
+            )
+        logger.debug("GoogleSearch (API-based) tool initialized.")
+
+    @classmethod
+    def get_name(cls) -> str:
+        return "GoogleSearch"
+
+    @classmethod
+    def get_description(cls) -> str:
+        return (
+            "Searches Google for a given query using the Serper API. "
+            "Returns a concise, structured summary of search results, including direct answers and top organic links."
+        )
+
+    @classmethod
+    def get_argument_schema(cls) -> Optional[ParameterSchema]:
+        schema = ParameterSchema()
+        schema.add_parameter(ParameterDefinition(
+            name="query",
+            param_type=ParameterType.STRING,
+            description="The search query string.",
+            required=True
+        ))
+        schema.add_parameter(ParameterDefinition(
+            name="num_results",
+            param_type=ParameterType.INTEGER,
+            description="The number of organic search results to return.",
+            required=False,
+            default_value=5,
+            min_value=1,
+            max_value=10
+        ))
+        return schema
+
+    @classmethod
+    def get_config_schema(cls) -> Optional[ParameterSchema]:
+        schema = ParameterSchema()
+        schema.add_parameter(ParameterDefinition(
+            name="api_key",
+            param_type=ParameterType.STRING,
+            description="The API key for the Serper.dev service. Overrides the SERPER_API_KEY environment variable.",
+            required=False
+        ))
+        return schema
+
+    def _format_results(self, data: Dict[str, Any]) -> str:
+        """Formats the JSON response from Serper into a clean string for an LLM."""
+        summary_parts = []
+
+        # 1. Answer Box (most important for direct questions)
+        if "answerBox" in data:
+            answer_box = data["answerBox"]
+            title = answer_box.get("title", "")
+            snippet = answer_box.get("snippet") or answer_box.get("answer")
+            summary_parts.append(f"Direct Answer for '{title}':\n{snippet}")
+
+        # 2. Knowledge Graph (for entity information)
+        if "knowledgeGraph" in data:
+            kg = data["knowledgeGraph"]
+            title = kg.get("title", "")
+            description = kg.get("description")
+            summary_parts.append(f"Summary for '{title}':\n{description}")
+
+        # 3. Organic Results (the main search links)
+        if "organic" in data and data["organic"]:
+            organic_results = data["organic"]
+            results_str = "\n".join(
+                f"{i+1}. {result.get('title', 'No Title')}\n"
+                f" Link: {result.get('link', 'No Link')}\n"
+                f" Snippet: {result.get('snippet', 'No Snippet')}"
+                for i, result in enumerate(organic_results)
+            )
+            summary_parts.append(f"Search Results:\n{results_str}")
+
+        if not summary_parts:
+            return "No relevant information found for the query."
+
+        return "\n\n---\n\n".join(summary_parts)
+
+
+    async def _execute(self, context: 'AgentContext', query: str, num_results: int = 5) -> str:
+        logger.info(f"Executing GoogleSearch (API) for agent {context.agent_id} with query: '{query}'")
+
+        headers = {
+            'X-API-KEY': self.api_key,
+            'Content-Type': 'application/json'
+        }
+        payload = json.dumps({
+            "q": query,
+            "num": num_results
+        })
+
+        try:
+            async with aiohttp.ClientSession() as session:
+                async with session.post(self.API_URL, headers=headers, data=payload) as response:
+                    if response.status == 200:
+                        data = await response.json()
+                        return self._format_results(data)
+                    else:
+                        error_text = await response.text()
+                        logger.error(
+                            f"Serper API returned a non-200 status code: {response.status}. "
+                            f"Response: {error_text}"
+                        )
+                        raise RuntimeError(f"API request failed with status {response.status}: {error_text}")
+        except aiohttp.ClientError as e:
+            logger.error(f"Network error during GoogleSearch API call: {e}", exc_info=True)
+            raise RuntimeError(f"A network error occurred: {e}")
+        except Exception as e:
+            logger.error(f"An unexpected error occurred in GoogleSearch tool: {e}", exc_info=True)
+            raise
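
For reference, a minimal sketch of constructing the new tool outside an agent runtime (the key value is a placeholder; in normal use the agent framework supplies the `AgentContext` and invokes `execute()`, which in turn calls `_execute()`):

```python
import os

from autobyteus.tools import GoogleSearch

# Placeholder for illustration only; a real Serper.dev key is required at runtime.
os.environ.setdefault("SERPER_API_KEY", "<your-serper-api-key>")

tool = GoogleSearch()
schema = GoogleSearch.get_argument_schema()
# Declared arguments, as defined above: query (STRING, required) and
# num_results (INTEGER, optional, default 5, bounded 1-10).
```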