autobyteus 1.1.9__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autobyteus/clients/__init__.py +10 -0
- autobyteus/clients/autobyteus_client.py +318 -0
- autobyteus/clients/cert_utils.py +105 -0
- autobyteus/clients/certificates/cert.pem +34 -0
- autobyteus/llm/api/autobyteus_llm.py +1 -1
- autobyteus/llm/api/zhipu_llm.py +26 -0
- autobyteus/llm/autobyteus_provider.py +1 -1
- autobyteus/llm/llm_factory.py +23 -0
- autobyteus/llm/ollama_provider_resolver.py +1 -0
- autobyteus/llm/providers.py +1 -0
- autobyteus/llm/token_counter/token_counter_factory.py +3 -0
- autobyteus/llm/token_counter/zhipu_token_counter.py +24 -0
- autobyteus/multimedia/audio/api/autobyteus_audio_client.py +1 -1
- autobyteus/multimedia/audio/autobyteus_audio_provider.py +1 -1
- autobyteus/multimedia/image/api/autobyteus_image_client.py +1 -1
- autobyteus/multimedia/image/autobyteus_image_provider.py +1 -1
- autobyteus/task_management/__init__.py +2 -1
- autobyteus/task_management/schemas/task_definition.py +1 -1
- autobyteus/task_management/schemas/task_status_report.py +1 -1
- autobyteus/task_management/tools/__init__.py +2 -0
- autobyteus/task_management/tools/assign_task_to.py +125 -0
- autobyteus/tools/__init__.py +52 -16
- autobyteus/tools/download_media_tool.py +136 -0
- autobyteus/tools/file/file_editor.py +200 -0
- autobyteus/tools/usage/parsers/_string_decoders.py +18 -0
- autobyteus/tools/usage/parsers/default_json_tool_usage_parser.py +9 -1
- autobyteus/tools/usage/parsers/default_xml_tool_usage_parser.py +15 -1
- autobyteus/tools/usage/parsers/gemini_json_tool_usage_parser.py +4 -1
- autobyteus/tools/usage/parsers/openai_json_tool_usage_parser.py +4 -1
- {autobyteus-1.1.9.dist-info → autobyteus-1.2.0.dist-info}/METADATA +3 -2
- {autobyteus-1.1.9.dist-info → autobyteus-1.2.0.dist-info}/RECORD +34 -26
- autobyteus/tools/image_downloader.py +0 -99
- autobyteus/tools/pdf_downloader.py +0 -89
- {autobyteus-1.1.9.dist-info → autobyteus-1.2.0.dist-info}/WHEEL +0 -0
- {autobyteus-1.1.9.dist-info → autobyteus-1.2.0.dist-info}/licenses/LICENSE +0 -0
- {autobyteus-1.1.9.dist-info → autobyteus-1.2.0.dist-info}/top_level.txt +0 -0
|
@@ -13,7 +13,7 @@ class TaskDefinitionSchema(BaseModel):
|
|
|
13
13
|
"""A Pydantic model representing a single task as defined by an LLM."""
|
|
14
14
|
task_name: str = Field(..., description="A short, unique, descriptive name for this task within the plan (e.g., 'setup_project', 'implement_scraper'). Used for defining dependencies.")
|
|
15
15
|
assignee_name: str = Field(..., description="The name of the agent or sub-team assigned to this task.")
|
|
16
|
-
description: str = Field(..., description="A detailed description of the task.")
|
|
16
|
+
description: str = Field(..., description="A clear, detailed, and unambiguous description of what this task entails. Provide all necessary context for the assignee to complete the work. For example, if the task involves a file, specify its full, absolute path. If it requires creating a file, specify where it should be saved. Mention any specific requirements or expected outputs.")
|
|
17
17
|
dependencies: List[str] = Field(
|
|
18
18
|
default_factory=list,
|
|
19
19
|
description="A list of 'task_name' values for tasks that must be completed first."
|
|
@@ -16,7 +16,7 @@ class TaskStatusReportItemSchema(BaseModel):
|
|
|
16
16
|
"""Represents the status of a single task in an LLM-friendly format."""
|
|
17
17
|
task_name: str = Field(..., description="The unique, descriptive name for this task.")
|
|
18
18
|
assignee_name: str = Field(..., description="The name of the agent or sub-team assigned to this task.")
|
|
19
|
-
description: str = Field(..., description="A detailed description of the task.")
|
|
19
|
+
description: str = Field(..., description="A clear, detailed, and unambiguous description of what this task entails. Provide all necessary context for the assignee to complete the work. For example, if the task involves a file, specify its full, absolute path. If it requires creating a file, specify where it should be saved. Mention any specific requirements or expected outputs.")
|
|
20
20
|
dependencies: List[str] = Field(..., description="A list of 'task_name' values for tasks that must be completed first.")
|
|
21
21
|
status: TaskStatus = Field(..., description="The current status of this task.")
|
|
22
22
|
file_deliverables: List[FileDeliverable] = Field(default_factory=list, description="A list of files submitted as deliverables for this task.")
|
|
@@ -7,10 +7,12 @@ from .get_task_board_status import GetTaskBoardStatus
|
|
|
7
7
|
from .publish_tasks import PublishTasks
|
|
8
8
|
from .publish_task import PublishTask
|
|
9
9
|
from .update_task_status import UpdateTaskStatus
|
|
10
|
+
from .assign_task_to import AssignTaskTo
|
|
10
11
|
|
|
11
12
|
__all__ = [
|
|
12
13
|
"GetTaskBoardStatus",
|
|
13
14
|
"PublishTasks",
|
|
14
15
|
"PublishTask",
|
|
15
16
|
"UpdateTaskStatus",
|
|
17
|
+
"AssignTaskTo",
|
|
16
18
|
]
|
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
# file: autobyteus/autobyteus/task_management/tools/assign_task_to.py
|
|
2
|
+
import logging
|
|
3
|
+
from typing import TYPE_CHECKING, Optional, Any
|
|
4
|
+
|
|
5
|
+
from pydantic import ValidationError
|
|
6
|
+
|
|
7
|
+
from autobyteus.tools.base_tool import BaseTool
|
|
8
|
+
from autobyteus.tools.tool_category import ToolCategory
|
|
9
|
+
from autobyteus.utils.parameter_schema import ParameterSchema
|
|
10
|
+
from autobyteus.tools.pydantic_schema_converter import pydantic_to_parameter_schema
|
|
11
|
+
from autobyteus.task_management.schemas import TaskDefinitionSchema
|
|
12
|
+
from autobyteus.task_management.task import Task
|
|
13
|
+
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
from autobyteus.agent.context import AgentContext
|
|
16
|
+
from autobyteus.agent_team.context import AgentTeamContext
|
|
17
|
+
from autobyteus.agent_team.context.team_manager import TeamManager
|
|
18
|
+
|
|
19
|
+
logger = logging.getLogger(__name__)
|
|
20
|
+
|
|
21
|
+
class AssignTaskTo(BaseTool):
    """A tool for one agent to directly create and assign a single task to another agent.

    Performs two steps: (1) validates the task definition and publishes it to the
    team's shared TaskBoard, (2) dispatches a direct notification message to the
    assignee. Step 2 failures are reported as warnings, not errors, because the
    primary action (publishing) has already succeeded.
    """

    CATEGORY = ToolCategory.TASK_MANAGEMENT

    @classmethod
    def get_name(cls) -> str:
        # Registered tool name exposed to the LLM.
        return "AssignTaskTo"

    @classmethod
    def get_description(cls) -> str:
        return (
            "Creates and assigns a single new task to a specific team member, and sends them a direct notification "
            "with the task details. Use this to delegate a well-defined piece of work you have identified."
        )

    @classmethod
    def get_argument_schema(cls) -> Optional[ParameterSchema]:
        # The schema is the same as for defining a single task.
        return pydantic_to_parameter_schema(TaskDefinitionSchema)

    async def _execute(self, context: 'AgentContext', **kwargs: Any) -> str:
        """
        Executes the tool by adding the task to the central TaskBoard and then
        sending a direct message to the assignee with the task's details.

        Returns a human-readable status string in all cases; errors and degraded
        states are reported as strings rather than raised, so the LLM can react.
        """
        agent_name = context.config.name
        # These are read only for logging here; real validation happens when
        # TaskDefinitionSchema(**kwargs) is constructed below.
        task_name = kwargs.get("task_name", "unnamed task")
        assignee_name = kwargs.get("assignee_name")
        logger.info(f"Agent '{agent_name}' is executing AssignTaskTo for task '{task_name}' assigned to '{assignee_name}'.")

        # --- Get Team Context and Task Board ---
        team_context: Optional['AgentTeamContext'] = context.custom_data.get("team_context")
        if not team_context:
            error_msg = "Error: Team context is not available. Cannot access the task board or send messages."
            logger.error(f"Agent '{agent_name}': {error_msg}")
            return error_msg

        task_board = getattr(team_context.state, 'task_board', None)
        if not task_board:
            error_msg = "Error: Task board has not been initialized for this team."
            logger.error(f"Agent '{agent_name}': {error_msg}")
            return error_msg

        # --- Action 1: Add the task to the Task Board ---
        try:
            task_def_schema = TaskDefinitionSchema(**kwargs)
            new_task = Task(**task_def_schema.model_dump())
        except (ValidationError, ValueError) as e:
            error_msg = f"Invalid task definition provided: {e}"
            logger.warning(f"Agent '{agent_name}' provided an invalid definition for AssignTaskTo: {error_msg}")
            return f"Error: {error_msg}"

        # NOTE(review): add_task returning False carries no reason; assumed to be
        # a duplicate/rejection — confirm against TaskBoard.add_task's contract.
        if not task_board.add_task(new_task):
            error_msg = f"Failed to publish task '{new_task.task_name}' to the board for an unknown reason."
            logger.error(f"Agent '{agent_name}': {error_msg}")
            return f"Error: {error_msg}"

        logger.info(f"Agent '{agent_name}' successfully published task '{new_task.task_name}' to the task board.")

        # --- Action 2: Send a direct notification message to the assignee ---
        team_manager: Optional['TeamManager'] = team_context.team_manager
        if not team_manager:
            # This is a degraded state, but the primary action (publishing) succeeded.
            warning_msg = (f"Successfully published task '{new_task.task_name}', but could not send a direct notification "
                           "because the TeamManager is not available.")
            logger.warning(f"Agent '{agent_name}': {warning_msg}")
            return warning_msg

        try:
            # Local import to break potential circular dependency at module load time.
            from autobyteus.agent_team.events.agent_team_events import InterAgentMessageRequestEvent

            notification_content = (
                f"You have been assigned a new task directly from agent '{agent_name}'.\n\n"
                f"**Task Name**: '{new_task.task_name}'\n"
                f"**Description**: {new_task.description}\n"
            )
            if new_task.dependencies:
                # Resolve dependency names for the message
                id_to_name_map = {task.task_id: task.task_name for task in task_board.tasks}
                dep_names = [id_to_name_map.get(dep_id, str(dep_id)) for dep_id in new_task.dependencies]
                notification_content += f"**Dependencies**: {', '.join(dep_names)}\n"

            notification_content += "\nThis task has been logged on the team's task board. You can begin work when its dependencies are met."

            event = InterAgentMessageRequestEvent(
                sender_agent_id=context.agent_id,
                recipient_name=new_task.assignee_name,
                content=notification_content,
                message_type="task_assignment"
            )

            await team_manager.dispatch_inter_agent_message_request(event)
            logger.info(f"Agent '{agent_name}' successfully dispatched a notification message for task '{new_task.task_name}' to '{new_task.assignee_name}'.")

        except Exception as e:
            # Again, this is a degraded state. The main goal was achieved.
            warning_msg = (f"Successfully published task '{new_task.task_name}', but failed to send the direct notification message. "
                           f"Error: {e}")
            logger.error(f"Agent '{agent_name}': {warning_msg}", exc_info=True)
            return warning_msg

        success_msg = f"Successfully assigned task '{new_task.task_name}' to agent '{new_task.assignee_name}' and sent a notification."
        return success_msg
|
autobyteus/tools/__init__.py
CHANGED
|
@@ -5,6 +5,8 @@ for creating tools within the AutoByteUs framework.
|
|
|
5
5
|
It also contains implementations of various standard tools.
|
|
6
6
|
"""
|
|
7
7
|
|
|
8
|
+
import logging
|
|
9
|
+
|
|
8
10
|
# Core components for defining tools
|
|
9
11
|
from .base_tool import BaseTool
|
|
10
12
|
from .functional_tool import tool # The @tool decorator
|
|
@@ -13,33 +15,67 @@ from .tool_config import ToolConfig # Configuration data object, primarily for c
|
|
|
13
15
|
from .tool_origin import ToolOrigin
|
|
14
16
|
from .tool_category import ToolCategory
|
|
15
17
|
|
|
18
|
+
logger = logging.getLogger(__name__)
|
|
19
|
+
|
|
16
20
|
# --- Re-export specific tools for easier access ---
|
|
17
21
|
|
|
18
22
|
# Functional tools (decorated functions are now instances)
|
|
19
|
-
from .pdf_downloader import pdf_downloader
|
|
20
23
|
from .bash.bash_executor import bash_executor
|
|
21
24
|
from .file.file_reader import file_reader
|
|
22
25
|
from .file.file_writer import file_writer
|
|
26
|
+
from .file.file_editor import file_edit
|
|
23
27
|
|
|
24
28
|
# General Class-based tools
|
|
25
|
-
|
|
26
|
-
from .
|
|
29
|
+
try:
|
|
30
|
+
from .google_search import GoogleSearch
|
|
31
|
+
except ModuleNotFoundError as import_err:
|
|
32
|
+
logger.warning("GoogleSearch tool not available: %s", import_err)
|
|
33
|
+
GoogleSearch = None
|
|
27
34
|
from .timer import Timer
|
|
28
|
-
|
|
29
|
-
from .multimedia.
|
|
35
|
+
try:
|
|
36
|
+
from .multimedia.image_tools import GenerateImageTool, EditImageTool
|
|
37
|
+
except ModuleNotFoundError as import_err:
|
|
38
|
+
logger.warning("Image tools not available: %s", import_err)
|
|
39
|
+
GenerateImageTool = None
|
|
40
|
+
EditImageTool = None
|
|
41
|
+
try:
|
|
42
|
+
from .multimedia.media_reader_tool import ReadMediaFile
|
|
43
|
+
except ModuleNotFoundError as import_err:
|
|
44
|
+
logger.warning("Media reader tool not available: %s", import_err)
|
|
45
|
+
ReadMediaFile = None
|
|
46
|
+
try:
|
|
47
|
+
from .download_media_tool import DownloadMediaTool
|
|
48
|
+
except ModuleNotFoundError as import_err:
|
|
49
|
+
logger.warning("Download media tool not available: %s", import_err)
|
|
50
|
+
DownloadMediaTool = None
|
|
30
51
|
|
|
31
52
|
# Standalone Browser tools
|
|
32
|
-
|
|
33
|
-
from .browser.standalone.
|
|
34
|
-
from .browser.standalone.
|
|
35
|
-
from .browser.standalone.
|
|
36
|
-
from .browser.standalone.
|
|
53
|
+
try:
|
|
54
|
+
from .browser.standalone.navigate_to import NavigateTo as StandaloneNavigateTo # Alias to avoid name clash
|
|
55
|
+
from .browser.standalone.webpage_reader import WebPageReader as StandaloneWebPageReader # Alias
|
|
56
|
+
from .browser.standalone.webpage_screenshot_taker import WebPageScreenshotTaker as StandaloneWebPageScreenshotTaker # Alias
|
|
57
|
+
from .browser.standalone.webpage_image_downloader import WebPageImageDownloader
|
|
58
|
+
from .browser.standalone.web_page_pdf_generator import WebPagePDFGenerator
|
|
59
|
+
except ModuleNotFoundError as import_err:
|
|
60
|
+
logger.warning('Standalone browser tools not available: %s', import_err)
|
|
61
|
+
StandaloneNavigateTo = None
|
|
62
|
+
StandaloneWebPageReader = None
|
|
63
|
+
StandaloneWebPageScreenshotTaker = None
|
|
64
|
+
WebPageImageDownloader = None
|
|
65
|
+
WebPagePDFGenerator = None
|
|
37
66
|
|
|
38
67
|
# Session-Aware Browser tools
|
|
39
|
-
|
|
40
|
-
from .browser.session_aware.
|
|
41
|
-
from .browser.session_aware.
|
|
42
|
-
from .browser.session_aware.
|
|
68
|
+
try:
|
|
69
|
+
from .browser.session_aware.browser_session_aware_navigate_to import BrowserSessionAwareNavigateTo
|
|
70
|
+
from .browser.session_aware.browser_session_aware_web_element_trigger import BrowserSessionAwareWebElementTrigger
|
|
71
|
+
from .browser.session_aware.browser_session_aware_webpage_reader import BrowserSessionAwareWebPageReader
|
|
72
|
+
from .browser.session_aware.browser_session_aware_webpage_screenshot_taker import BrowserSessionAwareWebPageScreenshotTaker
|
|
73
|
+
except ModuleNotFoundError as import_err:
|
|
74
|
+
logger.warning('Session-aware browser tools not available: %s', import_err)
|
|
75
|
+
BrowserSessionAwareNavigateTo = None
|
|
76
|
+
BrowserSessionAwareWebElementTrigger = None
|
|
77
|
+
BrowserSessionAwareWebPageReader = None
|
|
78
|
+
BrowserSessionAwareWebPageScreenshotTaker = None
|
|
43
79
|
|
|
44
80
|
|
|
45
81
|
__all__ = [
|
|
@@ -54,18 +90,18 @@ __all__ = [
|
|
|
54
90
|
"ToolCategory",
|
|
55
91
|
|
|
56
92
|
# Re-exported functional tool instances
|
|
57
|
-
"pdf_downloader",
|
|
58
93
|
"bash_executor",
|
|
59
94
|
"file_reader",
|
|
60
95
|
"file_writer",
|
|
96
|
+
"file_edit",
|
|
61
97
|
|
|
62
98
|
# Re-exported general class-based tools
|
|
63
99
|
"GoogleSearch",
|
|
64
|
-
"ImageDownloader",
|
|
65
100
|
"Timer",
|
|
66
101
|
"GenerateImageTool",
|
|
67
102
|
"EditImageTool",
|
|
68
103
|
"ReadMediaFile",
|
|
104
|
+
"DownloadMediaTool",
|
|
69
105
|
|
|
70
106
|
# Re-exported Standalone Browser tools
|
|
71
107
|
"StandaloneNavigateTo",
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import logging
|
|
3
|
+
import mimetypes
|
|
4
|
+
import aiohttp
|
|
5
|
+
from typing import Optional, TYPE_CHECKING
|
|
6
|
+
from urllib.parse import urlparse
|
|
7
|
+
|
|
8
|
+
from autobyteus.tools.base_tool import BaseTool
|
|
9
|
+
from autobyteus.tools.tool_category import ToolCategory
|
|
10
|
+
from autobyteus.utils.file_utils import get_default_download_folder
|
|
11
|
+
from autobyteus.utils.parameter_schema import ParameterSchema, ParameterDefinition, ParameterType
|
|
12
|
+
|
|
13
|
+
if TYPE_CHECKING:
|
|
14
|
+
from autobyteus.agent.context import AgentContext
|
|
15
|
+
|
|
16
|
+
logger = logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
class DownloadMediaTool(BaseTool):
    """
    A unified tool to download any media file (e.g., image, PDF, audio) from a URL.

    The file extension is derived from the response's Content-Type header, with a
    fallback to the extension embedded in the URL path. Saved filenames are made
    unique with a numeric suffix to avoid overwriting existing files.
    """
    CATEGORY = ToolCategory.WEB

    @classmethod
    def get_name(cls) -> str:
        return "DownloadMedia"

    @classmethod
    def get_description(cls) -> str:
        return (
            "Downloads various media files (e.g., images like PNG/JPG, documents like PDF, audio like MP3/WAV) "
            "from a direct URL and saves them locally. It intelligently determines the correct file extension "
            "based on the content type. Returns the absolute path to the downloaded file."
        )

    @classmethod
    def get_argument_schema(cls) -> ParameterSchema:
        schema = ParameterSchema()
        schema.add_parameter(ParameterDefinition(
            name="url",
            param_type=ParameterType.STRING,
            description="The direct URL of the media file to download.",
            required=True
        ))
        schema.add_parameter(ParameterDefinition(
            name="filename",
            param_type=ParameterType.STRING,
            description="The desired base name for the downloaded file (e.g., 'vacation_photo', 'annual_report'). The tool will automatically add the correct file extension.",
            required=True
        ))
        schema.add_parameter(ParameterDefinition(
            name="folder",
            param_type=ParameterType.STRING,
            description="Optional. A custom directory path to save the file. If not provided, the system's default download folder will be used.",
            required=False
        ))
        return schema

    async def _execute(self, context: 'AgentContext', url: str, filename: str, folder: Optional[str] = None) -> str:
        """Download the media at ``url`` and save it as ``filename`` (plus extension).

        Args:
            context: Agent context (unused here beyond the tool contract).
            url: Direct URL of the media file.
            filename: Base name without extension; must not contain path characters.
            folder: Optional destination directory; defaults to the system download folder.

        Returns:
            A success message containing the absolute path of the saved file.

        Raises:
            ValueError: If ``folder`` or ``filename`` contains path characters.
            IOError: If the destination directory or file cannot be written.
            ConnectionError: If the HTTP request fails.
            RuntimeError: For any other unexpected failure.
        """
        # 1. Determine download directory
        try:
            if folder:
                # Security: prevent path traversal attacks.
                if ".." in folder:
                    raise ValueError("Security error: 'folder' path cannot contain '..'.")
                destination_dir = os.path.abspath(folder)
            else:
                destination_dir = get_default_download_folder()

            os.makedirs(destination_dir, exist_ok=True)
        except Exception as e:
            logger.error(f"Error preparing download directory '{folder or 'default'}': {e}", exc_info=True)
            raise IOError(f"Failed to create or access download directory: {e}")

        # 2. Sanitize filename provided by the LLM
        if not filename or ".." in filename or os.path.isabs(filename) or "/" in filename or "\\" in filename:
            raise ValueError("Invalid filename. It must be a simple name without any path characters ('..', '/', '\\').")

        # Fix: interpolate the actual requested filename (was a '(unknown)' placeholder).
        logger.info(f"Attempting to download from {url} to save as '{filename}' in '{destination_dir}'.")

        # 3. Download and process file asynchronously
        try:
            async with aiohttp.ClientSession() as session:
                # Fix: aiohttp expects a ClientTimeout object; passing a bare number
                # to `timeout` is deprecated.
                async with session.get(url, timeout=aiohttp.ClientTimeout(total=60)) as response:
                    response.raise_for_status()

                    # 4. Intelligently determine file extension from Content-Type header
                    content_type = response.headers.get('Content-Type')
                    correct_ext = ''
                    if content_type:
                        mime_type = content_type.split(';')[0].strip()
                        guess = mimetypes.guess_extension(mime_type)
                        if guess:
                            correct_ext = guess
                            logger.debug(f"Determined extension '{correct_ext}' from Content-Type: '{mime_type}'")

                    # Fallback to URL extension if Content-Type is generic or missing
                    if not correct_ext or correct_ext == '.bin':
                        url_path = urlparse(url).path
                        _, ext_from_url = os.path.splitext(os.path.basename(url_path))
                        if ext_from_url and len(ext_from_url) > 1:  # Ensure it's not just a dot
                            logger.debug(f"Using fallback extension '{ext_from_url}' from URL.")
                            correct_ext = ext_from_url

                    if not correct_ext:
                        logger.warning("Could not determine a file extension. The file will be saved without one.")

                    # 5. Construct final filename and path
                    base_filename, _ = os.path.splitext(filename)
                    final_filename = f"{base_filename}{correct_ext}"
                    save_path = os.path.join(destination_dir, final_filename)

                    # Ensure filename is unique to avoid overwriting
                    counter = 1
                    while os.path.exists(save_path):
                        final_filename = f"{base_filename}_{counter}{correct_ext}"
                        save_path = os.path.join(destination_dir, final_filename)
                        counter += 1

                    # 6. Stream file content to disk
                    with open(save_path, 'wb') as f:
                        async for chunk in response.content.iter_chunked(8192):
                            f.write(chunk)

                    logger.info(f"Successfully downloaded and saved file to: {save_path}")
                    return f"Successfully downloaded file to: {save_path}"

        except aiohttp.ClientError as e:
            logger.error(f"Network error while downloading from {url}: {e}", exc_info=True)
            raise ConnectionError(f"Failed to download from {url}: {e}")
        except IOError as e:
            logger.error(f"Failed to write downloaded file to {destination_dir}: {e}", exc_info=True)
            raise
        except Exception as e:
            logger.error(f"An unexpected error occurred during download from {url}: {e}", exc_info=True)
            raise RuntimeError(f"An unexpected error occurred: {e}")
|
|
@@ -0,0 +1,200 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import re
|
|
3
|
+
import logging
|
|
4
|
+
from typing import TYPE_CHECKING, List
|
|
5
|
+
|
|
6
|
+
from autobyteus.tools.functional_tool import tool
|
|
7
|
+
from autobyteus.tools.tool_category import ToolCategory
|
|
8
|
+
|
|
9
|
+
if TYPE_CHECKING:
|
|
10
|
+
from autobyteus.agent.context import AgentContext
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)

# Matches a unified-diff hunk header such as "@@ -12,3 +12,4 @@"; per the
# unified diff format, an omitted count defaults to 1.
_HUNK_HEADER_RE = re.compile(r"^@@ -(?P<old_start>\d+)(?:,(?P<old_count>\d+))? \+(?P<new_start>\d+)(?:,(?P<new_count>\d+))? @@")

class PatchApplicationError(ValueError):
    """Raised when a unified diff patch cannot be applied to the target file."""


def _resolve_file_path(context: 'AgentContext', path: str) -> str:
    """Resolves an absolute path for the given input, using the agent workspace when needed.

    Args:
        context: Agent context; its workspace (if configured) anchors relative paths.
        path: Absolute or workspace-relative path.

    Raises:
        ValueError: If ``path`` is relative and no usable workspace base path exists.
    """
    if os.path.isabs(path):
        final_path = path
        logger.debug("FileEdit: provided path '%s' is absolute.", path)
    else:
        if not context.workspace:
            error_msg = ("Relative path '%s' provided, but no workspace is configured for agent '%s'. "
                         "A workspace is required to resolve relative paths.")
            logger.error(error_msg, path, context.agent_id)
            raise ValueError(error_msg % (path, context.agent_id))
        base_path = context.workspace.get_base_path()
        if not base_path or not isinstance(base_path, str):
            error_msg = ("Agent '%s' has a configured workspace, but it provided an invalid base path ('%s'). "
                         "Cannot resolve relative path '%s'.")
            logger.error(error_msg, context.agent_id, base_path, path)
            raise ValueError(error_msg % (context.agent_id, base_path, path))
        final_path = os.path.join(base_path, path)
        logger.debug("FileEdit: resolved relative path '%s' against workspace base '%s' to '%s'.", path, base_path, final_path)

    normalized_path = os.path.normpath(final_path)
    logger.debug("FileEdit: normalized path to '%s'.", normalized_path)
    return normalized_path


def _apply_unified_diff(original_lines: List[str], patch: str) -> List[str]:
    """Applies a unified diff patch to the provided original lines and returns the patched lines.

    Args:
        original_lines: File content as lines with newlines preserved
            (as produced by ``splitlines(keepends=True)``).
        patch: Unified diff text; ``---``/``+++`` file headers are optional.

    Raises:
        PatchApplicationError: If the patch is empty, malformed, has hunks that
            overlap / fall outside the file, or does not match the file content.
    """
    if not patch or not patch.strip():
        raise PatchApplicationError("Patch content is empty; nothing to apply.")

    patched_lines: List[str] = []
    orig_idx = 0
    patch_lines = patch.splitlines(keepends=True)
    line_idx = 0

    while line_idx < len(patch_lines):
        line = patch_lines[line_idx]

        if line.startswith('---') or line.startswith('+++'):
            logger.debug("FileEdit: skipping diff header line '%s'.", line.strip())
            line_idx += 1
            continue

        if not line.startswith('@@'):
            stripped = line.strip()
            if stripped == '':
                line_idx += 1
                continue
            raise PatchApplicationError(f"Unexpected content outside of hunk header: '{stripped}'.")

        match = _HUNK_HEADER_RE.match(line)
        if not match:
            raise PatchApplicationError(f"Malformed hunk header: '{line.strip()}'.")

        old_start = int(match.group('old_start'))
        old_count = int(match.group('old_count') or '1')
        new_start = int(match.group('new_start'))
        new_count = int(match.group('new_count') or '1')
        logger.debug("FileEdit: processing hunk old_start=%s old_count=%s new_start=%s new_count=%s.",
                     old_start, old_count, new_start, new_count)

        # An old_start of 0 means "insert before the first line".
        target_idx = old_start - 1 if old_start > 0 else 0
        if target_idx > len(original_lines):
            raise PatchApplicationError("Patch hunk starts beyond end of file.")
        if target_idx < orig_idx:
            raise PatchApplicationError("Patch hunks overlap or are out of order.")

        # Copy the untouched lines between the previous hunk and this one.
        patched_lines.extend(original_lines[orig_idx:target_idx])
        orig_idx = target_idx

        line_idx += 1
        hunk_consumed = 0
        removed = 0
        added = 0

        while line_idx < len(patch_lines):
            hunk_line = patch_lines[line_idx]
            if hunk_line.startswith('@@'):
                break

            if hunk_line.startswith('-'):
                if orig_idx >= len(original_lines):
                    raise PatchApplicationError("Patch attempts to remove lines beyond file length.")
                if original_lines[orig_idx] != hunk_line[1:]:
                    raise PatchApplicationError("Patch removal does not match file content.")
                orig_idx += 1
                hunk_consumed += 1
                removed += 1
            elif hunk_line.startswith('+'):
                patched_lines.append(hunk_line[1:])
                added += 1
            elif hunk_line.startswith(' '):
                if orig_idx >= len(original_lines):
                    raise PatchApplicationError("Patch context exceeds file length.")
                if original_lines[orig_idx] != hunk_line[1:]:
                    raise PatchApplicationError("Patch context does not match file content.")
                patched_lines.append(original_lines[orig_idx])
                orig_idx += 1
                hunk_consumed += 1
            elif hunk_line.startswith('\\'):
                # Fix: the original compared against the unterminated literal '\'
                # (a syntax error). A line starting with a backslash is the
                # unified-diff "\ No newline at end of file" marker, meaning the
                # most recently emitted line must lose its trailing newline.
                if patched_lines:
                    patched_lines[-1] = patched_lines[-1].rstrip('\n')
            elif hunk_line.strip() == '':
                # A bare blank patch line stands for an empty context line.
                patched_lines.append(hunk_line)
            else:
                raise PatchApplicationError(f"Unsupported patch line: '{hunk_line.strip()}'.")

            line_idx += 1

        # Sanity-check the hunk against its declared counts.
        consumed_total = hunk_consumed
        if old_count == 0:
            if consumed_total != 0:
                raise PatchApplicationError("Patch expects zero original lines but consumed some context.")
        else:
            if consumed_total != old_count:
                raise PatchApplicationError(
                    f"Patch expected to consume {old_count} original lines but consumed {consumed_total}.")

        context_lines = consumed_total - removed
        expected_new_lines = context_lines + added
        if new_count == 0:
            if expected_new_lines != 0:
                raise PatchApplicationError("Patch declares zero new lines but produced changes.")
        else:
            if expected_new_lines != new_count:
                raise PatchApplicationError(
                    f"Patch expected to produce {new_count} new lines but produced {expected_new_lines}.")

    # Everything after the final hunk is carried over unchanged.
    patched_lines.extend(original_lines[orig_idx:])
    return patched_lines
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
@tool(name="FileEdit", category=ToolCategory.FILE_SYSTEM)
async def file_edit(context: 'AgentContext', path: str, patch: str, create_if_missing: bool = False) -> str:
    """Applies a unified diff patch to update a text file without overwriting unrelated content.

    Args:
        path: Path to the target file. Relative paths are resolved against the agent workspace when available.
        patch: Unified diff patch describing the edits to apply.
        create_if_missing: When True, allows applying a patch that introduces content to a non-existent file.

    Returns:
        A success message containing the resolved path of the edited file.

    Raises:
        FileNotFoundError: If the file does not exist and create_if_missing is False.
        PatchApplicationError: If the patch content cannot be applied cleanly.
        IOError: If file reading or writing fails.
    """
    logger.debug("FileEdit: requested edit for agent '%s' on path '%s'.", context.agent_id, path)
    final_path = _resolve_file_path(context, path)

    # Create the parent directory only when the caller has opted into creation.
    dir_path = os.path.dirname(final_path)
    if dir_path and not os.path.exists(dir_path) and create_if_missing:
        os.makedirs(dir_path, exist_ok=True)

    file_exists = os.path.exists(final_path)
    if not file_exists and not create_if_missing:
        raise FileNotFoundError(f"The file at resolved path {final_path} does not exist.")

    try:
        original_lines: List[str]
        if file_exists:
            # splitlines(keepends=True) preserves each line's newline so the
            # patched output reproduces the file's original line endings.
            with open(final_path, 'r', encoding='utf-8') as source:
                original_lines = source.read().splitlines(keepends=True)
        else:
            # Patching a missing file starts from empty content.
            original_lines = []

        patched_lines = _apply_unified_diff(original_lines, patch)

        with open(final_path, 'w', encoding='utf-8') as destination:
            destination.writelines(patched_lines)

        logger.info("FileEdit: successfully applied patch to '%s'.", final_path)
        return f"File edited successfully at {final_path}"
    except PatchApplicationError as patch_err:
        # Re-raise unchanged so callers can distinguish patch mismatches from I/O errors.
        logger.error("FileEdit: failed to apply patch to '%s': %s", final_path, patch_err, exc_info=True)
        raise patch_err
    except Exception as exc:  # pragma: no cover - general safeguard
        logger.error("FileEdit: unexpected error while editing '%s': %s", final_path, exc, exc_info=True)
        raise IOError(f"Could not edit file at '{final_path}': {exc}")
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
# file: autobyteus/autobyteus/tools/usage/parsers/_string_decoders.py
|
|
2
|
+
"""Utility helpers for normalizing string content inside parsed tool payloads."""
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
import html
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def decode_html_entities(data: Any) -> Any:
    """Recursively decode HTML/XML entities in strings within a data structure.

    Dicts and lists are rebuilt with every nested string passed through
    ``html.unescape``; strings are unescaped directly; all other values are
    returned untouched.
    """
    if isinstance(data, str):
        return html.unescape(data)
    if isinstance(data, list):
        return [decode_html_entities(element) for element in data]
    if isinstance(data, dict):
        return {name: decode_html_entities(nested) for name, nested in data.items()}
    return data
|
|
@@ -3,6 +3,8 @@ import json
|
|
|
3
3
|
import logging
|
|
4
4
|
from typing import Dict, Any, TYPE_CHECKING, List
|
|
5
5
|
|
|
6
|
+
from ._string_decoders import decode_html_entities
|
|
7
|
+
|
|
6
8
|
from autobyteus.agent.tool_invocation import ToolInvocation
|
|
7
9
|
from .base_parser import BaseToolUsageParser
|
|
8
10
|
from .exceptions import ToolUsageParseException
|
|
@@ -53,9 +55,15 @@ class DefaultJsonToolUsageParser(BaseToolUsageParser):
|
|
|
53
55
|
logger.debug(f"Skipping tool block with invalid 'parameters' type ({type(arguments)}): {tool_block}")
|
|
54
56
|
continue
|
|
55
57
|
|
|
58
|
+
decoded_arguments = decode_html_entities(arguments)
|
|
59
|
+
decoded_tool_name = decode_html_entities(tool_name)
|
|
56
60
|
try:
|
|
57
61
|
# Pass id=None to trigger deterministic ID generation.
|
|
58
|
-
tool_invocation = ToolInvocation(
|
|
62
|
+
tool_invocation = ToolInvocation(
|
|
63
|
+
name=decoded_tool_name,
|
|
64
|
+
arguments=decoded_arguments,
|
|
65
|
+
id=None,
|
|
66
|
+
)
|
|
59
67
|
invocations.append(tool_invocation)
|
|
60
68
|
logger.info(f"Successfully parsed default JSON tool invocation for '{tool_name}'.")
|
|
61
69
|
except Exception as e:
|