autobyteus 1.1.9__py3-none-any.whl → 1.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (126) hide show
  1. autobyteus/agent/context/agent_runtime_state.py +4 -0
  2. autobyteus/agent/events/notifiers.py +5 -1
  3. autobyteus/agent/message/send_message_to.py +5 -4
  4. autobyteus/agent/streaming/agent_event_stream.py +5 -0
  5. autobyteus/agent/streaming/stream_event_payloads.py +25 -0
  6. autobyteus/agent/streaming/stream_events.py +13 -1
  7. autobyteus/agent_team/bootstrap_steps/task_notifier_initialization_step.py +4 -4
  8. autobyteus/agent_team/bootstrap_steps/team_context_initialization_step.py +12 -12
  9. autobyteus/agent_team/context/agent_team_runtime_state.py +2 -2
  10. autobyteus/agent_team/streaming/agent_team_event_notifier.py +4 -4
  11. autobyteus/agent_team/streaming/agent_team_stream_event_payloads.py +3 -3
  12. autobyteus/agent_team/streaming/agent_team_stream_events.py +8 -8
  13. autobyteus/agent_team/task_notification/activation_policy.py +1 -1
  14. autobyteus/agent_team/task_notification/system_event_driven_agent_task_notifier.py +22 -22
  15. autobyteus/agent_team/task_notification/task_notification_mode.py +1 -1
  16. autobyteus/cli/agent_team_tui/app.py +4 -4
  17. autobyteus/cli/agent_team_tui/state.py +8 -8
  18. autobyteus/cli/agent_team_tui/widgets/focus_pane.py +3 -3
  19. autobyteus/cli/agent_team_tui/widgets/shared.py +1 -1
  20. autobyteus/cli/agent_team_tui/widgets/{task_board_panel.py → task_plan_panel.py} +5 -5
  21. autobyteus/clients/__init__.py +10 -0
  22. autobyteus/clients/autobyteus_client.py +318 -0
  23. autobyteus/clients/cert_utils.py +105 -0
  24. autobyteus/clients/certificates/cert.pem +34 -0
  25. autobyteus/events/event_types.py +4 -3
  26. autobyteus/llm/api/autobyteus_llm.py +1 -1
  27. autobyteus/llm/api/zhipu_llm.py +26 -0
  28. autobyteus/llm/autobyteus_provider.py +1 -1
  29. autobyteus/llm/llm_factory.py +23 -0
  30. autobyteus/llm/ollama_provider_resolver.py +1 -0
  31. autobyteus/llm/providers.py +1 -0
  32. autobyteus/llm/token_counter/token_counter_factory.py +3 -0
  33. autobyteus/llm/token_counter/zhipu_token_counter.py +24 -0
  34. autobyteus/multimedia/audio/api/__init__.py +3 -2
  35. autobyteus/multimedia/audio/api/autobyteus_audio_client.py +1 -1
  36. autobyteus/multimedia/audio/api/openai_audio_client.py +112 -0
  37. autobyteus/multimedia/audio/audio_client_factory.py +37 -0
  38. autobyteus/multimedia/audio/autobyteus_audio_provider.py +1 -1
  39. autobyteus/multimedia/image/api/autobyteus_image_client.py +1 -1
  40. autobyteus/multimedia/image/autobyteus_image_provider.py +1 -1
  41. autobyteus/multimedia/image/image_client_factory.py +1 -1
  42. autobyteus/task_management/__init__.py +44 -20
  43. autobyteus/task_management/{base_task_board.py → base_task_plan.py} +16 -13
  44. autobyteus/task_management/converters/__init__.py +2 -2
  45. autobyteus/task_management/converters/{task_board_converter.py → task_plan_converter.py} +13 -13
  46. autobyteus/task_management/events.py +7 -7
  47. autobyteus/task_management/{in_memory_task_board.py → in_memory_task_plan.py} +34 -22
  48. autobyteus/task_management/schemas/__init__.py +3 -0
  49. autobyteus/task_management/schemas/task_definition.py +1 -1
  50. autobyteus/task_management/schemas/task_status_report.py +3 -3
  51. autobyteus/task_management/schemas/todo_definition.py +15 -0
  52. autobyteus/task_management/todo.py +29 -0
  53. autobyteus/task_management/todo_list.py +75 -0
  54. autobyteus/task_management/tools/__init__.py +25 -7
  55. autobyteus/task_management/tools/task_tools/__init__.py +19 -0
  56. autobyteus/task_management/tools/task_tools/assign_task_to.py +125 -0
  57. autobyteus/task_management/tools/{publish_task.py → task_tools/create_task.py} +16 -18
  58. autobyteus/task_management/tools/{publish_tasks.py → task_tools/create_tasks.py} +19 -19
  59. autobyteus/task_management/tools/{get_my_tasks.py → task_tools/get_my_tasks.py} +15 -15
  60. autobyteus/task_management/tools/{get_task_board_status.py → task_tools/get_task_plan_status.py} +16 -16
  61. autobyteus/task_management/tools/{update_task_status.py → task_tools/update_task_status.py} +16 -16
  62. autobyteus/task_management/tools/todo_tools/__init__.py +18 -0
  63. autobyteus/task_management/tools/todo_tools/add_todo.py +78 -0
  64. autobyteus/task_management/tools/todo_tools/create_todo_list.py +79 -0
  65. autobyteus/task_management/tools/todo_tools/get_todo_list.py +55 -0
  66. autobyteus/task_management/tools/todo_tools/update_todo_status.py +85 -0
  67. autobyteus/tools/__init__.py +61 -21
  68. autobyteus/tools/bash/bash_executor.py +3 -3
  69. autobyteus/tools/browser/session_aware/browser_session_aware_navigate_to.py +5 -5
  70. autobyteus/tools/browser/session_aware/browser_session_aware_web_element_trigger.py +4 -4
  71. autobyteus/tools/browser/session_aware/browser_session_aware_webpage_reader.py +3 -3
  72. autobyteus/tools/browser/session_aware/browser_session_aware_webpage_screenshot_taker.py +3 -3
  73. autobyteus/tools/browser/standalone/navigate_to.py +13 -9
  74. autobyteus/tools/browser/standalone/web_page_pdf_generator.py +9 -5
  75. autobyteus/tools/browser/standalone/webpage_image_downloader.py +10 -6
  76. autobyteus/tools/browser/standalone/webpage_reader.py +13 -9
  77. autobyteus/tools/browser/standalone/webpage_screenshot_taker.py +9 -5
  78. autobyteus/tools/file/__init__.py +13 -0
  79. autobyteus/tools/file/edit_file.py +200 -0
  80. autobyteus/tools/file/list_directory.py +168 -0
  81. autobyteus/tools/file/{file_reader.py → read_file.py} +3 -3
  82. autobyteus/tools/file/search_files.py +188 -0
  83. autobyteus/tools/file/{file_writer.py → write_file.py} +3 -3
  84. autobyteus/tools/functional_tool.py +10 -8
  85. autobyteus/tools/mcp/tool.py +3 -3
  86. autobyteus/tools/mcp/tool_registrar.py +5 -2
  87. autobyteus/tools/multimedia/__init__.py +2 -1
  88. autobyteus/tools/multimedia/audio_tools.py +2 -2
  89. autobyteus/tools/multimedia/download_media_tool.py +136 -0
  90. autobyteus/tools/multimedia/image_tools.py +4 -4
  91. autobyteus/tools/multimedia/media_reader_tool.py +1 -1
  92. autobyteus/tools/registry/tool_definition.py +66 -13
  93. autobyteus/tools/registry/tool_registry.py +29 -0
  94. autobyteus/tools/search/__init__.py +17 -0
  95. autobyteus/tools/search/base_strategy.py +35 -0
  96. autobyteus/tools/search/client.py +24 -0
  97. autobyteus/tools/search/factory.py +81 -0
  98. autobyteus/tools/search/google_cse_strategy.py +68 -0
  99. autobyteus/tools/search/providers.py +10 -0
  100. autobyteus/tools/search/serpapi_strategy.py +65 -0
  101. autobyteus/tools/search/serper_strategy.py +87 -0
  102. autobyteus/tools/search_tool.py +83 -0
  103. autobyteus/tools/timer.py +4 -0
  104. autobyteus/tools/tool_meta.py +4 -24
  105. autobyteus/tools/usage/parsers/_string_decoders.py +18 -0
  106. autobyteus/tools/usage/parsers/default_json_tool_usage_parser.py +9 -1
  107. autobyteus/tools/usage/parsers/default_xml_tool_usage_parser.py +15 -1
  108. autobyteus/tools/usage/parsers/gemini_json_tool_usage_parser.py +4 -1
  109. autobyteus/tools/usage/parsers/openai_json_tool_usage_parser.py +4 -1
  110. autobyteus/workflow/bootstrap_steps/coordinator_prompt_preparation_step.py +1 -2
  111. {autobyteus-1.1.9.dist-info → autobyteus-1.2.1.dist-info}/METADATA +7 -6
  112. {autobyteus-1.1.9.dist-info → autobyteus-1.2.1.dist-info}/RECORD +117 -94
  113. examples/run_agentic_software_engineer.py +239 -0
  114. examples/run_poem_writer.py +3 -3
  115. autobyteus/person/__init__.py +0 -0
  116. autobyteus/person/examples/__init__.py +0 -0
  117. autobyteus/person/examples/sample_persons.py +0 -14
  118. autobyteus/person/examples/sample_roles.py +0 -14
  119. autobyteus/person/person.py +0 -29
  120. autobyteus/person/role.py +0 -14
  121. autobyteus/tools/google_search.py +0 -149
  122. autobyteus/tools/image_downloader.py +0 -99
  123. autobyteus/tools/pdf_downloader.py +0 -89
  124. {autobyteus-1.1.9.dist-info → autobyteus-1.2.1.dist-info}/WHEEL +0 -0
  125. {autobyteus-1.1.9.dist-info → autobyteus-1.2.1.dist-info}/licenses/LICENSE +0 -0
  126. {autobyteus-1.1.9.dist-info → autobyteus-1.2.1.dist-info}/top_level.txt +0 -0
@@ -32,11 +32,11 @@ class BrowserSessionAwareWebPageScreenshotTaker(BrowserSessionAwareTool):
32
32
  if self.image_format not in ["png", "jpeg"]:
33
33
  logger.warning(f"Invalid image_format '{self.image_format}' in config. Defaulting to 'png'.")
34
34
  self.image_format = "png"
35
- logger.debug(f"BrowserSessionAwareWebPageScreenshotTaker initialized. Full page: {self.full_page}, Format: {self.image_format}")
35
+ logger.debug(f"take_webpage_screenshot (session-aware) initialized. Full page: {self.full_page}, Format: {self.image_format}")
36
36
 
37
37
  @classmethod
38
38
  def get_name(cls) -> str:
39
- return "WebPageScreenshotTaker"
39
+ return "take_webpage_screenshot"
40
40
 
41
41
  @classmethod
42
42
  def get_description(cls) -> str:
@@ -87,7 +87,7 @@ class BrowserSessionAwareWebPageScreenshotTaker(BrowserSessionAwareTool):
87
87
  file_name: str,
88
88
  webpage_url: str
89
89
  ) -> str:
90
- logger.info(f"BrowserSessionAwareWebPageScreenshotTaker performing action. Saving to '{file_name}'. Current page: {shared_session.page.url}")
90
+ logger.info(f"take_webpage_screenshot (session-aware) performing action. Saving to '{file_name}'. Current page: {shared_session.page.url}")
91
91
 
92
92
  output_dir = os.path.dirname(file_name)
93
93
  if output_dir:
@@ -23,7 +23,11 @@ class NavigateTo(BaseTool, UIIntegrator):
23
23
  def __init__(self, config: Optional[ToolConfig] = None):
24
24
  BaseTool.__init__(self, config=config)
25
25
  UIIntegrator.__init__(self)
26
- logger.debug("NavigateTo (standalone) tool initialized.")
26
+ logger.debug("navigate_to (standalone) tool initialized.")
27
+
28
+ @classmethod
29
+ def get_name(cls) -> str:
30
+ return "navigate_to"
27
31
 
28
32
  @classmethod
29
33
  def get_description(cls) -> str:
@@ -41,33 +45,33 @@ class NavigateTo(BaseTool, UIIntegrator):
41
45
  return schema
42
46
 
43
47
  async def _execute(self, context: 'AgentContext', url: str) -> str:
44
- logger.info(f"NavigateTo (standalone) for agent {context.agent_id} navigating to: {url}")
48
+ logger.info(f"navigate_to (standalone) for agent {context.agent_id} navigating to: {url}")
45
49
 
46
50
  if not self._is_valid_url(url):
47
51
  error_msg = f"Invalid URL format: {url}. Must include scheme (e.g., http, https) and netloc."
48
- logger.warning(f"NavigateTo (standalone) validation error for agent {context.agent_id}: {error_msg}")
52
+ logger.warning(f"navigate_to (standalone) validation error for agent {context.agent_id}: {error_msg}")
49
53
  raise ValueError(error_msg)
50
54
 
51
55
  try:
52
56
  await self.initialize()
53
57
  if not self.page:
54
- logger.error("Playwright page not initialized in NavigateTo (standalone).")
55
- raise RuntimeError("Playwright page not available for NavigateTo.")
58
+ logger.error("Playwright page not initialized in navigate_to (standalone).")
59
+ raise RuntimeError("Playwright page not available for navigate_to.")
56
60
 
57
61
  response = await self.page.goto(url, wait_until="domcontentloaded", timeout=60000)
58
62
 
59
63
  if response and response.ok:
60
64
  success_msg = f"Successfully navigated to {url}"
61
- logger.info(f"NavigateTo (standalone) for agent {context.agent_id}: {success_msg}")
65
+ logger.info(f"navigate_to (standalone) for agent {context.agent_id}: {success_msg}")
62
66
  return success_msg
63
67
  else:
64
68
  status = response.status if response else "Unknown"
65
69
  failure_msg = f"Navigation to {url} failed with status {status}"
66
- logger.warning(f"NavigateTo (standalone) for agent {context.agent_id}: {failure_msg}")
70
+ logger.warning(f"navigate_to (standalone) for agent {context.agent_id}: {failure_msg}")
67
71
  return failure_msg
68
72
  except Exception as e:
69
- logger.error(f"Error during NavigateTo (standalone) for URL '{url}', agent {context.agent_id}: {e}", exc_info=True)
70
- raise RuntimeError(f"NavigateTo (standalone) failed for URL '{url}': {str(e)}")
73
+ logger.error(f"Error during navigate_to (standalone) for URL '{url}', agent {context.agent_id}: {e}", exc_info=True)
74
+ raise RuntimeError(f"navigate_to (standalone) failed for URL '{url}': {str(e)}")
71
75
  finally:
72
76
  await self.close()
73
77
 
@@ -24,7 +24,11 @@ class WebPagePDFGenerator(BaseTool, UIIntegrator):
24
24
  def __init__(self, config: Optional[ToolConfig] = None):
25
25
  BaseTool.__init__(self, config=config)
26
26
  UIIntegrator.__init__(self)
27
- logger.debug("WebPagePDFGenerator (standalone) tool initialized.")
27
+ logger.debug("generate_webpage_pdf (standalone) tool initialized.")
28
+
29
+ @classmethod
30
+ def get_name(cls) -> str:
31
+ return "generate_webpage_pdf"
28
32
 
29
33
  @classmethod
30
34
  def get_description(cls) -> str:
@@ -49,7 +53,7 @@ class WebPagePDFGenerator(BaseTool, UIIntegrator):
49
53
  return schema
50
54
 
51
55
  async def _execute(self, context: 'AgentContext', url: str, save_dir: str) -> str:
52
- logger.info(f"WebPagePDFGenerator for agent {context.agent_id} generating PDF for '{url}', saving to directory '{save_dir}'.")
56
+ logger.info(f"generate_webpage_pdf for agent {context.agent_id} generating PDF for '{url}', saving to directory '{save_dir}'.")
53
57
 
54
58
  if not self._is_valid_page_url(url):
55
59
  raise ValueError(f"Invalid page URL format: {url}. Must be a full URL (e.g., http/https).")
@@ -73,8 +77,8 @@ class WebPagePDFGenerator(BaseTool, UIIntegrator):
73
77
  try:
74
78
  await self.initialize()
75
79
  if not self.page:
76
- logger.error("Playwright page not initialized in WebPagePDFGenerator.")
77
- raise RuntimeError("Playwright page not available for WebPagePDFGenerator.")
80
+ logger.error("Playwright page not initialized in generate_webpage_pdf.")
81
+ raise RuntimeError("Playwright page not available for generate_webpage_pdf.")
78
82
 
79
83
  await self.page.goto(url, wait_until="networkidle", timeout=60000)
80
84
 
@@ -85,7 +89,7 @@ class WebPagePDFGenerator(BaseTool, UIIntegrator):
85
89
  return absolute_file_path
86
90
  except Exception as e:
87
91
  logger.error(f"Error generating PDF for URL '{url}': {e}", exc_info=True)
88
- raise RuntimeError(f"WebPagePDFGenerator failed for URL '{url}': {str(e)}")
92
+ raise RuntimeError(f"generate_webpage_pdf failed for URL '{url}': {str(e)}")
89
93
  finally:
90
94
  await self.close()
91
95
 
@@ -24,7 +24,11 @@ class WebPageImageDownloader(BaseTool, UIIntegrator):
24
24
  def __init__(self, config: Optional[ToolConfig] = None):
25
25
  BaseTool.__init__(self, config=config)
26
26
  UIIntegrator.__init__(self)
27
- logger.debug("WebPageImageDownloader tool initialized.")
27
+ logger.debug("download_webpage_images tool initialized.")
28
+
29
+ @classmethod
30
+ def get_name(cls) -> str:
31
+ return "download_webpage_images"
28
32
 
29
33
  @classmethod
30
34
  def get_description(cls) -> str:
@@ -49,7 +53,7 @@ class WebPageImageDownloader(BaseTool, UIIntegrator):
49
53
  return schema
50
54
 
51
55
  async def _execute(self, context: 'AgentContext', url: str, save_dir: str) -> List[str]:
52
- logger.info(f"WebPageImageDownloader for agent {context.agent_id} downloading images from '{url}' to '{save_dir}'.")
56
+ logger.info(f"download_webpage_images for agent {context.agent_id} downloading images from '{url}' to '{save_dir}'.")
53
57
 
54
58
  if not self._is_valid_page_url(url):
55
59
  raise ValueError(f"Invalid page URL format: {url}. Must be a full URL (e.g., http/https).")
@@ -60,8 +64,8 @@ class WebPageImageDownloader(BaseTool, UIIntegrator):
60
64
  try:
61
65
  await self.initialize()
62
66
  if not self.page:
63
- logger.error("Playwright page not initialized in WebPageImageDownloader.")
64
- raise RuntimeError("Playwright page not available for WebPageImageDownloader.")
67
+ logger.error("Playwright page not initialized in download_webpage_images.")
68
+ raise RuntimeError("Playwright page not available for download_webpage_images.")
65
69
 
66
70
  await self.page.goto(url, wait_until="networkidle", timeout=60000)
67
71
 
@@ -104,8 +108,8 @@ class WebPageImageDownloader(BaseTool, UIIntegrator):
104
108
  return saved_paths
105
109
 
106
110
  except Exception as e:
107
- logger.error(f"Error in WebPageImageDownloader for URL '{url}': {e}", exc_info=True)
108
- raise RuntimeError(f"WebPageImageDownloader failed for URL '{url}': {str(e)}")
111
+ logger.error(f"Error in download_webpage_images for URL '{url}': {e}", exc_info=True)
112
+ raise RuntimeError(f"download_webpage_images failed for URL '{url}': {str(e)}")
109
113
  finally:
110
114
  await self.close()
111
115
 
@@ -1,6 +1,6 @@
1
1
  """
2
2
  File: autobyteus/tools/browser/standalone/webpage_reader.py
3
- This module provides a WebPageReader tool for reading and cleaning HTML content from webpages.
3
+ This module provides a read_webpage tool for reading and cleaning HTML content from webpages.
4
4
  """
5
5
 
6
6
  import logging
@@ -35,15 +35,19 @@ class WebPageReader(BaseTool, UIIntegrator):
35
35
  try:
36
36
  cleaning_mode_to_use = CleaningMode(cleaning_mode_value.upper())
37
37
  except ValueError:
38
- logger.warning(f"Invalid cleaning_mode string '{cleaning_mode_value}' in config for WebPageReader. Using THOROUGH.")
38
+ logger.warning(f"Invalid cleaning_mode string '{cleaning_mode_value}' in config for read_webpage. Using THOROUGH.")
39
39
  cleaning_mode_to_use = CleaningMode.THOROUGH
40
40
  elif isinstance(cleaning_mode_value, CleaningMode):
41
41
  cleaning_mode_to_use = cleaning_mode_value
42
42
  else:
43
- logger.warning(f"Invalid type for cleaning_mode in config for WebPageReader. Using THOROUGH.")
43
+ logger.warning(f"Invalid type for cleaning_mode in config for read_webpage. Using THOROUGH.")
44
44
 
45
45
  self.cleaning_mode = cleaning_mode_to_use
46
- logger.debug(f"WebPageReader initialized with cleaning_mode: {self.cleaning_mode}")
46
+ logger.debug(f"read_webpage initialized with cleaning_mode: {self.cleaning_mode}")
47
+
48
+ @classmethod
49
+ def get_name(cls) -> str:
50
+ return "read_webpage"
47
51
 
48
52
  @classmethod
49
53
  def get_description(cls) -> str:
@@ -63,7 +67,7 @@ class WebPageReader(BaseTool, UIIntegrator):
63
67
 
64
68
  @classmethod
65
69
  def get_config_schema(cls) -> Optional[ParameterSchema]:
66
- """Schema for parameters to configure the WebPageReader instance itself."""
70
+ """Schema for parameters to configure the read_webpage instance itself."""
67
71
  schema = ParameterSchema()
68
72
  schema.add_parameter(ParameterDefinition(
69
73
  name="cleaning_mode",
@@ -76,13 +80,13 @@ class WebPageReader(BaseTool, UIIntegrator):
76
80
  return schema
77
81
 
78
82
  async def _execute(self, context: 'AgentContext', url: str) -> str:
79
- logger.info(f"WebPageReader executing for agent {context.agent_id} with URL: '{url}'")
83
+ logger.info(f"read_webpage executing for agent {context.agent_id} with URL: '{url}'")
80
84
 
81
85
  try:
82
86
  await self.initialize()
83
87
  if not self.page:
84
- logger.error("Playwright page not initialized in WebPageReader.")
85
- raise RuntimeError("Playwright page not available for WebPageReader.")
88
+ logger.error("Playwright page not initialized in read_webpage.")
89
+ raise RuntimeError("Playwright page not available for read_webpage.")
86
90
 
87
91
  await self.page.goto(url, timeout=60000, wait_until="domcontentloaded")
88
92
  page_content = await self.page.content()
@@ -96,6 +100,6 @@ class WebPageReader(BaseTool, UIIntegrator):
96
100
  '''
97
101
  except Exception as e:
98
102
  logger.error(f"Error reading webpage at URL '{url}': {e}", exc_info=True)
99
- raise RuntimeError(f"WebPageReader failed for URL '{url}': {str(e)}")
103
+ raise RuntimeError(f"read_webpage failed for URL '{url}': {str(e)}")
100
104
  finally:
101
105
  await self.close()
@@ -31,7 +31,11 @@ class WebPageScreenshotTaker(BaseTool, UIIntegrator):
31
31
  if self.image_format not in ["png", "jpeg"]:
32
32
  logger.warning(f"Invalid image_format '{self.image_format}' in config. Defaulting to 'png'.")
33
33
  self.image_format = "png"
34
- logger.debug(f"WebPageScreenshotTaker initialized. Full page: {self.full_page}, Format: {self.image_format}")
34
+ logger.debug(f"take_webpage_screenshot initialized. Full page: {self.full_page}, Format: {self.image_format}")
35
+
36
+ @classmethod
37
+ def get_name(cls) -> str:
38
+ return "take_webpage_screenshot"
35
39
 
36
40
  @classmethod
37
41
  def get_description(cls) -> str:
@@ -75,7 +79,7 @@ class WebPageScreenshotTaker(BaseTool, UIIntegrator):
75
79
  return schema
76
80
 
77
81
  async def _execute(self, context: 'AgentContext', url: str, file_path: str) -> str:
78
- logger.info(f"WebPageScreenshotTaker for agent {context.agent_id} taking screenshot of '{url}', saving to '{file_path}'.")
82
+ logger.info(f"take_webpage_screenshot for agent {context.agent_id} taking screenshot of '{url}', saving to '{file_path}'.")
79
83
 
80
84
  output_dir = os.path.dirname(file_path)
81
85
  if output_dir:
@@ -84,8 +88,8 @@ class WebPageScreenshotTaker(BaseTool, UIIntegrator):
84
88
  try:
85
89
  await self.initialize()
86
90
  if not self.page:
87
- logger.error("Playwright page not initialized in WebPageScreenshotTaker.")
88
- raise RuntimeError("Playwright page not available for WebPageScreenshotTaker.")
91
+ logger.error("Playwright page not initialized in take_webpage_screenshot.")
92
+ raise RuntimeError("Playwright page not available for take_webpage_screenshot.")
89
93
 
90
94
  await self.page.goto(url, wait_until="networkidle", timeout=60000)
91
95
 
@@ -96,6 +100,6 @@ class WebPageScreenshotTaker(BaseTool, UIIntegrator):
96
100
  return absolute_file_path
97
101
  except Exception as e:
98
102
  logger.error(f"Error taking screenshot of URL '{url}': {e}", exc_info=True)
99
- raise RuntimeError(f"WebPageScreenshotTaker failed for URL '{url}': {str(e)}")
103
+ raise RuntimeError(f"take_webpage_screenshot failed for URL '{url}': {str(e)}")
100
104
  finally:
101
105
  await self.close()
@@ -0,0 +1,13 @@
1
+ from .edit_file import edit_file
2
+ from .read_file import read_file
3
+ from .write_file import write_file
4
+ from .search_files import search_files
5
+ from .list_directory import list_directory
6
+
7
+ __all__ = [
8
+ "edit_file",
9
+ "read_file",
10
+ "write_file",
11
+ "search_files",
12
+ "list_directory",
13
+ ]
@@ -0,0 +1,200 @@
1
+ import os
2
+ import re
3
+ import logging
4
+ from typing import TYPE_CHECKING, List
5
+
6
+ from autobyteus.tools.functional_tool import tool
7
+ from autobyteus.tools.tool_category import ToolCategory
8
+
9
+ if TYPE_CHECKING:
10
+ from autobyteus.agent.context import AgentContext
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
+ _HUNK_HEADER_RE = re.compile(r"^@@ -(?P<old_start>\d+)(?:,(?P<old_count>\d+))? \+(?P<new_start>\d+)(?:,(?P<new_count>\d+))? @@")
15
+
16
+ class PatchApplicationError(ValueError):
17
+ """Raised when a unified diff patch cannot be applied to the target file."""
18
+
19
+
20
+ def _resolve_file_path(context: 'AgentContext', path: str) -> str:
21
+ """Resolves an absolute path for the given input, using the agent workspace when needed."""
22
+ if os.path.isabs(path):
23
+ final_path = path
24
+ logger.debug("edit_file: provided path '%s' is absolute.", path)
25
+ else:
26
+ if not context.workspace:
27
+ error_msg = ("Relative path '%s' provided, but no workspace is configured for agent '%s'. "
28
+ "A workspace is required to resolve relative paths.")
29
+ logger.error(error_msg, path, context.agent_id)
30
+ raise ValueError(error_msg % (path, context.agent_id))
31
+ base_path = context.workspace.get_base_path()
32
+ if not base_path or not isinstance(base_path, str):
33
+ error_msg = ("Agent '%s' has a configured workspace, but it provided an invalid base path ('%s'). "
34
+ "Cannot resolve relative path '%s'.")
35
+ logger.error(error_msg, context.agent_id, base_path, path)
36
+ raise ValueError(error_msg % (context.agent_id, base_path, path))
37
+ final_path = os.path.join(base_path, path)
38
+ logger.debug("edit_file: resolved relative path '%s' against workspace base '%s' to '%s'.", path, base_path, final_path)
39
+
40
+ normalized_path = os.path.normpath(final_path)
41
+ logger.debug("edit_file: normalized path to '%s'.", normalized_path)
42
+ return normalized_path
43
+
44
+
45
+ def _apply_unified_diff(original_lines: List[str], patch: str) -> List[str]:
46
+ """Applies a unified diff patch to the provided original lines and returns the patched lines."""
47
+ if not patch or not patch.strip():
48
+ raise PatchApplicationError("Patch content is empty; nothing to apply.")
49
+
50
+ patched_lines: List[str] = []
51
+ orig_idx = 0
52
+ patch_lines = patch.splitlines(keepends=True)
53
+ line_idx = 0
54
+
55
+ while line_idx < len(patch_lines):
56
+ line = patch_lines[line_idx]
57
+
58
+ if line.startswith('---') or line.startswith('+++'):
59
+ logger.debug("edit_file: skipping diff header line '%s'.", line.strip())
60
+ line_idx += 1
61
+ continue
62
+
63
+ if not line.startswith('@@'):
64
+ stripped = line.strip()
65
+ if stripped == '':
66
+ line_idx += 1
67
+ continue
68
+ raise PatchApplicationError(f"Unexpected content outside of hunk header: '{stripped}'.")
69
+
70
+ match = _HUNK_HEADER_RE.match(line)
71
+ if not match:
72
+ raise PatchApplicationError(f"Malformed hunk header: '{line.strip()}'.")
73
+
74
+ old_start = int(match.group('old_start'))
75
+ old_count = int(match.group('old_count') or '1')
76
+ new_start = int(match.group('new_start'))
77
+ new_count = int(match.group('new_count') or '1')
78
+ logger.debug("edit_file: processing hunk old_start=%s old_count=%s new_start=%s new_count=%s.",
79
+ old_start, old_count, new_start, new_count)
80
+
81
+ target_idx = old_start - 1 if old_start > 0 else 0
82
+ if target_idx > len(original_lines):
83
+ raise PatchApplicationError("Patch hunk starts beyond end of file.")
84
+ if target_idx < orig_idx:
85
+ raise PatchApplicationError("Patch hunks overlap or are out of order.")
86
+
87
+ patched_lines.extend(original_lines[orig_idx:target_idx])
88
+ orig_idx = target_idx
89
+
90
+ line_idx += 1
91
+ hunk_consumed = 0
92
+ removed = 0
93
+ added = 0
94
+
95
+ while line_idx < len(patch_lines):
96
+ hunk_line = patch_lines[line_idx]
97
+ if hunk_line.startswith('@@'):
98
+ break
99
+
100
+ if hunk_line.startswith('-'):
101
+ if orig_idx >= len(original_lines):
102
+ raise PatchApplicationError("Patch attempts to remove lines beyond file length.")
103
+ if original_lines[orig_idx] != hunk_line[1:]:
104
+ raise PatchApplicationError("Patch removal does not match file content.")
105
+ orig_idx += 1
106
+ hunk_consumed += 1
107
+ removed += 1
108
+ elif hunk_line.startswith('+'):
109
+ patched_lines.append(hunk_line[1:])
110
+ added += 1
111
+ elif hunk_line.startswith(' '):
112
+ if orig_idx >= len(original_lines):
113
+ raise PatchApplicationError("Patch context exceeds file length.")
114
+ if original_lines[orig_idx] != hunk_line[1:]:
115
+ raise PatchApplicationError("Patch context does not match file content.")
116
+ patched_lines.append(original_lines[orig_idx])
117
+ orig_idx += 1
118
+ hunk_consumed += 1
119
+ elif hunk_line.startswith('\\'):
120
 + if hunk_line.strip() == '\\ No newline at end of file':
121
+ if patched_lines:
122
+ patched_lines[-1] = patched_lines[-1].rstrip('\n')
123
+ else:
124
+ raise PatchApplicationError(f"Unsupported patch directive: '{hunk_line.strip()}'.")
125
+ elif hunk_line.strip() == '':
126
+ patched_lines.append(hunk_line)
127
+ else:
128
+ raise PatchApplicationError(f"Unsupported patch line: '{hunk_line.strip()}'.")
129
+
130
+ line_idx += 1
131
+
132
+ consumed_total = hunk_consumed
133
+ if old_count == 0:
134
+ if consumed_total != 0:
135
+ raise PatchApplicationError("Patch expects zero original lines but consumed some context.")
136
+ else:
137
+ if consumed_total != old_count:
138
+ raise PatchApplicationError(
139
+ f"Patch expected to consume {old_count} original lines but consumed {consumed_total}.")
140
+
141
+ context_lines = consumed_total - removed
142
+ expected_new_lines = context_lines + added
143
+ if new_count == 0:
144
+ if expected_new_lines != 0:
145
+ raise PatchApplicationError("Patch declares zero new lines but produced changes.")
146
+ else:
147
+ if expected_new_lines != new_count:
148
+ raise PatchApplicationError(
149
+ f"Patch expected to produce {new_count} new lines but produced {expected_new_lines}.")
150
+
151
+ patched_lines.extend(original_lines[orig_idx:])
152
+ return patched_lines
153
+
154
+
155
+ @tool(name="edit_file", category=ToolCategory.FILE_SYSTEM)
156
+ async def edit_file(context: 'AgentContext', path: str, patch: str, create_if_missing: bool = False) -> str:
157
+ """Applies a unified diff patch to update a text file without overwriting unrelated content.
158
+
159
+ Args:
160
+ path: Path to the target file. Relative paths are resolved against the agent workspace when available.
161
+ patch: Unified diff patch describing the edits to apply.
162
+ create_if_missing: When True, allows applying a patch that introduces content to a non-existent file.
163
+
164
+ Raises:
165
+ FileNotFoundError: If the file does not exist and create_if_missing is False.
166
+ PatchApplicationError: If the patch content cannot be applied cleanly.
167
+ IOError: If file reading or writing fails.
168
+ """
169
+ logger.debug("edit_file: requested edit for agent '%s' on path '%s'.", context.agent_id, path)
170
+ final_path = _resolve_file_path(context, path)
171
+
172
+ dir_path = os.path.dirname(final_path)
173
+ if dir_path and not os.path.exists(dir_path) and create_if_missing:
174
+ os.makedirs(dir_path, exist_ok=True)
175
+
176
+ file_exists = os.path.exists(final_path)
177
+ if not file_exists and not create_if_missing:
178
+ raise FileNotFoundError(f"The file at resolved path {final_path} does not exist.")
179
+
180
+ try:
181
+ original_lines: List[str]
182
+ if file_exists:
183
+ with open(final_path, 'r', encoding='utf-8') as source:
184
+ original_lines = source.read().splitlines(keepends=True)
185
+ else:
186
+ original_lines = []
187
+
188
+ patched_lines = _apply_unified_diff(original_lines, patch)
189
+
190
+ with open(final_path, 'w', encoding='utf-8') as destination:
191
+ destination.writelines(patched_lines)
192
+
193
+ logger.info("edit_file: successfully applied patch to '%s'.", final_path)
194
+ return f"File edited successfully at {final_path}"
195
+ except PatchApplicationError as patch_err:
196
+ logger.error("edit_file: failed to apply patch to '%s': %s", final_path, patch_err, exc_info=True)
197
+ raise patch_err
198
+ except Exception as exc: # pragma: no cover - general safeguard
199
+ logger.error("edit_file: unexpected error while editing '%s': %s", final_path, exc, exc_info=True)
200
+ raise IOError(f"Could not edit file at '{final_path}': {exc}")
@@ -0,0 +1,168 @@
1
+ # file: autobyteus/autobyteus/tools/file/list_directory.py
2
+ """
3
+ This module provides a tool for listing directory contents in a structured,
4
+ tree-like format, mirroring the behavior of the Codex Rust implementation.
5
+ """
6
+
7
+ import asyncio
8
+ import logging
9
+ import os
10
+ from pathlib import Path
11
+ from collections import deque
12
+ from dataclasses import dataclass
13
+ from typing import List, Deque, Tuple, Optional, TYPE_CHECKING
14
+
15
+ from autobyteus.tools.functional_tool import tool
16
+ from autobyteus.tools.tool_category import ToolCategory
17
+
18
+ if TYPE_CHECKING:
19
+ from autobyteus.agent.context import AgentContext
20
+
21
+ logger = logging.getLogger(__name__)
22
+
23
# Constants from the design document
INDENTATION_SPACES = 2  # spaces of indent added per level of directory depth
MAX_ENTRY_LENGTH = 500  # entry names longer than this are truncated with a trailing "..."
26
+
27
@dataclass
class DirEntry:
    """Represents a collected directory entry for sorting and formatting."""
    # Display name of the entry (possibly truncated to MAX_ENTRY_LENGTH by the collector).
    name: str
    # Type tag rendered in the listing: "[dir]", "[file]", "[link]", or "[unknown]".
    kind: str
    # Depth relative to the traversal root; the root's direct children are at depth 1.
    depth: int
33
+
34
@tool(name="list_directory", category=ToolCategory.FILE_SYSTEM)
async def list_directory(
    context: 'AgentContext',
    path: str,
    depth: int = 2,
    limit: int = 25,
    offset: int = 1
) -> str:
    """
    Lists the contents of a directory in a structured, tree-like format.

    This tool performs a breadth-first traversal of the specified directory up to a
    given depth. It returns a deterministic, lexicographically sorted list of entries,
    formatted with indentation and tree glyphs to represent the hierarchy.

    Args:
        path: The path to the directory to list. Relative paths are resolved against the agent's workspace.
        depth: The maximum directory depth to traverse. Must be > 0.
        limit: The maximum number of entries to return in the output. Must be > 0.
        offset: The 1-indexed entry number to start from, for pagination. Must be > 0.

    Raises:
        ValueError: If depth, limit, or offset is not positive, or if a relative
            path cannot be resolved because the agent has no usable workspace.
        FileNotFoundError: If the resolved path is not an existing directory.
    """
    # --- 1. Argument Validation ---
    # Validate the cheap numeric arguments BEFORE any workspace resolution or
    # filesystem I/O, so a bad depth/limit/offset always produces this clear
    # message instead of an unrelated workspace or path error.
    if depth <= 0 or limit <= 0 or offset <= 0:
        raise ValueError("depth, limit, and offset must all be greater than zero.")

    logger.debug(f"list_directory for agent {context.agent_id}, initial path: {path}")

    final_path: str
    if os.path.isabs(path):
        final_path = path
        logger.debug(f"Path '{path}' is absolute. Using it directly.")
    else:
        # Relative paths are only meaningful when a workspace provides a base.
        if not context.workspace:
            error_msg = f"Relative path '{path}' provided, but no workspace is configured for agent '{context.agent_id}'. A workspace is required to resolve relative paths."
            logger.error(error_msg)
            raise ValueError(error_msg)

        base_path = context.workspace.get_base_path()
        if not base_path or not isinstance(base_path, str):
            error_msg = f"Agent '{context.agent_id}' has a configured workspace, but it provided an invalid base path ('{base_path}'). Cannot resolve relative path '{path}'."
            logger.error(error_msg)
            raise ValueError(error_msg)

        final_path = os.path.join(base_path, path)
        logger.debug(f"Path '{path}' is relative. Resolved to '{final_path}' using workspace base path '{base_path}'.")

    final_path = os.path.normpath(final_path)

    if not Path(final_path).is_dir():
        raise FileNotFoundError(f"Directory not found at path: {final_path}")

    # --- 2. Asynchronous Traversal ---
    # The blocking os.scandir walk runs in the default thread-pool executor so the
    # event loop stays responsive.
    loop = asyncio.get_running_loop()
    all_entries = await loop.run_in_executor(
        None, _traverse_directory_bfs, Path(final_path), depth
    )

    # --- 3. Slicing ---
    # offset is 1-indexed; an offset past the end simply yields an empty slice.
    total_found = len(all_entries)
    start_index = offset - 1
    end_index = start_index + limit
    sliced_entries = all_entries[start_index:end_index]

    # --- 4. Formatting ---
    output_lines = [f"Absolute path: {final_path}"]

    # To correctly apply tree glyphs, we need to know which entry is the last in its directory
    # This is complex with BFS. A simpler, visually acceptable approach is taken here.
    # For a more accurate glyph representation like the Rust version, we would need to
    # process entries directory by directory after collection.
    for i, entry in enumerate(sliced_entries):
        # A simplified glyph logic: last item in the slice gets the closing glyph
        is_last = (i == len(sliced_entries) - 1)
        output_lines.append(_format_entry_line(entry, is_last))

    if total_found > end_index:
        output_lines.append(f"More than {limit} entries found.")

    return "\n".join(output_lines)
112
+
113
+
114
def _traverse_directory_bfs(start_path: Path, max_depth: int) -> List[DirEntry]:
    """
    Performs a breadth-first traversal of a directory. This is a synchronous function
    designed to be run in a thread pool executor.

    Args:
        start_path: Root directory of the traversal.
        max_depth: Maximum depth to descend; the root's children are at depth 1.

    Returns:
        Entries in BFS order, sorted lexicographically within each scanned directory.
        Unreadable directories are skipped with a warning rather than aborting.
    """
    collected: List[DirEntry] = []
    queue: Deque[Tuple[Path, int]] = deque([(start_path, 0)])

    while queue:
        current_path, current_depth = queue.popleft()

        if current_depth >= max_depth:
            continue

        try:
            # Use os.scandir for efficiency as it fetches file type info
            entries_at_level = []
            for entry in os.scandir(current_path):
                # Classify symlinks FIRST: is_dir()/is_file() follow symlinks, so
                # checking them first would mislabel any symlink whose target exists
                # as [dir]/[file] (making [link] unreachable) and would descend into
                # symlinked directories, re-walking link cycles up to max_depth.
                if entry.is_symlink():
                    kind = "[link]"
                elif entry.is_dir():
                    kind = "[dir]"
                    queue.append((Path(entry.path), current_depth + 1))
                elif entry.is_file():
                    kind = "[file]"
                else:
                    kind = "[unknown]"

                # Truncate long filenames
                display_name = entry.name
                if len(display_name) > MAX_ENTRY_LENGTH:
                    display_name = display_name[:MAX_ENTRY_LENGTH] + "..."

                entries_at_level.append(DirEntry(name=display_name, kind=kind, depth=current_depth + 1))

            # Sort entries at the current level before adding to the main list
            entries_at_level.sort(key=lambda e: e.name)
            collected.extend(entries_at_level)

        except (PermissionError, OSError) as e:
            logger.warning(f"Could not read directory '{current_path}': {e}")
            continue

    return collected
157
+
158
+
159
def _format_entry_line(entry: DirEntry, is_last_in_slice: bool) -> str:
    """Formats a single directory entry into its final string representation."""
    # Glyph choice only knows about the returned slice, not the entry's true
    # siblings; accurate glyphs would require grouping by parent after collection.
    if is_last_in_slice:
        glyph = "└─ "
    else:
        glyph = "├─ "

    # Indent proportionally to how far below the traversal root the entry sits.
    pad = (entry.depth - 1) * INDENTATION_SPACES * " "

    return f"{pad}{glyph}{entry.kind} {entry.name}"