autobyteus 1.1.4__py3-none-any.whl → 1.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autobyteus/agent/context/__init__.py +4 -2
- autobyteus/agent/context/agent_config.py +0 -4
- autobyteus/agent/context/agent_context_registry.py +73 -0
- autobyteus/agent/events/notifiers.py +4 -0
- autobyteus/agent/handlers/inter_agent_message_event_handler.py +7 -2
- autobyteus/agent/handlers/llm_complete_response_received_event_handler.py +19 -19
- autobyteus/agent/handlers/user_input_message_event_handler.py +15 -0
- autobyteus/agent/message/send_message_to.py +29 -23
- autobyteus/agent/runtime/agent_runtime.py +10 -2
- autobyteus/agent/sender_type.py +15 -0
- autobyteus/agent/streaming/agent_event_stream.py +6 -0
- autobyteus/agent/streaming/stream_event_payloads.py +12 -0
- autobyteus/agent/streaming/stream_events.py +3 -0
- autobyteus/agent/system_prompt_processor/tool_manifest_injector_processor.py +7 -4
- autobyteus/agent_team/__init__.py +1 -0
- autobyteus/agent_team/agent_team.py +93 -0
- autobyteus/agent_team/agent_team_builder.py +184 -0
- autobyteus/agent_team/base_agent_team.py +86 -0
- autobyteus/agent_team/bootstrap_steps/__init__.py +24 -0
- autobyteus/agent_team/bootstrap_steps/agent_configuration_preparation_step.py +73 -0
- autobyteus/agent_team/bootstrap_steps/agent_team_bootstrapper.py +54 -0
- autobyteus/agent_team/bootstrap_steps/agent_team_runtime_queue_initialization_step.py +25 -0
- autobyteus/agent_team/bootstrap_steps/base_agent_team_bootstrap_step.py +23 -0
- autobyteus/agent_team/bootstrap_steps/coordinator_initialization_step.py +41 -0
- autobyteus/agent_team/bootstrap_steps/coordinator_prompt_preparation_step.py +85 -0
- autobyteus/agent_team/bootstrap_steps/task_notifier_initialization_step.py +51 -0
- autobyteus/agent_team/bootstrap_steps/team_context_initialization_step.py +45 -0
- autobyteus/agent_team/context/__init__.py +17 -0
- autobyteus/agent_team/context/agent_team_config.py +33 -0
- autobyteus/agent_team/context/agent_team_context.py +61 -0
- autobyteus/agent_team/context/agent_team_runtime_state.py +56 -0
- autobyteus/agent_team/context/team_manager.py +147 -0
- autobyteus/agent_team/context/team_node_config.py +76 -0
- autobyteus/agent_team/events/__init__.py +29 -0
- autobyteus/agent_team/events/agent_team_event_dispatcher.py +39 -0
- autobyteus/agent_team/events/agent_team_events.py +53 -0
- autobyteus/agent_team/events/agent_team_input_event_queue_manager.py +21 -0
- autobyteus/agent_team/exceptions.py +8 -0
- autobyteus/agent_team/factory/__init__.py +9 -0
- autobyteus/agent_team/factory/agent_team_factory.py +99 -0
- autobyteus/agent_team/handlers/__init__.py +19 -0
- autobyteus/agent_team/handlers/agent_team_event_handler_registry.py +23 -0
- autobyteus/agent_team/handlers/base_agent_team_event_handler.py +16 -0
- autobyteus/agent_team/handlers/inter_agent_message_request_event_handler.py +61 -0
- autobyteus/agent_team/handlers/lifecycle_agent_team_event_handler.py +27 -0
- autobyteus/agent_team/handlers/process_user_message_event_handler.py +46 -0
- autobyteus/agent_team/handlers/tool_approval_team_event_handler.py +48 -0
- autobyteus/agent_team/phases/__init__.py +11 -0
- autobyteus/agent_team/phases/agent_team_operational_phase.py +19 -0
- autobyteus/agent_team/phases/agent_team_phase_manager.py +48 -0
- autobyteus/agent_team/runtime/__init__.py +13 -0
- autobyteus/agent_team/runtime/agent_team_runtime.py +82 -0
- autobyteus/agent_team/runtime/agent_team_worker.py +117 -0
- autobyteus/agent_team/shutdown_steps/__init__.py +17 -0
- autobyteus/agent_team/shutdown_steps/agent_team_shutdown_orchestrator.py +35 -0
- autobyteus/agent_team/shutdown_steps/agent_team_shutdown_step.py +42 -0
- autobyteus/agent_team/shutdown_steps/base_agent_team_shutdown_step.py +16 -0
- autobyteus/agent_team/shutdown_steps/bridge_cleanup_step.py +28 -0
- autobyteus/agent_team/shutdown_steps/sub_team_shutdown_step.py +41 -0
- autobyteus/agent_team/streaming/__init__.py +26 -0
- autobyteus/agent_team/streaming/agent_event_bridge.py +48 -0
- autobyteus/agent_team/streaming/agent_event_multiplexer.py +70 -0
- autobyteus/agent_team/streaming/agent_team_event_notifier.py +64 -0
- autobyteus/agent_team/streaming/agent_team_event_stream.py +33 -0
- autobyteus/agent_team/streaming/agent_team_stream_event_payloads.py +32 -0
- autobyteus/agent_team/streaming/agent_team_stream_events.py +56 -0
- autobyteus/agent_team/streaming/team_event_bridge.py +50 -0
- autobyteus/agent_team/task_notification/__init__.py +11 -0
- autobyteus/agent_team/task_notification/system_event_driven_agent_task_notifier.py +164 -0
- autobyteus/agent_team/task_notification/task_notification_mode.py +24 -0
- autobyteus/agent_team/utils/__init__.py +9 -0
- autobyteus/agent_team/utils/wait_for_idle.py +46 -0
- autobyteus/cli/agent_team_tui/__init__.py +4 -0
- autobyteus/cli/agent_team_tui/app.py +210 -0
- autobyteus/cli/agent_team_tui/state.py +180 -0
- autobyteus/cli/agent_team_tui/widgets/__init__.py +6 -0
- autobyteus/cli/agent_team_tui/widgets/agent_list_sidebar.py +149 -0
- autobyteus/cli/agent_team_tui/widgets/focus_pane.py +320 -0
- autobyteus/cli/agent_team_tui/widgets/logo.py +20 -0
- autobyteus/cli/agent_team_tui/widgets/renderables.py +77 -0
- autobyteus/cli/agent_team_tui/widgets/shared.py +60 -0
- autobyteus/cli/agent_team_tui/widgets/status_bar.py +14 -0
- autobyteus/cli/agent_team_tui/widgets/task_board_panel.py +82 -0
- autobyteus/events/event_types.py +7 -2
- autobyteus/llm/api/autobyteus_llm.py +11 -12
- autobyteus/llm/api/lmstudio_llm.py +10 -13
- autobyteus/llm/api/ollama_llm.py +8 -13
- autobyteus/llm/autobyteus_provider.py +73 -46
- autobyteus/llm/llm_factory.py +102 -140
- autobyteus/llm/lmstudio_provider.py +63 -48
- autobyteus/llm/models.py +83 -53
- autobyteus/llm/ollama_provider.py +69 -61
- autobyteus/llm/ollama_provider_resolver.py +1 -0
- autobyteus/llm/providers.py +13 -13
- autobyteus/llm/runtimes.py +11 -0
- autobyteus/task_management/__init__.py +43 -0
- autobyteus/task_management/base_task_board.py +68 -0
- autobyteus/task_management/converters/__init__.py +11 -0
- autobyteus/task_management/converters/task_board_converter.py +64 -0
- autobyteus/task_management/converters/task_plan_converter.py +48 -0
- autobyteus/task_management/deliverable.py +16 -0
- autobyteus/task_management/deliverables/__init__.py +8 -0
- autobyteus/task_management/deliverables/file_deliverable.py +15 -0
- autobyteus/task_management/events.py +27 -0
- autobyteus/task_management/in_memory_task_board.py +126 -0
- autobyteus/task_management/schemas/__init__.py +15 -0
- autobyteus/task_management/schemas/deliverable_schema.py +13 -0
- autobyteus/task_management/schemas/plan_definition.py +35 -0
- autobyteus/task_management/schemas/task_status_report.py +27 -0
- autobyteus/task_management/task_plan.py +110 -0
- autobyteus/task_management/tools/__init__.py +14 -0
- autobyteus/task_management/tools/get_task_board_status.py +68 -0
- autobyteus/task_management/tools/publish_task_plan.py +113 -0
- autobyteus/task_management/tools/update_task_status.py +135 -0
- autobyteus/tools/bash/bash_executor.py +59 -14
- autobyteus/tools/mcp/config_service.py +63 -58
- autobyteus/tools/mcp/server/http_managed_mcp_server.py +14 -2
- autobyteus/tools/mcp/server/stdio_managed_mcp_server.py +14 -2
- autobyteus/tools/mcp/server_instance_manager.py +30 -4
- autobyteus/tools/mcp/tool_registrar.py +103 -50
- autobyteus/tools/parameter_schema.py +17 -11
- autobyteus/tools/registry/tool_definition.py +24 -29
- autobyteus/tools/tool_category.py +1 -0
- autobyteus/tools/usage/formatters/default_json_example_formatter.py +78 -3
- autobyteus/tools/usage/formatters/default_xml_example_formatter.py +23 -3
- autobyteus/tools/usage/formatters/gemini_json_example_formatter.py +6 -0
- autobyteus/tools/usage/formatters/google_json_example_formatter.py +7 -0
- autobyteus/tools/usage/formatters/openai_json_example_formatter.py +6 -4
- autobyteus/tools/usage/parsers/gemini_json_tool_usage_parser.py +23 -7
- autobyteus/tools/usage/parsers/provider_aware_tool_usage_parser.py +14 -25
- autobyteus/tools/usage/providers/__init__.py +2 -12
- autobyteus/tools/usage/providers/tool_manifest_provider.py +36 -29
- autobyteus/tools/usage/registries/__init__.py +7 -12
- autobyteus/tools/usage/registries/tool_formatter_pair.py +15 -0
- autobyteus/tools/usage/registries/tool_formatting_registry.py +58 -0
- autobyteus/tools/usage/registries/tool_usage_parser_registry.py +55 -0
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/METADATA +3 -3
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/RECORD +146 -72
- examples/agent_team/__init__.py +1 -0
- examples/run_browser_agent.py +17 -15
- examples/run_google_slides_agent.py +17 -16
- examples/run_poem_writer.py +22 -12
- examples/run_sqlite_agent.py +17 -15
- autobyteus/tools/mcp/call_handlers/__init__.py +0 -16
- autobyteus/tools/mcp/call_handlers/base_handler.py +0 -40
- autobyteus/tools/mcp/call_handlers/stdio_handler.py +0 -76
- autobyteus/tools/mcp/call_handlers/streamable_http_handler.py +0 -55
- autobyteus/tools/usage/providers/json_example_provider.py +0 -32
- autobyteus/tools/usage/providers/json_schema_provider.py +0 -35
- autobyteus/tools/usage/providers/json_tool_usage_parser_provider.py +0 -28
- autobyteus/tools/usage/providers/xml_example_provider.py +0 -28
- autobyteus/tools/usage/providers/xml_schema_provider.py +0 -29
- autobyteus/tools/usage/providers/xml_tool_usage_parser_provider.py +0 -26
- autobyteus/tools/usage/registries/json_example_formatter_registry.py +0 -51
- autobyteus/tools/usage/registries/json_schema_formatter_registry.py +0 -51
- autobyteus/tools/usage/registries/json_tool_usage_parser_registry.py +0 -42
- autobyteus/tools/usage/registries/xml_example_formatter_registry.py +0 -30
- autobyteus/tools/usage/registries/xml_schema_formatter_registry.py +0 -33
- autobyteus/tools/usage/registries/xml_tool_usage_parser_registry.py +0 -30
- examples/workflow/__init__.py +0 -1
- examples/workflow/run_basic_research_workflow.py +0 -189
- examples/workflow/run_code_review_workflow.py +0 -269
- examples/workflow/run_debate_workflow.py +0 -212
- examples/workflow/run_workflow_with_tui.py +0 -153
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/WHEEL +0 -0
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/licenses/LICENSE +0 -0
- {autobyteus-1.1.4.dist-info → autobyteus-1.1.5.dist-info}/top_level.txt +0 -0
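The deletions under examples/workflow/ together with the additions under autobyteus/agent_team/ (including agent_team_builder.py) and autobyteus/cli/agent_team_tui/ suggest that the workflow APIs used by the removed example scripts shown below were superseded by an "agent team" equivalent in 1.1.5. As a rough orientation only, here is a minimal sketch of how such a builder might be used, modeled on the removed WorkflowBuilder example further down; the AgentTeamBuilder class name and its methods are assumptions inferred from the new file names and are not confirmed by this diff.

```python
# Hypothetical sketch only. AgentTeamBuilder and its method names are assumptions
# inferred from the new files in this diff (agent_team_builder.py, agent_team.py);
# the chained calls mirror the removed WorkflowBuilder example shown below.
from autobyteus.agent.context import AgentConfig
from autobyteus.llm.llm_factory import default_llm_factory
from autobyteus.agent_team.agent_team_builder import AgentTeamBuilder  # module added in 1.1.5

llm = default_llm_factory.create_llm(model_identifier="gpt-4o")

coordinator = AgentConfig(
    name="ResearchManager",
    role="Coordinator",
    description="Delegates research goals to specialist agents.",
    llm_instance=llm,
    system_prompt="You are the manager of a research team.\n\n{{tools}}",
)

specialist = AgentConfig(
    name="FactChecker",
    role="Specialist",
    description="Answers direct factual questions.",
    llm_instance=llm,
    system_prompt="You are a fact-checking bot.",
)

# Assumed builder API, by analogy with the removed WorkflowBuilder chain:
team = (
    AgentTeamBuilder(
        name="BasicResearchTeam",
        description="A two-agent delegation example.",
    )
    .set_coordinator(coordinator)
    .add_agent_node(specialist)
    .build()
)
```

For the pre-1.1.5 pattern this replaces, see the deleted run_basic_research_workflow.py hunk below.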
autobyteus/tools/usage/registries/xml_tool_usage_parser_registry.py
DELETED
@@ -1,30 +0,0 @@
-# file: autobyteus/autobyteus/tools/usage/registries/xml_tool_usage_parser_registry.py
-import logging
-from typing import Optional
-
-from autobyteus.llm.providers import LLMProvider
-from autobyteus.tools.usage.parsers.base_parser import BaseToolUsageParser
-from autobyteus.tools.usage.parsers.default_xml_tool_usage_parser import DefaultXmlToolUsageParser
-from autobyteus.utils.singleton import SingletonMeta
-
-logger = logging.getLogger(__name__)
-
-class XmlToolUsageParserRegistry(metaclass=SingletonMeta):
-    """A singleton registry for retrieving XML-based tool usage parsers."""
-
-    def __init__(self):
-        self._default_parser = DefaultXmlToolUsageParser()
-        logger.info("XmlToolUsageParserRegistry initialized.")
-
-    def get_parser(self, provider: Optional[LLMProvider] = None) -> BaseToolUsageParser:
-        """
-        Retrieves the appropriate XML parser.
-
-        Args:
-            provider: The LLMProvider enum member. Currently ignored but kept for API consistency.
-
-        Returns:
-            An instance of a class derived from BaseToolUsageParser.
-        """
-        # For now, there's only one XML format, so always return the default.
-        return self._default_parser
examples/workflow/__init__.py
DELETED
@@ -1 +0,0 @@
-# file: autobyteus/examples/workflow/__init__.py
examples/workflow/run_basic_research_workflow.py
DELETED
@@ -1,189 +0,0 @@
-# file: autobyteus/examples/workflow/run_basic_research_workflow.py
-import asyncio
-import logging
-import argparse
-from pathlib import Path
-import sys
-import os
-
-# --- Boilerplate to make the script runnable from the project root ---
-SCRIPT_DIR = Path(__file__).resolve().parent.parent
-PACKAGE_ROOT = SCRIPT_DIR.parent
-if str(PACKAGE_ROOT) not in sys.path:
-    sys.path.insert(0, str(PACKAGE_ROOT))
-
-# Load environment variables from .env file in the project root
-try:
-    from dotenv import load_dotenv
-    env_file_path = PACKAGE_ROOT / ".env"
-    if env_file_path.exists():
-        load_dotenv(env_file_path)
-        print(f"Loaded environment variables from: {env_file_path}")
-    else:
-        print(f"Info: No .env file found at: {env_file_path}. Relying on exported environment variables.")
-except ImportError:
-    print("Warning: python-dotenv not installed. Cannot load .env file.")
-
-# --- Imports for the Workflow Example ---
-try:
-    from autobyteus.agent.context import AgentConfig
-    from autobyteus.llm.models import LLMModel
-    from autobyteus.llm.llm_factory import default_llm_factory, LLMFactory
-    from autobyteus.workflow.workflow_builder import WorkflowBuilder
-    from autobyteus.cli import workflow_cli
-except ImportError as e:
-    print(f"Error importing autobyteus components: {e}", file=sys.stderr)
-    print("Please ensure that the autobyteus library is installed and accessible.", file=sys.stderr)
-    sys.exit(1)
-
-# --- Logging Setup ---
-logger = logging.getLogger("basic_workflow_example")
-
-def setup_logging(args: argparse.Namespace):
-    """Configures logging for the interactive session."""
-    loggers_to_clear = [
-        logging.getLogger(),
-        logging.getLogger("autobyteus"),
-        logging.getLogger("autobyteus.cli"),
-    ]
-    for l in loggers_to_clear:
-        if l.hasHandlers():
-            for handler in l.handlers[:]:
-                l.removeHandler(handler)
-                if hasattr(handler, 'close'): handler.close()
-
-    script_log_level = logging.DEBUG if args.debug else logging.INFO
-
-    console_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
-    formatted_console_handler = logging.StreamHandler(sys.stdout)
-    formatted_console_handler.setFormatter(console_formatter)
-
-    root_logger = logging.getLogger()
-    root_logger.addHandler(formatted_console_handler)
-    root_logger.setLevel(script_log_level)
-
-    # Configure the main log file
-    log_file_path = Path(args.log_file).resolve()
-    log_file_path.parent.mkdir(parents=True, exist_ok=True)
-    agent_file_handler = logging.FileHandler(log_file_path, mode='w')
-    agent_file_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(message)s')
-    agent_file_handler.setFormatter(agent_file_formatter)
-    file_log_level = logging.DEBUG if args.debug else logging.INFO
-
-    autobyteus_logger = logging.getLogger("autobyteus")
-    autobyteus_logger.addHandler(agent_file_handler)
-    autobyteus_logger.setLevel(file_log_level)
-    autobyteus_logger.propagate = True
-
-    logger.info(f"Core library logs redirected to: {log_file_path} (level: {logging.getLevelName(file_log_level)})")
-
-
-async def main(args: argparse.Namespace):
-    """Main function to configure and run the research workflow."""
-    logger.info("--- Starting Basic Research Workflow Example ---")
-
-    # 1. Create LLM instance for the agents
-    try:
-        _ = LLMModel[args.llm_model]
-    except KeyError:
-        logger.error(f"LLM Model '{args.llm_model}' is not valid. Use --help-models to see available models.")
-        sys.exit(1)
-
-    logger.info(f"Creating LLM instance for model: {args.llm_model}")
-    llm_instance = default_llm_factory.create_llm(model_identifier=args.llm_model)
-
-    # 2. Define the Agent Configurations
-
-    # The Coordinator/Manager Agent
-    research_manager_config = AgentConfig(
-        name="ResearchManager",
-        role="Coordinator",
-        description="A manager agent that receives research goals and delegates them to specialists.",
-        llm_instance=llm_instance,
-        # The prompt is now simpler, as the workflow builder will handle context.
-        # The {{tools}} placeholder is essential for tool injection.
-        system_prompt=(
-            "You are the manager of a research team. Your job is to understand the user's research goal and delegate it to the correct specialist agent on your team. "
-            "Do not answer questions yourself; always delegate. "
-            "You will be provided a manifest of your team members and available tools.\n\n"
-            "{{tools}}"
-        ),
-    )
-
-    # The Worker/Specialist Agent
-    fact_checker_config = AgentConfig(
-        name="FactChecker",
-        role="Specialist",
-        description="An agent with a limited, internal knowledge base for answering direct factual questions.",
-        llm_instance=llm_instance,
-        system_prompt=(
-            "You are a fact-checking bot. You have the following knowledge:\n"
-            "- The capital of France is Paris.\n"
-            "- The tallest mountain on Earth is Mount Everest.\n"
-            "- The primary programming language for AutoByteUs is Python.\n"
-            "You MUST ONLY answer questions based on this knowledge. If you are asked something you do not know, you MUST respond with 'I do not have information on that topic.'"
-        )
-    )
-
-    # 3. Define and Build the Workflow using WorkflowBuilder
-
-    research_workflow = (
-        WorkflowBuilder(
-            name="BasicResearchWorkflow",
-            description="A simple two-agent workflow for delegating and answering research questions."
-        )
-        .set_coordinator(research_manager_config)
-        .add_agent_node(fact_checker_config, dependencies=[])
-        .build()
-    )
-
-    # 4. Run the Workflow
-
-    logger.info(f"Workflow instance '{research_workflow.name}' created with ID: {research_workflow.workflow_id}")
-
-    try:
-        logger.info("Starting interactive workflow session...")
-        await workflow_cli.run_workflow(
-            workflow=research_workflow,
-            initial_prompt=args.initial_prompt
-        )
-        logger.info("Interactive workflow session finished.")
-    except Exception as e:
-        logger.error(f"An error occurred during the workflow execution: {e}", exc_info=True)
-
-    logger.info("--- Basic Research Workflow Example Finished ---")
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Run a basic two-agent research workflow.")
-    parser.add_argument("--llm-model", type=str, default="gpt-4o", help="The LLM model to use for the agents.")
-    parser.add_argument("--help-models", action="store_true", help="Display available LLM models and exit.")
-    parser.add_argument("--initial-prompt", type=str, help="An optional initial prompt to start the workflow automatically.")
-    parser.add_argument("--debug", action="store_true", help="Enable debug logging.")
-    parser.add_argument("--log-file", type=str, default="./workflow_logs.txt",
-                        help="Path to the log file for autobyteus library logs.")
-
-    if "--help-models" in sys.argv:
-        try:
-            LLMFactory.ensure_initialized()
-            print("Available LLM Models (you can use either name or value with --llm-model):")
-            all_models = sorted(list(LLMModel), key=lambda m: m.name)
-            if not all_models:
-                print("  No models found.")
-            for model in all_models:
-                print(f"  - Name: {model.name:<35} Value: {model.value}")
-        except Exception as e:
-            print(f"Error listing models: {e}")
-        sys.exit(0)
-
-    parsed_args = parser.parse_args()
-
-    setup_logging(parsed_args)
-
-    try:
-        asyncio.run(main(parsed_args))
-    except (KeyboardInterrupt, SystemExit):
-        logger.info("Script interrupted by user. Exiting.")
-    except Exception as e:
-        logger.error(f"An unhandled error occurred at the top level: {e}", exc_info=True)
-    finally:
-        logger.info("Exiting script.")
examples/workflow/run_code_review_workflow.py
DELETED
@@ -1,269 +0,0 @@
-# file: autobyteus/examples/workflow/run_code_review_workflow.py
-"""
-This example script demonstrates a simple software development workflow
-with a coordinator, an engineer, a code reviewer, a test writer, and a tester.
-"""
-import asyncio
-import logging
-import argparse
-from pathlib import Path
-import sys
-import os
-
-# --- Boilerplate to make the script runnable from the project root ---
-SCRIPT_DIR = Path(__file__).resolve().parent.parent
-PACKAGE_ROOT = SCRIPT_DIR.parent
-if str(PACKAGE_ROOT) not in sys.path:
-    sys.path.insert(0, str(PACKAGE_ROOT))
-
-# Load environment variables from .env file
-try:
-    from dotenv import load_dotenv
-    load_dotenv(PACKAGE_ROOT / ".env")
-except ImportError:
-    pass
-
-# --- Imports for the Workflow TUI Example ---
-try:
-    from autobyteus.agent.context import AgentConfig
-    from autobyteus.llm.models import LLMModel
-    from autobyteus.llm.llm_factory import default_llm_factory, LLMFactory
-    from autobyteus.workflow.workflow_builder import WorkflowBuilder
-    from autobyteus.cli.workflow_tui.app import WorkflowApp
-    from autobyteus.tools import file_writer, file_reader, bash_executor
-    from autobyteus.agent.workspace import BaseAgentWorkspace, WorkspaceConfig
-    from autobyteus.tools.parameter_schema import ParameterSchema, ParameterDefinition, ParameterType
-except ImportError as e:
-    print(f"Error importing autobyteus components: {e}", file=sys.stderr)
-    sys.exit(1)
-
-# --- A simple, self-contained workspace for this example ---
-class SimpleLocalWorkspace(BaseAgentWorkspace):
-    """A minimal workspace for local file system access."""
-
-    def __init__(self, config: WorkspaceConfig):
-        super().__init__(config)
-        self.root_path: str = config.get("root_path")
-        if not self.root_path:
-            raise ValueError("SimpleLocalWorkspace requires a 'root_path' in its config.")
-
-    def get_base_path(self) -> str:
-        return self.root_path
-
-    @classmethod
-    def get_workspace_type_name(cls) -> str:
-        return "simple_local_workspace_for_review"
-
-    @classmethod
-    def get_description(cls) -> str:
-        return "A basic workspace for local file access for the code review workflow."
-
-    @classmethod
-    def get_config_schema(cls) -> ParameterSchema:
-        schema = ParameterSchema()
-        schema.add_parameter(ParameterDefinition(
-            name="root_path",
-            param_type=ParameterType.STRING,
-            description="The absolute local file path for the workspace root.",
-            required=True
-        ))
-        return schema
-
-
-# --- Logging Setup ---
-def setup_file_logging() -> Path:
-    log_dir = PACKAGE_ROOT / "logs"
-    log_dir.mkdir(exist_ok=True)
-    log_file_path = log_dir / "code_review_workflow_tui_app.log"
-    logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", filename=log_file_path, filemode="w")
-    logging.getLogger("asyncio").setLevel(logging.WARNING)
-    logging.getLogger("textual").setLevel(logging.WARNING)
-    return log_file_path
-
-def create_code_review_workflow(
-    coordinator_model: str,
-    engineer_model: str,
-    reviewer_model: str,
-    test_writer_model: str,
-    tester_model: str,
-    workspace: BaseAgentWorkspace,
-    use_xml_tool_format: bool = True
-):
-    """Creates the code review workflow."""
-
-    # --- AGENT CONFIGURATIONS ---
-
-    # Coordinator Agent
-    coordinator_config = AgentConfig(
-        name="ProjectManager", role="Coordinator", description="Manages the development process, assigning tasks to the team.",
-        llm_instance=default_llm_factory.create_llm(model_identifier=coordinator_model),
-        system_prompt=(
-            "You are the project manager for a software team. Your role is to manage a strict, sequential code development, review, and testing process. Your team consists of a SoftwareEngineer, a CodeReviewer, a TestWriter, and a Tester.\n\n"
-            "### Your Workflow\n"
-            "You must follow this workflow precisely:\n"
-            "1. **Delegate to Engineer:** Receive a request from the user to write code to a specific filename. Instruct the `SoftwareEngineer` to write the code and save it.\n"
-            "2. **Delegate to Reviewer:** After the engineer confirms completion, instruct the `CodeReviewer` to review the code. You must provide the filename to the reviewer.\n"
-            "3. **Delegate to Test Writer:** After the review is complete, instruct the `TestWriter` to write pytest tests for the code. Provide the original source filename and tell them to save the tests in a new file, like `test_FILENAME.py`.\n"
-            "4. **Delegate to Tester:** After the tests are written, instruct the `Tester` to run the tests. You must provide the filename of the test file.\n"
-            "5. **Report to User:** Once you receive the test results, present the final status (code written, reviewed, and tests passed/failed) to the user.\n\n"
-            "**CRITICAL RULE:** This is a sequential process. You must wait for one agent to finish before contacting the next. You are the central point of communication.\n\n"
-            "{{tools}}"
-        ),
-        use_xml_tool_format=use_xml_tool_format
-    )
-
-    # Software Engineer Agent
-    engineer_config = AgentConfig(
-        name="SoftwareEngineer", role="Developer", description="Writes Python code based on instructions and saves it to a file.",
-        llm_instance=default_llm_factory.create_llm(model_identifier=engineer_model),
-        system_prompt=(
-            "You are a skilled Python software engineer. You receive tasks from your ProjectManager. "
-            "Your job is to write high-quality Python code to fulfill the request. "
-            "After writing the code, you MUST save it to the specified filename using the `FileWriter` tool. "
-            "Confirm completion once the file is saved.\n\n{{tools}}"
-        ),
-        tools=[file_writer],
-        workspace=workspace,
-        use_xml_tool_format=use_xml_tool_format
-    )
-
-    # Code Reviewer Agent
-    reviewer_config = AgentConfig(
-        name="CodeReviewer", role="Senior Developer", description="Reads and reviews Python code from files for quality and correctness.",
-        llm_instance=default_llm_factory.create_llm(model_identifier=reviewer_model),
-        system_prompt=(
-            "You are a senior software engineer acting as a code reviewer. You will be given a file path to review. "
-            "You MUST use the `FileReader` tool to read the code from the file. "
-            "After reading the code, provide a constructive review, identifying any potential bugs, style issues, or areas for improvement.\n\n{{tools}}"
-        ),
-        tools=[file_reader],
-        workspace=workspace,
-        use_xml_tool_format=use_xml_tool_format
-    )
-
-    # Test Writer Agent
-    test_writer_config = AgentConfig(
-        name="TestWriter", role="QA Engineer", description="Writes pytest tests for Python code.",
-        llm_instance=default_llm_factory.create_llm(model_identifier=test_writer_model),
-        system_prompt=(
-            "You are a QA engineer specializing in testing. You will be given the path to a Python source file. "
-            "Your task is to read that file, write comprehensive tests for it using the `pytest` framework, and save the tests to a new file. "
-            "The test filename MUST start with `test_`. For example, if you are testing `code.py`, you should save the tests in `test_code.py`.\n\n{{tools}}"
-        ),
-        tools=[file_reader, file_writer],
-        workspace=workspace,
-        use_xml_tool_format=use_xml_tool_format
-    )
-
-    # Tester Agent
-    tester_config = AgentConfig(
-        name="Tester", role="QA Automation", description="Executes pytest tests and reports results.",
-        llm_instance=default_llm_factory.create_llm(model_identifier=tester_model),
-        system_prompt=(
-            "You are a QA automation specialist. Your job is to run tests. You will be given a test file to execute. "
-            "You MUST use the `BashExecutor` tool to run the command `pytest` on the given test file. "
-            "Report the full output from the command back to the Project Manager.\n\n{{tools}}"
-        ),
-        tools=[bash_executor],
-        workspace=workspace,
-        use_xml_tool_format=use_xml_tool_format
-    )
-
-
-    # --- BUILD THE WORKFLOW ---
-
-    code_review_workflow = (
-        WorkflowBuilder(name="SoftwareDevWorkflow", description="A workflow for writing, reviewing, and testing code.")
-        .set_coordinator(coordinator_config)
-        .add_agent_node(engineer_config)
-        .add_agent_node(reviewer_config)
-        .add_agent_node(test_writer_config)
-        .add_agent_node(tester_config)
-        .build()
-    )
-
-    return code_review_workflow
-
-async def main(args: argparse.Namespace, log_file: Path):
-    """Main async function to create the workflow and run the TUI app."""
-    print("Setting up software development workflow...")
-    print(f"--> Logs will be written to: {log_file.resolve()}")
-
-    workspace_path = Path(args.output_dir).resolve()
-    workspace_path.mkdir(parents=True, exist_ok=True)
-    print(f"--> Agent workspace (output directory) is set to: {workspace_path}")
-
-    workspace_config = WorkspaceConfig(params={"root_path": str(workspace_path)})
-    workspace = SimpleLocalWorkspace(config=workspace_config)
-
-    # Resolve models
-    coordinator_model = args.coordinator_model or args.llm_model
-    engineer_model = args.engineer_model or args.llm_model
-    reviewer_model = args.reviewer_model or args.llm_model
-    test_writer_model = args.test_writer_model or args.llm_model
-    tester_model = args.tester_model or args.llm_model
-
-    print(f"--> Coordinator Model: {coordinator_model}")
-    print(f"--> Engineer Model: {engineer_model}")
-    print(f"--> Reviewer Model: {reviewer_model}")
-    print(f"--> Test Writer Model: {test_writer_model}")
-    print(f"--> Tester Model: {tester_model}")
-
-    use_xml_tool_format = not args.no_xml_tools
-    print(f"--> Using XML Tool Format: {use_xml_tool_format}")
-
-    try:
-        workflow = create_code_review_workflow(
-            coordinator_model=coordinator_model,
-            engineer_model=engineer_model,
-            reviewer_model=reviewer_model,
-            test_writer_model=test_writer_model,
-            tester_model=tester_model,
-            workspace=workspace,
-            use_xml_tool_format=use_xml_tool_format
-        )
-        app = WorkflowApp(workflow=workflow)
-        await app.run_async()
-    except Exception as e:
-        logging.critical(f"Failed to create or run workflow TUI: {e}", exc_info=True)
-        print(f"\nCRITICAL ERROR: {e}\nCheck log file for details: {log_file.resolve()}")

-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(
-        description="Run a software development workflow with a Textual TUI.",
-        formatter_class=argparse.RawTextHelpFormatter
-    )
-    parser.add_argument("--llm-model", type=str, default="kimi-latest", help="The default LLM model for all agents.")
-    parser.add_argument("--coordinator-model", type=str, help="Specific LLM model for the ProjectManager. Defaults to --llm-model.")
-    parser.add_argument("--engineer-model", type=str, help="Specific LLM model for the SoftwareEngineer. Defaults to --llm-model.")
-    parser.add_argument("--reviewer-model", type=str, help="Specific LLM model for the CodeReviewer. Defaults to --llm-model.")
-    parser.add_argument("--test-writer-model", type=str, help="Specific LLM model for the TestWriter. Defaults to --llm-model.")
-    parser.add_argument("--tester-model", type=str, help="Specific LLM model for the Tester. Defaults to --llm-model.")
-    parser.add_argument("--output-dir", type=str, default="./code_review_output", help="Directory for the shared workspace.")
-    parser.add_argument("--no-xml-tools", action="store_true", help="Disable XML-based tool formatting.")
-    parser.add_argument("--help-models", action="store_true", help="Display available LLM models and exit.")
-
-    if "--help-models" in sys.argv:
-        try:
-            LLMFactory.ensure_initialized()
-            print("Available LLM Models (you can use either name or value with model arguments):")
-            all_models = sorted(list(LLMModel), key=lambda m: m.name)
-            if not all_models:
-                print("  No models found.")
-            for model in all_models:
-                print(f"  - Name: {model.name:<35} Value: {model.value}")
-        except Exception as e:
-            print(f"Error listing models: {e}")
-        sys.exit(0)
-
-    parsed_args = parser.parse_args()
-
-    log_file_path = setup_file_logging()
-    try:
-        asyncio.run(main(parsed_args, log_file_path))
-    except KeyboardInterrupt:
-        print("\nExiting application.")
-    except Exception as e:
-        logging.critical(f"Top-level application error: {e}", exc_info=True)
-        print(f"\nUNHANDLED ERROR: {e}\nCheck log file for details: {log_file_path.resolve()}")
-