autobyteus 1.1.2__py3-none-any.whl → 1.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- autobyteus/agent/agent.py +1 -1
- autobyteus/agent/bootstrap_steps/__init__.py +2 -0
- autobyteus/agent/bootstrap_steps/agent_bootstrapper.py +2 -0
- autobyteus/agent/bootstrap_steps/mcp_server_prewarming_step.py +71 -0
- autobyteus/agent/bootstrap_steps/system_prompt_processing_step.py +4 -2
- autobyteus/agent/context/agent_config.py +36 -5
- autobyteus/agent/events/worker_event_dispatcher.py +1 -2
- autobyteus/agent/handlers/inter_agent_message_event_handler.py +1 -1
- autobyteus/agent/handlers/llm_user_message_ready_event_handler.py +2 -2
- autobyteus/agent/handlers/tool_result_event_handler.py +48 -20
- autobyteus/agent/handlers/user_input_message_event_handler.py +1 -1
- autobyteus/agent/input_processor/__init__.py +1 -7
- autobyteus/agent/llm_response_processor/provider_aware_tool_usage_processor.py +41 -12
- autobyteus/agent/message/context_file_type.py +6 -0
- autobyteus/agent/message/send_message_to.py +68 -99
- autobyteus/agent/phases/discover.py +2 -1
- autobyteus/agent/runtime/agent_worker.py +25 -34
- autobyteus/agent/shutdown_steps/__init__.py +17 -0
- autobyteus/agent/shutdown_steps/agent_shutdown_orchestrator.py +63 -0
- autobyteus/agent/shutdown_steps/base_shutdown_step.py +33 -0
- autobyteus/agent/shutdown_steps/llm_instance_cleanup_step.py +45 -0
- autobyteus/agent/shutdown_steps/mcp_server_cleanup_step.py +32 -0
- autobyteus/agent/tool_execution_result_processor/__init__.py +9 -0
- autobyteus/agent/tool_execution_result_processor/base_processor.py +46 -0
- autobyteus/agent/tool_execution_result_processor/processor_definition.py +36 -0
- autobyteus/agent/tool_execution_result_processor/processor_meta.py +36 -0
- autobyteus/agent/tool_execution_result_processor/processor_registry.py +70 -0
- autobyteus/agent/workspace/base_workspace.py +17 -2
- autobyteus/cli/__init__.py +1 -1
- autobyteus/cli/cli_display.py +1 -1
- autobyteus/cli/workflow_tui/__init__.py +4 -0
- autobyteus/cli/workflow_tui/app.py +210 -0
- autobyteus/cli/workflow_tui/state.py +189 -0
- autobyteus/cli/workflow_tui/widgets/__init__.py +6 -0
- autobyteus/cli/workflow_tui/widgets/agent_list_sidebar.py +149 -0
- autobyteus/cli/workflow_tui/widgets/focus_pane.py +335 -0
- autobyteus/cli/workflow_tui/widgets/logo.py +27 -0
- autobyteus/cli/workflow_tui/widgets/renderables.py +70 -0
- autobyteus/cli/workflow_tui/widgets/shared.py +51 -0
- autobyteus/cli/workflow_tui/widgets/status_bar.py +14 -0
- autobyteus/events/event_types.py +3 -0
- autobyteus/llm/api/lmstudio_llm.py +37 -0
- autobyteus/llm/api/openai_compatible_llm.py +20 -3
- autobyteus/llm/llm_factory.py +2 -0
- autobyteus/llm/lmstudio_provider.py +89 -0
- autobyteus/llm/providers.py +1 -0
- autobyteus/llm/token_counter/token_counter_factory.py +2 -0
- autobyteus/tools/__init__.py +2 -0
- autobyteus/tools/ask_user_input.py +2 -1
- autobyteus/tools/base_tool.py +2 -0
- autobyteus/tools/bash/bash_executor.py +2 -1
- autobyteus/tools/browser/session_aware/browser_session_aware_navigate_to.py +2 -0
- autobyteus/tools/browser/session_aware/browser_session_aware_web_element_trigger.py +3 -0
- autobyteus/tools/browser/session_aware/browser_session_aware_webpage_reader.py +3 -0
- autobyteus/tools/browser/session_aware/browser_session_aware_webpage_screenshot_taker.py +3 -0
- autobyteus/tools/browser/standalone/google_search_ui.py +2 -0
- autobyteus/tools/browser/standalone/navigate_to.py +2 -0
- autobyteus/tools/browser/standalone/web_page_pdf_generator.py +3 -0
- autobyteus/tools/browser/standalone/webpage_image_downloader.py +3 -0
- autobyteus/tools/browser/standalone/webpage_reader.py +2 -0
- autobyteus/tools/browser/standalone/webpage_screenshot_taker.py +3 -0
- autobyteus/tools/file/file_reader.py +36 -9
- autobyteus/tools/file/file_writer.py +37 -9
- autobyteus/tools/functional_tool.py +5 -4
- autobyteus/tools/image_downloader.py +2 -0
- autobyteus/tools/mcp/__init__.py +10 -7
- autobyteus/tools/mcp/call_handlers/__init__.py +0 -2
- autobyteus/tools/mcp/config_service.py +1 -6
- autobyteus/tools/mcp/factory.py +12 -26
- autobyteus/tools/mcp/server/__init__.py +16 -0
- autobyteus/tools/mcp/server/base_managed_mcp_server.py +139 -0
- autobyteus/tools/mcp/server/http_managed_mcp_server.py +29 -0
- autobyteus/tools/mcp/server/proxy.py +36 -0
- autobyteus/tools/mcp/server/stdio_managed_mcp_server.py +33 -0
- autobyteus/tools/mcp/server_instance_manager.py +93 -0
- autobyteus/tools/mcp/tool.py +28 -46
- autobyteus/tools/mcp/tool_registrar.py +179 -0
- autobyteus/tools/mcp/types.py +10 -21
- autobyteus/tools/pdf_downloader.py +2 -1
- autobyteus/tools/registry/tool_definition.py +20 -7
- autobyteus/tools/registry/tool_registry.py +75 -28
- autobyteus/tools/timer.py +2 -0
- autobyteus/tools/tool_category.py +14 -4
- autobyteus/tools/tool_meta.py +6 -1
- autobyteus/tools/tool_origin.py +10 -0
- autobyteus/workflow/agentic_workflow.py +93 -0
- autobyteus/{agent/workflow → workflow}/base_agentic_workflow.py +19 -27
- autobyteus/workflow/bootstrap_steps/__init__.py +20 -0
- autobyteus/workflow/bootstrap_steps/agent_tool_injection_step.py +34 -0
- autobyteus/workflow/bootstrap_steps/base_workflow_bootstrap_step.py +23 -0
- autobyteus/workflow/bootstrap_steps/coordinator_initialization_step.py +41 -0
- autobyteus/workflow/bootstrap_steps/coordinator_prompt_preparation_step.py +108 -0
- autobyteus/workflow/bootstrap_steps/workflow_bootstrapper.py +50 -0
- autobyteus/workflow/bootstrap_steps/workflow_runtime_queue_initialization_step.py +25 -0
- autobyteus/workflow/context/__init__.py +17 -0
- autobyteus/workflow/context/team_manager.py +147 -0
- autobyteus/workflow/context/workflow_config.py +30 -0
- autobyteus/workflow/context/workflow_context.py +61 -0
- autobyteus/workflow/context/workflow_node_config.py +76 -0
- autobyteus/workflow/context/workflow_runtime_state.py +53 -0
- autobyteus/workflow/events/__init__.py +29 -0
- autobyteus/workflow/events/workflow_event_dispatcher.py +39 -0
- autobyteus/workflow/events/workflow_events.py +53 -0
- autobyteus/workflow/events/workflow_input_event_queue_manager.py +21 -0
- autobyteus/workflow/exceptions.py +8 -0
- autobyteus/workflow/factory/__init__.py +9 -0
- autobyteus/workflow/factory/workflow_factory.py +99 -0
- autobyteus/workflow/handlers/__init__.py +19 -0
- autobyteus/workflow/handlers/base_workflow_event_handler.py +16 -0
- autobyteus/workflow/handlers/inter_agent_message_request_event_handler.py +61 -0
- autobyteus/workflow/handlers/lifecycle_workflow_event_handler.py +27 -0
- autobyteus/workflow/handlers/process_user_message_event_handler.py +46 -0
- autobyteus/workflow/handlers/tool_approval_workflow_event_handler.py +39 -0
- autobyteus/workflow/handlers/workflow_event_handler_registry.py +23 -0
- autobyteus/workflow/phases/__init__.py +11 -0
- autobyteus/workflow/phases/workflow_operational_phase.py +19 -0
- autobyteus/workflow/phases/workflow_phase_manager.py +48 -0
- autobyteus/workflow/runtime/__init__.py +13 -0
- autobyteus/workflow/runtime/workflow_runtime.py +82 -0
- autobyteus/workflow/runtime/workflow_worker.py +117 -0
- autobyteus/workflow/shutdown_steps/__init__.py +17 -0
- autobyteus/workflow/shutdown_steps/agent_team_shutdown_step.py +42 -0
- autobyteus/workflow/shutdown_steps/base_workflow_shutdown_step.py +16 -0
- autobyteus/workflow/shutdown_steps/bridge_cleanup_step.py +28 -0
- autobyteus/workflow/shutdown_steps/sub_workflow_shutdown_step.py +41 -0
- autobyteus/workflow/shutdown_steps/workflow_shutdown_orchestrator.py +35 -0
- autobyteus/workflow/streaming/__init__.py +26 -0
- autobyteus/workflow/streaming/agent_event_bridge.py +48 -0
- autobyteus/workflow/streaming/agent_event_multiplexer.py +70 -0
- autobyteus/workflow/streaming/workflow_event_bridge.py +50 -0
- autobyteus/workflow/streaming/workflow_event_notifier.py +83 -0
- autobyteus/workflow/streaming/workflow_event_stream.py +33 -0
- autobyteus/workflow/streaming/workflow_stream_event_payloads.py +28 -0
- autobyteus/workflow/streaming/workflow_stream_events.py +45 -0
- autobyteus/workflow/utils/__init__.py +9 -0
- autobyteus/workflow/utils/wait_for_idle.py +46 -0
- autobyteus/workflow/workflow_builder.py +151 -0
- {autobyteus-1.1.2.dist-info → autobyteus-1.1.4.dist-info}/METADATA +16 -13
- {autobyteus-1.1.2.dist-info → autobyteus-1.1.4.dist-info}/RECORD +156 -75
- {autobyteus-1.1.2.dist-info → autobyteus-1.1.4.dist-info}/top_level.txt +1 -0
- examples/__init__.py +1 -0
- examples/discover_phase_transitions.py +104 -0
- examples/run_browser_agent.py +260 -0
- examples/run_google_slides_agent.py +286 -0
- examples/run_mcp_browser_client.py +174 -0
- examples/run_mcp_google_slides_client.py +270 -0
- examples/run_mcp_list_tools.py +189 -0
- examples/run_poem_writer.py +274 -0
- examples/run_sqlite_agent.py +293 -0
- examples/workflow/__init__.py +1 -0
- examples/workflow/run_basic_research_workflow.py +189 -0
- examples/workflow/run_code_review_workflow.py +269 -0
- examples/workflow/run_debate_workflow.py +212 -0
- examples/workflow/run_workflow_with_tui.py +153 -0
- autobyteus/agent/context/agent_phase_manager.py +0 -264
- autobyteus/agent/context/phases.py +0 -49
- autobyteus/agent/group/__init__.py +0 -0
- autobyteus/agent/group/agent_group.py +0 -164
- autobyteus/agent/group/agent_group_context.py +0 -81
- autobyteus/agent/input_processor/content_prefixing_input_processor.py +0 -41
- autobyteus/agent/input_processor/metadata_appending_input_processor.py +0 -34
- autobyteus/agent/input_processor/passthrough_input_processor.py +0 -33
- autobyteus/agent/workflow/__init__.py +0 -11
- autobyteus/agent/workflow/agentic_workflow.py +0 -89
- autobyteus/tools/mcp/call_handlers/sse_handler.py +0 -22
- autobyteus/tools/mcp/registrar.py +0 -323
- autobyteus/workflow/simple_task.py +0 -98
- autobyteus/workflow/task.py +0 -147
- autobyteus/workflow/workflow.py +0 -49
- {autobyteus-1.1.2.dist-info → autobyteus-1.1.4.dist-info}/WHEEL +0 -0
- {autobyteus-1.1.2.dist-info → autobyteus-1.1.4.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,189 @@
|
|
|
1
|
+
# file: autobyteus/examples/workflow/run_basic_research_workflow.py
|
|
2
|
+
import asyncio
|
|
3
|
+
import logging
|
|
4
|
+
import argparse
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
import sys
|
|
7
|
+
import os
|
|
8
|
+
|
|
9
|
+
# --- Boilerplate to make the script runnable from the project root ---
|
|
10
|
+
SCRIPT_DIR = Path(__file__).resolve().parent.parent
|
|
11
|
+
PACKAGE_ROOT = SCRIPT_DIR.parent
|
|
12
|
+
if str(PACKAGE_ROOT) not in sys.path:
|
|
13
|
+
sys.path.insert(0, str(PACKAGE_ROOT))
|
|
14
|
+
|
|
15
|
+
# Load environment variables from .env file in the project root
|
|
16
|
+
try:
|
|
17
|
+
from dotenv import load_dotenv
|
|
18
|
+
env_file_path = PACKAGE_ROOT / ".env"
|
|
19
|
+
if env_file_path.exists():
|
|
20
|
+
load_dotenv(env_file_path)
|
|
21
|
+
print(f"Loaded environment variables from: {env_file_path}")
|
|
22
|
+
else:
|
|
23
|
+
print(f"Info: No .env file found at: {env_file_path}. Relying on exported environment variables.")
|
|
24
|
+
except ImportError:
|
|
25
|
+
print("Warning: python-dotenv not installed. Cannot load .env file.")
|
|
26
|
+
|
|
27
|
+
# --- Imports for the Workflow Example ---
|
|
28
|
+
try:
|
|
29
|
+
from autobyteus.agent.context import AgentConfig
|
|
30
|
+
from autobyteus.llm.models import LLMModel
|
|
31
|
+
from autobyteus.llm.llm_factory import default_llm_factory, LLMFactory
|
|
32
|
+
from autobyteus.workflow.workflow_builder import WorkflowBuilder
|
|
33
|
+
from autobyteus.cli import workflow_cli
|
|
34
|
+
except ImportError as e:
|
|
35
|
+
print(f"Error importing autobyteus components: {e}", file=sys.stderr)
|
|
36
|
+
print("Please ensure that the autobyteus library is installed and accessible.", file=sys.stderr)
|
|
37
|
+
sys.exit(1)
|
|
38
|
+
|
|
39
|
+
# --- Logging Setup ---
|
|
40
|
+
logger = logging.getLogger("basic_workflow_example")
|
|
41
|
+
|
|
42
|
+
def setup_logging(args: argparse.Namespace):
    """Set up console and file logging for the interactive session.

    Console output (stdout) receives all records at DEBUG or INFO depending
    on ``args.debug``; the ``autobyteus`` library loggers additionally write
    to the file named by ``args.log_file``.
    """
    # Drop any handlers left over from a previous configuration so output
    # is not duplicated if this is called again.
    for existing in (logging.getLogger(), logging.getLogger("autobyteus"), logging.getLogger("autobyteus.cli")):
        if existing.hasHandlers():
            for handler in existing.handlers[:]:
                existing.removeHandler(handler)
                if hasattr(handler, 'close'):
                    handler.close()

    script_log_level = logging.DEBUG if args.debug else logging.INFO

    # Console handler attached to the root logger.
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s'))

    root_logger = logging.getLogger()
    root_logger.addHandler(stream_handler)
    root_logger.setLevel(script_log_level)

    # File handler dedicated to the core library's loggers.
    log_file_path = Path(args.log_file).resolve()
    log_file_path.parent.mkdir(parents=True, exist_ok=True)
    file_handler = logging.FileHandler(log_file_path, mode='w')
    file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(message)s'))
    file_log_level = logging.DEBUG if args.debug else logging.INFO

    autobyteus_logger = logging.getLogger("autobyteus")
    autobyteus_logger.addHandler(file_handler)
    autobyteus_logger.setLevel(file_log_level)
    # propagate=True so library records also reach the console via root.
    autobyteus_logger.propagate = True

    logger.info(f"Core library logs redirected to: {log_file_path} (level: {logging.getLevelName(file_log_level)})")
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
async def main(args: argparse.Namespace):
    """Configure the agents, build the two-agent research workflow, and run it.

    Args:
        args: Parsed CLI arguments (``llm_model``, ``initial_prompt``, ...).

    Exits the process with status 1 if the requested LLM model is unknown.
    """
    logger.info("--- Starting Basic Research Workflow Example ---")

    # 1. Validate the requested model. --help-models advertises that either
    # the enum NAME or its VALUE may be passed with --llm-model, so accept
    # both here (name lookup first, then value lookup) before exiting.
    try:
        _ = LLMModel[args.llm_model]  # lookup by enum name
    except KeyError:
        try:
            _ = LLMModel(args.llm_model)  # fall back to lookup by enum value
        except ValueError:
            logger.error(f"LLM Model '{args.llm_model}' is not valid. Use --help-models to see available models.")
            sys.exit(1)

    logger.info(f"Creating LLM instance for model: {args.llm_model}")
    llm_instance = default_llm_factory.create_llm(model_identifier=args.llm_model)

    # 2. Define the Agent Configurations

    # The Coordinator/Manager Agent
    research_manager_config = AgentConfig(
        name="ResearchManager",
        role="Coordinator",
        description="A manager agent that receives research goals and delegates them to specialists.",
        llm_instance=llm_instance,
        # The prompt is now simpler, as the workflow builder will handle context.
        # The {{tools}} placeholder is essential for tool injection.
        system_prompt=(
            "You are the manager of a research team. Your job is to understand the user's research goal and delegate it to the correct specialist agent on your team. "
            "Do not answer questions yourself; always delegate. "
            "You will be provided a manifest of your team members and available tools.\n\n"
            "{{tools}}"
        ),
    )

    # The Worker/Specialist Agent
    fact_checker_config = AgentConfig(
        name="FactChecker",
        role="Specialist",
        description="An agent with a limited, internal knowledge base for answering direct factual questions.",
        llm_instance=llm_instance,
        system_prompt=(
            "You are a fact-checking bot. You have the following knowledge:\n"
            "- The capital of France is Paris.\n"
            "- The tallest mountain on Earth is Mount Everest.\n"
            "- The primary programming language for AutoByteUs is Python.\n"
            "You MUST ONLY answer questions based on this knowledge. If you are asked something you do not know, you MUST respond with 'I do not have information on that topic.'"
        )
    )

    # 3. Define and Build the Workflow using WorkflowBuilder
    research_workflow = (
        WorkflowBuilder(
            name="BasicResearchWorkflow",
            description="A simple two-agent workflow for delegating and answering research questions."
        )
        .set_coordinator(research_manager_config)
        .add_agent_node(fact_checker_config, dependencies=[])
        .build()
    )

    # 4. Run the Workflow
    logger.info(f"Workflow instance '{research_workflow.name}' created with ID: {research_workflow.workflow_id}")

    try:
        logger.info("Starting interactive workflow session...")
        await workflow_cli.run_workflow(
            workflow=research_workflow,
            initial_prompt=args.initial_prompt
        )
        logger.info("Interactive workflow session finished.")
    except Exception as e:
        logger.error(f"An error occurred during the workflow execution: {e}", exc_info=True)

    logger.info("--- Basic Research Workflow Example Finished ---")
|
|
155
|
+
|
|
156
|
+
if __name__ == "__main__":
    # CLI definition for the example script.
    parser = argparse.ArgumentParser(description="Run a basic two-agent research workflow.")
    parser.add_argument("--llm-model", type=str, default="gpt-4o", help="The LLM model to use for the agents.")
    parser.add_argument("--help-models", action="store_true", help="Display available LLM models and exit.")
    parser.add_argument("--initial-prompt", type=str, help="An optional initial prompt to start the workflow automatically.")
    parser.add_argument("--debug", action="store_true", help="Enable debug logging.")
    parser.add_argument("--log-file", type=str, default="./workflow_logs.txt",
                        help="Path to the log file for autobyteus library logs.")

    # Handle --help-models before normal parsing so it works even when other
    # required-looking arguments are absent.
    if "--help-models" in sys.argv:
        try:
            LLMFactory.ensure_initialized()
            print("Available LLM Models (you can use either name or value with --llm-model):")
            listed = sorted(LLMModel, key=lambda m: m.name)
            if not listed:
                print(" No models found.")
            for m in listed:
                print(f" - Name: {m.name:<35} Value: {m.value}")
        except Exception as e:
            print(f"Error listing models: {e}")
        sys.exit(0)

    parsed_args = parser.parse_args()
    setup_logging(parsed_args)

    try:
        asyncio.run(main(parsed_args))
    except (KeyboardInterrupt, SystemExit):
        logger.info("Script interrupted by user. Exiting.")
    except Exception as e:
        logger.error(f"An unhandled error occurred at the top level: {e}", exc_info=True)
    finally:
        logger.info("Exiting script.")
|
|
@@ -0,0 +1,269 @@
|
|
|
1
|
+
# file: autobyteus/examples/workflow/run_code_review_workflow.py
|
|
2
|
+
"""
|
|
3
|
+
This example script demonstrates a simple software development workflow
|
|
4
|
+
with a coordinator, an engineer, a code reviewer, a test writer, and a tester.
|
|
5
|
+
"""
|
|
6
|
+
import asyncio
|
|
7
|
+
import logging
|
|
8
|
+
import argparse
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
import sys
|
|
11
|
+
import os
|
|
12
|
+
|
|
13
|
+
# --- Boilerplate to make the script runnable from the project root ---
|
|
14
|
+
SCRIPT_DIR = Path(__file__).resolve().parent.parent
|
|
15
|
+
PACKAGE_ROOT = SCRIPT_DIR.parent
|
|
16
|
+
if str(PACKAGE_ROOT) not in sys.path:
|
|
17
|
+
sys.path.insert(0, str(PACKAGE_ROOT))
|
|
18
|
+
|
|
19
|
+
# Load environment variables from .env file
|
|
20
|
+
try:
|
|
21
|
+
from dotenv import load_dotenv
|
|
22
|
+
load_dotenv(PACKAGE_ROOT / ".env")
|
|
23
|
+
except ImportError:
|
|
24
|
+
pass
|
|
25
|
+
|
|
26
|
+
# --- Imports for the Workflow TUI Example ---
|
|
27
|
+
try:
|
|
28
|
+
from autobyteus.agent.context import AgentConfig
|
|
29
|
+
from autobyteus.llm.models import LLMModel
|
|
30
|
+
from autobyteus.llm.llm_factory import default_llm_factory, LLMFactory
|
|
31
|
+
from autobyteus.workflow.workflow_builder import WorkflowBuilder
|
|
32
|
+
from autobyteus.cli.workflow_tui.app import WorkflowApp
|
|
33
|
+
from autobyteus.tools import file_writer, file_reader, bash_executor
|
|
34
|
+
from autobyteus.agent.workspace import BaseAgentWorkspace, WorkspaceConfig
|
|
35
|
+
from autobyteus.tools.parameter_schema import ParameterSchema, ParameterDefinition, ParameterType
|
|
36
|
+
except ImportError as e:
|
|
37
|
+
print(f"Error importing autobyteus components: {e}", file=sys.stderr)
|
|
38
|
+
sys.exit(1)
|
|
39
|
+
|
|
40
|
+
# --- A simple, self-contained workspace for this example ---
|
|
41
|
+
class SimpleLocalWorkspace(BaseAgentWorkspace):
    """Minimal workspace implementation rooted at a local directory.

    The root directory is taken from the ``root_path`` entry of the
    supplied :class:`WorkspaceConfig`.
    """

    def __init__(self, config: WorkspaceConfig):
        """Initialize from *config*; raises ValueError if 'root_path' is missing."""
        super().__init__(config)
        # The workspace root must be provided by the caller.
        self.root_path: str = config.get("root_path")
        if not self.root_path:
            raise ValueError("SimpleLocalWorkspace requires a 'root_path' in its config.")

    def get_base_path(self) -> str:
        """Return the configured workspace root directory."""
        return self.root_path

    @classmethod
    def get_workspace_type_name(cls) -> str:
        """Return the registry identifier for this workspace type."""
        return "simple_local_workspace_for_review"

    @classmethod
    def get_description(cls) -> str:
        """Return a human-readable description of this workspace type."""
        return "A basic workspace for local file access for the code review workflow."

    @classmethod
    def get_config_schema(cls) -> ParameterSchema:
        """Declare the single required configuration parameter: root_path."""
        schema = ParameterSchema()
        root_param = ParameterDefinition(
            name="root_path",
            param_type=ParameterType.STRING,
            description="The absolute local file path for the workspace root.",
            required=True
        )
        schema.add_parameter(root_param)
        return schema
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
# --- Logging Setup ---
|
|
74
|
+
def setup_file_logging() -> Path:
    """Route all logging to a file under PACKAGE_ROOT/logs and return its path.

    Keeps stdout free for the Textual TUI; noisy third-party loggers
    (asyncio, textual) are capped at WARNING.
    """
    log_dir = PACKAGE_ROOT / "logs"
    log_dir.mkdir(exist_ok=True)
    log_file_path = log_dir / "code_review_workflow_tui_app.log"
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
        filename=log_file_path,
        filemode="w",
    )
    for noisy in ("asyncio", "textual"):
        logging.getLogger(noisy).setLevel(logging.WARNING)
    return log_file_path
|
|
82
|
+
|
|
83
|
+
def create_code_review_workflow(
    coordinator_model: str,
    engineer_model: str,
    reviewer_model: str,
    test_writer_model: str,
    tester_model: str,
    workspace: BaseAgentWorkspace,
    use_xml_tool_format: bool = True
):
    """Build a five-agent code-review workflow.

    A ProjectManager coordinator delegates sequentially to a
    SoftwareEngineer (writes code), CodeReviewer (reads and reviews it),
    TestWriter (writes pytest tests), and Tester (runs them). All worker
    agents share *workspace* for file access.

    Args:
        coordinator_model: LLM model identifier for the ProjectManager.
        engineer_model: LLM model identifier for the SoftwareEngineer.
        reviewer_model: LLM model identifier for the CodeReviewer.
        test_writer_model: LLM model identifier for the TestWriter.
        tester_model: LLM model identifier for the Tester.
        workspace: Shared workspace giving the worker agents file access.
        use_xml_tool_format: Whether agents emit tool calls in XML format.

    Returns:
        The built workflow object from WorkflowBuilder.build().
    """

    # --- AGENT CONFIGURATIONS ---

    # Coordinator Agent: only agent with no tools; it orchestrates the others
    # via the {{tools}}-injected delegation mechanism.
    coordinator_config = AgentConfig(
        name="ProjectManager", role="Coordinator", description="Manages the development process, assigning tasks to the team.",
        llm_instance=default_llm_factory.create_llm(model_identifier=coordinator_model),
        system_prompt=(
            "You are the project manager for a software team. Your role is to manage a strict, sequential code development, review, and testing process. Your team consists of a SoftwareEngineer, a CodeReviewer, a TestWriter, and a Tester.\n\n"
            "### Your Workflow\n"
            "You must follow this workflow precisely:\n"
            "1. **Delegate to Engineer:** Receive a request from the user to write code to a specific filename. Instruct the `SoftwareEngineer` to write the code and save it.\n"
            "2. **Delegate to Reviewer:** After the engineer confirms completion, instruct the `CodeReviewer` to review the code. You must provide the filename to the reviewer.\n"
            "3. **Delegate to Test Writer:** After the review is complete, instruct the `TestWriter` to write pytest tests for the code. Provide the original source filename and tell them to save the tests in a new file, like `test_FILENAME.py`.\n"
            "4. **Delegate to Tester:** After the tests are written, instruct the `Tester` to run the tests. You must provide the filename of the test file.\n"
            "5. **Report to User:** Once you receive the test results, present the final status (code written, reviewed, and tests passed/failed) to the user.\n\n"
            "**CRITICAL RULE:** This is a sequential process. You must wait for one agent to finish before contacting the next. You are the central point of communication.\n\n"
            "{{tools}}"
        ),
        use_xml_tool_format=use_xml_tool_format
    )

    # Software Engineer Agent: write access only (file_writer).
    engineer_config = AgentConfig(
        name="SoftwareEngineer", role="Developer", description="Writes Python code based on instructions and saves it to a file.",
        llm_instance=default_llm_factory.create_llm(model_identifier=engineer_model),
        system_prompt=(
            "You are a skilled Python software engineer. You receive tasks from your ProjectManager. "
            "Your job is to write high-quality Python code to fulfill the request. "
            "After writing the code, you MUST save it to the specified filename using the `FileWriter` tool. "
            "Confirm completion once the file is saved.\n\n{{tools}}"
        ),
        tools=[file_writer],
        workspace=workspace,
        use_xml_tool_format=use_xml_tool_format
    )

    # Code Reviewer Agent: read access only (file_reader).
    reviewer_config = AgentConfig(
        name="CodeReviewer", role="Senior Developer", description="Reads and reviews Python code from files for quality and correctness.",
        llm_instance=default_llm_factory.create_llm(model_identifier=reviewer_model),
        system_prompt=(
            "You are a senior software engineer acting as a code reviewer. You will be given a file path to review. "
            "You MUST use the `FileReader` tool to read the code from the file. "
            "After reading the code, provide a constructive review, identifying any potential bugs, style issues, or areas for improvement.\n\n{{tools}}"
        ),
        tools=[file_reader],
        workspace=workspace,
        use_xml_tool_format=use_xml_tool_format
    )

    # Test Writer Agent: needs both read (source) and write (test file) access.
    test_writer_config = AgentConfig(
        name="TestWriter", role="QA Engineer", description="Writes pytest tests for Python code.",
        llm_instance=default_llm_factory.create_llm(model_identifier=test_writer_model),
        system_prompt=(
            "You are a QA engineer specializing in testing. You will be given the path to a Python source file. "
            "Your task is to read that file, write comprehensive tests for it using the `pytest` framework, and save the tests to a new file. "
            "The test filename MUST start with `test_`. For example, if you are testing `code.py`, you should save the tests in `test_code.py`.\n\n{{tools}}"
        ),
        tools=[file_reader, file_writer],
        workspace=workspace,
        use_xml_tool_format=use_xml_tool_format
    )

    # Tester Agent: shell access only, to run pytest.
    tester_config = AgentConfig(
        name="Tester", role="QA Automation", description="Executes pytest tests and reports results.",
        llm_instance=default_llm_factory.create_llm(model_identifier=tester_model),
        system_prompt=(
            "You are a QA automation specialist. Your job is to run tests. You will be given a test file to execute. "
            "You MUST use the `BashExecutor` tool to run the command `pytest` on the given test file. "
            "Report the full output from the command back to the Project Manager.\n\n{{tools}}"
        ),
        tools=[bash_executor],
        workspace=workspace,
        use_xml_tool_format=use_xml_tool_format
    )


    # --- BUILD THE WORKFLOW ---

    code_review_workflow = (
        WorkflowBuilder(name="SoftwareDevWorkflow", description="A workflow for writing, reviewing, and testing code.")
        .set_coordinator(coordinator_config)
        .add_agent_node(engineer_config)
        .add_agent_node(reviewer_config)
        .add_agent_node(test_writer_config)
        .add_agent_node(tester_config)
        .build()
    )

    return code_review_workflow
|
|
186
|
+
|
|
187
|
+
async def main(args: argparse.Namespace, log_file: Path):
    """Build the code-review workflow and drive it through the Textual TUI.

    Args:
        args: Parsed CLI arguments (output dir, per-agent model overrides, ...).
        log_file: Path where file logging was configured, echoed to the user.
    """
    print("Setting up software development workflow...")
    print(f"--> Logs will be written to: {log_file.resolve()}")

    # Shared workspace directory for all agents.
    workspace_path = Path(args.output_dir).resolve()
    workspace_path.mkdir(parents=True, exist_ok=True)
    print(f"--> Agent workspace (output directory) is set to: {workspace_path}")

    workspace = SimpleLocalWorkspace(
        config=WorkspaceConfig(params={"root_path": str(workspace_path)})
    )

    # Each agent may override the default --llm-model individually.
    default_model = args.llm_model
    coordinator_model = args.coordinator_model or default_model
    engineer_model = args.engineer_model or default_model
    reviewer_model = args.reviewer_model or default_model
    test_writer_model = args.test_writer_model or default_model
    tester_model = args.tester_model or default_model

    print(f"--> Coordinator Model: {coordinator_model}")
    print(f"--> Engineer Model: {engineer_model}")
    print(f"--> Reviewer Model: {reviewer_model}")
    print(f"--> Test Writer Model: {test_writer_model}")
    print(f"--> Tester Model: {tester_model}")

    use_xml_tool_format = not args.no_xml_tools
    print(f"--> Using XML Tool Format: {use_xml_tool_format}")

    try:
        workflow = create_code_review_workflow(
            coordinator_model=coordinator_model,
            engineer_model=engineer_model,
            reviewer_model=reviewer_model,
            test_writer_model=test_writer_model,
            tester_model=tester_model,
            workspace=workspace,
            use_xml_tool_format=use_xml_tool_format
        )
        # Hand control to the TUI until the user quits.
        app = WorkflowApp(workflow=workflow)
        await app.run_async()
    except Exception as e:
        logging.critical(f"Failed to create or run workflow TUI: {e}", exc_info=True)
        print(f"\nCRITICAL ERROR: {e}\nCheck log file for details: {log_file.resolve()}")
|
|
230
|
+
|
|
231
|
+
if __name__ == "__main__":
    # CLI definition: a global default model plus per-agent overrides.
    parser = argparse.ArgumentParser(
        description="Run a software development workflow with a Textual TUI.",
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument("--llm-model", type=str, default="kimi-latest", help="The default LLM model for all agents.")
    parser.add_argument("--coordinator-model", type=str, help="Specific LLM model for the ProjectManager. Defaults to --llm-model.")
    parser.add_argument("--engineer-model", type=str, help="Specific LLM model for the SoftwareEngineer. Defaults to --llm-model.")
    parser.add_argument("--reviewer-model", type=str, help="Specific LLM model for the CodeReviewer. Defaults to --llm-model.")
    parser.add_argument("--test-writer-model", type=str, help="Specific LLM model for the TestWriter. Defaults to --llm-model.")
    parser.add_argument("--tester-model", type=str, help="Specific LLM model for the Tester. Defaults to --llm-model.")
    parser.add_argument("--output-dir", type=str, default="./code_review_output", help="Directory for the shared workspace.")
    parser.add_argument("--no-xml-tools", action="store_true", help="Disable XML-based tool formatting.")
    parser.add_argument("--help-models", action="store_true", help="Display available LLM models and exit.")

    # Handle --help-models before normal parsing so it never conflicts with
    # other arguments.
    if "--help-models" in sys.argv:
        try:
            LLMFactory.ensure_initialized()
            print("Available LLM Models (you can use either name or value with model arguments):")
            listed = sorted(LLMModel, key=lambda m: m.name)
            if not listed:
                print(" No models found.")
            for m in listed:
                print(f" - Name: {m.name:<35} Value: {m.value}")
        except Exception as e:
            print(f"Error listing models: {e}")
        sys.exit(0)

    parsed_args = parser.parse_args()

    log_file_path = setup_file_logging()
    try:
        asyncio.run(main(parsed_args, log_file_path))
    except KeyboardInterrupt:
        print("\nExiting application.")
    except Exception as e:
        logging.critical(f"Top-level application error: {e}", exc_info=True)
        print(f"\nUNHANDLED ERROR: {e}\nCheck log file for details: {log_file_path.resolve()}")
|
|
269
|
+
|
|
@@ -0,0 +1,212 @@
|
|
|
1
|
+
# file: autobyteus/examples/workflow/run_debate_workflow.py
|
|
2
|
+
"""
|
|
3
|
+
This example script demonstrates a hierarchical workflow.
|
|
4
|
+
A parent workflow (The Debate) manages two sub-workflows (Debating Teams).
|
|
5
|
+
"""
|
|
6
|
+
import asyncio
|
|
7
|
+
import logging
|
|
8
|
+
import argparse
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
import sys
|
|
11
|
+
import os
|
|
12
|
+
|
|
13
|
+
# --- Boilerplate to make the script runnable from the project root ---
|
|
14
|
+
SCRIPT_DIR = Path(__file__).resolve().parent.parent
|
|
15
|
+
PACKAGE_ROOT = SCRIPT_DIR.parent
|
|
16
|
+
if str(PACKAGE_ROOT) not in sys.path:
|
|
17
|
+
sys.path.insert(0, str(PACKAGE_ROOT))
|
|
18
|
+
|
|
19
|
+
# Load environment variables from .env file
|
|
20
|
+
try:
|
|
21
|
+
from dotenv import load_dotenv
|
|
22
|
+
load_dotenv(PACKAGE_ROOT / ".env")
|
|
23
|
+
except ImportError:
|
|
24
|
+
pass
|
|
25
|
+
|
|
26
|
+
# --- Imports for the Workflow TUI Example ---
|
|
27
|
+
try:
|
|
28
|
+
from autobyteus.agent.context import AgentConfig
|
|
29
|
+
from autobyteus.llm.models import LLMModel
|
|
30
|
+
from autobyteus.llm.llm_factory import default_llm_factory, LLMFactory
|
|
31
|
+
from autobyteus.workflow.workflow_builder import WorkflowBuilder
|
|
32
|
+
from autobyteus.cli.workflow_tui.app import WorkflowApp
|
|
33
|
+
from autobyteus.workflow.context.workflow_config import WorkflowConfig
|
|
34
|
+
except ImportError as e:
|
|
35
|
+
print(f"Error importing autobyteus components: {e}", file=sys.stderr)
|
|
36
|
+
sys.exit(1)
|
|
37
|
+
|
|
38
|
+
# --- Logging Setup ---
|
|
39
|
+
def setup_file_logging() -> Path:
    """Route all logging to a file so the TUI's screen stays clean; return the log path."""
    log_dir = PACKAGE_ROOT / "logs"
    log_dir.mkdir(exist_ok=True)
    target = log_dir / "debate_workflow_tui_app.log"
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
        filename=target,
        filemode="w",
    )
    # Quieten chatty framework loggers so the log stays readable.
    for noisy in ("asyncio", "textual"):
        logging.getLogger(noisy).setLevel(logging.WARNING)
    return target
|
|
47
|
+
|
|
48
|
+
def create_debate_workflow(moderator_model: str, affirmative_model: str, negative_model: str, use_xml_tool_format: bool = True):
    """Create a hierarchical debate workflow for the TUI demonstration.

    A parent workflow ("Grand_Debate") is coordinated by a moderator agent and
    contains two sub-workflows: one team arguing FOR the motion and one AGAINST.

    Args:
        moderator_model: LLM model identifier for the DebateModerator.
        affirmative_model: LLM model identifier for the Affirmative team agents.
        negative_model: LLM model identifier for the Negative team agents.
        use_xml_tool_format: Whether agents format tool calls as XML.

    Returns:
        The built parent workflow object (as produced by WorkflowBuilder.build()).

    Exits the process via sys.exit(1) if any model identifier is invalid.
    """
    # Validate models up front so a bad identifier fails fast with a clear message.
    def _validate_model(model_name: str) -> None:
        # --help-models tells the user "you can use either name or value with
        # model arguments", so accept both here: enum-name lookup first, then
        # value lookup (e.g. "kimi-latest", this script's own default).
        try:
            _ = LLMModel[model_name]
            return
        except KeyError:
            pass
        try:
            # NOTE(review): assumes LLMModel supports Enum-style lookup by value,
            # consistent with the name/value pairs printed by --help-models.
            _ = LLMModel(model_name)
            return
        except (KeyError, ValueError):
            pass
        logging.critical(f"LLM Model '{model_name}' is not valid. Use --help-models to see available models.")
        print(f"\nCRITICAL ERROR: LLM Model '{model_name}' is not valid. Use --help-models to see available models.\nCheck log file for details.")
        sys.exit(1)

    for model in [moderator_model, affirmative_model, negative_model]:
        _validate_model(model)

    logging.info(f"Using models -> Moderator: {moderator_model}, Affirmative: {affirmative_model}, Negative: {negative_model}")
    logging.info(f"Using XML tool format: {use_xml_tool_format}")

    # --- AGENT CONFIGURATIONS ---

    # Parent-level agent: moderates the debate and delegates to the two teams.
    moderator_config = AgentConfig(
        name="DebateModerator", role="Coordinator", description="Manages the debate, gives turns, and summarizes.",
        llm_instance=default_llm_factory.create_llm(model_identifier=moderator_model),
        system_prompt=(
            "You are the impartial moderator of a debate between two teams. Your goal is to facilitate a structured, turn-by-turn debate on a user's topic.\n"
            "Your team consists of Team_Affirmative and Team_Negative. You will delegate tasks to them using their unique names.\n"
            "Responsibilities: 1. Announce the topic. 2. Ask Team_Affirmative for an opening statement. 3. Ask Team_Negative for a rebuttal. "
            "4. Facilitate a structured flow of arguments. 5. Conclude the debate.\n"
            "CRITICAL RULE: You must enforce a strict turn-based system. Only communicate with ONE team at a time using the `SendMessageTo` tool. After sending a message, you must wait for a response before messaging the other team.\n"
            "Do not debate yourself. Your role is to moderate.\n\n{{tools}}"
        ),
        use_xml_tool_format=use_xml_tool_format
    )

    # Team Affirmative agents: a lead (sub-workflow coordinator) plus one debater.
    lead_affirmative_config = AgentConfig(
        name="Lead_Affirmative", role="Coordinator", description="Leads the team arguing FOR the motion.",
        llm_instance=default_llm_factory.create_llm(model_identifier=affirmative_model),
        system_prompt=(
            "You are the lead of the Affirmative team. You receive high-level instructions from the DebateModerator (e.g., 'prepare opening statement').\n"
            "Your job is to delegate this task to your team member, the Proponent, by giving them a specific instruction.\n\n{{tools}}"
        ),
        use_xml_tool_format=use_xml_tool_format
    )
    proponent_config = AgentConfig(
        name="Proponent", role="Debater", description="Argues in favor of the debate topic.",
        llm_instance=default_llm_factory.create_llm(model_identifier=affirmative_model),
        system_prompt="You are a Proponent. You will receive instructions from your team lead. Your role is to argue STRONGLY and PERSUASIVELY IN FAVOR of the motion.",
        use_xml_tool_format=use_xml_tool_format
    )

    # Team Negative agents: mirror structure of the Affirmative team.
    lead_negative_config = AgentConfig(
        name="Lead_Negative", role="Coordinator", description="Leads the team arguing AGAINST the motion.",
        llm_instance=default_llm_factory.create_llm(model_identifier=negative_model),
        system_prompt=(
            "You are the lead of the Negative team. You receive high-level instructions from the DebateModerator (e.g., 'prepare your rebuttal').\n"
            "Your job is to delegate this task to your team member, the Opponent, by giving them a specific instruction.\n\n{{tools}}"
        ),
        use_xml_tool_format=use_xml_tool_format
    )
    opponent_config = AgentConfig(
        name="Opponent", role="Debater", description="Argues against the debate topic.",
        llm_instance=default_llm_factory.create_llm(model_identifier=negative_model),
        system_prompt="You are an Opponent. You will receive instructions from your team lead. Your role is to argue STRONGLY and PERSUASIVELY AGAINST the motion.",
        use_xml_tool_format=use_xml_tool_format
    )

    # --- BUILD SUB-WORKFLOWS ---
    # NOTE(review): reaching through `_runtime.context.config` accesses a private
    # attribute of the built workflow to recover its WorkflowConfig; a public
    # accessor on the builder/workflow would be preferable if one exists.

    # Build Team Affirmative
    team_affirmative_workflow: WorkflowConfig = (
        WorkflowBuilder(name="Team_Affirmative", description="A two-agent team that argues in favor of a proposition.", role="Argues FOR the motion")
        .set_coordinator(lead_affirmative_config)
        .add_agent_node(proponent_config)
        .build()._runtime.context.config  # Build to get the config object
    )

    # Build Team Negative
    team_negative_workflow: WorkflowConfig = (
        WorkflowBuilder(name="Team_Negative", description="A two-agent team that argues against a proposition.", role="Argues AGAINST the motion")
        .set_coordinator(lead_negative_config)
        .add_agent_node(opponent_config)
        .build()._runtime.context.config  # Build to get the config object
    )

    # --- BUILD PARENT WORKFLOW ---
    # The moderator coordinates the two team sub-workflows as nodes.
    debate_workflow = (
        WorkflowBuilder(name="Grand_Debate", description="A hierarchical workflow for a moderated debate between two teams.")
        .set_coordinator(moderator_config)
        .add_workflow_node(team_affirmative_workflow)
        .add_workflow_node(team_negative_workflow)
        .build()
    )

    return debate_workflow
|
|
145
|
+
|
|
146
|
+
async def main(args: argparse.Namespace, log_file: Path):
    """Resolve per-role models, build the debate workflow, and drive the TUI app."""
    print("Setting up hierarchical debate workflow...")
    print(f"--> Logs will be written to: {log_file.resolve()}")

    # Per-role overrides fall back to the global --llm-model default.
    fallback = args.llm_model
    resolved_moderator = args.moderator_model or fallback
    resolved_affirmative = args.affirmative_model or fallback
    resolved_negative = args.negative_model or fallback
    print(f"--> Moderator Model: {resolved_moderator}")
    print(f"--> Affirmative Team Model: {resolved_affirmative}")
    print(f"--> Negative Team Model: {resolved_negative}")

    # XML tool formatting is on by default; --no-xml-tools disables it.
    xml_tools_enabled = not args.no_xml_tools
    print(f"--> Using XML Tool Format: {xml_tools_enabled}")

    try:
        debate = create_debate_workflow(
            moderator_model=resolved_moderator,
            affirmative_model=resolved_affirmative,
            negative_model=resolved_negative,
            use_xml_tool_format=xml_tools_enabled,
        )
        tui = WorkflowApp(workflow=debate)
        await tui.run_async()
    except Exception as e:
        logging.critical(f"Failed to create or run debate workflow TUI: {e}", exc_info=True)
        print(f"\nCRITICAL ERROR: {e}\nCheck log file for details: {log_file.resolve()}")
|
|
175
|
+
|
|
176
|
+
if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser(
        description="Run a hierarchical 2-team debate workflow with a Textual TUI.",
        formatter_class=argparse.RawTextHelpFormatter,
    )
    arg_parser.add_argument("--llm-model", type=str, default="kimi-latest", help="The default LLM model for all agents. Can be overridden by other arguments.")
    arg_parser.add_argument("--moderator-model", type=str, help="Specific LLM model for the Moderator. Defaults to --llm-model.")
    arg_parser.add_argument("--affirmative-model", type=str, help="Specific LLM model for the Affirmative Team. Defaults to --llm-model.")
    arg_parser.add_argument("--negative-model", type=str, help="Specific LLM model for the Negative Team. Defaults to --llm-model.")
    arg_parser.add_argument("--no-xml-tools", action="store_true", help="Disable XML-based tool formatting. Recommended for models that struggle with XML.")
    arg_parser.add_argument("--help-models", action="store_true", help="Display available LLM models and exit.")

    # Handle --help-models before parse_args() so the listing works regardless
    # of what other arguments are present on the command line.
    if "--help-models" in sys.argv:
        try:
            LLMFactory.ensure_initialized()
            print("Available LLM Models (you can use either name or value with model arguments):")
            catalogue = sorted(LLMModel, key=lambda m: m.name)
            if not catalogue:
                print(" No models found.")
            for entry in catalogue:
                print(f" - Name: {entry.name:<35} Value: {entry.value}")
        except Exception as e:
            print(f"Error listing models: {e}")
        sys.exit(0)

    cli_args = arg_parser.parse_args()

    log_file_path = setup_file_logging()
    try:
        asyncio.run(main(cli_args, log_file_path))
    except KeyboardInterrupt:
        print("\nExiting application.")
    except Exception as e:
        # This catches errors during asyncio.run, which might not be logged otherwise
        logging.critical(f"Top-level application error: {e}", exc_info=True)
        print(f"\nUNHANDLED ERROR: {e}\nCheck log file for details: {log_file_path.resolve()}")