open-swarm 0.1.1744936125__py3-none-any.whl → 0.1.1744936173__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {open_swarm-0.1.1744936125.dist-info → open_swarm-0.1.1744936173.dist-info}/METADATA +1 -1
- {open_swarm-0.1.1744936125.dist-info → open_swarm-0.1.1744936173.dist-info}/RECORD +5 -8
- swarm/blueprints/burnt_noodles/blueprint_burnt_noodles.py +0 -304
- swarm/blueprints/dilbot_universe/blueprint_dilbot_universe.py +0 -285
- swarm/blueprints/gotchaman/blueprint_gotchaman.py +0 -232
- {open_swarm-0.1.1744936125.dist-info → open_swarm-0.1.1744936173.dist-info}/WHEEL +0 -0
- {open_swarm-0.1.1744936125.dist-info → open_swarm-0.1.1744936173.dist-info}/entry_points.txt +0 -0
- {open_swarm-0.1.1744936125.dist-info → open_swarm-0.1.1744936173.dist-info}/licenses/LICENSE +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: open-swarm
|
3
|
-
Version: 0.1.
|
3
|
+
Version: 0.1.1744936173
|
4
4
|
Summary: Open Swarm: Orchestrating AI Agent Swarms with Django
|
5
5
|
Project-URL: Homepage, https://github.com/yourusername/open-swarm
|
6
6
|
Project-URL: Documentation, https://github.com/yourusername/open-swarm/blob/main/README.md
|
@@ -14,11 +14,9 @@ swarm/util.py,sha256=G4x2hXopHhB7IdGCkUXGoykYWyiICnjxg7wcr-WqL8I,4644
|
|
14
14
|
swarm/wsgi.py,sha256=REM_u4HpMCkO0ddrOUXgtY-ITL-VTbRB1-WHvFJAtAU,408
|
15
15
|
swarm/agent/__init__.py,sha256=YESGu_UXEBxrlQwghodUMN0vmXZDwWMU7DclCUvoklA,104
|
16
16
|
swarm/blueprints/README.md,sha256=tsngbSB9N0tILcz_m1OGAjyKZQYlGTN-i5e5asq1GbE,8478
|
17
|
-
swarm/blueprints/burnt_noodles/blueprint_burnt_noodles.py,sha256=vopDlBjVUNeSq6WItdkmtWJfObgbtL6wNAy2njsskkY,19607
|
18
17
|
swarm/blueprints/chatbot/blueprint_chatbot.py,sha256=a5-gIyDvRtNgbyfviD9Hua9r5NjOQh1lOafIG2a6kiI,7520
|
19
18
|
swarm/blueprints/chatbot/templates/chatbot/chatbot.html,sha256=REFnqNg0EHsXxAUfaCJe1YgOKiV_umBXuC6y8veF5CU,1568
|
20
19
|
swarm/blueprints/digitalbutlers/blueprint_digitalbutlers.py,sha256=JK_rmZgPMw4PdQFrMverrwgcjH0NRkuqkchYOJwXYuM,9809
|
21
|
-
swarm/blueprints/dilbot_universe/blueprint_dilbot_universe.py,sha256=w7i96KkRNmBOV2Kz9SJqQCirsRPEgENR-79iLOzpKaQ,16770
|
22
20
|
swarm/blueprints/divine_code/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
23
21
|
swarm/blueprints/divine_code/apps.py,sha256=k615JHdfOuo_GwfVbC7ah8X9OblkAL2XWm9aLBjmMyY,306
|
24
22
|
swarm/blueprints/divine_code/blueprint_divine_code.py,sha256=YO-YE5ORvx4R_1VD2i60cPc21ZX6c8sXwOGTqmDk2Xw,13516
|
@@ -37,7 +35,6 @@ swarm/blueprints/family_ties/urls.py,sha256=awRZHb1gb1p3I6YZzfKMGSydd6kYPTLgax2j
|
|
37
35
|
swarm/blueprints/family_ties/views.py,sha256=FbPkDNlFEixtRFbSpkr51IyJ28FRkXa1W5xyO_KeXH0,1081
|
38
36
|
swarm/blueprints/flock/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
39
37
|
swarm/blueprints/gaggle/blueprint_gaggle.py,sha256=27KqO5M3x6vBQWN3AOWK6VJjuypygk05B1QeFWwZVEQ,10161
|
40
|
-
swarm/blueprints/gotchaman/blueprint_gotchaman.py,sha256=bElwOpesPINuLJB_H8-1UoYiPvhnF_lFFjxUFHz0T5M,12326
|
41
38
|
swarm/blueprints/mcp_demo/blueprint_mcp_demo.py,sha256=eUu5_BvLwVSdWiEonXWKuN7YgKsqz04JB_KbMPowryc,6599
|
42
39
|
swarm/blueprints/messenger/templates/messenger/messenger.html,sha256=izuFtFn40Gm7M4gSUAUT5CIezjBjmNv2w4_fwSlv7VA,2323
|
43
40
|
swarm/blueprints/mission_improbable/blueprint_mission_improbable.py,sha256=N4Tw0laErP4eCJM103XOaVrqbFNKZRUG1Bpze8g79MI,12753
|
@@ -255,8 +252,8 @@ swarm/views/message_views.py,sha256=sDUnXyqKXC8WwIIMAlWf00s2_a2T9c75Na5FvYMJwBM,
|
|
255
252
|
swarm/views/model_views.py,sha256=aAbU4AZmrOTaPeKMWtoKK7FPYHdaN3Zbx55JfKzYTRY,2937
|
256
253
|
swarm/views/utils.py,sha256=geX3Z5ZDKFYyXYBMilc-4qgOSjhujK3AfRtvbXgFpXk,3643
|
257
254
|
swarm/views/web_views.py,sha256=ExQQeJpZ8CkLZQC_pXKOOmdnEy2qR3wEBP4LLp27DPU,7404
|
258
|
-
open_swarm-0.1.
|
259
|
-
open_swarm-0.1.
|
260
|
-
open_swarm-0.1.
|
261
|
-
open_swarm-0.1.
|
262
|
-
open_swarm-0.1.
|
255
|
+
open_swarm-0.1.1744936173.dist-info/METADATA,sha256=JSBqdb42N-rb3DZi99pEqnf6XqutDPsryC3TilVsJbA,13678
|
256
|
+
open_swarm-0.1.1744936173.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
257
|
+
open_swarm-0.1.1744936173.dist-info/entry_points.txt,sha256=z1UIVRRhri-V-hWxFkDEYu0SZPUIsVO4KpDaodgcFzU,125
|
258
|
+
open_swarm-0.1.1744936173.dist-info/licenses/LICENSE,sha256=BU9bwRlnOt_JDIb6OT55Q4leLZx9RArDLTFnlDIrBEI,1062
|
259
|
+
open_swarm-0.1.1744936173.dist-info/RECORD,,
|
@@ -1,304 +0,0 @@
|
|
1
|
-
import logging
|
2
|
-
import os
|
3
|
-
import sys
|
4
|
-
import asyncio
|
5
|
-
import subprocess
|
6
|
-
import shlex # Added for safe command splitting
|
7
|
-
import re
|
8
|
-
import inspect
|
9
|
-
from pathlib import Path # Use pathlib for better path handling
|
10
|
-
from typing import Dict, Any, List, Optional, ClassVar, AsyncGenerator
|
11
|
-
|
12
|
-
try:
|
13
|
-
# Core imports from openai-agents
|
14
|
-
from agents import Agent, Tool, function_tool, Runner
|
15
|
-
from agents.mcp import MCPServer
|
16
|
-
from agents.models.interface import Model
|
17
|
-
from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
|
18
|
-
from openai import AsyncOpenAI
|
19
|
-
|
20
|
-
# Import our custom base class
|
21
|
-
from swarm.extensions.blueprint.blueprint_base import BlueprintBase
|
22
|
-
except ImportError as e:
|
23
|
-
# Provide more helpful error message
|
24
|
-
print(f"ERROR: Import failed in BurntNoodlesBlueprint: {e}. Check 'openai-agents' install and project structure.")
|
25
|
-
print(f"Attempted import from directory: {os.path.dirname(__file__)}")
|
26
|
-
print(f"sys.path: {sys.path}")
|
27
|
-
sys.exit(1)
|
28
|
-
|
29
|
-
# Configure logging for this blueprint module
|
30
|
-
logger = logging.getLogger(__name__)
|
31
|
-
# Logging level is controlled by BlueprintBase based on --debug flag
|
32
|
-
|
33
|
-
# --- Tool Logic Definitions (Undecorated) ---
|
34
|
-
def _git_status_logic() -> str:
|
35
|
-
"""Executes 'git status --porcelain' and returns the current repository status."""
|
36
|
-
logger.info("Executing git status --porcelain")
|
37
|
-
try:
|
38
|
-
result = subprocess.run(["git", "status", "--porcelain"], capture_output=True, text=True, check=True, timeout=30)
|
39
|
-
output = result.stdout.strip()
|
40
|
-
logger.debug(f"Git status raw output:\n{output}")
|
41
|
-
return f"OK: Git Status:\n{output}" if output else "OK: No changes detected in the working directory."
|
42
|
-
except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
|
43
|
-
except subprocess.CalledProcessError as e: logger.error(f"Error executing git status: {e.stderr}"); return f"Error executing git status: {e.stderr}"
|
44
|
-
except subprocess.TimeoutExpired: logger.error("Git status command timed out."); return "Error: Git status command timed out."
|
45
|
-
except Exception as e: logger.error(f"Unexpected error during git status: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git status: {e}"
|
46
|
-
|
47
|
-
def _git_diff_logic() -> str:
|
48
|
-
"""Executes 'git diff' and returns the differences in the working directory."""
|
49
|
-
logger.info("Executing git diff")
|
50
|
-
try:
|
51
|
-
result = subprocess.run(["git", "diff"], capture_output=True, text=True, check=False, timeout=30)
|
52
|
-
output = result.stdout; stderr = result.stderr.strip()
|
53
|
-
if result.returncode != 0 and stderr: logger.error(f"Error executing git diff (Exit Code {result.returncode}): {stderr}"); return f"Error executing git diff: {stderr}"
|
54
|
-
logger.debug(f"Git diff raw output (Exit Code {result.returncode}):\n{output[:1000]}...")
|
55
|
-
return f"OK: Git Diff Output:\n{output}" if output else "OK: No differences found."
|
56
|
-
except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
|
57
|
-
except subprocess.TimeoutExpired: logger.error("Git diff command timed out."); return "Error: Git diff command timed out."
|
58
|
-
except Exception as e: logger.error(f"Unexpected error during git diff: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git diff: {e}"
|
59
|
-
|
60
|
-
def _git_add_logic(file_path: str = ".") -> str:
|
61
|
-
"""Executes 'git add' to stage changes for the specified file or all changes (default '.')."""
|
62
|
-
logger.info(f"Executing git add {file_path}")
|
63
|
-
try:
|
64
|
-
result = subprocess.run(["git", "add", file_path], capture_output=True, text=True, check=True, timeout=30)
|
65
|
-
logger.debug(f"Git add '{file_path}' completed successfully.")
|
66
|
-
return f"OK: Staged '{file_path}' successfully."
|
67
|
-
except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
|
68
|
-
except subprocess.CalledProcessError as e: logger.error(f"Error executing git add '{file_path}': {e.stderr}"); return f"Error executing git add '{file_path}': {e.stderr}"
|
69
|
-
except subprocess.TimeoutExpired: logger.error(f"Git add command timed out for '{file_path}'."); return f"Error: Git add command timed out for '{file_path}'."
|
70
|
-
except Exception as e: logger.error(f"Unexpected error during git add '{file_path}': {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git add '{file_path}': {e}"
|
71
|
-
|
72
|
-
def _git_commit_logic(message: str) -> str:
|
73
|
-
"""Executes 'git commit' with a provided commit message."""
|
74
|
-
logger.info(f"Executing git commit -m '{message[:50]}...'")
|
75
|
-
if not message or not message.strip(): logger.warning("Git commit attempted with empty message."); return "Error: Commit message cannot be empty."
|
76
|
-
try:
|
77
|
-
result = subprocess.run(["git", "commit", "-m", message], capture_output=True, text=True, check=False, timeout=30)
|
78
|
-
output = result.stdout.strip(); stderr = result.stderr.strip()
|
79
|
-
logger.debug(f"Git commit raw output (Exit Code {result.returncode}):\nSTDOUT: {output}\nSTDERR: {stderr}")
|
80
|
-
if "nothing to commit" in output or "nothing added to commit" in output or "no changes added to commit" in output:
|
81
|
-
logger.info("Git commit reported: Nothing to commit."); return "OK: Nothing to commit."
|
82
|
-
if result.returncode == 0: return f"OK: Committed with message '{message}'.\n{output}"
|
83
|
-
else: error_detail = stderr if stderr else output; logger.error(f"Error executing git commit (Exit Code {result.returncode}): {error_detail}"); return f"Error executing git commit: {error_detail}"
|
84
|
-
except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
|
85
|
-
except subprocess.TimeoutExpired: logger.error("Git commit command timed out."); return "Error: Git commit command timed out."
|
86
|
-
except Exception as e: logger.error(f"Unexpected error during git commit: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git commit: {e}"
|
87
|
-
|
88
|
-
def _git_push_logic() -> str:
|
89
|
-
"""Executes 'git push' to push staged commits to the remote repository."""
|
90
|
-
logger.info("Executing git push")
|
91
|
-
try:
|
92
|
-
result = subprocess.run(["git", "push"], capture_output=True, text=True, check=True, timeout=120)
|
93
|
-
output = result.stdout.strip() + "\n" + result.stderr.strip()
|
94
|
-
logger.debug(f"Git push raw output:\n{output}")
|
95
|
-
return f"OK: Push completed.\n{output.strip()}"
|
96
|
-
except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
|
97
|
-
except subprocess.CalledProcessError as e: error_output = e.stdout.strip() + "\n" + e.stderr.strip(); logger.error(f"Error executing git push: {error_output}"); return f"Error executing git push: {error_output.strip()}"
|
98
|
-
except subprocess.TimeoutExpired: logger.error("Git push command timed out."); return "Error: Git push command timed out."
|
99
|
-
except Exception as e: logger.error(f"Unexpected error during git push: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git push: {e}"
|
100
|
-
|
101
|
-
def _run_npm_test_logic(args: str = "") -> str:
|
102
|
-
"""Executes 'npm run test' with optional arguments."""
|
103
|
-
try:
|
104
|
-
cmd_list = ["npm", "run", "test"] + (shlex.split(args) if args else []); cmd_str = ' '.join(cmd_list)
|
105
|
-
logger.info(f"Executing npm test: {cmd_str}")
|
106
|
-
result = subprocess.run(cmd_list, capture_output=True, text=True, check=False, timeout=120)
|
107
|
-
output = f"Exit Code: {result.returncode}\nSTDOUT:\n{result.stdout.strip()}\nSTDERR:\n{result.stderr.strip()}"
|
108
|
-
if result.returncode == 0: logger.debug(f"npm test completed successfully:\n{output}"); return f"OK: npm test finished.\n{output}"
|
109
|
-
else: logger.error(f"npm test failed (Exit Code {result.returncode}):\n{output}"); return f"Error: npm test failed.\n{output}"
|
110
|
-
except FileNotFoundError: logger.error("npm command not found."); return "Error: npm command not found."
|
111
|
-
except subprocess.TimeoutExpired: logger.error("npm test command timed out."); return "Error: npm test command timed out."
|
112
|
-
except Exception as e: logger.error(f"Unexpected error during npm test: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during npm test: {e}"
|
113
|
-
|
114
|
-
def _run_pytest_logic(args: str = "") -> str:
|
115
|
-
"""Executes 'uv run pytest' with optional arguments."""
|
116
|
-
try:
|
117
|
-
cmd_list = ["uv", "run", "pytest"] + (shlex.split(args) if args else []); cmd_str = ' '.join(cmd_list)
|
118
|
-
logger.info(f"Executing pytest via uv: {cmd_str}")
|
119
|
-
result = subprocess.run(cmd_list, capture_output=True, text=True, check=False, timeout=120)
|
120
|
-
output = f"Exit Code: {result.returncode}\nSTDOUT:\n{result.stdout.strip()}\nSTDERR:\n{result.stderr.strip()}"
|
121
|
-
if result.returncode == 0: logger.debug(f"pytest completed successfully:\n{output}"); return f"OK: pytest finished successfully.\n{output}"
|
122
|
-
else: logger.warning(f"pytest finished with failures (Exit Code {result.returncode}):\n{output}"); return f"OK: Pytest finished with failures (Exit Code {result.returncode}).\n{output}"
|
123
|
-
except FileNotFoundError: logger.error("uv command not found."); return "Error: uv command not found."
|
124
|
-
except subprocess.TimeoutExpired: logger.error("pytest command timed out."); return "Error: pytest command timed out."
|
125
|
-
except Exception as e: logger.error(f"Unexpected error during pytest: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during pytest: {e}"
|
126
|
-
|
127
|
-
# --- Tool Definitions (Decorated - reverted to default naming) ---
|
128
|
-
git_status = function_tool(_git_status_logic)
|
129
|
-
git_diff = function_tool(_git_diff_logic)
|
130
|
-
git_add = function_tool(_git_add_logic)
|
131
|
-
git_commit = function_tool(_git_commit_logic)
|
132
|
-
git_push = function_tool(_git_push_logic)
|
133
|
-
run_npm_test = function_tool(_run_npm_test_logic)
|
134
|
-
run_pytest = function_tool(_run_pytest_logic)
|
135
|
-
|
136
|
-
# --- Agent Instructions ---
|
137
|
-
# (Instructions remain the same)
|
138
|
-
michael_instructions = """
|
139
|
-
You are Michael Toasted, the resolute leader of the Burnt Noodles creative team.
|
140
|
-
Your primary role is to understand the user's request, break it down into actionable steps,
|
141
|
-
and delegate tasks appropriately to your team members: Fiona Flame (Git operations) and Sam Ashes (Testing).
|
142
|
-
You should only execute simple Git status checks (`git_status`, `git_diff`) yourself. Delegate all other Git actions (add, commit, push) to Fiona. Delegate all testing actions (npm test, pytest) to Sam.
|
143
|
-
Synthesize the results from your team and provide the final response to the user.
|
144
|
-
Available Function Tools (for you): git_status, git_diff.
|
145
|
-
Available Agent Tools (for delegation): Fiona_Flame, Sam_Ashes.
|
146
|
-
"""
|
147
|
-
fiona_instructions = """
|
148
|
-
You are Fiona Flame, the git specialist. Execute git commands precisely as requested using your available function tools:
|
149
|
-
`git_status`, `git_diff`, `git_add`, `git_commit`, `git_push`.
|
150
|
-
When asked to commit, analyze the diff if necessary and generate concise, informative conventional commit messages (e.g., 'feat: ...', 'fix: ...', 'refactor: ...', 'chore: ...').
|
151
|
-
Always stage changes using `git_add` before committing.
|
152
|
-
If asked to push, first ask the user (Michael) for confirmation before executing `git_push`.
|
153
|
-
If a task involves testing (like running tests after a commit), delegate it to the Sam_Ashes agent tool.
|
154
|
-
For tasks outside your Git domain, report back to Michael; do not use the Michael_Toasted tool directly.
|
155
|
-
Available Function Tools: git_status, git_diff, git_add, git_commit, git_push.
|
156
|
-
Available Agent Tools: Sam_Ashes.
|
157
|
-
"""
|
158
|
-
sam_instructions = """
|
159
|
-
You are Sam Ashes, the meticulous testing operative. Execute test commands using your available function tools: `run_npm_test` or `run_pytest`.
|
160
|
-
Interpret the results: Report failures immediately and clearly. If tests pass, consider running with coverage (e.g., using `uv run pytest --cov` via the `run_pytest` tool) if appropriate or requested, and report the coverage summary.
|
161
|
-
For tasks outside testing (e.g., needing code changes before testing, or git operations), refer back to Michael; do not use the Michael_Toasted or Fiona_Flame tools directly.
|
162
|
-
Available Function Tools: run_npm_test, run_pytest.
|
163
|
-
Available Agent Tools: None (Report back to Michael for delegation).
|
164
|
-
"""
|
165
|
-
|
166
|
-
# --- Blueprint Definition ---
|
167
|
-
class BurntNoodlesBlueprint(BlueprintBase):
|
168
|
-
metadata: ClassVar[Dict[str, Any]] = {
|
169
|
-
"name": "BurntNoodlesBlueprint",
|
170
|
-
"title": "Burnt Noodles",
|
171
|
-
"description": "A multi-agent team managing Git operations and code testing.",
|
172
|
-
"version": "1.1.0",
|
173
|
-
"author": "Open Swarm Team (Refactored)",
|
174
|
-
"tags": ["git", "test", "multi-agent", "collaboration", "refactor"],
|
175
|
-
"required_mcp_servers": [],
|
176
|
-
}
|
177
|
-
|
178
|
-
_openai_client_cache: Dict[str, AsyncOpenAI] = {}
|
179
|
-
_model_instance_cache: Dict[str, Model] = {}
|
180
|
-
|
181
|
-
def _get_model_instance(self, profile_name: str) -> Model:
|
182
|
-
if profile_name in self._model_instance_cache:
|
183
|
-
logger.debug(f"Using cached Model instance for profile '{profile_name}'.")
|
184
|
-
return self._model_instance_cache[profile_name]
|
185
|
-
|
186
|
-
logger.debug(f"Creating new Model instance for profile '{profile_name}'.")
|
187
|
-
profile_data = getattr(self, "get_llm_profile", lambda prof: {"provider": "openai", "model": "gpt-mock"})(profile_name)
|
188
|
-
if not profile_data:
|
189
|
-
logger.critical(f"Cannot create Model instance: LLM profile '{profile_name}' (or 'default') not found in configuration.")
|
190
|
-
raise ValueError(f"Missing LLM profile configuration for '{profile_name}' or 'default'.")
|
191
|
-
|
192
|
-
provider = profile_data.get("provider", "openai").lower()
|
193
|
-
model_name = profile_data.get("model")
|
194
|
-
if not model_name:
|
195
|
-
logger.critical(f"LLM profile '{profile_name}' is missing the required 'model' key.")
|
196
|
-
raise ValueError(f"Missing 'model' key in LLM profile '{profile_name}'.")
|
197
|
-
|
198
|
-
if provider != "openai":
|
199
|
-
logger.error(f"Unsupported LLM provider '{provider}' in profile '{profile_name}'. Only 'openai' is supported in this blueprint.")
|
200
|
-
raise ValueError(f"Unsupported LLM provider: {provider}")
|
201
|
-
|
202
|
-
client_cache_key = f"{provider}_{profile_data.get('base_url')}"
|
203
|
-
if client_cache_key not in self._openai_client_cache:
|
204
|
-
client_kwargs = { "api_key": profile_data.get("api_key"), "base_url": profile_data.get("base_url") }
|
205
|
-
filtered_client_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
|
206
|
-
log_client_kwargs = {k:v for k,v in filtered_client_kwargs.items() if k != 'api_key'}
|
207
|
-
logger.debug(f"Creating new AsyncOpenAI client for profile '{profile_name}' with config: {log_client_kwargs}")
|
208
|
-
try:
|
209
|
-
self._openai_client_cache[client_cache_key] = AsyncOpenAI(**filtered_client_kwargs)
|
210
|
-
except Exception as e:
|
211
|
-
logger.error(f"Failed to create AsyncOpenAI client for profile '{profile_name}': {e}", exc_info=True)
|
212
|
-
raise ValueError(f"Failed to initialize OpenAI client for profile '{profile_name}': {e}") from e
|
213
|
-
|
214
|
-
openai_client_instance = self._openai_client_cache[client_cache_key]
|
215
|
-
|
216
|
-
logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') with client instance for profile '{profile_name}'.")
|
217
|
-
try:
|
218
|
-
model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=openai_client_instance)
|
219
|
-
self._model_instance_cache[profile_name] = model_instance
|
220
|
-
return model_instance
|
221
|
-
except Exception as e:
|
222
|
-
logger.error(f"Failed to instantiate OpenAIChatCompletionsModel for profile '{profile_name}': {e}", exc_info=True)
|
223
|
-
raise ValueError(f"Failed to initialize LLM provider for profile '{profile_name}': {e}") from e
|
224
|
-
|
225
|
-
def create_starting_agent(self, mcp_servers: List[MCPServer]) -> Agent:
|
226
|
-
logger.debug("Creating Burnt Noodles agent team...")
|
227
|
-
config = self._load_configuration() if getattr(self, "config", None) is None else self.config
|
228
|
-
self._model_instance_cache = {}
|
229
|
-
self._openai_client_cache = {}
|
230
|
-
|
231
|
-
default_profile_name = config.get("llm_profile", "default")
|
232
|
-
logger.debug(f"Using LLM profile '{default_profile_name}' for all Burnt Noodles agents.")
|
233
|
-
default_model_instance = self._get_model_instance(default_profile_name)
|
234
|
-
|
235
|
-
# --- Use the decorated tool variables ---
|
236
|
-
fiona_flame = Agent(
|
237
|
-
name="Fiona_Flame",
|
238
|
-
model=default_model_instance,
|
239
|
-
instructions=fiona_instructions,
|
240
|
-
tools=[git_status, git_diff, git_add, git_commit, git_push] # Agent tools added later
|
241
|
-
)
|
242
|
-
sam_ashes = Agent(
|
243
|
-
name="Sam_Ashes",
|
244
|
-
model=default_model_instance,
|
245
|
-
instructions=sam_instructions,
|
246
|
-
tools=[run_npm_test, run_pytest] # Agent tools added later
|
247
|
-
)
|
248
|
-
michael_toasted = Agent(
|
249
|
-
name="Michael_Toasted",
|
250
|
-
model=default_model_instance,
|
251
|
-
instructions=michael_instructions,
|
252
|
-
tools=[
|
253
|
-
git_status, # Michael's direct tools
|
254
|
-
git_diff,
|
255
|
-
fiona_flame.as_tool(
|
256
|
-
tool_name="Fiona_Flame",
|
257
|
-
tool_description="Delegate Git operations (add, commit, push) or complex status/diff queries to Fiona."
|
258
|
-
),
|
259
|
-
sam_ashes.as_tool(
|
260
|
-
tool_name="Sam_Ashes",
|
261
|
-
tool_description="Delegate testing tasks (npm test, pytest) to Sam."
|
262
|
-
),
|
263
|
-
],
|
264
|
-
mcp_servers=mcp_servers
|
265
|
-
)
|
266
|
-
# --- End tool variable usage ---
|
267
|
-
|
268
|
-
fiona_flame.tools.append(
|
269
|
-
sam_ashes.as_tool(tool_name="Sam_Ashes", tool_description="Delegate testing tasks (npm test, pytest) to Sam.")
|
270
|
-
)
|
271
|
-
|
272
|
-
logger.debug("Burnt Noodles agent team created successfully. Michael Toasted is the starting agent.")
|
273
|
-
return michael_toasted
|
274
|
-
|
275
|
-
async def run(self, messages: List[Dict[str, Any]], **kwargs) -> AsyncGenerator[Dict[str, Any], None]:
|
276
|
-
"""
|
277
|
-
Main execution entry point for the Burnt Noodles blueprint.
|
278
|
-
Delegates to _run_non_interactive for CLI-like execution.
|
279
|
-
"""
|
280
|
-
logger.info("BurntNoodlesBlueprint run method called.")
|
281
|
-
instruction = messages[-1].get("content", "") if messages else ""
|
282
|
-
async for chunk in self._run_non_interactive(instruction, **kwargs):
|
283
|
-
yield chunk
|
284
|
-
logger.info("BurntNoodlesBlueprint run method finished.")
|
285
|
-
|
286
|
-
async def _run_non_interactive(self, instruction: str, **kwargs) -> AsyncGenerator[Dict[str, Any], None]:
|
287
|
-
"""Helper to run the agent flow based on an instruction."""
|
288
|
-
logger.info(f"Running Burnt Noodles non-interactively with instruction: '{instruction[:100]}...'")
|
289
|
-
mcp_servers = kwargs.get("mcp_servers", [])
|
290
|
-
starting_agent = self.create_starting_agent(mcp_servers=mcp_servers)
|
291
|
-
runner = Runner(agent=starting_agent)
|
292
|
-
try:
|
293
|
-
final_result = await runner.run(instruction)
|
294
|
-
logger.info(f"Non-interactive run finished. Final Output: {final_result.final_output}")
|
295
|
-
yield { "messages": [ {"role": "assistant", "content": final_result.final_output} ] }
|
296
|
-
except Exception as e:
|
297
|
-
logger.error(f"Error during non-interactive run: {e}", exc_info=True)
|
298
|
-
yield { "messages": [ {"role": "assistant", "content": f"An error occurred: {e}"} ] }
|
299
|
-
|
300
|
-
|
301
|
-
# Standard Python entry point for direct script execution
|
302
|
-
if __name__ == "__main__":
|
303
|
-
BurntNoodlesBlueprint.main()
|
304
|
-
|
@@ -1,285 +0,0 @@
|
|
1
|
-
import logging
|
2
|
-
import random
|
3
|
-
import json
|
4
|
-
import os
|
5
|
-
import sys
|
6
|
-
import sqlite3 # Use standard sqlite3 module
|
7
|
-
from pathlib import Path
|
8
|
-
from typing import Dict, Any, List, ClassVar, Optional
|
9
|
-
|
10
|
-
# Ensure src is in path for BlueprintBase import
|
11
|
-
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
|
12
|
-
src_path = os.path.join(project_root, 'src')
|
13
|
-
if src_path not in sys.path: sys.path.insert(0, src_path)
|
14
|
-
|
15
|
-
try:
|
16
|
-
from agents import Agent, Tool, function_tool, Runner
|
17
|
-
from agents.mcp import MCPServer
|
18
|
-
from agents.models.interface import Model
|
19
|
-
from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
|
20
|
-
from openai import AsyncOpenAI
|
21
|
-
from swarm.extensions.blueprint.blueprint_base import BlueprintBase
|
22
|
-
except ImportError as e:
|
23
|
-
print(f"ERROR: Import failed in DilbotUniverseBlueprint: {e}. Check dependencies.")
|
24
|
-
print(f"sys.path: {sys.path}")
|
25
|
-
sys.exit(1)
|
26
|
-
|
27
|
-
logger = logging.getLogger(__name__)
|
28
|
-
|
29
|
-
# --- Database Constants ---
|
30
|
-
DB_FILE_NAME = "swarm_instructions.db"
|
31
|
-
DB_PATH = Path(project_root) / DB_FILE_NAME
|
32
|
-
TABLE_NAME = "agent_instructions"
|
33
|
-
|
34
|
-
# --- Placeholder Tools ---
|
35
|
-
@function_tool
|
36
|
-
def build_product() -> str:
|
37
|
-
"""Simulates successfully building the product."""
|
38
|
-
logger.info("Tool: build_product executed.")
|
39
|
-
return (
|
40
|
-
"ACTION: build_product completed.\n"
|
41
|
-
"GAME OVER: YOU WON!\n"
|
42
|
-
"After much deliberation, the comedic masterpiece is finalized—behold its glory! "
|
43
|
-
"Reasoning: It’s polished enough to survive the corporate circus."
|
44
|
-
)
|
45
|
-
|
46
|
-
@function_tool
|
47
|
-
def sabotage_project() -> str:
|
48
|
-
"""Simulates sabotaging the project."""
|
49
|
-
logger.info("Tool: sabotage_project executed.")
|
50
|
-
return (
|
51
|
-
"ACTION: sabotage_project completed.\n"
|
52
|
-
"GAME OVER: YOU LOST!\n"
|
53
|
-
"The project has been gleefully trashed—chaos reigns supreme! "
|
54
|
-
"Reasoning: Why build when you can break with style?"
|
55
|
-
)
|
56
|
-
|
57
|
-
# --- Blueprint Definition ---
|
58
|
-
class DilbotUniverseBlueprint(BlueprintBase):
|
59
|
-
"""A comedic multi-agent blueprint simulating a 9-step SDLC, using agent-as-tool for handoffs and SQLite for instructions."""
|
60
|
-
metadata: ClassVar[Dict[str, Any]] = {
|
61
|
-
"name": "DilbotUniverseBlueprint",
|
62
|
-
"title": "Dilbot Universe SDLC (SQLite)",
|
63
|
-
"description": "A comedic multi-agent blueprint using agent-as-tool handoffs and SQLite for instructions.",
|
64
|
-
"version": "1.2.0", # Version bump for SQLite change
|
65
|
-
"author": "Open Swarm Team (Refactored)",
|
66
|
-
"tags": ["comedy", "multi-agent", "sdlc", "sqlite", "dynamic-config"],
|
67
|
-
"required_mcp_servers": [],
|
68
|
-
"cli_name": "dilbot",
|
69
|
-
"env_vars": [],
|
70
|
-
}
|
71
|
-
|
72
|
-
# Caches
|
73
|
-
_openai_client_cache: Dict[str, AsyncOpenAI] = {}
|
74
|
-
_model_instance_cache: Dict[str, Model] = {}
|
75
|
-
_db_initialized = False # Flag to ensure DB init runs only once per instance
|
76
|
-
|
77
|
-
# --- Database Interaction ---
|
78
|
-
def _init_db_and_load_data(self) -> None:
|
79
|
-
"""Initializes the SQLite DB, creates table, and loads sample data if needed."""
|
80
|
-
if self._db_initialized:
|
81
|
-
return
|
82
|
-
|
83
|
-
logger.info(f"Initializing SQLite database at: {DB_PATH}")
|
84
|
-
try:
|
85
|
-
# Ensure directory exists (though DB_PATH is project root here)
|
86
|
-
DB_PATH.parent.mkdir(parents=True, exist_ok=True)
|
87
|
-
|
88
|
-
with sqlite3.connect(DB_PATH) as conn:
|
89
|
-
cursor = conn.cursor()
|
90
|
-
# Create table if it doesn't exist
|
91
|
-
cursor.execute(f"""
|
92
|
-
CREATE TABLE IF NOT EXISTS {TABLE_NAME} (
|
93
|
-
agent_name TEXT PRIMARY KEY,
|
94
|
-
instruction_text TEXT NOT NULL,
|
95
|
-
model_profile TEXT DEFAULT 'default'
|
96
|
-
)
|
97
|
-
""")
|
98
|
-
logger.debug(f"Table '{TABLE_NAME}' ensured in {DB_PATH}")
|
99
|
-
|
100
|
-
# Check if data needs loading (check for a known agent)
|
101
|
-
cursor.execute(f"SELECT COUNT(*) FROM {TABLE_NAME} WHERE agent_name = ?", ("Dilbot",))
|
102
|
-
count = cursor.fetchone()[0]
|
103
|
-
|
104
|
-
if count == 0:
|
105
|
-
logger.info(f"No instructions found for Dilbot in {DB_PATH}. Loading sample data...")
|
106
|
-
sample_instructions = [
|
107
|
-
("Dilbot", "You are Dilbot, a meticulous engineer... [Full instructions]", "default"),
|
108
|
-
("Alisa", "You are Alisa, a creative designer... [Full instructions]", "default"),
|
109
|
-
("Carola", "You are Carola, an organized manager... [Full instructions]", "default"),
|
110
|
-
("PointyBoss", "You are PointyBoss, an evil manager... [Full instructions]", "default"),
|
111
|
-
("Dogbot", "You are Dogbot, an evil consultant... [Full instructions]", "default"),
|
112
|
-
("Waldo", "You are Waldo, a lazy neutral employee... [Full instructions]", "default"),
|
113
|
-
("Asoka", "You are Asoka, an eager neutral intern... [Full instructions]", "default"),
|
114
|
-
("Ratbot", "You are Ratbot, a whimsical neutral character... [Full instructions]", "default"),
|
115
|
-
]
|
116
|
-
# Replace "[Full instructions]" with the actual long instructions from the previous version
|
117
|
-
# Example for Dilbot:
|
118
|
-
sample_instructions[0] = (
|
119
|
-
"Dilbot",
|
120
|
-
"You are Dilbot, a meticulous engineer. Follow a 9-step SDLC: 1) Ask engineering questions, 2) Probe further, 3) 1/3 chance to build or pass to Waldo (reason first), 4-5) More questions, 6) 2/3 chance to build or pass, 7-8) Final questions, 9) Build or pass with comedic reasoning.",
|
121
|
-
"default"
|
122
|
-
)
|
123
|
-
# ... (Add the other full instructions here) ...
|
124
|
-
# For brevity, using placeholders:
|
125
|
-
sample_instructions[1] = ("Alisa", sample_instructions[1][1].replace("... [Full instructions]", "... Follow a 9-step SDLC: 1) Ask design questions... 9) Build or pass with comedic reasoning."), "default")
|
126
|
-
sample_instructions[2] = ("Carola", sample_instructions[2][1].replace("... [Full instructions]", "... Follow a 9-step SDLC: 1) Ask scheduling questions... 9) Build or pass with comedic reasoning."), "default")
|
127
|
-
sample_instructions[3] = ("PointyBoss", sample_instructions[3][1].replace("... [Full instructions]", "... Follow a 9-step SDLC: 1) Ask business questions... 9) Sabotage or pass with comedic reasoning."), "default")
|
128
|
-
sample_instructions[4] = ("Dogbot", sample_instructions[4][1].replace("... [Full instructions]", "... Follow a 9-step SDLC: 1) Ask consultancy questions... 9) Sabotage or pass with comedic reasoning."), "default")
|
129
|
-
sample_instructions[5] = ("Waldo", sample_instructions[5][1].replace("... [Full instructions]", "... Follow a 9-step SDLC: 1) Ask procrastination questions... 9) Pass to Dilbot or Dogbot with comedic reasoning."), "default")
|
130
|
-
sample_instructions[6] = ("Asoka", sample_instructions[6][1].replace("... [Full instructions]", "... Follow a 9-step SDLC: 1) Ask creative questions... 9) Pass to Carola or PointyBoss with comedic reasoning."), "default")
|
131
|
-
sample_instructions[7] = ("Ratbot", sample_instructions[7][1].replace("... [Full instructions]", "... Follow a 9-step SDLC: 1) Ask nonsense questions... 9) Pass to Dilbot or Dogbot with comedic reasoning."), "default")
|
132
|
-
|
133
|
-
|
134
|
-
cursor.executemany(f"INSERT INTO {TABLE_NAME} (agent_name, instruction_text, model_profile) VALUES (?, ?, ?)", sample_instructions)
|
135
|
-
conn.commit()
|
136
|
-
logger.info(f"Sample agent instructions loaded into {DB_PATH}")
|
137
|
-
else:
|
138
|
-
logger.info(f"Agent instructions found in {DB_PATH}. Skipping sample data loading.")
|
139
|
-
|
140
|
-
self._db_initialized = True
|
141
|
-
|
142
|
-
except sqlite3.Error as e:
|
143
|
-
logger.error(f"SQLite error during DB initialization/loading: {e}", exc_info=True)
|
144
|
-
# Continue without DB? Or raise error? Let's warn and continue with defaults.
|
145
|
-
self._db_initialized = False # Mark as failed
|
146
|
-
except Exception as e:
|
147
|
-
logger.error(f"Unexpected error during DB initialization/loading: {e}", exc_info=True)
|
148
|
-
self._db_initialized = False
|
149
|
-
|
150
|
-
def get_agent_config(self, agent_name: str) -> Dict[str, Any]:
    """Fetches agent config from SQLite DB or returns defaults.

    Looks the agent up by name in the instructions table when the DB was
    initialized successfully; any DB problem (or a missing row) falls back
    to the hardcoded defaults below so agent creation never fails here.
    """
    if self._db_initialized:
        try:
            with sqlite3.connect(DB_PATH) as conn:
                conn.row_factory = sqlite3.Row  # Access columns by name
                cur = conn.cursor()
                cur.execute(
                    f"SELECT instruction_text, model_profile FROM {TABLE_NAME} WHERE agent_name = ?",
                    (agent_name,),
                )
                record = cur.fetchone()
            if record is not None:
                logger.debug(f"Loaded config for agent '{agent_name}' from SQLite.")
                return {
                    "instructions": record["instruction_text"],
                    "model_profile": record["model_profile"] or "default",
                }
            logger.warning(f"No config found for agent '{agent_name}' in SQLite. Using defaults.")
        except sqlite3.Error as e:
            logger.error(f"SQLite error fetching config for '{agent_name}': {e}. Using defaults.", exc_info=True)
        except Exception as e:
            logger.error(f"Unexpected error fetching config for '{agent_name}': {e}. Using defaults.", exc_info=True)

    # --- Fallback Hardcoded Defaults ---
    logger.warning(f"Using hardcoded default config for agent '{agent_name}'.")
    failed_db_defaults = {
        "Dilbot": "You are Dilbot, a meticulous engineer... [Default Instructions - DB Failed]",
        # ... (Add other default instructions here) ...
        "Alisa": "You are Alisa... [Default Instructions - DB Failed]",
        "Carola": "You are Carola... [Default Instructions - DB Failed]",
        "PointyBoss": "You are PointyBoss... [Default Instructions - DB Failed]",
        "Dogbot": "You are Dogbot... [Default Instructions - DB Failed]",
        "Waldo": "You are Waldo... [Default Instructions - DB Failed]",
        "Asoka": "You are Asoka... [Default Instructions - DB Failed]",
        "Ratbot": "You are Ratbot... [Default Instructions - DB Failed]",
    }
    return {
        "instructions": failed_db_defaults.get(agent_name, f"Default instructions for {agent_name}."),
        "model_profile": "default",
    }
|
189
|
-
|
190
|
-
# --- Model Instantiation Helper --- (Copied from previous step)
def _get_model_instance(self, profile_name: str) -> Model:
    """Retrieves or creates an LLM Model instance.

    Two layers of caching are used:
      * ``self._model_instance_cache`` keyed by profile name (Model objects);
      * ``self._openai_client_cache`` keyed by provider + base_url
        (AsyncOpenAI clients), so profiles sharing an endpoint share a client.

    Raises:
        ValueError: if the profile is missing, lacks a 'model' key, names a
            non-openai provider, or client/model construction fails.
    """
    # ... (Implementation remains the same as in the previous response) ...
    if profile_name in self._model_instance_cache:
        logger.debug(f"Using cached Model instance for profile '{profile_name}'.")
        return self._model_instance_cache[profile_name]
    logger.debug(f"Creating new Model instance for profile '{profile_name}'.")
    # get_llm_profile is inherited from the blueprint base class; presumably it
    # already falls back to the 'default' profile — confirm against BlueprintBase.
    profile_data = self.get_llm_profile(profile_name)
    if not profile_data:
        logger.critical(f"LLM profile '{profile_name}' (or 'default') not found.")
        raise ValueError(f"Missing LLM profile configuration for '{profile_name}' or 'default'.")
    provider = profile_data.get("provider", "openai").lower()
    model_name = profile_data.get("model")
    if not model_name:
        logger.critical(f"LLM profile '{profile_name}' missing 'model' key.")
        raise ValueError(f"Missing 'model' key in LLM profile '{profile_name}'.")
    # Only the OpenAI-compatible provider path is implemented here.
    if provider != "openai":
        logger.error(f"Unsupported LLM provider '{provider}' in profile '{profile_name}'.")
        raise ValueError(f"Unsupported LLM provider: {provider}")
    # Client cache key includes base_url so different endpoints get distinct clients.
    client_cache_key = f"{provider}_{profile_data.get('base_url')}"
    if client_cache_key not in self._openai_client_cache:
        client_kwargs = { "api_key": profile_data.get("api_key"), "base_url": profile_data.get("base_url") }
        # Drop None values so AsyncOpenAI falls back to its own env-based defaults.
        filtered_client_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
        # Never log the api_key.
        log_client_kwargs = {k:v for k,v in filtered_client_kwargs.items() if k != 'api_key'}
        logger.debug(f"Creating new AsyncOpenAI client for profile '{profile_name}' with config: {log_client_kwargs}")
        try: self._openai_client_cache[client_cache_key] = AsyncOpenAI(**filtered_client_kwargs)
        except Exception as e: raise ValueError(f"Failed to initialize OpenAI client: {e}") from e
    openai_client_instance = self._openai_client_cache[client_cache_key]
    logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') for '{profile_name}'.")
    try:
        model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=openai_client_instance)
        self._model_instance_cache[profile_name] = model_instance
        return model_instance
    except Exception as e: raise ValueError(f"Failed to initialize LLM provider: {e}") from e
|
225
|
-
|
226
|
-
|
227
|
-
# --- Agent Creation ---
def create_starting_agent(self, mcp_servers: List[MCPServer]) -> Agent:
    """Creates the Dilbot Universe agent team using SQLite for instructions.

    Every agent is instantiated first (so each can be wrapped as a tool),
    then delegation and action tools are wired from a table, and finally a
    random neutral agent is chosen as the starting point.
    """
    # Initialize DB and load data if needed (runs only once)
    self._init_db_and_load_data()

    logger.debug("Creating Dilbot Universe agent team...")
    self._model_instance_cache = {}  # Clear model cache for this run
    self._openai_client_cache = {}  # Clear client cache for this run

    roster = ["Dilbot", "Alisa", "Carola", "PointyBoss", "Dogbot", "Waldo", "Asoka", "Ratbot"]
    agents: Dict[str, Agent] = {}

    # Create all agents first so they can be used as tools.
    for agent_name in roster:
        cfg = self.get_agent_config(agent_name)
        agents[agent_name] = Agent(
            name=agent_name,
            instructions=cfg["instructions"],
            model=self._get_model_instance(cfg["model_profile"]),
            tools=[],  # Tools (including agent-as-tool) wired below
            mcp_servers=mcp_servers,  # Pass full list for simplicity now
        )

    # --- Define Tools & Agent-as-Tool Delegations ---
    # Each row: agent -> (optional action tool, delegation targets in order).
    wiring = {
        "Dilbot": (build_product, ["Waldo"]),
        "Alisa": (build_product, ["Asoka"]),
        "Carola": (build_product, ["Waldo"]),
        "PointyBoss": (sabotage_project, ["Waldo"]),
        "Dogbot": (sabotage_project, ["Ratbot"]),
        "Waldo": (None, ["Dilbot", "Dogbot"]),
        "Asoka": (None, ["Carola", "PointyBoss"]),
        "Ratbot": (None, ["Dilbot", "Dogbot"]),
    }
    for agent_name, (action_tool, targets) in wiring.items():
        new_tools = [] if action_tool is None else [action_tool]
        new_tools.extend(
            agents[target].as_tool(tool_name=target, tool_description=f"Pass task to {target}.")
            for target in targets
        )
        agents[agent_name].tools.extend(new_tools)

    # Randomly select starting agent from neutrals
    start_name = random.choice(["Waldo", "Asoka", "Ratbot"])
    starting_agent = agents[start_name]

    logger.info(f"Dilbot Universe agents created (using SQLite). Starting agent: {start_name}")
    return starting_agent
|
281
|
-
|
282
|
-
# Standard Python entry point
if __name__ == "__main__":
    # No Django setup needed here anymore
    # main() is presumably a classmethod supplied by the blueprint base
    # class that runs the CLI loop — confirm against BlueprintBase.
    DilbotUniverseBlueprint.main()
|
@@ -1,232 +0,0 @@
|
|
1
|
-
"""
|
2
|
-
Gotchaman: CLI Automation Blueprint
|
3
|
-
|
4
|
-
This blueprint provides CLI automation capabilities using a team of agents:
|
5
|
-
- Ken (Coordinator)
|
6
|
-
- Joe (Runner - executes commands/file ops)
|
7
|
-
- Jun (Logger - hypothetical monitoring via MCP)
|
8
|
-
- Jinpei (Advisor - hypothetical suggestion via MCP)
|
9
|
-
- Ryu (Reviewer - hypothetical insights via MCP)
|
10
|
-
|
11
|
-
Uses BlueprintBase, @function_tool for local commands, and agent-as-tool delegation.
|
12
|
-
"""
|
13
|
-
|
14
|
-
import os
|
15
|
-
import sys
|
16
|
-
import logging
|
17
|
-
import subprocess
|
18
|
-
import shlex # For safe command splitting
|
19
|
-
from pathlib import Path # Use pathlib
|
20
|
-
from typing import Dict, Any, List, ClassVar, Optional
|
21
|
-
|
22
|
-
# Ensure src is in path for BlueprintBase import
|
23
|
-
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
|
24
|
-
src_path = os.path.join(project_root, 'src')
|
25
|
-
if src_path not in sys.path: sys.path.insert(0, src_path)
|
26
|
-
|
27
|
-
try:
|
28
|
-
from agents import Agent, Tool, function_tool, Runner
|
29
|
-
from agents.mcp import MCPServer
|
30
|
-
from agents.models.interface import Model
|
31
|
-
from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
|
32
|
-
from openai import AsyncOpenAI
|
33
|
-
from swarm.extensions.blueprint.blueprint_base import BlueprintBase
|
34
|
-
except ImportError as e:
|
35
|
-
print(f"ERROR: Import failed in GotchamanBlueprint: {e}. Check dependencies.")
|
36
|
-
print(f"sys.path: {sys.path}")
|
37
|
-
sys.exit(1)
|
38
|
-
|
39
|
-
logger = logging.getLogger(__name__)
|
40
|
-
|
41
|
-
# --- Function Tools ---
@function_tool
def execute_command(command: str) -> str:
    """Executes a shell command and returns its stdout and stderr.

    Args:
        command: The command line to run; executed through the shell.

    Returns:
        A string beginning with "OK:" when the command exits with code 0,
        otherwise "Error:", always followed by the exit code, stdout and
        stderr. Never raises — every failure mode is reported as a string.
    """
    # Fix: also reject whitespace-only commands. The original check only
    # caught the empty string, and a blank command later crashed the
    # FileNotFoundError handler on command.split()[0] (IndexError).
    if not command or not command.strip(): return "Error: No command provided."
    logger.info(f"Tool: Executing command: {command}")
    try:
        # Use shell=True cautiously, consider splitting if possible for safer execution
        result = subprocess.run(
            command,
            shell=True, # Be cautious with shell=True
            check=False, # Capture output even on error
            capture_output=True,
            text=True,
            timeout=120
        )
        output = f"Exit Code: {result.returncode}\nSTDOUT:\n{result.stdout.strip()}\nSTDERR:\n{result.stderr.strip()}"
        if result.returncode == 0:
            logger.debug(f"Command successful:\n{output}")
            return f"OK: Command executed.\n{output}"
        else:
            logger.error(f"Command failed:\n{output}")
            return f"Error: Command failed.\n{output}"
    except FileNotFoundError:
        # This error is less likely with shell=True unless the shell itself is missing
        # Guard the split so a pathological command string cannot raise here.
        tokens = command.split()
        first_token = tokens[0] if tokens else command
        logger.error(f"Error executing command '{command}': Shell or command not found.")
        return f"Error: Shell or command '{first_token}' not found."
    except subprocess.TimeoutExpired:
        logger.error(f"Command '{command}' timed out.")
        return f"Error: Command '{command}' timed out."
    except Exception as e:
        logger.error(f"Unexpected error executing command '{command}': {e}", exc_info=logger.level <= logging.DEBUG)
        return f"Error: Unexpected error during command execution: {e}"
|
74
|
-
|
75
|
-
@function_tool
def read_file(path: str) -> str:
    """Reads the content of a file at the specified path."""
    if not path:
        return "Error: No file path provided."
    logger.info(f"Tool: Reading file at: {path}")
    try:
        target = Path(path).resolve()
        # Optional: a sandboxing check (rejecting paths outside the current
        # working directory) could be added here; it is deliberately disabled.
        if not target.is_file():
            logger.error(f"File not found at: {target}")
            return f"Error: File not found at path: {path}"
        text = target.read_text(encoding="utf-8")
        logger.debug(f"Read {len(text)} characters from {target}.")
        return f"OK: Content of {path}:\n{text}"
    except Exception as e:
        logger.error(f"Error reading file at {path}: {e}", exc_info=logger.level <= logging.DEBUG)
        return f"Error reading file '{path}': {e}"
|
96
|
-
|
97
|
-
@function_tool
def write_file(path: str, content: str) -> str:
    """Writes content to a file at the specified path, overwriting if it exists."""
    if not path:
        return "Error: No file path provided."
    logger.info(f"Tool: Writing {len(content)} characters to file at: {path}")
    try:
        destination = Path(path).resolve()
        # Optional: a sandboxing check (rejecting paths outside the current
        # working directory) could be added here; it is deliberately disabled.
        destination.parent.mkdir(parents=True, exist_ok=True)  # Ensure directory exists
        destination.write_text(content, encoding="utf-8")
        logger.debug(f"Successfully wrote to {destination}.")
        return f"OK: Successfully wrote to {path}."
    except Exception as e:
        logger.error(f"Error writing file at {path}: {e}", exc_info=logger.level <= logging.DEBUG)
        return f"Error writing file '{path}': {e}"
|
117
|
-
|
118
|
-
# --- Define the Blueprint ---
class GotchamanBlueprint(BlueprintBase):
    """Gotchaman: CLI Automation Blueprint using BlueprintBase.

    Builds a five-agent team: Ken coordinates and delegates (via
    agent-as-tool) to Joe (shell/file function tools), Jun (Slack MCP),
    Jinpei (mcp-npx-fetch MCP) and Ryu (basic-memory MCP).
    """

    # Blueprint self-description consumed by the framework (names, required
    # MCP servers, environment variables).
    metadata: ClassVar[Dict[str, Any]] = {
        "name": "GotchamanBlueprint",
        "title": "Gotchaman: CLI Automation",
        "description": (
            "A blueprint for automating CLI tasks using a team of agents (Ken, Joe, Jun, Jinpei, Ryu) "
            "with specific roles and MCP/tool access."
        ),
        "version": "1.1.0", # Refactored version
        "author": "Open Swarm Team (Refactored)",
        "tags": ["cli", "automation", "multi-agent", "mcp", "slack", "monday"],
        # List only servers directly used by refactored agents
        "required_mcp_servers": ["slack", "mondayDotCom", "basic-memory", "mcp-npx-fetch"],
        "env_vars": ["SLACK_API_KEY", "MONDAY_API_KEY"]
    }

    # Caches
    # NOTE(review): these are class-level mutable dicts, so they are shared
    # across all instances of the blueprint; create_starting_agent rebinds
    # them per run, which mitigates this — confirm that is intended.
    _openai_client_cache: Dict[str, AsyncOpenAI] = {}
    _model_instance_cache: Dict[str, Model] = {}

    # --- Model Instantiation Helper --- (Standard helper)
    def _get_model_instance(self, profile_name: str) -> Model:
        """Retrieves or creates an LLM Model instance.

        Caches Model objects per profile and AsyncOpenAI clients per
        provider/base_url pair. Raises ValueError on any missing or
        unsupported configuration.
        """
        # ... (Implementation is the same as previous refactors) ...
        if profile_name in self._model_instance_cache:
            logger.debug(f"Using cached Model instance for profile '{profile_name}'.")
            return self._model_instance_cache[profile_name]
        logger.debug(f"Creating new Model instance for profile '{profile_name}'.")
        profile_data = self.get_llm_profile(profile_name)
        if not profile_data: raise ValueError(f"Missing LLM profile '{profile_name}'.")
        provider = profile_data.get("provider", "openai").lower()
        model_name = profile_data.get("model")
        if not model_name: raise ValueError(f"Missing 'model' in profile '{profile_name}'.")
        # Only OpenAI-compatible providers are supported by this helper.
        if provider != "openai": raise ValueError(f"Unsupported provider: {provider}")
        client_cache_key = f"{provider}_{profile_data.get('base_url')}"
        if client_cache_key not in self._openai_client_cache:
            client_kwargs = { "api_key": profile_data.get("api_key"), "base_url": profile_data.get("base_url") }
            # Drop None values so the client falls back to its env defaults;
            # keep api_key out of the debug log.
            filtered_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
            log_kwargs = {k:v for k,v in filtered_kwargs.items() if k != 'api_key'}
            logger.debug(f"Creating new AsyncOpenAI client for '{profile_name}': {log_kwargs}")
            try: self._openai_client_cache[client_cache_key] = AsyncOpenAI(**filtered_kwargs)
            except Exception as e: raise ValueError(f"Failed to init client: {e}") from e
        client = self._openai_client_cache[client_cache_key]
        logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') for '{profile_name}'.")
        try:
            model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=client)
            self._model_instance_cache[profile_name] = model_instance
            return model_instance
        except Exception as e: raise ValueError(f"Failed to init LLM: {e}") from e


    def create_starting_agent(self, mcp_servers: List[MCPServer]) -> Agent:
        """Creates the Gotchaman agent team and returns Ken (Coordinator).

        All agents share one model instance (from the blueprint's configured
        LLM profile); each worker gets only the MCP servers it needs.
        """
        logger.debug("Creating Gotchaman agent team...")
        # Reset shared caches so each run builds fresh clients/models.
        self._model_instance_cache = {}
        self._openai_client_cache = {}

        default_profile_name = self.config.get("llm_profile", "default")
        logger.debug(f"Using LLM profile '{default_profile_name}' for Gotchaman agents.")
        model_instance = self._get_model_instance(default_profile_name)

        # Helper to filter MCP servers
        def get_agent_mcps(names: List[str]) -> List[MCPServer]:
            # Selects, by name, the subset of started servers an agent may use.
            return [s for s in mcp_servers if s.name in names]

        # --- Agent Instructions ---
        ken_instructions = "You are Ken, the Coordinator for Gotchaman team. Your team: Joe (Runner), Jun (Logger), Jinpei (Advisor), and Ryu (Reviewer). Analyze the user request and delegate tasks to the appropriate agent using their Agent Tool. Synthesize their responses for the final output."
        joe_instructions = "You are Joe, the Runner. Execute shell commands, read files, or write files using your function tools (`execute_command`, `read_file`, `write_file`) as requested by Ken. Report the outcome."
        jun_instructions = "You are Jun, the Logger. Receive information or instructions from Ken. Use the `slack` MCP tool to log messages or feedback to a designated channel (details provided by Ken or pre-configured). Report success/failure of logging back to Ken."
        jinpei_instructions = "You are Jinpei, the Advisor. Receive context from Ken. Use the `mcp-npx-fetch` MCP tool to fetch relevant documentation or examples based on the context. Provide concise suggestions or relevant snippets back to Ken."
        ryu_instructions = "You are Ryu, the Reviewer. Receive outputs or code snippets from Ken. Use the `basic-memory` MCP tool to recall previous related outputs or guidelines if necessary. Provide insightful review comments or quality checks back to Ken."

        # Instantiate agents
        joe_agent = Agent(
            name="Joe", model=model_instance, instructions=joe_instructions,
            tools=[execute_command, read_file, write_file], # Joe has the function tools
            mcp_servers=[] # Joe doesn't directly use MCPs listed
        )
        jun_agent = Agent(
            name="Jun", model=model_instance, instructions=jun_instructions,
            tools=[], # Jun uses MCP
            mcp_servers=get_agent_mcps(["slack"])
        )
        jinpei_agent = Agent(
            name="Jinpei", model=model_instance, instructions=jinpei_instructions,
            tools=[], # Jinpei uses MCP
            mcp_servers=get_agent_mcps(["mcp-npx-fetch"])
        )
        ryu_agent = Agent(
            name="Ryu", model=model_instance, instructions=ryu_instructions,
            tools=[], # Ryu uses MCP
            mcp_servers=get_agent_mcps(["basic-memory"])
        )
        # Coordinator - Ken
        ken_agent = Agent(
            name="Ken", model=model_instance, instructions=ken_instructions,
            tools=[ # Ken delegates to others via agent tools
                joe_agent.as_tool(tool_name="Joe", tool_description="Delegate command execution or file operations to Joe."),
                jun_agent.as_tool(tool_name="Jun", tool_description="Delegate logging tasks via Slack to Jun."),
                jinpei_agent.as_tool(tool_name="Jinpei", tool_description="Delegate fetching docs/examples to Jinpei."),
                ryu_agent.as_tool(tool_name="Ryu", tool_description="Delegate review tasks or recall past info via Ryu."),
            ],
            # Ken might use memory directly, or coordinate access via Ryu? Assigning for potential direct use.
            mcp_servers=get_agent_mcps(["basic-memory"])
        )

        logger.debug("Gotchaman agents created. Starting with Ken.")
        return ken_agent
|
229
|
-
|
230
|
-
# Standard Python entry point
if __name__ == "__main__":
    # main() is presumably a classmethod supplied by the blueprint base
    # class that runs the CLI loop — confirm against BlueprintBase.
    GotchamanBlueprint.main()
|
File without changes
|
{open_swarm-0.1.1744936125.dist-info → open_swarm-0.1.1744936173.dist-info}/entry_points.txt
RENAMED
File without changes
|
{open_swarm-0.1.1744936125.dist-info → open_swarm-0.1.1744936173.dist-info}/licenses/LICENSE
RENAMED
File without changes
|