open-swarm 0.1.1743416034__py3-none-any.whl → 0.1.1744936125__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {open_swarm-0.1.1743416034.dist-info → open_swarm-0.1.1744936125.dist-info}/METADATA +1 -1
- {open_swarm-0.1.1743416034.dist-info → open_swarm-0.1.1744936125.dist-info}/RECORD +8 -7
- swarm/blueprints/burnt_noodles/blueprint_burnt_noodles.py +110 -218
- swarm/blueprints/chatbot/blueprint_chatbot.py +64 -8
- swarm/extensions/blueprint/runnable_blueprint.py +42 -0
- {open_swarm-0.1.1743416034.dist-info → open_swarm-0.1.1744936125.dist-info}/WHEEL +0 -0
- {open_swarm-0.1.1743416034.dist-info → open_swarm-0.1.1744936125.dist-info}/entry_points.txt +0 -0
- {open_swarm-0.1.1743416034.dist-info → open_swarm-0.1.1744936125.dist-info}/licenses/LICENSE +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: open-swarm
|
3
|
-
Version: 0.1.
|
3
|
+
Version: 0.1.1744936125
|
4
4
|
Summary: Open Swarm: Orchestrating AI Agent Swarms with Django
|
5
5
|
Project-URL: Homepage, https://github.com/yourusername/open-swarm
|
6
6
|
Project-URL: Documentation, https://github.com/yourusername/open-swarm/blob/main/README.md
|
@@ -14,8 +14,8 @@ swarm/util.py,sha256=G4x2hXopHhB7IdGCkUXGoykYWyiICnjxg7wcr-WqL8I,4644
|
|
14
14
|
swarm/wsgi.py,sha256=REM_u4HpMCkO0ddrOUXgtY-ITL-VTbRB1-WHvFJAtAU,408
|
15
15
|
swarm/agent/__init__.py,sha256=YESGu_UXEBxrlQwghodUMN0vmXZDwWMU7DclCUvoklA,104
|
16
16
|
swarm/blueprints/README.md,sha256=tsngbSB9N0tILcz_m1OGAjyKZQYlGTN-i5e5asq1GbE,8478
|
17
|
-
swarm/blueprints/burnt_noodles/blueprint_burnt_noodles.py,sha256=
|
18
|
-
swarm/blueprints/chatbot/blueprint_chatbot.py,sha256=
|
17
|
+
swarm/blueprints/burnt_noodles/blueprint_burnt_noodles.py,sha256=vopDlBjVUNeSq6WItdkmtWJfObgbtL6wNAy2njsskkY,19607
|
18
|
+
swarm/blueprints/chatbot/blueprint_chatbot.py,sha256=a5-gIyDvRtNgbyfviD9Hua9r5NjOQh1lOafIG2a6kiI,7520
|
19
19
|
swarm/blueprints/chatbot/templates/chatbot/chatbot.html,sha256=REFnqNg0EHsXxAUfaCJe1YgOKiV_umBXuC6y8veF5CU,1568
|
20
20
|
swarm/blueprints/digitalbutlers/blueprint_digitalbutlers.py,sha256=JK_rmZgPMw4PdQFrMverrwgcjH0NRkuqkchYOJwXYuM,9809
|
21
21
|
swarm/blueprints/dilbot_universe/blueprint_dilbot_universe.py,sha256=w7i96KkRNmBOV2Kz9SJqQCirsRPEgENR-79iLOzpKaQ,16770
|
@@ -63,6 +63,7 @@ swarm/extensions/blueprint/config_loader.py,sha256=ldQGtv4tXeDJzL2GCylDxykZxYBo4
|
|
63
63
|
swarm/extensions/blueprint/django_utils.py,sha256=ObtkmF1JW4H2OEYa7vC6ussUsMBtDsZTTVeHGHI-GOQ,17457
|
64
64
|
swarm/extensions/blueprint/interactive_mode.py,sha256=vGmMuAgC93TLjMi2RkXQ2FkWfIUblyOTFGHmVdGKLSQ,4572
|
65
65
|
swarm/extensions/blueprint/output_utils.py,sha256=8OtVE3gEvPeeTu4Juo6Ad6omSlMqSuAtckXXx7P1CyQ,4022
|
66
|
+
swarm/extensions/blueprint/runnable_blueprint.py,sha256=1MywZ54vUysLVtYmwCbcDYQmQnoZffCHgsArbe-VKe8,1813
|
66
67
|
swarm/extensions/blueprint/spinner.py,sha256=3J0ZrNzoI5O5qR7hnCeRM3dZx2fLb_H3zkoj_AYt5LQ,3394
|
67
68
|
swarm/extensions/blueprint/modes/rest_mode.py,sha256=KZuB_j2NfomER7CmlsLBqRipU3DymKY-9RpoGilMH0I,1357
|
68
69
|
swarm/extensions/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
@@ -254,8 +255,8 @@ swarm/views/message_views.py,sha256=sDUnXyqKXC8WwIIMAlWf00s2_a2T9c75Na5FvYMJwBM,
|
|
254
255
|
swarm/views/model_views.py,sha256=aAbU4AZmrOTaPeKMWtoKK7FPYHdaN3Zbx55JfKzYTRY,2937
|
255
256
|
swarm/views/utils.py,sha256=geX3Z5ZDKFYyXYBMilc-4qgOSjhujK3AfRtvbXgFpXk,3643
|
256
257
|
swarm/views/web_views.py,sha256=ExQQeJpZ8CkLZQC_pXKOOmdnEy2qR3wEBP4LLp27DPU,7404
|
257
|
-
open_swarm-0.1.
|
258
|
-
open_swarm-0.1.
|
259
|
-
open_swarm-0.1.
|
260
|
-
open_swarm-0.1.
|
261
|
-
open_swarm-0.1.
|
258
|
+
open_swarm-0.1.1744936125.dist-info/METADATA,sha256=ddaFDVixx5XyE42kg6sn5ZIcFcy7rbfO-z-bl5QpIbI,13678
|
259
|
+
open_swarm-0.1.1744936125.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
260
|
+
open_swarm-0.1.1744936125.dist-info/entry_points.txt,sha256=z1UIVRRhri-V-hWxFkDEYu0SZPUIsVO4KpDaodgcFzU,125
|
261
|
+
open_swarm-0.1.1744936125.dist-info/licenses/LICENSE,sha256=BU9bwRlnOt_JDIb6OT55Q4leLZx9RArDLTFnlDIrBEI,1062
|
262
|
+
open_swarm-0.1.1744936125.dist-info/RECORD,,
|
@@ -7,7 +7,7 @@ import shlex # Added for safe command splitting
|
|
7
7
|
import re
|
8
8
|
import inspect
|
9
9
|
from pathlib import Path # Use pathlib for better path handling
|
10
|
-
from typing import Dict, Any, List, Optional, ClassVar
|
10
|
+
from typing import Dict, Any, List, Optional, ClassVar, AsyncGenerator
|
11
11
|
|
12
12
|
try:
|
13
13
|
# Core imports from openai-agents
|
@@ -30,191 +30,111 @@ except ImportError as e:
|
|
30
30
|
logger = logging.getLogger(__name__)
|
31
31
|
# Logging level is controlled by BlueprintBase based on --debug flag
|
32
32
|
|
33
|
-
# --- Tool Definitions ---
|
34
|
-
|
35
|
-
# Enhanced error handling and logging added.
|
36
|
-
|
37
|
-
@function_tool
|
38
|
-
def git_status() -> str:
|
33
|
+
# --- Tool Logic Definitions (Undecorated) ---
|
34
|
+
def _git_status_logic() -> str:
|
39
35
|
"""Executes 'git status --porcelain' and returns the current repository status."""
|
40
|
-
logger.info("Executing git status --porcelain")
|
36
|
+
logger.info("Executing git status --porcelain")
|
41
37
|
try:
|
42
|
-
# Using --porcelain for machine-readable output
|
43
38
|
result = subprocess.run(["git", "status", "--porcelain"], capture_output=True, text=True, check=True, timeout=30)
|
44
39
|
output = result.stdout.strip()
|
45
40
|
logger.debug(f"Git status raw output:\n{output}")
|
46
41
|
return f"OK: Git Status:\n{output}" if output else "OK: No changes detected in the working directory."
|
47
|
-
except FileNotFoundError:
|
48
|
-
|
49
|
-
|
50
|
-
except
|
51
|
-
|
52
|
-
|
53
|
-
except subprocess.TimeoutExpired:
|
54
|
-
logger.error("Git status command timed out.")
|
55
|
-
return "Error: Git status command timed out."
|
56
|
-
except Exception as e:
|
57
|
-
logger.error(f"Unexpected error during git status: {e}", exc_info=logger.level <= logging.DEBUG)
|
58
|
-
return f"Error during git status: {e}"
|
59
|
-
@function_tool
|
60
|
-
def git_diff() -> str:
|
42
|
+
except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
|
43
|
+
except subprocess.CalledProcessError as e: logger.error(f"Error executing git status: {e.stderr}"); return f"Error executing git status: {e.stderr}"
|
44
|
+
except subprocess.TimeoutExpired: logger.error("Git status command timed out."); return "Error: Git status command timed out."
|
45
|
+
except Exception as e: logger.error(f"Unexpected error during git status: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git status: {e}"
|
46
|
+
|
47
|
+
def _git_diff_logic() -> str:
|
61
48
|
"""Executes 'git diff' and returns the differences in the working directory."""
|
62
|
-
logger.info("Executing git diff")
|
49
|
+
logger.info("Executing git diff")
|
63
50
|
try:
|
64
|
-
result = subprocess.run(["git", "diff"], capture_output=True, text=True, check=False, timeout=30)
|
65
|
-
output = result.stdout
|
66
|
-
|
67
|
-
|
68
|
-
logger.error(f"Error executing git diff (Exit Code {result.returncode}): {stderr}")
|
69
|
-
return f"Error executing git diff: {stderr}"
|
70
|
-
logger.debug(f"Git diff raw output (Exit Code {result.returncode}):\n{output[:1000]}...") # Log snippet
|
51
|
+
result = subprocess.run(["git", "diff"], capture_output=True, text=True, check=False, timeout=30)
|
52
|
+
output = result.stdout; stderr = result.stderr.strip()
|
53
|
+
if result.returncode != 0 and stderr: logger.error(f"Error executing git diff (Exit Code {result.returncode}): {stderr}"); return f"Error executing git diff: {stderr}"
|
54
|
+
logger.debug(f"Git diff raw output (Exit Code {result.returncode}):\n{output[:1000]}...")
|
71
55
|
return f"OK: Git Diff Output:\n{output}" if output else "OK: No differences found."
|
72
|
-
except FileNotFoundError:
|
73
|
-
|
74
|
-
|
75
|
-
except subprocess.TimeoutExpired:
|
76
|
-
logger.error("Git diff command timed out.")
|
77
|
-
return "Error: Git diff command timed out."
|
78
|
-
except Exception as e:
|
79
|
-
logger.error(f"Unexpected error during git diff: {e}", exc_info=logger.level <= logging.DEBUG)
|
80
|
-
return f"Error during git diff: {e}"
|
56
|
+
except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
|
57
|
+
except subprocess.TimeoutExpired: logger.error("Git diff command timed out."); return "Error: Git diff command timed out."
|
58
|
+
except Exception as e: logger.error(f"Unexpected error during git diff: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git diff: {e}"
|
81
59
|
|
82
|
-
|
83
|
-
def git_add(file_path: str = ".") -> str:
|
60
|
+
def _git_add_logic(file_path: str = ".") -> str:
|
84
61
|
"""Executes 'git add' to stage changes for the specified file or all changes (default '.')."""
|
85
|
-
logger.info(f"Executing git add {file_path}")
|
62
|
+
logger.info(f"Executing git add {file_path}")
|
86
63
|
try:
|
87
64
|
result = subprocess.run(["git", "add", file_path], capture_output=True, text=True, check=True, timeout=30)
|
88
65
|
logger.debug(f"Git add '{file_path}' completed successfully.")
|
89
66
|
return f"OK: Staged '{file_path}' successfully."
|
90
|
-
except FileNotFoundError:
|
91
|
-
|
92
|
-
|
93
|
-
except
|
94
|
-
logger.error(f"Error executing git add '{file_path}': {e.stderr}")
|
95
|
-
return f"Error executing git add '{file_path}': {e.stderr}"
|
96
|
-
except subprocess.TimeoutExpired:
|
97
|
-
logger.error(f"Git add command timed out for '{file_path}'.")
|
98
|
-
return f"Error: Git add command timed out for '{file_path}'."
|
99
|
-
except Exception as e:
|
100
|
-
logger.error(f"Unexpected error during git add '{file_path}': {e}", exc_info=logger.level <= logging.DEBUG)
|
101
|
-
return f"Error during git add '{file_path}': {e}"
|
67
|
+
except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
|
68
|
+
except subprocess.CalledProcessError as e: logger.error(f"Error executing git add '{file_path}': {e.stderr}"); return f"Error executing git add '{file_path}': {e.stderr}"
|
69
|
+
except subprocess.TimeoutExpired: logger.error(f"Git add command timed out for '{file_path}'."); return f"Error: Git add command timed out for '{file_path}'."
|
70
|
+
except Exception as e: logger.error(f"Unexpected error during git add '{file_path}': {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git add '{file_path}': {e}"
|
102
71
|
|
103
|
-
|
104
|
-
def git_commit(message: str) -> str:
|
72
|
+
def _git_commit_logic(message: str) -> str:
|
105
73
|
"""Executes 'git commit' with a provided commit message."""
|
106
|
-
logger.info(f"Executing git commit -m '{message[:50]}...'")
|
107
|
-
if not message or not message.strip():
|
108
|
-
logger.warning("Git commit attempted with empty or whitespace-only message.")
|
109
|
-
return "Error: Commit message cannot be empty."
|
74
|
+
logger.info(f"Executing git commit -m '{message[:50]}...'")
|
75
|
+
if not message or not message.strip(): logger.warning("Git commit attempted with empty message."); return "Error: Commit message cannot be empty."
|
110
76
|
try:
|
111
|
-
|
112
|
-
|
113
|
-
output = result.stdout.strip()
|
114
|
-
stderr = result.stderr.strip()
|
77
|
+
result = subprocess.run(["git", "commit", "-m", message], capture_output=True, text=True, check=False, timeout=30)
|
78
|
+
output = result.stdout.strip(); stderr = result.stderr.strip()
|
115
79
|
logger.debug(f"Git commit raw output (Exit Code {result.returncode}):\nSTDOUT: {output}\nSTDERR: {stderr}")
|
116
|
-
|
117
|
-
# Handle common non-error cases explicitly
|
118
80
|
if "nothing to commit" in output or "nothing added to commit" in output or "no changes added to commit" in output:
|
119
|
-
logger.info("Git commit reported: Nothing to commit.")
|
120
|
-
|
121
|
-
if result.returncode
|
122
|
-
|
123
|
-
|
124
|
-
|
125
|
-
error_detail = stderr if stderr else output
|
126
|
-
logger.error(f"Error executing git commit (Exit Code {result.returncode}): {error_detail}")
|
127
|
-
return f"Error executing git commit: {error_detail}"
|
128
|
-
|
129
|
-
except FileNotFoundError:
|
130
|
-
logger.error("Git command not found.")
|
131
|
-
return "Error: git command not found."
|
132
|
-
except subprocess.TimeoutExpired:
|
133
|
-
logger.error("Git commit command timed out.")
|
134
|
-
return "Error: Git commit command timed out."
|
135
|
-
except Exception as e:
|
136
|
-
logger.error(f"Unexpected error during git commit: {e}", exc_info=logger.level <= logging.DEBUG)
|
137
|
-
return f"Error during git commit: {e}"
|
81
|
+
logger.info("Git commit reported: Nothing to commit."); return "OK: Nothing to commit."
|
82
|
+
if result.returncode == 0: return f"OK: Committed with message '{message}'.\n{output}"
|
83
|
+
else: error_detail = stderr if stderr else output; logger.error(f"Error executing git commit (Exit Code {result.returncode}): {error_detail}"); return f"Error executing git commit: {error_detail}"
|
84
|
+
except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
|
85
|
+
except subprocess.TimeoutExpired: logger.error("Git commit command timed out."); return "Error: Git commit command timed out."
|
86
|
+
except Exception as e: logger.error(f"Unexpected error during git commit: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git commit: {e}"
|
138
87
|
|
139
|
-
|
140
|
-
def git_push() -> str:
|
88
|
+
def _git_push_logic() -> str:
|
141
89
|
"""Executes 'git push' to push staged commits to the remote repository."""
|
142
|
-
logger.info("Executing git push")
|
90
|
+
logger.info("Executing git push")
|
143
91
|
try:
|
144
|
-
result = subprocess.run(["git", "push"], capture_output=True, text=True, check=True, timeout=120)
|
145
|
-
output = result.stdout.strip() + "\n" + result.stderr.strip()
|
92
|
+
result = subprocess.run(["git", "push"], capture_output=True, text=True, check=True, timeout=120)
|
93
|
+
output = result.stdout.strip() + "\n" + result.stderr.strip()
|
146
94
|
logger.debug(f"Git push raw output:\n{output}")
|
147
95
|
return f"OK: Push completed.\n{output.strip()}"
|
148
|
-
except FileNotFoundError:
|
149
|
-
|
150
|
-
|
151
|
-
except
|
152
|
-
error_output = e.stdout.strip() + "\n" + e.stderr.strip()
|
153
|
-
logger.error(f"Error executing git push: {error_output}")
|
154
|
-
return f"Error executing git push: {error_output.strip()}"
|
155
|
-
except subprocess.TimeoutExpired:
|
156
|
-
logger.error("Git push command timed out.")
|
157
|
-
return "Error: Git push command timed out."
|
158
|
-
except Exception as e:
|
159
|
-
logger.error(f"Unexpected error during git push: {e}", exc_info=logger.level <= logging.DEBUG)
|
160
|
-
return f"Error during git push: {e}"
|
96
|
+
except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
|
97
|
+
except subprocess.CalledProcessError as e: error_output = e.stdout.strip() + "\n" + e.stderr.strip(); logger.error(f"Error executing git push: {error_output}"); return f"Error executing git push: {error_output.strip()}"
|
98
|
+
except subprocess.TimeoutExpired: logger.error("Git push command timed out."); return "Error: Git push command timed out."
|
99
|
+
except Exception as e: logger.error(f"Unexpected error during git push: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git push: {e}"
|
161
100
|
|
162
|
-
|
163
|
-
def run_npm_test(args: str = "") -> str:
|
101
|
+
def _run_npm_test_logic(args: str = "") -> str:
|
164
102
|
"""Executes 'npm run test' with optional arguments."""
|
165
103
|
try:
|
166
|
-
|
167
|
-
|
168
|
-
|
169
|
-
logger.info(f"Executing npm test: {cmd_str}") # Keep INFO for tool execution start
|
170
|
-
result = subprocess.run(cmd_list, capture_output=True, text=True, check=False, timeout=120) # check=False to capture output on failure
|
104
|
+
cmd_list = ["npm", "run", "test"] + (shlex.split(args) if args else []); cmd_str = ' '.join(cmd_list)
|
105
|
+
logger.info(f"Executing npm test: {cmd_str}")
|
106
|
+
result = subprocess.run(cmd_list, capture_output=True, text=True, check=False, timeout=120)
|
171
107
|
output = f"Exit Code: {result.returncode}\nSTDOUT:\n{result.stdout.strip()}\nSTDERR:\n{result.stderr.strip()}"
|
172
|
-
if result.returncode == 0:
|
173
|
-
|
174
|
-
|
175
|
-
|
176
|
-
|
177
|
-
return f"Error: npm test failed.\n{output}"
|
178
|
-
except FileNotFoundError:
|
179
|
-
logger.error("npm command not found. Is Node.js/npm installed and in PATH?")
|
180
|
-
return "Error: npm command not found."
|
181
|
-
except subprocess.TimeoutExpired:
|
182
|
-
logger.error("npm test command timed out.")
|
183
|
-
return "Error: npm test command timed out."
|
184
|
-
except Exception as e:
|
185
|
-
logger.error(f"Unexpected error during npm test: {e}", exc_info=logger.level <= logging.DEBUG)
|
186
|
-
return f"Error during npm test: {e}"
|
108
|
+
if result.returncode == 0: logger.debug(f"npm test completed successfully:\n{output}"); return f"OK: npm test finished.\n{output}"
|
109
|
+
else: logger.error(f"npm test failed (Exit Code {result.returncode}):\n{output}"); return f"Error: npm test failed.\n{output}"
|
110
|
+
except FileNotFoundError: logger.error("npm command not found."); return "Error: npm command not found."
|
111
|
+
except subprocess.TimeoutExpired: logger.error("npm test command timed out."); return "Error: npm test command timed out."
|
112
|
+
except Exception as e: logger.error(f"Unexpected error during npm test: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during npm test: {e}"
|
187
113
|
|
188
|
-
|
189
|
-
def run_pytest(args: str = "") -> str:
|
114
|
+
def _run_pytest_logic(args: str = "") -> str:
|
190
115
|
"""Executes 'uv run pytest' with optional arguments."""
|
191
116
|
try:
|
192
|
-
|
193
|
-
|
194
|
-
|
195
|
-
logger.info(f"Executing pytest via uv: {cmd_str}") # Keep INFO for tool execution start
|
196
|
-
result = subprocess.run(cmd_list, capture_output=True, text=True, check=False, timeout=120) # check=False to capture output on failure
|
117
|
+
cmd_list = ["uv", "run", "pytest"] + (shlex.split(args) if args else []); cmd_str = ' '.join(cmd_list)
|
118
|
+
logger.info(f"Executing pytest via uv: {cmd_str}")
|
119
|
+
result = subprocess.run(cmd_list, capture_output=True, text=True, check=False, timeout=120)
|
197
120
|
output = f"Exit Code: {result.returncode}\nSTDOUT:\n{result.stdout.strip()}\nSTDERR:\n{result.stderr.strip()}"
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
|
202
|
-
|
203
|
-
|
204
|
-
|
205
|
-
|
206
|
-
|
207
|
-
|
208
|
-
|
209
|
-
|
210
|
-
|
211
|
-
|
212
|
-
except Exception as e:
|
213
|
-
logger.error(f"Unexpected error during pytest: {e}", exc_info=logger.level <= logging.DEBUG)
|
214
|
-
return f"Error during pytest: {e}"
|
121
|
+
if result.returncode == 0: logger.debug(f"pytest completed successfully:\n{output}"); return f"OK: pytest finished successfully.\n{output}"
|
122
|
+
else: logger.warning(f"pytest finished with failures (Exit Code {result.returncode}):\n{output}"); return f"OK: Pytest finished with failures (Exit Code {result.returncode}).\n{output}"
|
123
|
+
except FileNotFoundError: logger.error("uv command not found."); return "Error: uv command not found."
|
124
|
+
except subprocess.TimeoutExpired: logger.error("pytest command timed out."); return "Error: pytest command timed out."
|
125
|
+
except Exception as e: logger.error(f"Unexpected error during pytest: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during pytest: {e}"
|
126
|
+
|
127
|
+
# --- Tool Definitions (Decorated - reverted to default naming) ---
|
128
|
+
git_status = function_tool(_git_status_logic)
|
129
|
+
git_diff = function_tool(_git_diff_logic)
|
130
|
+
git_add = function_tool(_git_add_logic)
|
131
|
+
git_commit = function_tool(_git_commit_logic)
|
132
|
+
git_push = function_tool(_git_push_logic)
|
133
|
+
run_npm_test = function_tool(_run_npm_test_logic)
|
134
|
+
run_pytest = function_tool(_run_pytest_logic)
|
215
135
|
|
216
136
|
# --- Agent Instructions ---
|
217
|
-
#
|
137
|
+
# (Instructions remain the same)
|
218
138
|
michael_instructions = """
|
219
139
|
You are Michael Toasted, the resolute leader of the Burnt Noodles creative team.
|
220
140
|
Your primary role is to understand the user's request, break it down into actionable steps,
|
@@ -244,50 +164,28 @@ Available Agent Tools: None (Report back to Michael for delegation).
|
|
244
164
|
"""
|
245
165
|
|
246
166
|
# --- Blueprint Definition ---
|
247
|
-
# Inherits from BlueprintBase, defines metadata, creates agents, and sets up delegation.
|
248
167
|
class BurntNoodlesBlueprint(BlueprintBase):
|
249
|
-
"""
|
250
|
-
Burnt Noodles Blueprint: A multi-agent team demonstrating Git operations and testing workflows.
|
251
|
-
- Michael Toasted: Coordinator, delegates tasks.
|
252
|
-
- Fiona Flame: Handles Git commands (status, diff, add, commit, push).
|
253
|
-
- Sam Ashes: Handles test execution (npm, pytest).
|
254
|
-
"""
|
255
|
-
# Class variable for blueprint metadata, conforming to BlueprintBase structure.
|
256
168
|
metadata: ClassVar[Dict[str, Any]] = {
|
257
169
|
"name": "BurntNoodlesBlueprint",
|
258
170
|
"title": "Burnt Noodles",
|
259
171
|
"description": "A multi-agent team managing Git operations and code testing.",
|
260
|
-
"version": "1.1.0",
|
172
|
+
"version": "1.1.0",
|
261
173
|
"author": "Open Swarm Team (Refactored)",
|
262
174
|
"tags": ["git", "test", "multi-agent", "collaboration", "refactor"],
|
263
|
-
"required_mcp_servers": [],
|
175
|
+
"required_mcp_servers": [],
|
264
176
|
}
|
265
177
|
|
266
|
-
# Caches for OpenAI client and Model instances to avoid redundant creation.
|
267
178
|
_openai_client_cache: Dict[str, AsyncOpenAI] = {}
|
268
179
|
_model_instance_cache: Dict[str, Model] = {}
|
269
180
|
|
270
181
|
def _get_model_instance(self, profile_name: str) -> Model:
|
271
|
-
"""
|
272
|
-
Retrieves or creates an LLM Model instance based on the configuration profile.
|
273
|
-
Handles client instantiation and caching. Uses OpenAIChatCompletionsModel.
|
274
|
-
Args:
|
275
|
-
profile_name: The name of the LLM profile to use (e.g., 'default').
|
276
|
-
Returns:
|
277
|
-
An instance of the configured Model.
|
278
|
-
Raises:
|
279
|
-
ValueError: If configuration is missing or invalid.
|
280
|
-
"""
|
281
|
-
# Check cache first
|
282
182
|
if profile_name in self._model_instance_cache:
|
283
183
|
logger.debug(f"Using cached Model instance for profile '{profile_name}'.")
|
284
184
|
return self._model_instance_cache[profile_name]
|
285
185
|
|
286
186
|
logger.debug(f"Creating new Model instance for profile '{profile_name}'.")
|
287
|
-
# Retrieve profile data using BlueprintBase helper method
|
288
187
|
profile_data = getattr(self, "get_llm_profile", lambda prof: {"provider": "openai", "model": "gpt-mock"})(profile_name)
|
289
188
|
if not profile_data:
|
290
|
-
# Critical error if the profile (or default fallback) isn't found
|
291
189
|
logger.critical(f"Cannot create Model instance: LLM profile '{profile_name}' (or 'default') not found in configuration.")
|
292
190
|
raise ValueError(f"Missing LLM profile configuration for '{profile_name}' or 'default'.")
|
293
191
|
|
@@ -297,21 +195,17 @@ class BurntNoodlesBlueprint(BlueprintBase):
|
|
297
195
|
logger.critical(f"LLM profile '{profile_name}' is missing the required 'model' key.")
|
298
196
|
raise ValueError(f"Missing 'model' key in LLM profile '{profile_name}'.")
|
299
197
|
|
300
|
-
# Ensure we only handle OpenAI for now
|
301
198
|
if provider != "openai":
|
302
199
|
logger.error(f"Unsupported LLM provider '{provider}' in profile '{profile_name}'. Only 'openai' is supported in this blueprint.")
|
303
200
|
raise ValueError(f"Unsupported LLM provider: {provider}")
|
304
201
|
|
305
|
-
# Create or retrieve cached OpenAI client instance
|
306
202
|
client_cache_key = f"{provider}_{profile_data.get('base_url')}"
|
307
203
|
if client_cache_key not in self._openai_client_cache:
|
308
|
-
# Prepare arguments for AsyncOpenAI, filtering out None values
|
309
204
|
client_kwargs = { "api_key": profile_data.get("api_key"), "base_url": profile_data.get("base_url") }
|
310
205
|
filtered_client_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
|
311
|
-
log_client_kwargs = {k:v for k,v in filtered_client_kwargs.items() if k != 'api_key'}
|
206
|
+
log_client_kwargs = {k:v for k,v in filtered_client_kwargs.items() if k != 'api_key'}
|
312
207
|
logger.debug(f"Creating new AsyncOpenAI client for profile '{profile_name}' with config: {log_client_kwargs}")
|
313
208
|
try:
|
314
|
-
# Create and cache the client
|
315
209
|
self._openai_client_cache[client_cache_key] = AsyncOpenAI(**filtered_client_kwargs)
|
316
210
|
except Exception as e:
|
317
211
|
logger.error(f"Failed to create AsyncOpenAI client for profile '{profile_name}': {e}", exc_info=True)
|
@@ -319,11 +213,9 @@ class BurntNoodlesBlueprint(BlueprintBase):
|
|
319
213
|
|
320
214
|
openai_client_instance = self._openai_client_cache[client_cache_key]
|
321
215
|
|
322
|
-
# Instantiate the specific Model implementation (OpenAIChatCompletionsModel)
|
323
216
|
logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') with client instance for profile '{profile_name}'.")
|
324
217
|
try:
|
325
218
|
model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=openai_client_instance)
|
326
|
-
# Cache the model instance
|
327
219
|
self._model_instance_cache[profile_name] = model_instance
|
328
220
|
return model_instance
|
329
221
|
except Exception as e:
|
@@ -331,82 +223,82 @@ class BurntNoodlesBlueprint(BlueprintBase):
|
|
331
223
|
raise ValueError(f"Failed to initialize LLM provider for profile '{profile_name}': {e}") from e
|
332
224
|
|
333
225
|
def create_starting_agent(self, mcp_servers: List[MCPServer]) -> Agent:
|
334
|
-
"""
|
335
|
-
Creates the Burnt Noodles agent team: Michael (Coordinator), Fiona (Git), Sam (Testing).
|
336
|
-
Sets up tools and agent-as-tool delegation.
|
337
|
-
Args:
|
338
|
-
mcp_servers: List of started MCP server instances (not used by this BP).
|
339
|
-
Returns:
|
340
|
-
The starting agent instance (Michael Toasted).
|
341
|
-
"""
|
342
226
|
logger.debug("Creating Burnt Noodles agent team...")
|
343
227
|
config = self._load_configuration() if getattr(self, "config", None) is None else self.config
|
344
|
-
# Clear caches at the start of agent creation for this run
|
345
228
|
self._model_instance_cache = {}
|
346
229
|
self._openai_client_cache = {}
|
347
|
-
|
348
|
-
# Determine the LLM profile to use (e.g., from config or default)
|
230
|
+
|
349
231
|
default_profile_name = config.get("llm_profile", "default")
|
350
232
|
logger.debug(f"Using LLM profile '{default_profile_name}' for all Burnt Noodles agents.")
|
351
|
-
# Get the single Model instance to share among agents (or create if needed)
|
352
233
|
default_model_instance = self._get_model_instance(default_profile_name)
|
353
234
|
|
354
|
-
#
|
355
|
-
# Fiona gets Git function tools
|
235
|
+
# --- Use the decorated tool variables ---
|
356
236
|
fiona_flame = Agent(
|
357
|
-
name="Fiona_Flame",
|
237
|
+
name="Fiona_Flame",
|
358
238
|
model=default_model_instance,
|
359
239
|
instructions=fiona_instructions,
|
360
240
|
tools=[git_status, git_diff, git_add, git_commit, git_push] # Agent tools added later
|
361
241
|
)
|
362
|
-
# Sam gets Testing function tools
|
363
242
|
sam_ashes = Agent(
|
364
|
-
name="Sam_Ashes",
|
243
|
+
name="Sam_Ashes",
|
365
244
|
model=default_model_instance,
|
366
245
|
instructions=sam_instructions,
|
367
246
|
tools=[run_npm_test, run_pytest] # Agent tools added later
|
368
247
|
)
|
369
|
-
|
370
|
-
# Instantiate the coordinator agent (Michael)
|
371
|
-
# Michael gets limited function tools and the specialist agents as tools
|
372
248
|
michael_toasted = Agent(
|
373
249
|
name="Michael_Toasted",
|
374
250
|
model=default_model_instance,
|
375
251
|
instructions=michael_instructions,
|
376
252
|
tools=[
|
377
|
-
# Michael's direct
|
378
|
-
git_status,
|
253
|
+
git_status, # Michael's direct tools
|
379
254
|
git_diff,
|
380
|
-
# Specialist agents exposed as tools for delegation
|
381
255
|
fiona_flame.as_tool(
|
382
|
-
tool_name="Fiona_Flame",
|
256
|
+
tool_name="Fiona_Flame",
|
383
257
|
tool_description="Delegate Git operations (add, commit, push) or complex status/diff queries to Fiona."
|
384
258
|
),
|
385
259
|
sam_ashes.as_tool(
|
386
|
-
tool_name="Sam_Ashes",
|
260
|
+
tool_name="Sam_Ashes",
|
387
261
|
tool_description="Delegate testing tasks (npm test, pytest) to Sam."
|
388
262
|
),
|
389
263
|
],
|
390
|
-
mcp_servers=mcp_servers
|
264
|
+
mcp_servers=mcp_servers
|
391
265
|
)
|
266
|
+
# --- End tool variable usage ---
|
392
267
|
|
393
|
-
# Add cross-delegation tools *after* all agents are instantiated
|
394
|
-
# Fiona can delegate testing to Sam
|
395
268
|
fiona_flame.tools.append(
|
396
269
|
sam_ashes.as_tool(tool_name="Sam_Ashes", tool_description="Delegate testing tasks (npm test, pytest) to Sam.")
|
397
270
|
)
|
398
|
-
# Sam can delegate Git tasks back to Fiona (as per instructions, Sam should report to Michael,
|
399
|
-
# but having the tool technically available might be useful in complex future scenarios,
|
400
|
-
# rely on prompt engineering to prevent direct calls unless intended).
|
401
|
-
# sam_ashes.tools.append(
|
402
|
-
# fiona_flame.as_tool(tool_name="Fiona_Flame", tool_description="Delegate Git operations back to Fiona if needed.")
|
403
|
-
# )
|
404
271
|
|
405
272
|
logger.debug("Burnt Noodles agent team created successfully. Michael Toasted is the starting agent.")
|
406
|
-
# Return the coordinator agent as the entry point for the Runner
|
407
273
|
return michael_toasted
|
408
274
|
|
275
|
+
async def run(self, messages: List[Dict[str, Any]], **kwargs) -> AsyncGenerator[Dict[str, Any], None]:
|
276
|
+
"""
|
277
|
+
Main execution entry point for the Burnt Noodles blueprint.
|
278
|
+
Delegates to _run_non_interactive for CLI-like execution.
|
279
|
+
"""
|
280
|
+
logger.info("BurntNoodlesBlueprint run method called.")
|
281
|
+
instruction = messages[-1].get("content", "") if messages else ""
|
282
|
+
async for chunk in self._run_non_interactive(instruction, **kwargs):
|
283
|
+
yield chunk
|
284
|
+
logger.info("BurntNoodlesBlueprint run method finished.")
|
285
|
+
|
286
|
+
async def _run_non_interactive(self, instruction: str, **kwargs) -> AsyncGenerator[Dict[str, Any], None]:
|
287
|
+
"""Helper to run the agent flow based on an instruction."""
|
288
|
+
logger.info(f"Running Burnt Noodles non-interactively with instruction: '{instruction[:100]}...'")
|
289
|
+
mcp_servers = kwargs.get("mcp_servers", [])
|
290
|
+
starting_agent = self.create_starting_agent(mcp_servers=mcp_servers)
|
291
|
+
runner = Runner(agent=starting_agent)
|
292
|
+
try:
|
293
|
+
final_result = await runner.run(instruction)
|
294
|
+
logger.info(f"Non-interactive run finished. Final Output: {final_result.final_output}")
|
295
|
+
yield { "messages": [ {"role": "assistant", "content": final_result.final_output} ] }
|
296
|
+
except Exception as e:
|
297
|
+
logger.error(f"Error during non-interactive run: {e}", exc_info=True)
|
298
|
+
yield { "messages": [ {"role": "assistant", "content": f"An error occurred: {e}"} ] }
|
299
|
+
|
300
|
+
|
409
301
|
# Standard Python entry point for direct script execution
|
410
302
|
if __name__ == "__main__":
|
411
|
-
# Call the main class method from BlueprintBase to handle CLI parsing and execution.
|
412
303
|
BurntNoodlesBlueprint.main()
|
304
|
+
|
@@ -3,11 +3,19 @@ import os
|
|
3
3
|
import sys
from typing import Any, AsyncGenerator, ClassVar, Dict, List, Optional
|
5
5
|
|
6
|
+
# Default to WARNING verbosity; setting SWARM_DEBUG=1 opts into DEBUG output.
_log_level = logging.DEBUG if os.environ.get("SWARM_DEBUG") else logging.WARNING
logging.basicConfig(level=_log_level)
|
11
|
+
|
6
12
|
# Ensure src is in path for BlueprintBase import
_here = os.path.dirname(__file__)
project_root = os.path.abspath(os.path.join(_here, '..', '..'))
src_path = os.path.join(project_root, 'src')
if src_path not in sys.path:
    sys.path.insert(0, src_path)
|
10
16
|
|
17
|
+
from typing import Optional
|
18
|
+
from pathlib import Path
|
11
19
|
try:
|
12
20
|
from agents import Agent, Tool, function_tool, Runner
|
13
21
|
from agents.mcp import MCPServer
|
@@ -24,6 +32,13 @@ logger = logging.getLogger(__name__)
|
|
24
32
|
|
25
33
|
# --- Define the Blueprint ---
|
26
34
|
class ChatbotBlueprint(BlueprintBase):
|
35
|
+
def __init__(self, blueprint_id: str, config_path: Optional[Path] = None, **kwargs):
    """Initialize the chatbot blueprint.

    Args:
        blueprint_id: Unique identifier for this blueprint instance.
        config_path: Optional path to a configuration file, passed through
            to BlueprintBase.
        **kwargs: Additional options forwarded to BlueprintBase.
    """
    super().__init__(blueprint_id, config_path=config_path, **kwargs)

    # Remove redundant client instantiation; rely on framework-level default client
    # (No need to re-instantiate AsyncOpenAI or set_default_openai_client)
    # All blueprints now use the default client set at framework init
|
27
42
|
"""A simple conversational chatbot agent."""
|
28
43
|
metadata: ClassVar[Dict[str, Any]] = {
|
29
44
|
"name": "ChatbotBlueprint",
|
@@ -42,19 +57,21 @@ class ChatbotBlueprint(BlueprintBase):
|
|
42
57
|
|
43
58
|
# --- Model Instantiation Helper --- (Standard helper)
|
44
59
|
def _get_model_instance(self, profile_name: str) -> Model:
|
45
|
-
"""Retrieves or creates an LLM Model instance."""
|
46
|
-
# ... (Implementation is the same as previous refactors) ...
|
60
|
+
"""Retrieves or creates an LLM Model instance, respecting LITELLM_MODEL/DEFAULT_LLM if set."""
|
47
61
|
if profile_name in self._model_instance_cache:
|
48
62
|
logger.debug(f"Using cached Model instance for profile '{profile_name}'.")
|
49
63
|
return self._model_instance_cache[profile_name]
|
50
64
|
logger.debug(f"Creating new Model instance for profile '{profile_name}'.")
|
51
65
|
profile_data = self.get_llm_profile(profile_name)
|
52
|
-
|
53
|
-
|
54
|
-
model_name = profile_data.get("model")
|
66
|
+
# Patch: Respect LITELLM_MODEL/DEFAULT_LLM env vars
|
67
|
+
import os
|
68
|
+
model_name = os.getenv("LITELLM_MODEL") or os.getenv("DEFAULT_LLM") or profile_data.get("model")
|
69
|
+
profile_data["model"] = model_name
|
70
|
+
if profile_data.get("provider", "openai").lower() != "openai": raise ValueError(f"Unsupported provider: {profile_data.get('provider')}")
|
55
71
|
if not model_name: raise ValueError(f"Missing 'model' in profile '{profile_name}'.")
|
56
|
-
|
57
|
-
|
72
|
+
|
73
|
+
# REMOVE PATCH: env expansion is now handled globally in config loader
|
74
|
+
client_cache_key = f"{profile_data.get('provider', 'openai')}_{profile_data.get('base_url')}"
|
58
75
|
if client_cache_key not in self._openai_client_cache:
|
59
76
|
client_kwargs = { "api_key": profile_data.get("api_key"), "base_url": profile_data.get("base_url") }
|
60
77
|
filtered_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
|
@@ -93,6 +110,45 @@ class ChatbotBlueprint(BlueprintBase):
|
|
93
110
|
logger.debug("Chatbot agent created.")
|
94
111
|
return chatbot_agent
|
95
112
|
|
113
|
+
async def run(self, messages: List[Dict[str, Any]], **kwargs) -> AsyncGenerator[Dict[str, Any], None]:
    """Main execution entry point for the Chatbot blueprint.

    Args:
        messages: Chat-format message dicts; the last entry's "content" is
            used as the user instruction (empty string if no messages).
        **kwargs: Forwarded to _run_non_interactive (e.g. mcp_servers).

    Yields:
        Chunks of the form {"messages": [{"role": "assistant", "content": ...}]}.
    """
    # Fix: annotate as AsyncGenerator instead of Any — this is an async
    # generator, and the annotation now matches RunnableBlueprint.run and the
    # BurntNoodles blueprint's identical method.
    logger.info("ChatbotBlueprint run method called.")
    instruction = messages[-1].get("content", "") if messages else ""
    async for chunk in self._run_non_interactive(instruction, **kwargs):
        yield chunk
    logger.info("ChatbotBlueprint run method finished.")
|
120
|
+
|
121
|
+
async def _run_non_interactive(self, instruction: str, **kwargs) -> AsyncGenerator[Dict[str, Any], None]:
    """Run the chatbot agent once for *instruction* and yield the result.

    Args:
        instruction: The user instruction to send to the agent.
        **kwargs: Optional extras; "mcp_servers" is forwarded to agent creation.

    Yields:
        One chunk: {"messages": [{"role": "assistant", "content": ...}]} with
        the agent's final output or an error message.
    """
    # Fixes: dropped the dead `model_name` local (computed, never used) and
    # the redundant function-level `from agents import Runner` / `import os` —
    # both names are already imported at module level.
    mcp_servers = kwargs.get("mcp_servers", [])
    agent = self.create_starting_agent(mcp_servers=mcp_servers)
    try:
        result = await Runner.run(agent, instruction)
        yield {"messages": [{"role": "assistant", "content": getattr(result, 'final_output', str(result))}]}
    except Exception as e:
        logger.error(f"Error during non-interactive run: {e}", exc_info=True)
        yield {"messages": [{"role": "assistant", "content": f"An error occurred: {e}"}]}
|
133
|
+
|
96
134
|
# Standard Python entry point
if __name__ == "__main__":
    import asyncio

    # --- AUTO-PYTHONPATH PATCH FOR AGENTS ---
    # Make <project>/src importable when run as a loose script. `os` and `sys`
    # are already imported at module level, so the local re-imports were dropped.
    project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../..'))
    src_path = os.path.join(project_root, 'src')
    if src_path not in sys.path:
        sys.path.insert(0, src_path)

    if '--instruction' in sys.argv:
        value_pos = sys.argv.index('--instruction') + 1
        if value_pos >= len(sys.argv):
            # Fix: previously indexed past argv and raised IndexError when the
            # flag was given without a value.
            print("Usage: --instruction '<text>'", file=sys.stderr)
            sys.exit(1)
        instruction = sys.argv[value_pos]
        blueprint = ChatbotBlueprint(blueprint_id="chatbot")

        async def runner():
            # Print only successful assistant output; error chunks are dropped.
            async for chunk in blueprint._run_non_interactive(instruction):
                msg = chunk["messages"][0]["content"]
                if not msg.startswith("An error occurred:"):
                    print(msg)

        asyncio.run(runner())
    else:
        print("Interactive mode not supported in this script.")
|
@@ -0,0 +1,42 @@
|
|
1
|
+
import abc
|
2
|
+
from typing import List, Dict, Any, AsyncGenerator
|
3
|
+
|
4
|
+
# Assuming blueprint_base is in the same directory or accessible via installed package
|
5
|
+
from .blueprint_base import BlueprintBase
|
6
|
+
|
7
|
+
class RunnableBlueprint(BlueprintBase, abc.ABC):
    """
    Base class for blueprints meant to be executed programmatically,
    typically behind an API endpoint such as swarm-api.

    Common behaviour is inherited from BlueprintBase; concrete subclasses
    must supply `run` as the standard programmatic entry point.
    """

    @abc.abstractmethod
    async def run(self, messages: List[Dict[str, Any]], **kwargs) -> AsyncGenerator[Dict[str, Any], None]:
        """
        Standard entry point for running the blueprint programmatically.

        Args:
            messages: Message dictionaries in OpenAI chat-completions
                format; the final entry normally carries the user's
                instruction.
            **kwargs: Extra options supplied by the runner (e.g.
                mcp_servers, configuration overrides).

        Yields:
            Response chunks, usually dicts with a 'messages' key holding a
            list of message objects; the precise shape depends on the
            runner's expectations (e.g. SSE when streaming).

        Raises:
            NotImplementedError: Always, unless overridden by a subclass.
        """
        raise NotImplementedError("Subclasses of RunnableBlueprint must implement the 'run' method.")
        # Unreachable: kept only so static analysis recognises this as an
        # async-generator function body.
        if False:
            yield {}
|
42
|
+
|
File without changes
|
{open_swarm-0.1.1743416034.dist-info → open_swarm-0.1.1744936125.dist-info}/entry_points.txt
RENAMED
File without changes
|
{open_swarm-0.1.1743416034.dist-info → open_swarm-0.1.1744936125.dist-info}/licenses/LICENSE
RENAMED
File without changes
|