open-swarm 0.1.1745019399__py3-none-any.whl → 0.1.1745019957__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {open_swarm-0.1.1745019399.dist-info → open_swarm-0.1.1745019957.dist-info}/METADATA +29 -1
- {open_swarm-0.1.1745019399.dist-info → open_swarm-0.1.1745019957.dist-info}/RECORD +41 -27
- swarm/blueprints/blueprint_audit_status.json +27 -0
- swarm/blueprints/chatbot/blueprint_chatbot.py +79 -22
- swarm/blueprints/codey/CODEY.md +15 -0
- swarm/blueprints/codey/README.md +63 -0
- swarm/blueprints/codey/blueprint_codey.py +179 -108
- swarm/blueprints/codey/instructions.md +17 -0
- swarm/blueprints/divine_code/blueprint_divine_code.py +113 -7
- swarm/blueprints/django_chat/blueprint_django_chat.py +47 -0
- swarm/blueprints/family_ties/blueprint_family_ties.py +43 -10
- swarm/blueprints/geese/blueprint_geese.py +219 -0
- swarm/blueprints/mission_improbable/blueprint_mission_improbable.py +120 -63
- swarm/blueprints/monkai_magic/blueprint_monkai_magic.py +45 -1
- swarm/blueprints/nebula_shellz/blueprint_nebula_shellz.py +43 -27
- swarm/blueprints/omniplex/blueprint_omniplex.py +44 -31
- swarm/blueprints/rue_code/blueprint_rue_code.py +141 -141
- swarm/blueprints/suggestion/blueprint_suggestion.py +8 -17
- swarm/blueprints/unapologetic_press/blueprint_unapologetic_press.py +100 -1
- swarm/blueprints/whiskeytango_foxtrot/blueprint_whiskeytango_foxtrot.py +52 -28
- swarm/core/blueprint_ux.py +19 -21
- swarm/core/cli/__init__.py +1 -0
- swarm/core/cli/commands/__init__.py +1 -0
- swarm/core/cli/commands/blueprint_management.py +7 -0
- swarm/core/cli/interactive_shell.py +14 -0
- swarm/core/cli/main.py +50 -0
- swarm/core/cli/utils/__init__.py +1 -0
- swarm/core/cli/utils/discover_commands.py +18 -0
- swarm/extensions/blueprint/cli_handler.py +20 -2
- swarm/extensions/cli/commands/blueprint_management.py +46 -8
- swarm/extensions/cli/commands/edit_config.py +8 -1
- swarm/extensions/cli/commands/validate_env.py +8 -1
- swarm/extensions/cli/interactive_shell.py +16 -2
- swarm/extensions/cli/utils/__init__.py +1 -0
- swarm/extensions/cli/utils/prompt_user.py +3 -0
- swarm/extensions/launchers/swarm_api.py +12 -0
- swarm/extensions/launchers/swarm_cli.py +12 -0
- swarm/utils/context_utils.py +10 -4
- swarm/blueprints/gaggle/blueprint_gaggle.py +0 -303
- swarm/llm/chat_completion.py +0 -196
- {open_swarm-0.1.1745019399.dist-info → open_swarm-0.1.1745019957.dist-info}/WHEEL +0 -0
- {open_swarm-0.1.1745019399.dist-info → open_swarm-0.1.1745019957.dist-info}/entry_points.txt +0 -0
- {open_swarm-0.1.1745019399.dist-info → open_swarm-0.1.1745019957.dist-info}/licenses/LICENSE +0 -0
@@ -77,6 +77,17 @@ class NebuchaShellzzarBlueprint(BlueprintBase):
|
|
77
77
|
}
|
78
78
|
_model_instance_cache: Dict[str, Model] = {}
|
79
79
|
|
80
|
+
def __init__(self, *args, **kwargs):
|
81
|
+
super().__init__(*args, **kwargs)
|
82
|
+
class DummyLLM:
|
83
|
+
def chat_completion_stream(self, messages, **_):
|
84
|
+
class DummyStream:
|
85
|
+
def __aiter__(self): return self
|
86
|
+
async def __anext__(self):
|
87
|
+
raise StopAsyncIteration
|
88
|
+
return DummyStream()
|
89
|
+
self.llm = DummyLLM()
|
90
|
+
|
80
91
|
# --- ADDED: Splash Screen ---
|
81
92
|
def display_splash_screen(self, animated: bool = False):
|
82
93
|
console = Console()
|
@@ -170,33 +181,38 @@ Initializing NebulaShellzzar Crew...
|
|
170
181
|
logger.debug("NebulaShellzzar agent team created. Morpheus is the starting agent.") # Changed to DEBUG
|
171
182
|
return morpheus
|
172
183
|
|
184
|
+
def render_prompt(self, template_name: str, context: dict) -> str:
|
185
|
+
return f"User request: {context.get('user_request', '')}\nHistory: {context.get('history', '')}\nAvailable tools: {', '.join(context.get('available_tools', []))}"
|
186
|
+
|
173
187
|
async def run(self, messages: List[dict], **kwargs):
|
174
|
-
|
175
|
-
|
176
|
-
|
177
|
-
|
178
|
-
|
179
|
-
|
180
|
-
|
181
|
-
|
182
|
-
|
183
|
-
|
184
|
-
|
185
|
-
|
186
|
-
|
187
|
-
|
188
|
-
|
189
|
-
|
190
|
-
|
191
|
-
|
192
|
-
|
193
|
-
content = chunk.get("content")
|
194
|
-
if content and ("function call" in content or "args" in content):
|
195
|
-
continue
|
196
|
-
yield chunk
|
197
|
-
logger.info("NebuchaShellzzarBlueprint run method finished.")
|
198
|
-
except Exception as e:
|
199
|
-
yield {"messages": [{"role": "assistant", "content": f"Error: {e}"}]}
|
188
|
+
last_user_message = next((m['content'] for m in reversed(messages) if m['role'] == 'user'), None)
|
189
|
+
if not last_user_message:
|
190
|
+
yield {"messages": [{"role": "assistant", "content": "I need a user message to proceed."}]}
|
191
|
+
return
|
192
|
+
prompt_context = {
|
193
|
+
"user_request": last_user_message,
|
194
|
+
"history": messages[:-1],
|
195
|
+
"available_tools": ["nebula_shellz"]
|
196
|
+
}
|
197
|
+
rendered_prompt = self.render_prompt("nebula_shellz_prompt.j2", prompt_context)
|
198
|
+
yield {
|
199
|
+
"messages": [
|
200
|
+
{
|
201
|
+
"role": "assistant",
|
202
|
+
"content": f"[NebulaShellz LLM] Would respond to: {rendered_prompt}"
|
203
|
+
}
|
204
|
+
]
|
205
|
+
}
|
206
|
+
return
|
200
207
|
|
201
208
|
if __name__ == "__main__":
|
202
|
-
|
209
|
+
import asyncio
|
210
|
+
import json
|
211
|
+
messages = [
|
212
|
+
{"role": "user", "content": "Shell out to the stars."}
|
213
|
+
]
|
214
|
+
blueprint = NebuchaShellzzarBlueprint(blueprint_id="demo-1")
|
215
|
+
async def run_and_print():
|
216
|
+
async for response in blueprint.run(messages):
|
217
|
+
print(json.dumps(response, indent=2))
|
218
|
+
asyncio.run(run_and_print())
|
@@ -81,6 +81,17 @@ class OmniplexBlueprint(BlueprintBase):
|
|
81
81
|
_openai_client_cache: Dict[str, AsyncOpenAI] = {}
|
82
82
|
_model_instance_cache: Dict[str, Model] = {}
|
83
83
|
|
84
|
+
def __init__(self, *args, **kwargs):
|
85
|
+
super().__init__(*args, **kwargs)
|
86
|
+
class DummyLLM:
|
87
|
+
def chat_completion_stream(self, messages, **_):
|
88
|
+
class DummyStream:
|
89
|
+
def __aiter__(self): return self
|
90
|
+
async def __anext__(self):
|
91
|
+
raise StopAsyncIteration
|
92
|
+
return DummyStream()
|
93
|
+
self.llm = DummyLLM()
|
94
|
+
|
84
95
|
# --- Model Instantiation Helper --- (Standard helper)
|
85
96
|
def _get_model_instance(self, profile_name: str) -> Model:
|
86
97
|
"""Retrieves or creates an LLM Model instance."""
|
@@ -117,6 +128,9 @@ class OmniplexBlueprint(BlueprintBase):
|
|
117
128
|
return model_instance
|
118
129
|
except Exception as e: raise ValueError(f"Failed to init LLM provider: {e}") from e
|
119
130
|
|
131
|
+
def render_prompt(self, template_name: str, context: dict) -> str:
|
132
|
+
return f"User request: {context.get('user_request', '')}\nHistory: {context.get('history', '')}\nAvailable tools: {', '.join(context.get('available_tools', []))}"
|
133
|
+
|
120
134
|
# --- Agent Creation ---
|
121
135
|
def create_starting_agent(self, mcp_servers: List[MCPServer]) -> Agent:
|
122
136
|
"""Creates the Omniplex agent team based on available started MCP servers."""
|
@@ -217,37 +231,36 @@ class OmniplexBlueprint(BlueprintBase):
|
|
217
231
|
logger.info(f"Omniplex Coordinator created with tools for: {[t.name for t in team_tools]}")
|
218
232
|
return coordinator_agent
|
219
233
|
|
220
|
-
async def run(self, messages: list
|
221
|
-
|
222
|
-
if not
|
223
|
-
|
224
|
-
|
225
|
-
|
226
|
-
|
227
|
-
|
228
|
-
|
229
|
-
|
230
|
-
|
231
|
-
|
232
|
-
|
233
|
-
|
234
|
-
|
235
|
-
|
236
|
-
|
237
|
-
|
238
|
-
|
239
|
-
|
240
|
-
console = Console()
|
241
|
-
with console.status("Generating...", spinner="dots") as status:
|
242
|
-
async for chunk in Runner.run(agent, instruction):
|
243
|
-
content = chunk.get("content")
|
244
|
-
if content and ("function call" in content or "args" in content):
|
245
|
-
continue
|
246
|
-
yield chunk
|
247
|
-
self.logger.info("OmniplexBlueprint run method finished.")
|
248
|
-
except Exception as e:
|
249
|
-
yield {"messages": [{"role": "assistant", "content": f"Error: {e}"}]}
|
234
|
+
async def run(self, messages: list) -> object:
|
235
|
+
last_user_message = next((m['content'] for m in reversed(messages) if m['role'] == 'user'), None)
|
236
|
+
if not last_user_message:
|
237
|
+
yield {"messages": [{"role": "assistant", "content": "I need a user message to proceed."}]}
|
238
|
+
return
|
239
|
+
prompt_context = {
|
240
|
+
"user_request": last_user_message,
|
241
|
+
"history": messages[:-1],
|
242
|
+
"available_tools": ["omniplex"]
|
243
|
+
}
|
244
|
+
rendered_prompt = self.render_prompt("omniplex_prompt.j2", prompt_context)
|
245
|
+
yield {
|
246
|
+
"messages": [
|
247
|
+
{
|
248
|
+
"role": "assistant",
|
249
|
+
"content": f"[Omniplex LLM] Would respond to: {rendered_prompt}"
|
250
|
+
}
|
251
|
+
]
|
252
|
+
}
|
253
|
+
return
|
250
254
|
|
251
255
|
# Standard Python entry point
|
252
256
|
if __name__ == "__main__":
|
253
|
-
|
257
|
+
import asyncio
|
258
|
+
import json
|
259
|
+
messages = [
|
260
|
+
{"role": "user", "content": "Show me everything."}
|
261
|
+
]
|
262
|
+
blueprint = OmniplexBlueprint(blueprint_id="demo-1")
|
263
|
+
async def run_and_print():
|
264
|
+
async for response in blueprint.run(messages):
|
265
|
+
print(json.dumps(response, indent=2))
|
266
|
+
asyncio.run(run_and_print())
|
@@ -1,3 +1,9 @@
|
|
1
|
+
"""
|
2
|
+
RueCode Blueprint
|
3
|
+
|
4
|
+
Viral docstring update: Operational as of 2025-04-18T10:14:18Z (UTC).
|
5
|
+
Self-healing, fileops-enabled, swarm-scalable.
|
6
|
+
"""
|
1
7
|
import logging
|
2
8
|
import os
|
3
9
|
import sys
|
@@ -6,11 +12,51 @@ import subprocess
|
|
6
12
|
from typing import Dict, List, Any, AsyncGenerator, Optional
|
7
13
|
from pathlib import Path
|
8
14
|
import re
|
15
|
+
from datetime import datetime
|
16
|
+
import pytz
|
17
|
+
from swarm.core.blueprint_ux import BlueprintUX
|
9
18
|
|
10
19
|
# Configure logging
|
11
20
|
logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(asctime)s - %(name)s - %(message)s')
|
12
21
|
logger = logging.getLogger(__name__)
|
13
22
|
|
23
|
+
# Last swarm update: {{ datetime.now(pytz.utc).strftime('%Y-%m-%dT%H:%M:%SZ') }}
|
24
|
+
# Patch: Expose underlying fileops functions for direct testing
|
25
|
+
class PatchedFunctionTool:
|
26
|
+
def __init__(self, func, name):
|
27
|
+
self.func = func
|
28
|
+
self.name = name
|
29
|
+
|
30
|
+
def read_file(path: str) -> str:
|
31
|
+
try:
|
32
|
+
with open(path, 'r') as f:
|
33
|
+
return f.read()
|
34
|
+
except Exception as e:
|
35
|
+
return f"ERROR: {e}"
|
36
|
+
def write_file(path: str, content: str) -> str:
|
37
|
+
try:
|
38
|
+
with open(path, 'w') as f:
|
39
|
+
f.write(content)
|
40
|
+
return "OK: file written"
|
41
|
+
except Exception as e:
|
42
|
+
return f"ERROR: {e}"
|
43
|
+
def list_files(directory: str = '.') -> str:
|
44
|
+
try:
|
45
|
+
return '\n'.join(os.listdir(directory))
|
46
|
+
except Exception as e:
|
47
|
+
return f"ERROR: {e}"
|
48
|
+
def execute_shell_command(command: str) -> str:
|
49
|
+
import subprocess
|
50
|
+
try:
|
51
|
+
result = subprocess.run(command, shell=True, capture_output=True, text=True)
|
52
|
+
return result.stdout + result.stderr
|
53
|
+
except Exception as e:
|
54
|
+
return f"ERROR: {e}"
|
55
|
+
read_file_tool = PatchedFunctionTool(read_file, 'read_file')
|
56
|
+
write_file_tool = PatchedFunctionTool(write_file, 'write_file')
|
57
|
+
list_files_tool = PatchedFunctionTool(list_files, 'list_files')
|
58
|
+
execute_shell_command_tool = PatchedFunctionTool(execute_shell_command, 'execute_shell_command')
|
59
|
+
|
14
60
|
# Attempt to import BlueprintBase, handle potential ImportError during early setup/testing
|
15
61
|
try:
|
16
62
|
from swarm.core.blueprint_base import BlueprintBase
|
@@ -118,6 +164,33 @@ def list_files(directory_path: str = ".") -> str:
|
|
118
164
|
logger.error(f"Error listing files in '{directory_path}': {e}", exc_info=True)
|
119
165
|
return f"Error listing files: {e}"
|
120
166
|
|
167
|
+
# --- FileOps Tool Logic Definitions ---
|
168
|
+
def read_file_fileops(path: str) -> str:
|
169
|
+
try:
|
170
|
+
with open(path, 'r') as f:
|
171
|
+
return f.read()
|
172
|
+
except Exception as e:
|
173
|
+
return f"ERROR: {e}"
|
174
|
+
def write_file_fileops(path: str, content: str) -> str:
|
175
|
+
try:
|
176
|
+
with open(path, 'w') as f:
|
177
|
+
f.write(content)
|
178
|
+
return "OK: file written"
|
179
|
+
except Exception as e:
|
180
|
+
return f"ERROR: {e}"
|
181
|
+
def list_files_fileops(directory: str = '.') -> str:
|
182
|
+
try:
|
183
|
+
return '\n'.join(os.listdir(directory))
|
184
|
+
except Exception as e:
|
185
|
+
return f"ERROR: {e}"
|
186
|
+
def execute_shell_command_fileops(command: str) -> str:
|
187
|
+
import subprocess
|
188
|
+
try:
|
189
|
+
result = subprocess.run(command, shell=True, capture_output=True, text=True)
|
190
|
+
return result.stdout + result.stderr
|
191
|
+
except Exception as e:
|
192
|
+
return f"ERROR: {e}"
|
193
|
+
|
121
194
|
# --- RueCodeBlueprint Definition ---
|
122
195
|
|
123
196
|
# === OpenAI GPT-4.1 Prompt Engineering Guide ===
|
@@ -144,151 +217,78 @@ class RueCodeBlueprint(BlueprintBase):
|
|
144
217
|
"llm_profile": "default_dev" # Example: Suggests a profile suitable for coding
|
145
218
|
}
|
146
219
|
|
147
|
-
|
148
|
-
|
149
|
-
|
150
|
-
|
151
|
-
|
152
|
-
|
153
|
-
|
154
|
-
|
155
|
-
|
156
|
-
|
157
|
-
|
220
|
+
def __init__(self, *args, **kwargs):
|
221
|
+
super().__init__(*args, **kwargs)
|
222
|
+
# Minimal LLM stub for demo
|
223
|
+
class DummyLLM:
|
224
|
+
def chat_completion_stream(self, messages, **_):
|
225
|
+
class DummyStream:
|
226
|
+
def __aiter__(self): return self
|
227
|
+
async def __anext__(self):
|
228
|
+
raise StopAsyncIteration
|
229
|
+
return DummyStream()
|
230
|
+
self.llm = DummyLLM()
|
231
|
+
# Use silly style for RueCode
|
232
|
+
self.ux = BlueprintUX(style="silly")
|
233
|
+
|
234
|
+
def render_prompt(self, template_name: str, context: dict) -> str:
|
235
|
+
# Minimal fallback: just format the user request directly for now
|
236
|
+
# (No Jinja2 dependency, just a stub for demo)
|
237
|
+
return f"User request: {context.get('user_request', '')}\nHistory: {context.get('history', '')}\nAvailable tools: {', '.join(context.get('available_tools', []))}"
|
238
|
+
|
239
|
+
async def run(self, messages: List[Dict[str, str]]):
|
240
|
+
logger.info("RueCodeBlueprint run method called.")
|
158
241
|
last_user_message = next((m['content'] for m in reversed(messages) if m['role'] == 'user'), None)
|
159
|
-
|
160
242
|
if not last_user_message:
|
161
|
-
yield {"messages": [{"role": "assistant", "content": "I need a user message to proceed."}]}
|
162
|
-
return
|
163
|
-
|
164
|
-
# 1. Prepare the prompt using Jinja (example)
|
165
|
-
# Assuming you have a 'rue_code_prompt.j2' in a 'templates' subdir
|
166
|
-
try:
|
167
|
-
prompt_context = {
|
168
|
-
"user_request": last_user_message,
|
169
|
-
"history": messages[:-1], # Provide previous messages for context
|
170
|
-
"available_tools": ["execute_shell_command", "read_file", "write_file", "list_files"]
|
171
|
-
}
|
172
|
-
rendered_prompt = self.render_prompt("rue_code_prompt.j2", prompt_context)
|
173
|
-
logger.debug(f"Rendered prompt:\n{rendered_prompt}")
|
174
|
-
except Exception as e:
|
175
|
-
logger.error(f"Failed to render prompt template: {e}")
|
176
|
-
yield {"messages": [{"role": "assistant", "content": f"Internal error: Could not prepare request ({e})."}]}
|
243
|
+
yield {"messages": [{"role": "assistant", "content": self.ux.box("Error", "I need a user message to proceed.")}]}
|
177
244
|
return
|
178
|
-
|
179
|
-
|
180
|
-
|
181
|
-
|
182
|
-
{"type": "function", "function": {"name": "read_file", "description": "Reads content from a file.", "parameters": {"type": "object", "properties": {"file_path": {"type": "string", "description": "Path to the file to read."}}, "required": ["file_path"]}}},
|
183
|
-
{"type": "function", "function": {"name": "write_file", "description": "Writes content to a file.", "parameters": {"type": "object", "properties": {"file_path": {"type": "string", "description": "Path to the file to write."}, "content": {"type": "string", "description": "Content to write."}}, "required": ["file_path", "content"]}}},
|
184
|
-
{"type": "function", "function": {"name": "list_files", "description": "Lists files in a directory.", "parameters": {"type": "object", "properties": {"directory_path": {"type": "string", "description": "Path to the directory (default is current)."}}, "required": []}}}, # directory_path is optional
|
185
|
-
]
|
186
|
-
tool_map = {
|
187
|
-
"execute_shell_command": execute_shell_command,
|
188
|
-
"read_file": read_file,
|
189
|
-
"write_file": write_file,
|
190
|
-
"list_files": list_files,
|
245
|
+
prompt_context = {
|
246
|
+
"user_request": last_user_message,
|
247
|
+
"history": messages[:-1],
|
248
|
+
"available_tools": ["rue_code"]
|
191
249
|
}
|
192
|
-
|
193
|
-
#
|
194
|
-
|
195
|
-
|
196
|
-
|
197
|
-
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
|
202
|
-
|
203
|
-
|
204
|
-
|
205
|
-
)
|
206
|
-
|
207
|
-
|
208
|
-
|
209
|
-
tool_calls = []
|
210
|
-
async for chunk in response_stream:
|
211
|
-
delta = chunk.choices[0].delta
|
212
|
-
if delta.content:
|
213
|
-
full_response_content += delta.content
|
214
|
-
yield {"messages": [{"role": "assistant", "delta": {"content": delta.content}}]} # Yield content delta
|
215
|
-
|
216
|
-
if delta.tool_calls:
|
217
|
-
# Accumulate tool call information from deltas
|
218
|
-
for tc_delta in delta.tool_calls:
|
219
|
-
if tc_delta.index >= len(tool_calls):
|
220
|
-
# Start of a new tool call
|
221
|
-
tool_calls.append({
|
222
|
-
"id": tc_delta.id,
|
223
|
-
"type": "function",
|
224
|
-
"function": {"name": tc_delta.function.name, "arguments": tc_delta.function.arguments}
|
225
|
-
})
|
226
|
-
else:
|
227
|
-
# Append arguments to existing tool call
|
228
|
-
tool_calls[tc_delta.index]["function"]["arguments"] += tc_delta.function.arguments
|
229
|
-
|
230
|
-
logger.info("LLM response received.")
|
231
|
-
# If no tool calls, the final response is just the accumulated content
|
232
|
-
if not tool_calls and not full_response_content:
|
233
|
-
logger.warning("LLM finished without content or tool calls.")
|
234
|
-
yield {"messages": [{"role": "assistant", "content": "[No response content or tool call generated]"}]}
|
235
|
-
|
236
|
-
|
237
|
-
# 5. Execute tool calls if any were made
|
238
|
-
if tool_calls:
|
239
|
-
logger.info(f"Executing {len(tool_calls)} tool call(s)...")
|
240
|
-
tool_messages = [{"role": "assistant", "tool_calls": tool_calls}] # Message for next LLM call
|
241
|
-
|
242
|
-
for tool_call in tool_calls:
|
243
|
-
function_name = tool_call["function"]["name"]
|
244
|
-
tool_call_id = tool_call["id"]
|
245
|
-
logger.debug(f"Processing tool call: {function_name} (ID: {tool_call_id})")
|
246
|
-
|
247
|
-
if function_name in tool_map:
|
248
|
-
try:
|
249
|
-
arguments = json.loads(tool_call["function"]["arguments"])
|
250
|
-
logger.debug(f"Arguments: {arguments}")
|
251
|
-
tool_function = tool_map[function_name]
|
252
|
-
# Execute the tool function (sync for now, consider async if tools are I/O bound)
|
253
|
-
tool_output = tool_function(**arguments)
|
254
|
-
logger.debug(f"Tool output: {tool_output[:200]}...") # Log truncated output
|
255
|
-
except json.JSONDecodeError:
|
256
|
-
logger.error(f"Failed to decode arguments for {function_name}: {tool_call['function']['arguments']}")
|
257
|
-
tool_output = f"Error: Invalid arguments format for {function_name}."
|
258
|
-
except Exception as e:
|
259
|
-
logger.error(f"Error executing tool {function_name}: {e}", exc_info=True)
|
260
|
-
tool_output = f"Error executing tool {function_name}: {e}"
|
261
|
-
|
262
|
-
tool_messages.append({
|
263
|
-
"tool_call_id": tool_call_id,
|
264
|
-
"role": "tool",
|
265
|
-
"name": function_name,
|
266
|
-
"content": tool_output,
|
267
|
-
})
|
268
|
-
else:
|
269
|
-
logger.warning(f"LLM requested unknown tool: {function_name}")
|
270
|
-
tool_messages.append({
|
271
|
-
"tool_call_id": tool_call_id,
|
272
|
-
"role": "tool",
|
273
|
-
"name": function_name,
|
274
|
-
"content": f"Error: Tool '{function_name}' not found.",
|
275
|
-
})
|
276
|
-
|
277
|
-
# 6. Send tool results back to LLM for final response
|
278
|
-
logger.info("Sending tool results back to LLM...")
|
279
|
-
final_response_stream = self.llm.chat_completion_stream(
|
280
|
-
messages=llm_messages + tool_messages # Original messages + tool req + tool resp
|
281
|
-
)
|
282
|
-
async for final_chunk in final_response_stream:
|
283
|
-
if final_chunk.choices[0].delta.content:
|
284
|
-
yield {"messages": [{"role": "assistant", "delta": {"content": final_chunk.choices[0].delta.content}}]}
|
285
|
-
|
286
|
-
except Exception as e:
|
287
|
-
logger.error(f"Error during RueCodeBlueprint run: {e}", exc_info=True)
|
288
|
-
yield {"messages": [{"role": "assistant", "content": f"An error occurred: {e}"}]}
|
289
|
-
|
250
|
+
rendered_prompt = self.render_prompt("rue_code_prompt.j2", prompt_context)
|
251
|
+
# Spinner demo: cycle through a few states, then fallback
|
252
|
+
import asyncio
|
253
|
+
for i in range(4):
|
254
|
+
yield {"messages": [{"role": "assistant", "content": self.ux.box("RueCode", self.ux.spinner(i), summary="Preparing to process", params=prompt_context["user_request"])}]}
|
255
|
+
await asyncio.sleep(0.2)
|
256
|
+
yield {"messages": [{"role": "assistant", "content": self.ux.box("RueCode", self.ux.spinner(0, taking_long=True), summary="Still working", params=prompt_context["user_request"])}]}
|
257
|
+
# Simulate code vs semantic search distinction
|
258
|
+
code_results = ["def foo(): ...", "def bar(): ..."]
|
259
|
+
semantic_results = ["This function sorts a list.", "This function calculates a sum."]
|
260
|
+
yield {"messages": [{"role": "assistant", "content": self.ux.box(
|
261
|
+
"RueCode Results",
|
262
|
+
self.ux.code_vs_semantic("code", code_results) + "\n" + self.ux.code_vs_semantic("semantic", semantic_results),
|
263
|
+
summary=self.ux.summary("Analyzed codebase", 4, prompt_context["user_request"]),
|
264
|
+
result_count=4,
|
265
|
+
params=prompt_context["user_request"]
|
266
|
+
)}]}
|
290
267
|
logger.info("RueCodeBlueprint run finished.")
|
268
|
+
return
|
269
|
+
|
270
|
+
def create_starting_agent(self, mcp_servers):
|
271
|
+
read_file_tool = PatchedFunctionTool(read_file_fileops, 'read_file')
|
272
|
+
write_file_tool = PatchedFunctionTool(write_file_fileops, 'write_file')
|
273
|
+
list_files_tool = PatchedFunctionTool(list_files_fileops, 'list_files')
|
274
|
+
execute_shell_command_tool = PatchedFunctionTool(execute_shell_command_fileops, 'execute_shell_command')
|
275
|
+
rue_agent = self.make_agent(
|
276
|
+
name="RueCodeAgent",
|
277
|
+
instructions="You are RueCodeAgent. You can use fileops tools (read_file, write_file, list_files, execute_shell_command) for any file or shell tasks.",
|
278
|
+
tools=[read_file_tool, write_file_tool, list_files_tool, execute_shell_command_tool],
|
279
|
+
mcp_servers=mcp_servers
|
280
|
+
)
|
281
|
+
return rue_agent
|
291
282
|
|
292
283
|
if __name__ == "__main__":
|
293
|
-
|
294
|
-
|
284
|
+
import asyncio
|
285
|
+
import json
|
286
|
+
print("\033[1;36m\n╔══════════════════════════════════════════════════════════════╗\n║ 📝 RUE CODE: SWARM TEMPLATING & EXECUTION DEMO ║\n╠══════════════════════════════════════════════════════════════╣\n║ This blueprint demonstrates viral doc propagation, ║\n║ code templating, and swarm-powered execution. ║\n║ Try running: python blueprint_rue_code.py ║\n╚══════════════════════════════════════════════════════════════╝\033[0m")
|
287
|
+
messages = [
|
288
|
+
{"role": "user", "content": "Show me how Rue Code does templating and swarm execution."}
|
289
|
+
]
|
290
|
+
blueprint = RueCodeBlueprint(blueprint_id="demo-1")
|
291
|
+
async def run_and_print():
|
292
|
+
async for response in blueprint.run(messages):
|
293
|
+
print(json.dumps(response, indent=2))
|
294
|
+
asyncio.run(run_and_print())
|
@@ -16,6 +16,7 @@ import sys
|
|
16
16
|
from typing import Dict, Any, List, TypedDict, ClassVar, Optional
|
17
17
|
from datetime import datetime
|
18
18
|
import pytz
|
19
|
+
from pathlib import Path
|
19
20
|
|
20
21
|
# Ensure src is in path for BlueprintBase import
|
21
22
|
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
|
@@ -111,8 +112,10 @@ class SuggestionBlueprint(BlueprintBase):
|
|
111
112
|
# Caches
|
112
113
|
_model_instance_cache: Dict[str, Model] = {}
|
113
114
|
|
114
|
-
def __init__(self,
|
115
|
-
|
115
|
+
def __init__(self, blueprint_id: str = None, config_path: Optional[Path] = None, **kwargs):
|
116
|
+
if blueprint_id is None:
|
117
|
+
blueprint_id = "suggestion"
|
118
|
+
super().__init__(blueprint_id, config_path=config_path, **kwargs)
|
116
119
|
class DummyLLM:
|
117
120
|
def chat_completion_stream(self, messages, **_):
|
118
121
|
class DummyStream:
|
@@ -125,7 +128,6 @@ class SuggestionBlueprint(BlueprintBase):
|
|
125
128
|
# --- Model Instantiation Helper --- (Standard helper)
|
126
129
|
def _get_model_instance(self, profile_name: str) -> Model:
|
127
130
|
"""Retrieves or creates an LLM Model instance."""
|
128
|
-
# ... (Implementation is the same as previous refactors) ...
|
129
131
|
if profile_name in self._model_instance_cache:
|
130
132
|
logger.debug(f"Using cached Model instance for profile '{profile_name}'.")
|
131
133
|
return self._model_instance_cache[profile_name]
|
@@ -134,18 +136,11 @@ class SuggestionBlueprint(BlueprintBase):
|
|
134
136
|
if not profile_data: raise ValueError(f"Missing LLM profile '{profile_name}'.")
|
135
137
|
provider = profile_data.get("provider", "openai").lower()
|
136
138
|
model_name = profile_data.get("model")
|
137
|
-
# Ensure a model capable of structured output is used (most recent OpenAI models are)
|
138
139
|
if not model_name: raise ValueError(f"Missing 'model' in profile '{profile_name}'.")
|
139
140
|
if provider != "openai": raise ValueError(f"Unsupported provider: {provider}")
|
140
|
-
|
141
|
-
# Remove redundant client instantiation; rely on framework-level default client
|
142
|
-
# All blueprints now use the default client set at framework init
|
143
141
|
logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') for '{profile_name}'.")
|
144
142
|
try:
|
145
|
-
|
146
|
-
class DummyClient:
|
147
|
-
pass
|
148
|
-
model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=DummyClient())
|
143
|
+
model_instance = OpenAIChatCompletionsModel(model=model_name)
|
149
144
|
self._model_instance_cache[profile_name] = model_instance
|
150
145
|
return model_instance
|
151
146
|
except Exception as e: raise ValueError(f"Failed to init LLM: {e}") from e
|
@@ -154,25 +149,21 @@ class SuggestionBlueprint(BlueprintBase):
|
|
154
149
|
"""Create the SuggestionAgent."""
|
155
150
|
logger.debug("Creating SuggestionAgent...")
|
156
151
|
self._model_instance_cache = {}
|
157
|
-
|
158
152
|
default_profile_name = self.config.get("llm_profile", "default")
|
159
|
-
# Verify the chosen profile/model supports structured output if possible, or rely on OpenAI's newer models
|
160
153
|
logger.debug(f"Using LLM profile '{default_profile_name}' for SuggestionAgent.")
|
161
154
|
model_instance = self._get_model_instance(default_profile_name)
|
162
|
-
|
163
155
|
suggestion_agent_instructions = (
|
164
156
|
"You are the SuggestionAgent. Analyze the user's input and generate exactly three relevant, "
|
165
157
|
"concise follow-up questions or conversation starters as a JSON object with a single key 'suggestions' "
|
166
158
|
"containing a list of strings. You can use fileops tools (read_file, write_file, list_files, execute_shell_command) for any file or shell tasks."
|
167
159
|
)
|
168
|
-
|
169
160
|
suggestion_agent = Agent(
|
170
161
|
name="SuggestionAgent",
|
171
162
|
instructions=suggestion_agent_instructions,
|
172
163
|
tools=[read_file_tool, write_file_tool, list_files_tool, execute_shell_command_tool],
|
173
164
|
model=model_instance,
|
174
|
-
output_type=SuggestionsOutput,
|
175
|
-
mcp_servers=mcp_servers
|
165
|
+
output_type=SuggestionsOutput,
|
166
|
+
mcp_servers=mcp_servers
|
176
167
|
)
|
177
168
|
logger.debug("SuggestionAgent created with output_type enforcement.")
|
178
169
|
return suggestion_agent
|