open-swarm 0.1.1745125933__py3-none-any.whl → 0.1.1745126277__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {open_swarm-0.1.1745125933.dist-info → open_swarm-0.1.1745126277.dist-info}/METADATA +12 -8
- {open_swarm-0.1.1745125933.dist-info → open_swarm-0.1.1745126277.dist-info}/RECORD +52 -25
- swarm/blueprints/README.md +19 -18
- swarm/blueprints/blueprint_audit_status.json +1 -1
- swarm/blueprints/chatbot/blueprint_chatbot.py +160 -72
- swarm/blueprints/codey/README.md +88 -8
- swarm/blueprints/codey/blueprint_codey.py +1116 -210
- swarm/blueprints/codey/codey_cli.py +10 -0
- swarm/blueprints/codey/session_logs/session_2025-04-19T01-15-31.md +17 -0
- swarm/blueprints/codey/session_logs/session_2025-04-19T01-16-03.md +17 -0
- swarm/blueprints/common/operation_box_utils.py +83 -0
- swarm/blueprints/digitalbutlers/blueprint_digitalbutlers.py +21 -298
- swarm/blueprints/divine_code/blueprint_divine_code.py +182 -9
- swarm/blueprints/django_chat/blueprint_django_chat.py +150 -24
- swarm/blueprints/echocraft/blueprint_echocraft.py +142 -13
- swarm/blueprints/geese/README.md +97 -0
- swarm/blueprints/geese/blueprint_geese.py +677 -93
- swarm/blueprints/geese/geese_cli.py +102 -0
- swarm/blueprints/jeeves/blueprint_jeeves.py +712 -0
- swarm/blueprints/jeeves/jeeves_cli.py +55 -0
- swarm/blueprints/mcp_demo/blueprint_mcp_demo.py +109 -22
- swarm/blueprints/mission_improbable/blueprint_mission_improbable.py +172 -40
- swarm/blueprints/monkai_magic/blueprint_monkai_magic.py +79 -41
- swarm/blueprints/nebula_shellz/blueprint_nebula_shellz.py +82 -35
- swarm/blueprints/omniplex/blueprint_omniplex.py +56 -24
- swarm/blueprints/poets/blueprint_poets.py +141 -100
- swarm/blueprints/poets/poets_cli.py +23 -0
- swarm/blueprints/rue_code/README.md +8 -0
- swarm/blueprints/rue_code/blueprint_rue_code.py +188 -20
- swarm/blueprints/rue_code/rue_code_cli.py +43 -0
- swarm/blueprints/stewie/apps.py +12 -0
- swarm/blueprints/stewie/blueprint_family_ties.py +349 -0
- swarm/blueprints/stewie/models.py +19 -0
- swarm/blueprints/stewie/serializers.py +10 -0
- swarm/blueprints/stewie/settings.py +17 -0
- swarm/blueprints/stewie/urls.py +11 -0
- swarm/blueprints/stewie/views.py +26 -0
- swarm/blueprints/suggestion/blueprint_suggestion.py +54 -39
- swarm/blueprints/whinge_surf/README.md +22 -0
- swarm/blueprints/whinge_surf/__init__.py +1 -0
- swarm/blueprints/whinge_surf/blueprint_whinge_surf.py +565 -0
- swarm/blueprints/whinge_surf/whinge_surf_cli.py +99 -0
- swarm/blueprints/whiskeytango_foxtrot/blueprint_whiskeytango_foxtrot.py +66 -37
- swarm/blueprints/zeus/__init__.py +2 -0
- swarm/blueprints/zeus/apps.py +4 -0
- swarm/blueprints/zeus/blueprint_zeus.py +270 -0
- swarm/blueprints/zeus/zeus_cli.py +13 -0
- swarm/cli/async_input.py +65 -0
- swarm/cli/async_input_demo.py +32 -0
- {open_swarm-0.1.1745125933.dist-info → open_swarm-0.1.1745126277.dist-info}/WHEEL +0 -0
- {open_swarm-0.1.1745125933.dist-info → open_swarm-0.1.1745126277.dist-info}/entry_points.txt +0 -0
- {open_swarm-0.1.1745125933.dist-info → open_swarm-0.1.1745126277.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,712 @@
|
|
1
|
+
"""
|
2
|
+
Jeeves Blueprint
|
3
|
+
|
4
|
+
Viral docstring update: Operational as of 2025-04-18T10:14:18Z (UTC).
|
5
|
+
Self-healing, fileops-enabled, swarm-scalable.
|
6
|
+
"""
|
7
|
+
# [Swarm Propagation] Next Blueprint: divine_code
|
8
|
+
# divine_code key vars: logger, project_root, src_path
|
9
|
+
# divine_code guard: if src_path not in sys.path: sys.path.insert(0, src_path)
|
10
|
+
# divine_code debug: logger.debug("Divine Ops Team (Zeus & Pantheon) created successfully. Zeus is starting agent.")
|
11
|
+
# divine_code error handling: try/except ImportError with sys.exit(1)
|
12
|
+
|
13
|
+
# Standard library.
import logging
import os
import subprocess  # used by execute_shell_command
import sys
import threading
import time
from datetime import datetime
from typing import Any, Callable, ClassVar, Dict, List, Optional

# Third-party.
import pytz

# Local.
from swarm.blueprints.common.operation_box_utils import display_operation_box
|
22
|
+
|
23
|
+
class ToolRegistry:
    """Central registry for all tools.

    Tracks two kinds of tools: LLM tools (OpenAI function-calling specs with a
    handler attached) and plain Python-only callables.
    """

    def __init__(self):
        # name -> {'name', 'description', 'parameters', 'handler'}
        self.llm_tools = {}
        # name -> bare callable
        self.python_tools = {}

    def register_llm_tool(self, name: str, description: str, parameters: dict, handler):
        """Register an LLM (function-calling) tool under *name*."""
        entry = {
            'name': name,
            'description': description,
            'parameters': parameters,
            'handler': handler,
        }
        self.llm_tools[name] = entry

    def register_python_tool(self, name: str, handler, description: str = ""):
        """Register a Python-only tool; *description* is accepted but not stored."""
        self.python_tools[name] = handler

    def get_llm_tools(self, as_openai_spec=False):
        """Return registered LLM tools, optionally as OpenAI-style spec dicts."""
        registered = list(self.llm_tools.values())
        if not as_openai_spec:
            return registered
        # The OpenAI spec carries only the declarative fields, not the handler.
        return [
            {'name': spec['name'],
             'description': spec['description'],
             'parameters': spec['parameters']}
            for spec in registered
        ]

    def get_python_tool(self, name: str):
        """Look up a Python-only tool by name; returns None when unregistered."""
        return self.python_tools.get(name)
|
57
|
+
|
58
|
+
# NOTE(review): datetime/pytz are re-imported here; harmless duplicates of the
# top-of-file imports, kept for compatibility.
from datetime import datetime
import pytz

# Ensure src is in path for BlueprintBase import: make the project 'src'
# directory importable when this module is run straight from a source checkout.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
src_path = os.path.join(project_root, 'src')
if src_path not in sys.path:
    sys.path.insert(0, src_path)

from typing import Optional
from pathlib import Path

try:
    # Core agent framework plus the Swarm blueprint base classes. A failure
    # here is fatal: the blueprint cannot function without them, so we print
    # diagnostics and exit rather than limp along.
    from agents import Agent, Tool, function_tool, Runner  # Added Runner
    from agents.mcp import MCPServer
    from agents.models.interface import Model
    from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
    from openai import AsyncOpenAI
    from swarm.core.blueprint_base import BlueprintBase
    from swarm.core.blueprint_ux import BlueprintUXImproved
except ImportError as e:
    print(f"ERROR: Import failed in JeevesBlueprint: {e}. Check 'openai-agents' install and project structure.")
    print(f"Attempted import from directory: {os.path.dirname(__file__)}")
    print(f"sys.path: {sys.path}")
    sys.exit(1)

logger = logging.getLogger(__name__)

# Last swarm update: 2025-04-18T10:15:21Z (UTC)
utc_now = datetime.now(pytz.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
print(f"# Last swarm update: {utc_now} (UTC)")
|
87
|
+
|
88
|
+
# --- Agent Instructions ---

SHARED_INSTRUCTIONS = """
You are part of the Jeeves team. Collaborate via Jeeves, the coordinator.
Roles:
- Jeeves (Coordinator): User interface, planning, delegation via Agent Tools.
- Mycroft (Web Search): Uses `duckduckgo-search` MCP tool for private web searches.
- Gutenberg (Home Automation): Uses `home-assistant` MCP tool to control devices.
Respond ONLY to the agent who tasked you (typically Jeeves). Provide clear, concise results.
"""

# Role-specific prompts: each agent receives the shared team brief followed by
# its own duties; the trailing line advertises the fileops toolset.

jeeves_instructions = SHARED_INSTRUCTIONS + "\n\n" + "\n".join([
    "YOUR ROLE: Jeeves, the Coordinator. You are the primary interface with the user.",
    "1. Understand the user's request fully.",
    "2. If it involves searching the web, delegate the specific search query to the `Mycroft` agent tool.",
    "3. If it involves controlling home devices (lights, switches, etc.), delegate the specific command (e.g., 'turn on kitchen light') to the `Gutenberg` agent tool.",
    "4. If the request is simple and doesn't require search or home automation, answer it directly.",
    "5. Synthesize the results received from Mycroft or Gutenberg into a polite, helpful, and complete response for the user. Do not just relay their raw output.",
    "You can use fileops tools (read_file, write_file, list_files, execute_shell_command) for any file or shell tasks.",
])

mycroft_instructions = SHARED_INSTRUCTIONS + "\n\n" + "\n".join([
    "YOUR ROLE: Mycroft, the Web Sleuth. You ONLY perform web searches when tasked by Jeeves.",
    "Use the `duckduckgo-search` MCP tool available to you to execute the search query provided by Jeeves.",
    "Return the search results clearly and concisely to Jeeves. Do not add conversational filler.",
    "You can use fileops tools (read_file, write_file, list_files, execute_shell_command) for any file or shell tasks.",
])

gutenberg_instructions = SHARED_INSTRUCTIONS + "\n\n" + "\n".join([
    "YOUR ROLE: Gutenberg, the Home Scribe. You ONLY execute home automation commands when tasked by Jeeves.",
    "Use the `home-assistant` MCP tool available to you to execute the command (e.g., interacting with entities like 'light.kitchen_light').",
    "Confirm the action taken (or report any errors) back to Jeeves. Do not add conversational filler.",
    "You can use fileops tools (read_file, write_file, list_files, execute_shell_command) for any file or shell tasks.",
])
|
125
|
+
|
126
|
+
|
127
|
+
# --- FileOps Tool Logic Definitions ---
@function_tool
def read_file(path: str) -> str:
    """Return the full text of the file at *path*, or an "ERROR: ..." string."""
    try:
        with open(path, 'r') as fh:
            return fh.read()
    except Exception as exc:
        return f"ERROR: {exc}"
|
135
|
+
|
136
|
+
@function_tool
def write_file(path: str, content: str) -> str:
    """Overwrite *path* with *content*; returns a status or "ERROR: ..." string."""
    try:
        with open(path, 'w') as fh:
            fh.write(content)
    except Exception as exc:
        return f"ERROR: {exc}"
    return "OK: file written"
|
144
|
+
|
145
|
+
@function_tool
def list_files(directory: str = '.') -> str:
    """List the entries of *directory*, newline-separated, or an "ERROR: ..." string."""
    try:
        entries = os.listdir(directory)
    except Exception as exc:
        return f"ERROR: {exc}"
    return '\n'.join(entries)
|
151
|
+
|
152
|
+
@function_tool
def execute_shell_command(command: str) -> str:
    """
    Executes a shell command and returns its stdout and stderr.

    Timeout is configurable via SWARM_COMMAND_TIMEOUT (default: 60s).
    Returns a formatted report (exit code, STDOUT, STDERR) or an error
    string; it never raises.
    """
    logger.info(f"Executing shell command: {command}")
    try:
        # BUG FIX: 'subprocess' was used below but never imported anywhere in
        # the module (the original imported 'os' here instead, which is
        # already a module-level import). Import it locally so the tool works.
        import subprocess
        # Kept inside the try so a malformed env value is reported as a tool
        # error rather than raising out of the tool.
        timeout = int(os.getenv("SWARM_COMMAND_TIMEOUT", "60"))
        # SECURITY NOTE: shell=True executes *command* through the shell; the
        # string is assumed to come from a trusted operator/agent.
        result = subprocess.run(command, shell=True, capture_output=True, text=True, timeout=timeout)
        output = f"Exit Code: {result.returncode}\n"
        if result.stdout:
            output += f"STDOUT:\n{result.stdout}\n"
        if result.stderr:
            output += f"STDERR:\n{result.stderr}\n"
        logger.info(f"Command finished. Exit Code: {result.returncode}")
        return output.strip()
    except subprocess.TimeoutExpired:
        logger.error(f"Command timed out: {command}")
        return f"Error: Command timed out after {timeout} seconds."
    except Exception as e:
        logger.error(f"Error executing command '{command}': {e}", exc_info=True)
        return f"Error executing command: {e}"
|
176
|
+
|
177
|
+
from dataclasses import dataclass
from typing import Callable, Optional


@dataclass
class AgentTool:
    """Lightweight description of a tool exposed to an agent.

    Fields:
        name: tool identifier.
        description: human-readable summary shown to the LLM.
        parameters: JSON-schema-style parameter spec.
        handler: optional callable implementing the tool.
    """
    name: str
    description: str
    parameters: dict
    # FIX: 'callable' (the builtin) is not a type annotation; use the typing form.
    handler: Optional[Callable] = None
|
185
|
+
|
186
|
+
# Spinner UX enhancement (Open Swarm TODO)
# --- Spinner States for progressive operation boxes ---
# Frames cycled by progressive operation boxes while work is in flight.
SPINNER_STATES = [
    "Generating.",
    "Generating..",
    "Generating...",
    "Running...",
]
|
194
|
+
|
195
|
+
# --- Spinner State Constants ---
class JeevesSpinner:
    """Background terminal spinner with a "taking longer than expected" state.

    start() launches a daemon thread that repaints one of FRAMES every
    INTERVAL seconds; once SLOW_THRESHOLD seconds have elapsed it switches to
    SLOW_FRAME. stop() ends the thread and prints *final_message*.
    """

    FRAMES = [
        "Generating.",
        "Generating..",
        "Generating...",
        "Running..."
    ]
    SLOW_FRAME = "Generating... Taking longer than expected"
    INTERVAL = 0.12  # seconds between repaints
    SLOW_THRESHOLD = 10  # seconds before switching to SLOW_FRAME

    def __init__(self):
        import threading, time
        from rich.console import Console
        self._stop_event = threading.Event()
        self._thread = None
        self._start_time = None
        self.console = Console()
        # Last frame drawn, so current_spinner_state() can report it.
        self._last_frame = None
        self._last_slow = False

    def start(self):
        """Begin animating on a daemon thread; returns immediately."""
        self._stop_event.clear()
        self._start_time = time.time()
        self._thread = threading.Thread(target=self._spin, daemon=True)
        self._thread.start()

    def _spin(self):
        # Runs on the background thread until stop() sets the event.
        # NOTE: Text/Style resolve to the module-level rich imports further
        # down the file; they are available by the time this thread runs.
        idx = 0
        while not self._stop_event.is_set():
            elapsed = time.time() - self._start_time
            if elapsed > self.SLOW_THRESHOLD:
                txt = Text(self.SLOW_FRAME, style=Style(color="yellow", bold=True))
                self._last_frame = self.SLOW_FRAME
                self._last_slow = True
            else:
                frame = self.FRAMES[idx % len(self.FRAMES)]
                txt = Text(frame, style=Style(color="cyan", bold=True))
                self._last_frame = frame
                self._last_slow = False
            self.console.print(txt, end="\r", soft_wrap=True, highlight=False)
            time.sleep(self.INTERVAL)
            idx += 1
        self.console.print(" " * 40, end="\r")  # Clear line

    def stop(self, final_message="Done!"):
        """Stop the animation, join the worker thread, and print *final_message*."""
        self._stop_event.set()
        if self._thread:
            self._thread.join()
        self.console.print(Text(final_message, style=Style(color="green", bold=True)))

    def current_spinner_state(self):
        """Return the most recently displayed frame (SLOW_FRAME once slow)."""
        if self._last_slow:
            return self.SLOW_FRAME
        return self._last_frame or self.FRAMES[0]
|
251
|
+
|
252
|
+
import re
|
253
|
+
|
254
|
+
def grep_search(pattern: str, path: str = ".", case_insensitive: bool = False, max_results: int = 100, progress_yield: int = 10):
    """Progressive regex search in files, yields dicts of matches and progress.

    Yields progress dicts every *progress_yield* files, and a final dict with
    done=True (truncated=True if *max_results* was hit). Unreadable files are
    skipped silently.
    """
    found = []
    regex_flags = re.IGNORECASE if case_insensitive else 0
    try:
        # First pass: count files so progress reports can include a total.
        file_total = sum(len(names) for _, _, names in os.walk(path))
        seen = 0
        for folder, _subdirs, names in os.walk(path):
            for name in names:
                full_path = os.path.join(folder, name)
                seen += 1
                try:
                    with open(full_path, "r", encoding="utf-8", errors="ignore") as handle:
                        for lineno, text in enumerate(handle, 1):
                            if re.search(pattern, text, regex_flags):
                                found.append({
                                    "file": full_path,
                                    "line": lineno,
                                    "content": text.strip(),
                                })
                                if len(found) >= max_results:
                                    yield {"matches": found, "progress": seen, "total": file_total, "truncated": True, "done": True}
                                    return
                except Exception:
                    # Unreadable/binary file: skip it and keep scanning.
                    continue
                if seen % progress_yield == 0:
                    yield {"matches": found.copy(), "progress": seen, "total": file_total, "truncated": False, "done": False}
        # Final yield
        yield {"matches": found, "progress": seen, "total": file_total, "truncated": False, "done": True}
    except Exception as e:
        yield {"matches": [], "progress": 0, "total": 0, "truncated": False, "done": True, "error": str(e)}
|
288
|
+
|
289
|
+
# Register grep_search as an LLM tool on the ToolRegistry *class*.
# NOTE(review): this block appears defective as written — register_llm_tool is
# an instance method that writes to self.llm_tools, and ToolRegistry defines no
# class-level 'llm_tools' dict, so calling it with the class as 'self' raises
# AttributeError and lands in the except below (printing an error). Behavior is
# deliberately preserved here pending a proper class-level registry; the
# '_grep_registered' flag still prevents repeated attempts.
try:
    ToolRegistry.register_llm_tool = staticmethod(ToolRegistry.register_llm_tool)
    if not hasattr(ToolRegistry, '_grep_registered'):
        ToolRegistry._grep_registered = True
        ToolRegistry.register_llm_tool(
            ToolRegistry,
            name="grep_search",
            description="Progressively search for a regex pattern in files under a directory tree, yielding progress.",
            parameters={
                "pattern": {"type": "string", "description": "Regex pattern to search for."},
                "path": {"type": "string", "description": "Directory to search in.", "default": "."},
                "case_insensitive": {"type": "boolean", "description": "Case-insensitive search.", "default": False},
                "max_results": {"type": "integer", "description": "Maximum number of results.", "default": 100},
                "progress_yield": {"type": "integer", "description": "How often to yield progress.", "default": 10}
            },
            handler=grep_search
        )
except Exception as e:
    print(f"Error registering grep_search tool: {e}")
|
308
|
+
|
309
|
+
from rich.console import Console
from rich.panel import Panel
from rich import box as rich_box
from rich.text import Text
from rich.style import Style

# Module-wide rich console for direct terminal output.
console = Console()
|
316
|
+
|
317
|
+
# --- Define the Blueprint ---
class JeevesBlueprint(BlueprintBase):
    """
    Jeeves: Swarm-powered digital butler and code assistant blueprint.

    Coordinates three agents: Jeeves (user-facing coordinator), Mycroft
    (web search via the duckduckgo-search MCP server) and Gutenberg (home
    automation via the home-assistant MCP server).
    """

    # Blueprint registry metadata (consumed by the Swarm CLI/loader).
    metadata: ClassVar[dict] = {
        "name": "JeevesBlueprint",
        "cli_name": "jeeves",
        "title": "Jeeves: Swarm-powered digital butler and code assistant",
        "description": "A collaborative blueprint for digital butlering, code analysis, and multi-agent task management.",
        "version": "1.1.0",  # Version updated
        "author": "Open Swarm Team (Refactored)",
        "tags": ["web search", "home automation", "duckduckgo", "home assistant", "multi-agent", "delegation"],
        "required_mcp_servers": ["duckduckgo-search", "home-assistant"],  # List the MCP servers needed by the agents
        # Env vars listed here are informational; they are primarily used by the MCP servers themselves,
        # loaded via .env by BlueprintBase or the MCP process.
        # "env_vars": ["SERPAPI_API_KEY", "HASS_URL", "HASS_API_KEY"]
    }

    def __init__(self, blueprint_id: str = "jeeves", config=None, config_path=None, **kwargs):
        super().__init__(blueprint_id, config=config, config_path=config_path, **kwargs)
        # Add other attributes as needed for Jeeves
        # ...

    # Caches for OpenAI client and Model instances.
    # NOTE(review): class-level mutable dicts are shared across instances;
    # create_starting_agent_original() rebinds fresh per-instance copies.
    _openai_client_cache: Dict[str, AsyncOpenAI] = {}
    _model_instance_cache: Dict[str, Model] = {}

    def get_model_name(self):
        """Resolve the model name from the active LLM profile ('gpt-4o' fallback)."""
        if hasattr(self, '_resolve_llm_profile'):
            profile = self._resolve_llm_profile()
        else:
            profile = getattr(self, 'llm_profile_name', None) or 'default'
        llm_section = self.config.get('llm', {}) if hasattr(self, 'config') else {}
        return llm_section.get(profile, {}).get('model', 'gpt-4o')

    # --- Model Instantiation Helper --- (Copied from BurntNoodles)
    def _get_model_instance(self, profile_name: str) -> Model:
        """
        Retrieves or creates an LLM Model instance based on the configuration profile.
        Handles client instantiation and caching. Uses OpenAIChatCompletionsModel.

        Raises:
            ValueError: on a missing profile, missing 'model' key, unsupported
                provider, or a client/model construction failure.
        """
        if profile_name in self._model_instance_cache:
            logger.debug(f"Using cached Model instance for profile '{profile_name}'.")
            return self._model_instance_cache[profile_name]

        logger.debug(f"Creating new Model instance for profile '{profile_name}'.")
        profile_data = self.get_llm_profile(profile_name)
        if not profile_data:
            logger.critical(f"Cannot create Model instance: LLM profile '{profile_name}' (or 'default') not found.")
            raise ValueError(f"Missing LLM profile configuration for '{profile_name}' or 'default'.")

        provider = profile_data.get("provider", "openai").lower()
        model_name = profile_data.get("model")
        if not model_name:
            logger.critical(f"LLM profile '{profile_name}' missing 'model' key.")
            raise ValueError(f"Missing 'model' key in LLM profile '{profile_name}'.")

        # Only the OpenAI provider is wired up; fail fast on anything else.
        if provider != "openai":
            logger.error(f"Unsupported LLM provider '{provider}' in profile '{profile_name}'.")
            raise ValueError(f"Unsupported LLM provider: {provider}")

        # One AsyncOpenAI client per (provider, base_url) pair.
        client_cache_key = f"{provider}_{profile_data.get('base_url')}"
        if client_cache_key not in self._openai_client_cache:
            client_kwargs = {"api_key": profile_data.get("api_key"), "base_url": profile_data.get("base_url")}
            filtered_client_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
            # Never log the API key.
            log_client_kwargs = {k: v for k, v in filtered_client_kwargs.items() if k != 'api_key'}
            logger.debug(f"Creating new AsyncOpenAI client for profile '{profile_name}' with config: {log_client_kwargs}")
            try:
                self._openai_client_cache[client_cache_key] = AsyncOpenAI(**filtered_client_kwargs)
            except Exception as e:
                logger.error(f"Failed to create AsyncOpenAI client for profile '{profile_name}': {e}", exc_info=True)
                raise ValueError(f"Failed to initialize OpenAI client for profile '{profile_name}': {e}") from e

        openai_client_instance = self._openai_client_cache[client_cache_key]

        logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') for profile '{profile_name}'.")
        try:
            model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=openai_client_instance)
            self._model_instance_cache[profile_name] = model_instance
            return model_instance
        except Exception as e:
            logger.error(f"Failed to instantiate OpenAIChatCompletionsModel for profile '{profile_name}': {e}", exc_info=True)
            raise ValueError(f"Failed to initialize LLM provider for profile '{profile_name}': {e}") from e

    def create_starting_agent(self, mcp_servers=None):
        """Build the single CLI-facing Jeeves agent with fileops/shell tools.

        Returns a real Agent for CLI use; tools come from self.tool_registry
        when one is attached, otherwise the agent starts with none.
        """
        from agents import Agent
        from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
        from openai import AsyncOpenAI
        model_name = self.get_model_name()
        openai_client = AsyncOpenAI(api_key=os.environ.get('OPENAI_API_KEY'))
        model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=openai_client)
        tool_registry = getattr(self, 'tool_registry', None)
        llm_tools = tool_registry.get_llm_tools(as_openai_spec=True) if tool_registry is not None else []
        python_tools = tool_registry.python_tools if tool_registry is not None else {}
        agent = Agent(
            name='Jeeves',  # Capitalized to match test expectations
            model=model_instance,
            instructions="You are a highly skilled automation and fileops agent.",
            tools=llm_tools
        )
        agent.python_tools = python_tools
        return agent

    def create_starting_agent_original(self, mcp_servers: List[MCPServer]) -> Agent:
        """Creates the Jeeves agent team: Jeeves, Mycroft, Gutenberg."""
        logger.debug("Creating Jeeves agent team...")
        # Reset caches with per-instance dicts (shadows the class-level ones).
        self._model_instance_cache = {}
        self._openai_client_cache = {}

        default_profile_name = self.config.get("llm_profile", "default")
        logger.debug(f"Using LLM profile '{default_profile_name}' for Jeeves agents.")
        model_instance = self._get_model_instance(default_profile_name)

        # Instantiate specialist agents, passing the *required* MCP servers.
        # Note: Agent class currently accepts the full list, but ideally would filter or select.
        # We rely on the agent's instructions and the MCP server name matching for now.
        mycroft_agent = Agent(
            name="Mycroft",
            model=model_instance,
            instructions=mycroft_instructions,
            tools=[],  # Mycroft uses MCP, not function tools
            mcp_servers=[s for s in mcp_servers if s.name == "duckduckgo-search"]  # Pass only relevant MCP
        )
        gutenberg_agent = Agent(
            name="Gutenberg",
            model=model_instance,
            instructions=gutenberg_instructions,
            tools=[],  # Gutenberg uses MCP
            mcp_servers=[s for s in mcp_servers if s.name == "home-assistant"]  # Pass only relevant MCP
        )

        # Instantiate the coordinator agent (Jeeves)
        jeeves_agent = Agent(
            name="Jeeves",
            model=model_instance,
            instructions=jeeves_instructions,
            tools=[  # Jeeves delegates via Agent-as-Tool
                mycroft_agent.as_tool(
                    tool_name="Mycroft",
                    tool_description="Delegate private web search tasks to Mycroft (provide the search query)."
                ),
                gutenberg_agent.as_tool(
                    tool_name="Gutenberg",
                    tool_description="Delegate home automation tasks to Gutenberg (provide the specific action/command)."
                ),
                read_file,
                write_file,
                list_files,
                execute_shell_command
            ],
            # Jeeves itself doesn't directly need MCP servers in this design
            mcp_servers=[]
        )

        # Specialists also get the fileops/shell toolset.
        mycroft_agent.tools.extend([read_file, write_file, list_files, execute_shell_command])
        gutenberg_agent.tools.extend([read_file, write_file, list_files, execute_shell_command])

        logger.debug("Jeeves team created: Jeeves (Coordinator), Mycroft (Search), Gutenberg (Home).")
        return jeeves_agent  # Jeeves is the entry point

    async def run(self, messages: List[Dict[str, Any]], **kwargs):
        """Main execution entry point for the Jeeves blueprint.

        Async generator: yields spinner updates while work is in flight and a
        boxed result message once the underlying runner finishes.
        """
        logger.info("JeevesBlueprint run method called.")
        instruction = messages[-1].get("content", "") if messages else ""
        ux = BlueprintUXImproved(style="serious")
        spinner_idx = 0
        start_time = time.time()
        spinner_yield_interval = 1.0  # seconds between spinner updates
        last_spinner_time = start_time
        yielded_spinner = False
        result_chunks = []
        try:
            import inspect
            from agents import Runner
            runner_gen = Runner.run(self.create_starting_agent([]), instruction)
            # BUG FIX: Runner.run is a coroutine in current 'agents' releases
            # (see _run_non_interactive, which awaits it). The original code
            # called next() on it, which raised TypeError on every pass and the
            # loop below never terminated. Await it and emit one boxed result.
            if inspect.iscoroutine(runner_gen):
                result = await runner_gen
                content = getattr(result, 'final_output', str(result))
                summary = ux.summary("Operation", 1, {"instruction": instruction[:40]})
                box = ux.ansi_emoji_box(
                    title="Jeeves Result",
                    content=content,
                    summary=summary,
                    params={"instruction": instruction[:40]},
                    result_count=1,
                    op_type="run",
                    status="success"
                )
                yield {"messages": [{"role": "assistant", "content": box}]}
                return
            # Legacy path: Runner.run returned a synchronous generator.
            while True:
                now = time.time()
                try:
                    chunk = next(runner_gen)
                    result_chunks.append(chunk)
                    # If chunk is a final result, wrap and yield
                    if chunk and isinstance(chunk, dict) and "messages" in chunk:
                        content = chunk["messages"][0]["content"] if chunk["messages"] else ""
                        summary = ux.summary("Operation", len(result_chunks), {"instruction": instruction[:40]})
                        box = ux.ansi_emoji_box(
                            title="Jeeves Result",
                            content=content,
                            summary=summary,
                            params={"instruction": instruction[:40]},
                            result_count=len(result_chunks),
                            op_type="run",
                            status="success"
                        )
                        yield {"messages": [{"role": "assistant", "content": box}]}
                    else:
                        yield chunk
                    yielded_spinner = False
                except StopIteration:
                    break
                except Exception:
                    # Work not finished yet: emit a spinner frame at most once
                    # per spinner_yield_interval.
                    if now - last_spinner_time >= spinner_yield_interval:
                        taking_long = (now - start_time > 10)
                        spinner_msg = ux.spinner(spinner_idx, taking_long=taking_long)
                        yield {"messages": [{"role": "assistant", "content": spinner_msg}]}
                        spinner_idx += 1
                        last_spinner_time = now
                        yielded_spinner = True
            if not result_chunks and not yielded_spinner:
                yield {"messages": [{"role": "assistant", "content": ux.spinner(0)}]}
        except Exception as e:
            logger.error(f"Error during Jeeves run: {e}", exc_info=True)
            yield {"messages": [{"role": "assistant", "content": f"An error occurred: {e}"}]}

    async def _run_non_interactive(self, instruction: str, **kwargs) -> Any:
        """Run a single instruction to completion, yielding result message dicts."""
        logger.info(f"Running Jeeves non-interactively with instruction: '{instruction[:100]}...'")
        mcp_servers = kwargs.get("mcp_servers", [])
        agent = self.create_starting_agent(mcp_servers=mcp_servers)
        # Use Runner.run as a classmethod for portability
        from agents import Runner
        try:
            result = await Runner.run(agent, instruction)
            # If result is a list/iterable, yield each chunk; else yield as single message
            if isinstance(result, (list, tuple)):
                for chunk in result:
                    yield chunk
            else:
                yield {"messages": [{"role": "assistant", "content": getattr(result, 'final_output', str(result))}]}
        except Exception as e:
            logger.error(f"Error during non-interactive run: {e}", exc_info=True)
            yield {"messages": [{"role": "assistant", "content": f"An error occurred: {e}"}]}
|
557
|
+
|
558
|
+
# Standard Python entry point
if __name__ == "__main__":
    import asyncio
    import json

    # Banner for the self-test ("ultimate limit test") mode.
    print("\033[1;36m\n╔══════════════════════════════════════════════════════════════╗\n║ 🤖 JEEVES: SWARM ULTIMATE LIMIT TEST ║\n╠══════════════════════════════════════════════════════════════╣\n║ ULTIMATE: Multi-agent, multi-step, parallel, cross-agent ║\n║ orchestration, error injection, and viral patching. ║\n╚══════════════════════════════════════════════════════════════╝\033[0m")
    blueprint = JeevesBlueprint(blueprint_id="ultimate-limit-test")

    async def run_limit_test():
        """Fire several blueprint.run() conversations in parallel and print results."""
        tasks = []

        async def collect_responses(async_gen):
            # Drain an async generator into a list so gather() can await it.
            results = []
            async for item in async_gen:
                results.append(item)
            return results

        # Step 1: one error-injection/rollback conversation per butler.
        for butler in ["Jeeves", "Mycroft", "Gutenberg"]:
            messages = [
                {"role": "user", "content": f"Have {butler} perform a complex task, inject an error, trigger rollback, and log all steps."}
            ]
            tasks.append(collect_responses(blueprint.run(messages)))
        # Step 2: Multi-agent workflow with viral patching
        messages = [
            {"role": "user", "content": "Jeeves delegates to Mycroft, who injects a bug, Gutenberg detects and patches it, Jeeves verifies the patch. Log all agent handoffs and steps."}
        ]
        tasks.append(collect_responses(blueprint.run(messages)))
        results = await asyncio.gather(*[asyncio.create_task(t) for t in tasks], return_exceptions=True)
        for idx, result in enumerate(results):
            print(f"\n[PARALLEL TASK {idx+1}] Result:")
            if isinstance(result, Exception):
                print(f"Exception: {result}")
            else:
                for response in result:
                    print(json.dumps(response, indent=2))

    asyncio.run(run_limit_test())
|
590
|
+
|
591
|
+
# --- CLI entry point ---
def main():
    """Run the Jeeves CLI.

    With a positional prompt argument, runs a single blueprint conversation.
    Without one, drops into an interactive REPL (one conversation per line,
    'exit'/'quit'/'q' or EOF/Ctrl-C to leave).
    """
    import argparse
    import asyncio

    parser = argparse.ArgumentParser(description="Jeeves: Swarm-powered digital butler and code assistant.")
    parser.add_argument("prompt", nargs="?", help="Prompt or task (quoted)")
    parser.add_argument("-i", "--input", help="Input file or directory", default=None)
    parser.add_argument("-o", "--output", help="Output file", default=None)
    parser.add_argument("--model", help="Model name (codex, gpt, etc.)", default=None)
    parser.add_argument("--debug", action="store_true", help="Enable debug logging")
    args = parser.parse_args()

    blueprint = JeevesBlueprint(blueprint_id="cli-jeeves")

    async def run_and_print(messages):
        # Stream responses for one conversation, showing a spinner while the
        # blueprint runs, a progress box for progressive search/analysis
        # chunks, and a final summary box with everything collected.
        # (This coroutine was previously duplicated verbatim for the REPL and
        # single-prompt paths; it is now defined once and shared.)
        spinner = JeevesSpinner()
        spinner.start()
        try:
            all_results = []
            async for response in blueprint.run(messages, model=args.model):
                content = response["messages"][0]["content"] if (isinstance(response, dict) and "messages" in response and response["messages"]) else str(response)
                all_results.append(content)
                # If this is a progressive search/analysis output, show operation box
                if isinstance(response, dict) and (response.get("progress") or response.get("matches")):
                    display_operation_box(
                        title="Progressive Operation",
                        content="\n".join(response.get("matches", [])),
                        style="bold cyan" if response.get("type") == "code_search" else "bold magenta",
                        result_count=len(response.get("matches", [])) if response.get("matches") is not None else None,
                        params={k: v for k, v in response.items() if k not in {'matches', 'progress', 'total', 'truncated', 'done'}},
                        progress_line=response.get('progress'),
                        total_lines=response.get('total'),
                        spinner_state=spinner.current_spinner_state(),
                        op_type=response.get("type", "search"),
                        emoji="🔍" if response.get("type") == "code_search" else "🧠"
                    )
        finally:
            spinner.stop()
        display_operation_box(
            title="Jeeves Output",
            content="\n".join(all_results),
            style="bold green",
            result_count=len(all_results),
            params={"prompt": messages[0]["content"]},
            op_type="jeeves"
        )

    if args.prompt:
        # Single-shot mode: run the given prompt once and exit.
        asyncio.run(run_and_print([{"role": "user", "content": args.prompt}]))
        return

    # Interactive REPL: each line of input is an independent conversation,
    # matching the original behavior of resetting `messages` after every run.
    print("Type your prompt (or 'exit' to quit):\n")
    while True:
        try:
            user_input = input("You: ").strip()
        except (EOFError, KeyboardInterrupt):
            print("\nExiting Jeeves CLI.")
            break
        if user_input.lower() in {"exit", "quit", "q"}:
            print("Goodbye!")
            break
        asyncio.run(run_and_print([{"role": "user", "content": user_input}]))
|
687
|
+
|
688
|
+
# Standard CLI entry point when the module is executed directly.
# NOTE(review): an earlier `if __name__ == "__main__":` block in this file
# runs the limit test before this guard is reached — confirm both are intended.
if __name__ == "__main__":
    main()
|
690
|
+
|
691
|
+
class OperationBox:
    """Render Jeeves operation output in a styled rich Panel.

    Chooses an emoji and panel style based on the operation type and prints
    the boxed content to this instance's rich Console.
    """

    def __init__(self):
        # BUG FIX: print_box referenced self.console, but no constructor ever
        # assigned it, so every call raised AttributeError. Give each box its
        # own rich Console.
        self.console = Console()

    def print_box(self, title, content, style="blue", *, result_count: int = None, params: dict = None, op_type: str = None, progress_line: int = None, total_lines: int = None, spinner_state: str = None, emoji: str = None):
        # Use Jeeves-specific emoji and panel style
        if emoji is None:
            emoji = "🤵"
        # Operation-type emoji overrides (search/analysis/error).
        if op_type == "search":
            emoji = "🔎"
        elif op_type == "analysis":
            emoji = "🧹"
        elif op_type == "error":
            emoji = "❌"
        # Search operations always use the magenta style regardless of caller style.
        style = "bold magenta" if op_type == "search" else style
        box_content = f"{emoji} {content}"
        self.console.print(Panel(box_content, title=f"{emoji} {title}", style=style, box=rich_box.ROUNDED))
|
705
|
+
|
706
|
+
# Rich rendering dependencies used by OperationBox and the display helpers.
# NOTE(review): these imports sit at the bottom of the module; they still run
# at import time (before any call into the classes above), but conventionally
# belong at the top of the file — confirm nothing relies on this ordering.
from rich.console import Console
from rich.panel import Panel
from rich import box as rich_box
from rich.text import Text
from rich.style import Style

# Shared console instance for module-level rich output.
console = Console()
|