open-swarm 0.1.1744936173__py3-none-any.whl → 0.1.1744936297__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {open_swarm-0.1.1744936173.dist-info → open_swarm-0.1.1744936297.dist-info}/METADATA +1 -1
- {open_swarm-0.1.1744936173.dist-info → open_swarm-0.1.1744936297.dist-info}/RECORD +27 -27
- {open_swarm-0.1.1744936173.dist-info → open_swarm-0.1.1744936297.dist-info}/entry_points.txt +1 -0
- swarm/blueprints/digitalbutlers/blueprint_digitalbutlers.py +28 -0
- swarm/blueprints/divine_code/blueprint_divine_code.py +26 -0
- swarm/blueprints/django_chat/blueprint_django_chat.py +15 -4
- swarm/blueprints/echocraft/blueprint_echocraft.py +9 -2
- swarm/blueprints/family_ties/blueprint_family_ties.py +28 -0
- swarm/blueprints/gaggle/blueprint_gaggle.py +117 -15
- swarm/blueprints/monkai_magic/blueprint_monkai_magic.py +10 -0
- swarm/blueprints/nebula_shellz/blueprint_nebula_shellz.py +47 -29
- swarm/blueprints/omniplex/blueprint_omniplex.py +21 -0
- swarm/blueprints/rue_code/blueprint_rue_code.py +24 -25
- swarm/blueprints/suggestion/blueprint_suggestion.py +35 -12
- swarm/consumers.py +19 -0
- swarm/extensions/blueprint/agent_utils.py +1 -1
- swarm/extensions/blueprint/blueprint_base.py +265 -43
- swarm/extensions/blueprint/blueprint_discovery.py +13 -11
- swarm/extensions/blueprint/cli_handler.py +33 -55
- swarm/extensions/blueprint/output_utils.py +78 -0
- swarm/extensions/blueprint/spinner.py +30 -21
- swarm/extensions/cli/cli_args.py +6 -0
- swarm/extensions/config/config_loader.py +4 -1
- swarm/llm/chat_completion.py +31 -1
- swarm/settings.py +6 -7
- {open_swarm-0.1.1744936173.dist-info → open_swarm-0.1.1744936297.dist-info}/WHEEL +0 -0
- {open_swarm-0.1.1744936173.dist-info → open_swarm-0.1.1744936297.dist-info}/licenses/LICENSE +0 -0
--- a/swarm/extensions/blueprint/blueprint_base.py
+++ b/swarm/extensions/blueprint/blueprint_base.py
@@ -1,3 +1,16 @@
+# --- REMOVE noisy debug/framework prints unless SWARM_DEBUG=1 ---
+import os
+
+def _should_debug():
+    return os.environ.get("SWARM_DEBUG") == "1"
+
+def _debug_print(*args, **kwargs):
+    if _should_debug():
+        print(*args, **kwargs)
+
+def _framework_print(*args, **kwargs):
+    if _should_debug():
+        print(*args, **kwargs)
 
 # --- Content for src/swarm/extensions/blueprint/blueprint_base.py ---
 import logging
@@ -8,9 +21,98 @@ from pathlib import Path
 from django.apps import apps # Import Django apps registry
 
 # Keep the function import
-from swarm.extensions.config.config_loader import get_profile_from_config
+from swarm.extensions.config.config_loader import get_profile_from_config, _substitute_env_vars
+
+from openai import AsyncOpenAI
+from agents import set_default_openai_client
+from .slash_commands import slash_registry, SlashCommandRegistry
+from blueprint_agents import * # Import all from blueprint_agents
 
 logger = logging.getLogger(__name__)
+from rich.console import Console
+import traceback
+
+# --- PATCH: Suppress OpenAI tracing/telemetry errors if using LiteLLM/custom endpoint ---
+import logging
+import os
+if os.environ.get("LITELLM_BASE_URL") or os.environ.get("OPENAI_BASE_URL"):
+    # Silence openai.agents tracing/telemetry errors
+    logging.getLogger("openai.agents").setLevel(logging.CRITICAL)
+    try:
+        import openai.agents.tracing
+        openai.agents.tracing.TracingClient = lambda *a, **kw: None
+    except Exception:
+        pass
+
+# --- Spinner/Status Message Enhancements ---
+# To be used by all blueprints for consistent UX
+import itertools
+import sys
+import threading
+import time
+
+class Spinner:
+    def __init__(self, message_sequence=None, interval=0.3, slow_threshold=10):
+        self.message_sequence = message_sequence or ['Generating.', 'Generating..', 'Generating...', 'Running...']
+        self.interval = interval
+        self.slow_threshold = slow_threshold # seconds before 'Taking longer than expected'
+        self._stop_event = threading.Event()
+        self._thread = None
+        self._start_time = None
+
+    def start(self):
+        self._stop_event.clear()
+        self._start_time = time.time()
+        self._thread = threading.Thread(target=self._spin)
+        self._thread.start()
+
+    def _spin(self):
+        for msg in itertools.cycle(self.message_sequence):
+            if self._stop_event.is_set():
+                break
+            elapsed = time.time() - self._start_time
+            if elapsed > self.slow_threshold:
+                sys.stdout.write('\rGenerating... Taking longer than expected ')
+            else:
+                sys.stdout.write(f'\r{msg} ')
+            sys.stdout.flush()
+            time.sleep(self.interval)
+        sys.stdout.write('\r')
+        sys.stdout.flush()
+
+    def stop(self, final_message=''):
+        self._stop_event.set()
+        if self._thread:
+            self._thread.join()
+        if final_message:
+            sys.stdout.write(f'\r{final_message}\n')
+            sys.stdout.flush()
+
+# Usage Example (to be called in blueprints):
+# spinner = Spinner()
+# spinner.start()
+# ... do work ...
+# spinner.stop('Done!')
+
+def configure_openai_client_from_env():
+    """
+    Framework-level function: Always instantiate and set the default OpenAI client.
+    Prints out the config being used for debug.
+    """
+    import os
+    from agents import set_default_openai_client
+    from openai import AsyncOpenAI
+    base_url = os.environ.get("LITELLM_BASE_URL") or os.environ.get("OPENAI_BASE_URL")
+    api_key = os.environ.get("LITELLM_API_KEY") or os.environ.get("OPENAI_API_KEY")
+    _debug_print(f"[DEBUG] Using OpenAI client config: base_url={base_url}, api_key={'set' if api_key else 'NOT SET'}")
+    if base_url and api_key:
+        client = AsyncOpenAI(base_url=base_url, api_key=api_key)
+        set_default_openai_client(client)
+        _framework_print(f"[FRAMEWORK] Set default OpenAI client: base_url={base_url}, api_key={'set' if api_key else 'NOT SET'}")
+    else:
+        _framework_print("[FRAMEWORK] WARNING: base_url or api_key missing, OpenAI client not set!")
+
+configure_openai_client_from_env()
 
 class BlueprintBase(ABC):
     """
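Aside: `configure_openai_client_from_env()` above prefers the LiteLLM variables and falls back to the plain OpenAI ones. A standalone sketch of that precedence check, re-implemented here so it can be run without importing the package (the helper name is hypothetical):

```python
import os
from typing import Optional, Tuple

def resolve_openai_client_settings() -> Tuple[Optional[str], Optional[str]]:
    """Mirror the precedence used above: LITELLM_* first, then OPENAI_*."""
    base_url = os.environ.get("LITELLM_BASE_URL") or os.environ.get("OPENAI_BASE_URL")
    api_key = os.environ.get("LITELLM_API_KEY") or os.environ.get("OPENAI_API_KEY")
    return base_url, api_key

base_url, api_key = resolve_openai_client_settings()
if base_url and api_key:
    print(f"Would configure AsyncOpenAI(base_url={base_url!r}, api_key=***)")
else:
    print("base_url or api_key missing; the default client would be left unset")
```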
@@ -18,53 +120,157 @@ class BlueprintBase(ABC):
 
     Defines the core interface for blueprint initialization and execution.
     """
-
-        """
-        Initializes the blueprint.
+    enable_terminal_commands: bool = False # By default, terminal command execution is disabled
 
-
-
-            config_path: Optional path to a specific swarm_config.json file.
-                         If None, the standard search logic will be used.
+    @classmethod
+    def main(cls):
         """
-
-
-
-
-
-
-
-
-
-
-
+        Standard CLI entry point for all blueprints.
+        Subclasses can override metadata/config_path if needed.
+        """
+        from swarm.extensions.blueprint.cli_handler import run_blueprint_cli
+        from pathlib import Path
+        swarm_version = getattr(cls, "SWARM_VERSION", "1.0.0")
+        config_path = getattr(cls, "DEFAULT_CONFIG_PATH", Path(__file__).parent / "swarm_config.json")
+        run_blueprint_cli(cls, swarm_version=swarm_version, default_config_path=config_path)
+
+    def display_splash_screen(self, animated: bool = False):
+        """Default splash screen. Subclasses can override for custom CLI/API branding."""
+        console = Console()
+        console.print(f"[bold cyan]Welcome to {self.__class__.__name__}![/]", style="bold")
+
+    def __init__(self, blueprint_id: str, config_path: Optional[Path] = None, enable_terminal_commands: Optional[bool] = None):
+        try:
+            if not blueprint_id:
+                raise ValueError("blueprint_id cannot be empty or None")
+            self.blueprint_id = blueprint_id
+            self.config_path = config_path # Note: config_path is currently unused if we rely on AppConfig
+            self._config: Optional[Dict[str, Any]] = None
+            self._llm_profile_name: Optional[str] = None
+            self._llm_profile_data: Optional[Dict[str, Any]] = None
+            self._markdown_output: bool = True # Default
+            # Allow per-instance override
+            if enable_terminal_commands is not None:
+                self.enable_terminal_commands = enable_terminal_commands
+            # Else: use class attribute (default False or set by subclass)
+
+            logger.info(f"Initializing blueprint '{self.blueprint_id}' (Type: {self.__class__.__name__})")
+
+            # --- Ensure custom OpenAI client for custom LLM providers ---
+            import os
+
+            # Remove monkey patching and envvar hacks. Always pass config values directly.
+            # (Retain only explicit AsyncOpenAI client instantiation in blueprints)
+            # (No changes needed here for direct client pattern)
+
+            self._load_and_process_config()
+        except AttributeError as e:
+            logger.debug(f"[BlueprintBase.__init__] AttributeError: {e}")
+            traceback.print_exc()
+            raise
 
     def _load_and_process_config(self):
-        """Loads the main Swarm config and extracts relevant settings."""
+        """Loads the main Swarm config and extracts relevant settings. Falls back to empty config if Django unavailable or not found."""
+        import os
+        import json
+        from pathlib import Path
+        def redact(val):
+            if not isinstance(val, str) or len(val) <= 4:
+                return "****"
+            return val[:2] + "*" * (len(val)-4) + val[-2:]
+        def redact_dict(d):
+            if isinstance(d, dict):
+                return {k: (redact_dict(v) if not (isinstance(v, str) and ("key" in k.lower() or "token" in k.lower() or "secret" in k.lower())) else redact(v)) for k, v in d.items()}
+            elif isinstance(d, list):
+                return [redact_dict(item) for item in d]
+            return d
         try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            try:
+                # --- Get config from the AppConfig instance (Django) ---
+                app_config_instance = apps.get_app_config('swarm')
+                if not hasattr(app_config_instance, 'config') or not app_config_instance.config:
+                    raise ValueError("AppConfig for 'swarm' does not have a valid 'config' attribute.")
+                config = app_config_instance.config
+                logger.debug("Loaded config from Django AppConfig.")
+            except Exception as e:
+                if _should_debug():
+                    logger.warning(f"Falling back to CLI/home config due to error: {e}")
+                config = None
+                # 1. CLI argument (not handled here, handled in cli_handler)
+                # 2. Current working directory
+                cwd_config = Path.cwd() / "swarm_config.json"
+                if cwd_config.exists():
+                    with open(cwd_config, 'r') as f:
+                        config = json.load(f)
+                # 3. XDG_CONFIG_HOME or ~/.config/swarm/swarm_config.json
+                elif os.environ.get("XDG_CONFIG_HOME"):
+                    xdg_config = Path(os.environ["XDG_CONFIG_HOME"]) / "swarm" / "swarm_config.json"
+                    if xdg_config.exists():
+                        with open(xdg_config, 'r') as f:
+                            config = json.load(f)
+                elif (Path.home() / ".config/swarm/swarm_config.json").exists():
+                    with open(Path.home() / ".config/swarm/swarm_config.json", 'r') as f:
+                        config = json.load(f)
+                # 4. Legacy fallback: ~/.swarm/swarm_config.json
+                elif (Path.home() / ".swarm/swarm_config.json").exists():
+                    with open(Path.home() / ".swarm/swarm_config.json", 'r') as f:
+                        config = json.load(f)
+                # 5. Fallback: OPENAI_API_KEY envvar
+                elif os.environ.get("OPENAI_API_KEY"):
+                    config = {
+                        "llm": {"default": {"provider": "openai", "model": "gpt-3.5-turbo", "api_key": os.environ["OPENAI_API_KEY"]}},
+                        "settings": {"default_llm_profile": "default", "default_markdown_output": True},
+                        "blueprints": {},
+                        "llm_profile": "default",
+                        "mcpServers": {}
+                    }
+                    logger.info("No config file found, using default config with OPENAI_API_KEY for CLI mode.")
+                else:
+                    config = {}
+                    logger.warning("No config file found and OPENAI_API_KEY is not set. Using empty config. CLI blueprints may fail if LLM config is required.")
+            if config is not None:
+                config = _substitute_env_vars(config)
+            self._config = config or {}
+
+            # --- After config is loaded, set OpenAI client from config if possible ---
+            try:
+                llm_profiles = self._config.get("llm", {})
+                default_profile = llm_profiles.get("default", {})
+                base_url = default_profile.get("base_url")
+                api_key = default_profile.get("api_key")
+                # Expand env vars if present
+                import os
+                if base_url and base_url.startswith("${"):
+                    var = base_url[2:-1]
+                    base_url = os.environ.get(var, base_url)
+                if api_key and api_key.startswith("${"):
+                    var = api_key[2:-1]
+                    api_key = os.environ.get(var, api_key)
+                if base_url and api_key:
+                    from openai import AsyncOpenAI
+                    from agents import set_default_openai_client
+                    _debug_print(f"[DEBUG] (config) Setting OpenAI client: base_url={base_url}, api_key={'set' if api_key else 'NOT SET'}")
+                    client = AsyncOpenAI(base_url=base_url, api_key=api_key)
+                    set_default_openai_client(client)
+            except Exception as e:
+                _debug_print(f"[DEBUG] Failed to set OpenAI client from config: {e}")
+
+            # --- Debug: Print and log redacted config ---
+            redacted_config = redact_dict(self._config)
+            logger.debug(f"Loaded config (redacted): {json.dumps(redacted_config, indent=2)}")
+
+            # --- Process LLM profile name and data ---
+            settings_section = self._config.get("settings", {})
+            llm_section = self._config.get("llm", {})
+            default_profile = settings_section.get("default_llm_profile") or "default"
+            self._llm_profile_name = self._config.get("llm_profile") or default_profile
+            if "profiles" in llm_section:
+                self._llm_profile_data = llm_section["profiles"].get(self._llm_profile_name, {})
+            else:
+                self._llm_profile_data = llm_section.get(self._llm_profile_name, {})
+
             blueprint_specific_settings = self._config.get("blueprints", {}).get(self.blueprint_id, {})
-            global_markdown_setting =
+            global_markdown_setting = settings_section.get("default_markdown_output", True)
             self._markdown_output = blueprint_specific_settings.get("markdown_output", global_markdown_setting)
             logger.debug(f"Markdown output for '{self.blueprint_id}': {self._markdown_output}")
 
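The rewritten `_load_and_process_config` above falls back through several file locations when the Django `AppConfig` is unavailable. A standalone sketch of that resolution order (cwd, XDG, `~/.config`, legacy `~/.swarm`, then the `OPENAI_API_KEY` env var); this mirrors the hunk for illustration and is not the packaged implementation:

```python
import json
import os
from pathlib import Path

def resolve_swarm_config() -> dict:
    """Return the first swarm_config.json found, else a minimal env-based config."""
    candidates = [
        Path.cwd() / "swarm_config.json",
        Path(os.environ.get("XDG_CONFIG_HOME", Path.home() / ".config")) / "swarm" / "swarm_config.json",
        Path.home() / ".config" / "swarm" / "swarm_config.json",
        Path.home() / ".swarm" / "swarm_config.json",   # legacy fallback
    ]
    for candidate in candidates:
        if candidate.exists():
            return json.loads(candidate.read_text())
    if os.environ.get("OPENAI_API_KEY"):
        return {"llm": {"default": {"provider": "openai", "model": "gpt-3.5-turbo",
                                    "api_key": os.environ["OPENAI_API_KEY"]}}}
    return {}

print(list(resolve_swarm_config().keys()))
```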
@@ -96,6 +302,19 @@ class BlueprintBase(ABC):
             raise RuntimeError("LLM profile name accessed before initialization or after failure.")
         return self._llm_profile_name
 
+    @property
+    def slash_commands(self) -> SlashCommandRegistry:
+        """Access the global slash command registry. Blueprints can register new commands here."""
+        return slash_registry
+
+    def get_llm_profile(self, profile_name: str) -> dict:
+        """Returns the LLM profile dict for the given profile name from config, or empty dict if not found.
+        Supports both llm.profiles and direct llm keys for backward compatibility."""
+        llm_section = self.config.get("llm", {})
+        if "profiles" in llm_section:
+            return llm_section["profiles"].get(profile_name, {})
+        return llm_section.get(profile_name, {})
+
     @property
     def should_output_markdown(self) -> bool:
         """Returns whether the blueprint should format output as Markdown."""
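`get_llm_profile` above accepts both config layouts: the nested `llm.profiles` form and the flat `llm` mapping. A self-contained sketch of the lookup against both shapes (the sample profile values are illustrative):

```python
# Nested layout: profiles live under llm["profiles"].
nested = {"llm": {"profiles": {"default": {"provider": "openai", "model": "gpt-4o"}}}}

# Flat legacy layout: the profile name is a direct key under llm.
flat = {"llm": {"default": {"provider": "openai", "model": "gpt-4o"}}}

def lookup(config: dict, profile_name: str) -> dict:
    llm_section = config.get("llm", {})
    if "profiles" in llm_section:
        return llm_section["profiles"].get(profile_name, {})
    return llm_section.get(profile_name, {})

assert lookup(nested, "default") == lookup(flat, "default") == {"provider": "openai", "model": "gpt-4o"}
```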
@@ -106,6 +325,9 @@ class BlueprintBase(ABC):
         """
         The main execution method for the blueprint.
         """
+        import os
+        import pprint
+        logger.debug("ENVIRONMENT DUMP BEFORE MODEL CALL:")
+        pprint.pprint(dict(os.environ))
         raise NotImplementedError("Subclasses must implement the 'run' method.")
         yield {}
-
--- a/swarm/extensions/blueprint/blueprint_discovery.py
+++ b/swarm/extensions/blueprint/blueprint_discovery.py
@@ -54,35 +54,38 @@ def discover_blueprints(blueprint_dir: str) -> Dict[str, Type[BlueprintBase]]:
         return blueprints
 
     # Iterate over items inside the base blueprint directory
-    for
-
-
-        if not item_path.is_dir():
+    for subdir in base_dir.iterdir():
+        if not subdir.is_dir():
             continue # Skip files directly under blueprints/
 
         # Use directory name as blueprint name (e.g., 'echocraft')
-        blueprint_name =
-        logger.debug(f"Processing potential blueprint '{blueprint_name}' in directory: {
+        blueprint_name = subdir.name
+        logger.debug(f"Processing potential blueprint '{blueprint_name}' in directory: {subdir.name}")
 
         # Look for the specific .py file, e.g., blueprint_echocraft.py
         py_file_name = f"blueprint_{blueprint_name}.py"
-        py_file_path =
+        py_file_path = subdir / py_file_name
 
         if not py_file_path.is_file():
             # Also check for just {blueprint_name}.py if that's a convention
             alt_py_file_name = f"{blueprint_name}.py"
-            alt_py_file_path =
+            alt_py_file_path = subdir / alt_py_file_name
             if alt_py_file_path.is_file():
                 py_file_path = alt_py_file_path # Use the alternative path
                 py_file_name = alt_py_file_name
                 logger.debug(f"Found alternative blueprint file: {py_file_name}")
             else:
-                logger.warning(f"Skipping directory '{
+                logger.warning(f"Skipping directory '{subdir.name}': Neither '{py_file_name}' nor '{alt_py_file_name}' found.")
                 continue
 
 
         # Construct module import path, e.g., blueprints.echocraft.blueprint_echocraft
-
+        if py_file_path.name.startswith('blueprint_gatcha'):
+            module_import_path = f"swarm.blueprints.gatcha.{py_file_path.stem}"
+        elif py_file_path.name.startswith('blueprint_'):
+            module_import_path = f"swarm.blueprints.{subdir.name}.{py_file_path.stem}"
+        else:
+            continue
 
         try:
             # Ensure parent directory is in path
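Discovery now derives the module import path from the blueprint directory name and the file stem, with a hard-coded special case for `blueprint_gatcha`. A small self-contained sketch of that mapping (the helper name and sample directory are illustrative):

```python
from pathlib import Path
from typing import Optional

def module_path_for(py_file_path: Path, subdir_name: str) -> Optional[str]:
    if py_file_path.name.startswith("blueprint_gatcha"):
        return f"swarm.blueprints.gatcha.{py_file_path.stem}"
    if py_file_path.name.startswith("blueprint_"):
        return f"swarm.blueprints.{subdir_name}.{py_file_path.stem}"
    return None  # anything else is skipped by discovery

print(module_path_for(Path("blueprint_echocraft.py"), "echocraft"))
# swarm.blueprints.echocraft.blueprint_echocraft
```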
@@ -123,4 +126,3 @@ def discover_blueprints(blueprint_dir: str) -> Dict[str, Type[BlueprintBase]]:
 
     logger.info(f"Blueprint discovery complete. Found: {list(blueprints.keys())}")
     return blueprints
-
--- a/swarm/extensions/blueprint/cli_handler.py
+++ b/swarm/extensions/blueprint/cli_handler.py
@@ -2,8 +2,10 @@ import argparse
 import asyncio
 import json
 import logging
+import os
 import signal
 import sys
+from dotenv import load_dotenv
 from pathlib import Path
 from typing import Any, Dict, Optional, Type
 
@@ -14,6 +16,19 @@ if TYPE_CHECKING:
 
 logger = logging.getLogger("swarm.cli")
 
+# --- DEBUG PRINTS REMOVED BY CASCADE ---
+# print(f"[DEBUG] CLI handler startup: sys.argv={sys.argv}")
+# print(f"[DEBUG] CLI handler startup: LITELLM_MODEL={os.environ.get('LITELLM_MODEL')}, DEFAULT_LLM={os.environ.get('DEFAULT_LLM')}")
+
+# --- FORCE LOAD .env EARLY for CLI/LLM ---
+project_root = Path(__file__).parent.parent.parent.parent # /home/chatgpt/open-swarm
+dotenv_path = project_root / ".env"
+load_dotenv(dotenv_path=dotenv_path, override=True)
+# print(f"[DEBUG] Loaded .env from: {dotenv_path}")
+# print(f"[DEBUG] LITELLM_MODEL={os.environ.get('LITELLM_MODEL')}")
+# print(f"[DEBUG] LITELLM_BASE_URL={os.environ.get('LITELLM_BASE_URL')}")
+# print(f"[DEBUG] LITELLM_API_KEY={'set' if os.environ.get('LITELLM_API_KEY') else 'NOT SET'}")
+
 async def _run_blueprint_async_with_shutdown(blueprint: 'BlueprintBase', instruction: str):
     """Runs the blueprint's async method and handles graceful shutdown."""
     loop = asyncio.get_running_loop()
@@ -40,55 +55,20 @@ async def _run_blueprint_async_with_shutdown(blueprint: 'BlueprintBase', instruc
             logger.error(f"Unexpected error setting fallback signal handler for {sig.name}: {e}", exc_info=True)
 
 
-    #
-
-
-
-
-
-
+    # Instead of wrapping in a task and awaiting, use async for to support async generators
+    try:
+        async for chunk in blueprint._run_non_interactive(instruction):
+            # Print the full JSON chunk
+            print(json.dumps(chunk, ensure_ascii=False))
+            # If chunk contains 'messages', print each assistant message's content for CLI/test UX
+            if isinstance(chunk, dict) and 'messages' in chunk:
+                for msg in chunk['messages']:
+                    if msg.get('role') == 'assistant' and 'content' in msg:
+                        print(msg['content'])
+    except Exception as e:
+        logger.critical(f"Blueprint execution failed with unhandled exception: {e}", exc_info=True)
+        sys.exit(1)
 
-    # Cleanup signal handlers after wait returns
-    for sig in (signal.SIGINT, signal.SIGTERM):
-        try:
-            loop.remove_signal_handler(sig)
-        except NotImplementedError:
-            try:
-                signal.signal(sig, signal.SIG_DFL) # Restore default handler
-            except Exception:
-                pass # Ignore errors during cleanup
-
-    # Check if the stop event was triggered
-    if stop_event.is_set():
-        logger.warning("Graceful shutdown initiated. Cancelling main task...")
-        if not main_task.done():
-            main_task.cancel()
-            try:
-                # Wait briefly for cancellation to propagate and cleanup within the task
-                await asyncio.wait_for(main_task, timeout=10.0) # Increased timeout slightly
-            except asyncio.CancelledError:
-                logger.info("Main task successfully cancelled.")
-            except asyncio.TimeoutError:
-                logger.error("Main task did not cancel within timeout. Potential resource leak.")
-            except Exception as e:
-                logger.error(f"Error during task cancellation waiting: {e}", exc_info=True)
-        else:
-            logger.info("Main task already completed before cancellation request.")
-        # The _run_non_interactive's AsyncExitStack should handle MCP cleanup
-    else:
-        # If the main task finished first, check for exceptions
-        if main_task in done:
-            try:
-                main_task.result() # Raise exception if one occurred in the task
-                logger.debug("Main task completed successfully.")
-            except asyncio.CancelledError:
-                logger.info("Main task was cancelled externally (unexpected).")
-            except Exception as e:
-                # Error should have been logged within _run_non_interactive
-                # We exit here because the main operation failed
-                logger.critical(f"Blueprint execution failed with unhandled exception: {e}", exc_info=True)
-                sys.exit(1) # Exit with error status if task failed
 
 
 def run_blueprint_cli(
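The shutdown wrapper above now iterates the blueprint's async generator directly, printing each chunk as JSON and echoing assistant message content. A self-contained sketch of that consumption pattern; the fake generator stands in for `_run_non_interactive`, which is not shown in this diff:

```python
import asyncio
import json

async def fake_run_non_interactive(instruction: str):
    # Stand-in for BlueprintBase._run_non_interactive: yields result chunks.
    yield {"messages": [{"role": "assistant", "content": f"Echo: {instruction}"}]}

async def main():
    async for chunk in fake_run_non_interactive("hello"):
        print(json.dumps(chunk, ensure_ascii=False))   # machine-readable line
        for msg in chunk.get("messages", []):
            if msg.get("role") == "assistant" and "content" in msg:
                print(msg["content"])                  # human-readable line

asyncio.run(main())
```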
@@ -149,14 +129,13 @@ def run_blueprint_cli(
     # --- Instantiate and Run Blueprint ---
     blueprint_instance: Optional['BlueprintBase'] = None
     try:
+        # Always provide a blueprint_id (use class name if not supplied by CLI args)
+        blueprint_id = getattr(args, 'blueprint_id', None) or getattr(blueprint_cls, 'DEFAULT_BLUEPRINT_ID', None) or blueprint_cls.__name__
         # Instantiate the blueprint, passing necessary config/flags
         blueprint_instance = blueprint_cls(
-
-
-
-            debug=args.debug,
-            quiet=args.quiet,
-            force_markdown=args.markdown,
+            blueprint_id,
+            config_path=args.config_path,
+
             # Pass necessary context if needed by __init__
             # default_config_path=default_config_path,
             # swarm_version=swarm_version
@@ -182,4 +161,3 @@ def run_blueprint_cli(
     finally:
         logger.debug("Blueprint CLI execution finished.")
         # Any final cleanup outside the async loop (rarely needed here)
-
--- a/swarm/extensions/blueprint/output_utils.py
+++ b/swarm/extensions/blueprint/output_utils.py
@@ -4,6 +4,7 @@ Output utilities for Swarm blueprints.
 
 import json
 import logging
+import os
 import sys
 from typing import List, Dict, Any
 
@@ -11,6 +12,9 @@ from typing import List, Dict, Any
 try:
     from rich.markdown import Markdown
     from rich.console import Console
+    from rich.panel import Panel
+    from rich.text import Text
+    from rich.rule import Rule
     RICH_AVAILABLE = True
 except ImportError:
     RICH_AVAILABLE = False
@@ -28,6 +32,44 @@ def render_markdown(content: str) -> None:
     md = Markdown(content)
     console.print(md) # Rich handles flushing
 
+def ansi_box(title: str, content: str, color: str = "94", emoji: str = "🔎", border: str = "─", width: int = 70) -> str:
+    """Return a string or Panel with ANSI box formatting for search/analysis results using Rich if available."""
+    if RICH_AVAILABLE:
+        console = Console()
+        # Rich supports color names or hex, map color code to name
+        color_map = {
+            "94": "bright_blue",
+            "96": "bright_cyan",
+            "92": "bright_green",
+            "93": "bright_yellow",
+            "91": "bright_red",
+            "95": "bright_magenta",
+            "90": "grey82",
+        }
+        style = color_map.get(color, "bright_blue")
+        panel = Panel(
+            content,
+            title=f"{emoji} {title} {emoji}",
+            border_style=style,
+            width=width
+        )
+        # Return the rendered panel as a string for testability
+        with console.capture() as capture:
+            console.print(panel)
+        return capture.get()
+    # Fallback: legacy manual ANSI box
+    top = f"\033[{color}m{emoji} {border * (width - 4)} {emoji}\033[0m"
+    mid_title = f"\033[{color}m│ {title.center(width - 6)} │\033[0m"
+    lines = content.splitlines()
+    boxed = [top, mid_title, top]
+    for line in lines:
+        boxed.append(f"\033[{color}m│\033[0m {line.ljust(width - 6)} \033[{color}m│\033[0m")
+    boxed.append(top)
+    return "\n".join(boxed)
+
+def print_search_box(title: str, content: str, color: str = "94", emoji: str = "🔎"):
+    print(ansi_box(title, content, color=color, emoji=emoji))
+
 def pretty_print_response(messages: List[Dict[str, Any]], use_markdown: bool = False, spinner=None) -> None:
     """Format and print messages, optionally rendering assistant content as markdown."""
     # --- DEBUG PRINT ---
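`ansi_box` returns the rendered box as a string (a captured Rich `Panel` when Rich is installed, otherwise a manual ANSI box), so callers can assert on it in tests as well as print it. A usage sketch, assuming the installed wheel exposes the module as `swarm.extensions.blueprint.output_utils` per the file layout above and that it imports cleanly in your environment:

```python
from swarm.extensions.blueprint.output_utils import ansi_box, print_search_box

box = ansi_box("Search Results", "3 matches found in 2 files", color="92", emoji="🔎")
assert "Search Results" in box  # rendered text, safe to snapshot in tests

print_search_box("Analysis", "No issues detected", color="96")
```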
@@ -92,4 +134,40 @@ def pretty_print_response(messages: List[Dict[str, Any]], use_markdown: bool = F
         # --- DEBUG PRINT ---
         print(f"[DEBUG Skipping message {i} with role '{role}']", flush=True)
 
+def print_terminal_command_result(cmd: str, result: dict, max_lines: int = 10):
+    """
+    Render a terminal command result in the CLI with a shell prompt emoji, header, and Rich box.
+    - Header: 🐚 Ran terminal command
+    - Top line: colored, [basename(pwd)] > [cmd]
+    - Output: Rich Panel, max 10 lines, tailing if longer, show hint for toggle
+    """
+    if not RICH_AVAILABLE:
+        # Fallback to simple print
+        print(f"🐚 Ran terminal command\n[{os.path.basename(result['cwd'])}] > {cmd}")
+        lines = result['output'].splitlines()
+        if len(lines) > max_lines:
+            lines = lines[-max_lines:]
+            print("[Output truncated. Showing last 10 lines.]")
+        print("\n".join(lines))
+        return
 
+    console = Console()
+    cwd_base = os.path.basename(result['cwd'])
+    header = Text(f"🐚 Ran terminal command", style="bold yellow")
+    subheader = Rule(f"[{cwd_base}] > {cmd}", style="bright_black")
+    lines = result['output'].splitlines()
+    truncated = False
+    if len(lines) > max_lines:
+        lines = lines[-max_lines:]
+        truncated = True
+    output_body = "\n".join(lines)
+    panel = Panel(
+        output_body,
+        title="Output",
+        border_style="cyan",
+        subtitle="[Output truncated. Showing last 10 lines. Press [t] to expand.]" if truncated else "",
+        width=80
+    )
+    console.print(header)
+    console.print(subheader)
+    console.print(panel)
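`print_terminal_command_result` reads two keys from its `result` argument, `cwd` and `output` (inferred from the accesses above); the dict shape is otherwise unspecified in this diff. A usage sketch under the same import-path assumption as the previous example:

```python
import os
import subprocess

from swarm.extensions.blueprint.output_utils import print_terminal_command_result

cmd = "ls -la"
proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)

# result['cwd'] feeds the "[dir] > cmd" header; result['output'] fills the panel body.
result = {"cwd": os.getcwd(), "output": proc.stdout + proc.stderr}
print_terminal_command_result(cmd, result, max_lines=10)
```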