open-swarm 0.1.1744936173__py3-none-any.whl → 0.1.1744936297__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {open_swarm-0.1.1744936173.dist-info → open_swarm-0.1.1744936297.dist-info}/METADATA +1 -1
  2. {open_swarm-0.1.1744936173.dist-info → open_swarm-0.1.1744936297.dist-info}/RECORD +27 -27
  3. {open_swarm-0.1.1744936173.dist-info → open_swarm-0.1.1744936297.dist-info}/entry_points.txt +1 -0
  4. swarm/blueprints/digitalbutlers/blueprint_digitalbutlers.py +28 -0
  5. swarm/blueprints/divine_code/blueprint_divine_code.py +26 -0
  6. swarm/blueprints/django_chat/blueprint_django_chat.py +15 -4
  7. swarm/blueprints/echocraft/blueprint_echocraft.py +9 -2
  8. swarm/blueprints/family_ties/blueprint_family_ties.py +28 -0
  9. swarm/blueprints/gaggle/blueprint_gaggle.py +117 -15
  10. swarm/blueprints/monkai_magic/blueprint_monkai_magic.py +10 -0
  11. swarm/blueprints/nebula_shellz/blueprint_nebula_shellz.py +47 -29
  12. swarm/blueprints/omniplex/blueprint_omniplex.py +21 -0
  13. swarm/blueprints/rue_code/blueprint_rue_code.py +24 -25
  14. swarm/blueprints/suggestion/blueprint_suggestion.py +35 -12
  15. swarm/consumers.py +19 -0
  16. swarm/extensions/blueprint/agent_utils.py +1 -1
  17. swarm/extensions/blueprint/blueprint_base.py +265 -43
  18. swarm/extensions/blueprint/blueprint_discovery.py +13 -11
  19. swarm/extensions/blueprint/cli_handler.py +33 -55
  20. swarm/extensions/blueprint/output_utils.py +78 -0
  21. swarm/extensions/blueprint/spinner.py +30 -21
  22. swarm/extensions/cli/cli_args.py +6 -0
  23. swarm/extensions/config/config_loader.py +4 -1
  24. swarm/llm/chat_completion.py +31 -1
  25. swarm/settings.py +6 -7
  26. {open_swarm-0.1.1744936173.dist-info → open_swarm-0.1.1744936297.dist-info}/WHEEL +0 -0
  27. {open_swarm-0.1.1744936173.dist-info → open_swarm-0.1.1744936297.dist-info}/licenses/LICENSE +0 -0
swarm/blueprints/nebula_shellz/blueprint_nebula_shellz.py CHANGED
@@ -59,6 +59,13 @@ cypher_instructions = "You are Cypher..."
  tank_instructions = "You are Tank..."
 
  # --- Blueprint Definition ---
+ from rich.console import Console
+ from rich.panel import Panel
+ from rich.live import Live
+ from rich.text import Text
+ import random
+ import time
+
  class NebuchaShellzzarBlueprint(BlueprintBase):
      """A multi-agent blueprint inspired by The Matrix for sysadmin and coding tasks."""
      metadata: ClassVar[Dict[str, Any]] = {
@@ -68,22 +75,46 @@ class NebuchaShellzzarBlueprint(BlueprintBase):
          "tags": ["matrix", "multi-agent", "shell", "coding", "mcp"],
          "required_mcp_servers": ["memory"],
      }
-     _openai_client_cache: Dict[str, AsyncOpenAI] = {}
      _model_instance_cache: Dict[str, Model] = {}
 
      # --- ADDED: Splash Screen ---
-     def display_splash_screen(self):
-         """Displays a Matrix-themed splash screen."""
-         splash_text = """
+     def display_splash_screen(self, animated: bool = False):
+         console = Console()
+         if not animated:
+             splash_text = """
  [bold green]Wake up, Neo...[/]
  [green]The Matrix has you...[/]
  [bold green]Follow the white rabbit.[/]
 
  Initializing NebulaShellzzar Crew...
- """
-         panel = Panel(splash_text.strip(), title="[bold green]NebulaShellzzar[/]", border_style="green", expand=False)
-         self.console.print(panel)
-         self.console.print() # Add a blank line
+ """
+             panel = Panel(splash_text.strip(), title="[bold green]NebulaShellzzar[/]", border_style="green", expand=False)
+             console.print(panel)
+             console.print() # Add a blank line
+         else:
+             # Animated Matrix rain effect
+             width = 60
+             height = 12
+             charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789@#$%&"
+             rain_cols = [0] * width
+             with Live(refresh_per_second=20, console=console, transient=True) as live:
+                 for _ in range(30):
+                     matrix = ""
+                     for y in range(height):
+                         line = ""
+                         for x in range(width):
+                             if random.random() < 0.02:
+                                 rain_cols[x] = 0
+                             char = random.choice(charset) if rain_cols[x] < y else " "
+                             line += f"[green]{char}[/]"
+                         matrix += line + "\n"
+                     panel = Panel(Text.from_markup(matrix), title="[bold green]NebulaShellzzar[/]", border_style="green", expand=False)
+                     live.update(panel)
+                     time.sleep(0.07)
+             console.print("[bold green]Wake up, Neo...[/]")
+             console.print("[green]The Matrix has you...[/]")
+             console.print("[bold green]Follow the white rabbit.[/]")
+             console.print("\nInitializing NebulaShellzzar Crew...\n")
 
      def _get_model_instance(self, profile_name: str) -> Model:
          """Gets or creates a Model instance for the given profile name."""
@@ -100,26 +131,14 @@ Initializing NebulaShellzzar Crew...
          if not model_name:
              logger.critical(f"LLM profile '{profile_name}' is missing the 'model' key.")
              raise ValueError(f"Missing 'model' key in LLM profile '{profile_name}'.")
-         client_cache_key = f"{provider}_{profile_data.get('base_url')}"
-         if provider == "openai":
-             if client_cache_key not in self._openai_client_cache:
-                 client_kwargs = { "api_key": profile_data.get("api_key"), "base_url": profile_data.get("base_url") }
-                 filtered_client_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
-                 log_client_kwargs = {k:v for k,v in filtered_client_kwargs.items() if k != 'api_key'}
-                 logger.debug(f"Creating new AsyncOpenAI client for profile '{profile_name}' with config: {log_client_kwargs}") # Changed to DEBUG
-                 try: self._openai_client_cache[client_cache_key] = AsyncOpenAI(**filtered_client_kwargs)
-                 except Exception as e:
-                     logger.error(f"Failed to create AsyncOpenAI client for profile '{profile_name}': {e}", exc_info=True)
-                     raise ValueError(f"Failed to initialize OpenAI client for profile '{profile_name}': {e}") from e
-             openai_client_instance = self._openai_client_cache[client_cache_key]
-             logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') with specific client instance.")
-             try: model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=openai_client_instance)
-             except Exception as e:
-                 logger.error(f"Failed to instantiate OpenAIChatCompletionsModel for profile '{profile_name}': {e}", exc_info=True)
-                 raise ValueError(f"Failed to initialize LLM provider for profile '{profile_name}': {e}") from e
-         else:
-             logger.error(f"Unsupported LLM provider '{provider}' in profile '{profile_name}'.")
-             raise ValueError(f"Unsupported LLM provider: {provider}")
+
+         # Remove redundant client instantiation; rely on framework-level default client
+         # All blueprints now use the default client set at framework init
+         logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') with default client.")
+         try: model_instance = OpenAIChatCompletionsModel(model=model_name)
+         except Exception as e:
+             logger.error(f"Failed to instantiate OpenAIChatCompletionsModel for profile '{profile_name}': {e}", exc_info=True)
+             raise ValueError(f"Failed to initialize LLM provider for profile '{profile_name}': {e}") from e
          self._model_instance_cache[profile_name] = model_instance
          return model_instance
 
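The added comments assume a default client is registered once at framework init. A minimal sketch of what that setup could look like with the openai-agents SDK's `set_default_openai_client`; where open-swarm actually performs this is not shown in this diff:

    import os
    from openai import AsyncOpenAI
    from agents import set_default_openai_client

    # One shared client for every blueprint; honors a LiteLLM-style proxy if configured.
    default_client = AsyncOpenAI(
        api_key=os.getenv("OPENAI_API_KEY"),
        base_url=os.getenv("LITELLM_BASE_URL") or os.getenv("OPENAI_BASE_URL"),
    )
    set_default_openai_client(default_client)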
@@ -127,7 +146,6 @@ Initializing NebulaShellzzar Crew...
          """Creates the Matrix-themed agent team with Morpheus as the coordinator."""
          logger.debug(f"Creating NebulaShellzzar agent team with {len(mcp_servers)} MCP server(s)...") # Changed to DEBUG
          self._model_instance_cache = {}
-         self._openai_client_cache = {}
          default_profile_name = self.config.get("llm_profile", "default")
          default_model_instance = self._get_model_instance(default_profile_name)
          logger.debug(f"Using LLM profile '{default_profile_name}' for all agents.") # Changed to DEBUG
swarm/blueprints/omniplex/blueprint_omniplex.py CHANGED
@@ -216,6 +216,27 @@ class OmniplexBlueprint(BlueprintBase):
          logger.info(f"Omniplex Coordinator created with tools for: {[t.name for t in team_tools]}")
          return coordinator_agent
 
+     async def run(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
+         """Main execution entry point for the Omniplex blueprint."""
+         logger.info("OmniplexBlueprint run method called.")
+         instruction = messages[-1].get("content", "") if messages else ""
+         async for chunk in self._run_non_interactive(instruction, **kwargs):
+             yield chunk
+         logger.info("OmniplexBlueprint run method finished.")
+
+     async def _run_non_interactive(self, instruction: str, **kwargs) -> Any:
+         logger.info(f"Running OmniplexBlueprint non-interactively with instruction: '{instruction[:100]}...'")
+         mcp_servers = kwargs.get("mcp_servers", [])
+         agent = self.create_starting_agent(mcp_servers=mcp_servers)
+         from agents import Runner
+         model_name = os.getenv("LITELLM_MODEL") or os.getenv("DEFAULT_LLM") or "gpt-3.5-turbo"
+         try:
+             for chunk in Runner.run(agent, instruction):
+                 yield chunk
+         except Exception as e:
+             logger.error(f"Error during non-interactive run: {e}", exc_info=True)
+             yield {"messages": [{"role": "assistant", "content": f"An error occurred: {e}"}]}
+
  # Standard Python entry point
  if __name__ == "__main__":
      OmniplexBlueprint.main()
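
The added `run` is an async generator, so callers consume it with `async for`; a minimal sketch (the blueprint construction is an assumption; only the message and chunk shapes come from this diff):

    import asyncio

    async def main():
        bp = OmniplexBlueprint()  # hypothetical construction; args not shown in this diff
        messages = [{"role": "user", "content": "Summarize the available tools."}]
        async for chunk in bp.run(messages):
            print(chunk)

    asyncio.run(main())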
swarm/blueprints/rue_code/blueprint_rue_code.py CHANGED
@@ -57,51 +57,41 @@ def execute_shell_command(command: str) -> str:
 
  def read_file(file_path: str) -> str:
      """Reads the content of a specified file."""
-     logger.info(f"Reading file: {file_path}")
+     logger.info(f"📄 Reading file: {file_path}")
      try:
-         # Basic path traversal check (can be enhanced)
          if ".." in file_path:
-             logger.warning(f"Attempted path traversal detected in read_file: {file_path}")
-             return "Error: Invalid file path (potential traversal)."
-         # Consider restricting base path if needed
-         # base_path = Path("/workspace").resolve()
-         # target_path = (base_path / file_path).resolve()
-         # if not target_path.is_relative_to(base_path):
-         #     return "Error: Access denied."
-
+             logger.warning(f"Attempted path traversal detected in read_file: {file_path}")
+             return "\033[91m❌ Error: Invalid file path (potential traversal).\033[0m"
          path = Path(file_path)
          if not path.is_file():
-             return f"Error: File not found at {file_path}"
+             logger.warning(f"File not found: {file_path}")
+             return f"\033[91m❌ Error: File not found at {file_path}\033[0m"
          content = path.read_text(encoding='utf-8')
          logger.info(f"Successfully read {len(content)} characters from {file_path}")
-         # Truncate long files?
          max_len = 10000
          if len(content) > max_len:
-             logger.warning(f"File {file_path} truncated to {max_len} characters.")
-             return content[:max_len] + "\n... [File Truncated]"
-         return content
+             logger.warning(f"File {file_path} truncated to {max_len} characters.")
+             return f"\033[93m⚠️ {content[:max_len]}\n... [File Truncated]\033[0m"
+         return f"\033[92m✅ File read successfully!\033[0m\n\033[94m{content}\033[0m"
      except Exception as e:
          logger.error(f"Error reading file '{file_path}': {e}", exc_info=True)
-         return f"Error reading file: {e}"
+         return f"\033[91m❌ Error reading file: {e}\033[0m"
 
  def write_file(file_path: str, content: str) -> str:
      """Writes content to a specified file, creating directories if needed."""
-     logger.info(f"Writing to file: {file_path}")
+     logger.info(f"✏️ Writing to file: {file_path}")
      try:
-         # Basic path traversal check
          if ".." in file_path:
-             logger.warning(f"Attempted path traversal detected in write_file: {file_path}")
-             return "Error: Invalid file path (potential traversal)."
-         # Consider restricting base path
-
+             logger.warning(f"Attempted path traversal detected in write_file: {file_path}")
+             return "\033[91m❌ Error: Invalid file path (potential traversal).\033[0m"
          path = Path(file_path)
          path.parent.mkdir(parents=True, exist_ok=True)
          path.write_text(content, encoding='utf-8')
          logger.info(f"Successfully wrote {len(content)} characters to {file_path}")
-         return f"Successfully wrote to {file_path}"
+         return f"\033[92m✅ Successfully wrote to {file_path}\033[0m"
      except Exception as e:
          logger.error(f"Error writing file '{file_path}': {e}", exc_info=True)
-         return f"Error writing file: {e}"
+         return f"\033[91m❌ Error writing file: {e}\033[0m"
 
  def list_files(directory_path: str = ".") -> str:
      """Lists files and directories in a specified path."""
@@ -130,6 +120,16 @@ def list_files(directory_path: str = ".") -> str:
 
  # --- RueCodeBlueprint Definition ---
 
+ # === OpenAI GPT-4.1 Prompt Engineering Guide ===
+ # See: https://github.com/openai/openai-cookbook/blob/main/examples/gpt4-1_prompting_guide.ipynb
+ #
+ # Agentic System Prompt Example (recommended for code generation/repair agents):
+ SYS_PROMPT_AGENTIC = """
+ You are an agent - please keep going until the user’s query is completely resolved, before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved.
+ If you are not sure about file content or codebase structure pertaining to the user’s request, use your tools to read files and gather the relevant information: do NOT guess or make up an answer.
+ You MUST plan extensively before each function call, and reflect extensively on the outcomes of the previous function calls. DO NOT do this entire process by making function calls only, as this can impair your ability to solve the problem and think insightfully.
+ """
+
  class RueCodeBlueprint(BlueprintBase):
      """
      A blueprint designed for code generation, execution, and file system interaction.
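
This hunk defines `SYS_PROMPT_AGENTIC` but does not show where it is consumed; presumably it feeds an agent's instructions, roughly like this sketch (the Agent call and tool list are assumptions):

    from agents import Agent

    # Hypothetical wiring; the diff defines the prompt but not its use.
    rue_code_agent = Agent(
        name="RueCode",
        instructions=SYS_PROMPT_AGENTIC,
        tools=[],  # e.g. read_file / write_file / list_files wrapped as function tools
    )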
@@ -288,4 +288,3 @@ class RueCodeBlueprint(BlueprintBase):
              yield {"messages": [{"role": "assistant", "content": f"An error occurred: {e}"}]}
 
      logger.info("RueCodeBlueprint run finished.")
-
swarm/blueprints/suggestion/blueprint_suggestion.py CHANGED
@@ -27,6 +27,16 @@ class SuggestionsOutput(TypedDict):
      suggestions: List[str]
 
  # --- Define the Blueprint ---
+ # === OpenAI GPT-4.1 Prompt Engineering Guide ===
+ # See: https://github.com/openai/openai-cookbook/blob/main/examples/gpt4-1_prompting_guide.ipynb
+ #
+ # Agentic System Prompt Example (recommended for structured output/suggestion agents):
+ SYS_PROMPT_AGENTIC = """
+ You are an agent - please keep going until the user’s query is completely resolved, before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved.
+ If you are not sure about file content or codebase structure pertaining to the user’s request, use your tools to read files and gather the relevant information: do NOT guess or make up an answer.
+ You MUST plan extensively before each function call, and reflect extensively on the outcomes of the previous function calls. DO NOT do this entire process by making function calls only, as this can impair your ability to solve the problem and think insightfully.
+ """
+
  class SuggestionBlueprint(BlueprintBase):
      """A blueprint defining an agent that generates structured JSON suggestions using output_type."""
 
@@ -42,7 +52,6 @@ class SuggestionBlueprint(BlueprintBase):
      }
 
      # Caches
-     _openai_client_cache: Dict[str, AsyncOpenAI] = {}
      _model_instance_cache: Dict[str, Model] = {}
 
      # --- Model Instantiation Helper --- (Standard helper)
@@ -61,19 +70,12 @@ class SuggestionBlueprint(BlueprintBase):
          if not model_name: raise ValueError(f"Missing 'model' in profile '{profile_name}'.")
          if provider != "openai": raise ValueError(f"Unsupported provider: {provider}")
 
-         client_cache_key = f"{provider}_{profile_data.get('base_url')}"
-         if client_cache_key not in self._openai_client_cache:
-             client_kwargs = { "api_key": profile_data.get("api_key"), "base_url": profile_data.get("base_url") }
-             filtered_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
-             log_kwargs = {k:v for k,v in filtered_kwargs.items() if k != 'api_key'}
-             logger.debug(f"Creating new AsyncOpenAI client for '{profile_name}': {log_kwargs}")
-             try: self._openai_client_cache[client_cache_key] = AsyncOpenAI(**filtered_kwargs)
-             except Exception as e: raise ValueError(f"Failed to init client: {e}") from e
-         client = self._openai_client_cache[client_cache_key]
+         # Remove redundant client instantiation; rely on framework-level default client
+         # All blueprints now use the default client set at framework init
          logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') for '{profile_name}'.")
          try:
              # Ensure the model selected supports structured output (most recent OpenAI models do)
-             model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=client)
+             model_instance = OpenAIChatCompletionsModel(model=model_name)
              self._model_instance_cache[profile_name] = model_instance
              return model_instance
          except Exception as e: raise ValueError(f"Failed to init LLM: {e}") from e
@@ -82,7 +84,6 @@ class SuggestionBlueprint(BlueprintBase):
          """Create the SuggestionAgent."""
          logger.debug("Creating SuggestionAgent...")
          self._model_instance_cache = {}
-         self._openai_client_cache = {}
 
          default_profile_name = self.config.get("llm_profile", "default")
          # Verify the chosen profile/model supports structured output if possible, or rely on OpenAI's newer models
@@ -106,5 +107,27 @@ class SuggestionBlueprint(BlueprintBase):
          logger.debug("SuggestionAgent created with output_type enforcement.")
          return suggestion_agent
 
+     async def run(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
+         """Main execution entry point for the Suggestion blueprint."""
+         logger.info("SuggestionBlueprint run method called.")
+         instruction = messages[-1].get("content", "") if messages else ""
+         async for chunk in self._run_non_interactive(instruction, **kwargs):
+             yield chunk
+         logger.info("SuggestionBlueprint run method finished.")
+
+     async def _run_non_interactive(self, instruction: str, **kwargs) -> Any:
+         logger.info(f"Running SuggestionBlueprint non-interactively with instruction: '{instruction[:100]}...'")
+         mcp_servers = kwargs.get("mcp_servers", [])
+         agent = self.create_starting_agent(mcp_servers=mcp_servers)
+         from agents import Runner
+         import os
+         model_name = os.getenv("LITELLM_MODEL") or os.getenv("DEFAULT_LLM") or "gpt-3.5-turbo"
+         try:
+             for chunk in Runner.run(agent, instruction):
+                 yield chunk
+         except Exception as e:
+             logger.error(f"Error during non-interactive run: {e}", exc_info=True)
+             yield {"messages": [{"role": "assistant", "content": f"An error occurred: {e}"}]}
+
  if __name__ == "__main__":
      SuggestionBlueprint.main()
swarm/consumers.py CHANGED
@@ -62,6 +62,25 @@ class DjangoChatConsumer(AsyncWebsocketConsumer):
          await self.send(text_data=system_message_html)
 
          client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+
+         # --- PATCH: Enforce LiteLLM-only endpoint and suppress OpenAI tracing/telemetry ---
+         import os, logging
+         if os.environ.get("LITELLM_BASE_URL") or os.environ.get("OPENAI_BASE_URL"):
+             logging.getLogger("openai.agents").setLevel(logging.CRITICAL)
+             try:
+                 import openai.agents.tracing
+                 openai.agents.tracing.TracingClient = lambda *a, **kw: None
+             except Exception:
+                 pass
+             def _enforce_litellm_only(client):
+                 base_url = str(getattr(client, 'base_url', '') or '')
+                 if base_url and 'openai.com' not in base_url:
+                     return
+                 import traceback
+                 raise RuntimeError(f"Attempted fallback to OpenAI API when custom base_url is set! base_url={base_url}\n{traceback.format_stack()}")
+             _enforce_litellm_only(client)
+
          stream = await client.chat.completions.create(
              model=os.getenv("OPENAI_MODEL"),
              messages=self.messages,
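
The patch only asserts that a custom endpoint is in effect; actually routing traffic through LiteLLM is driven by the environment. A sketch of the intended configuration (the proxy URL is illustrative):

    import os
    from openai import AsyncOpenAI

    # Route chat completions through a local LiteLLM proxy instead of api.openai.com.
    os.environ["LITELLM_BASE_URL"] = "http://localhost:4000"  # illustrative URL

    client = AsyncOpenAI(
        api_key=os.getenv("OPENAI_API_KEY"),
        base_url=os.environ["LITELLM_BASE_URL"],
    )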
swarm/extensions/blueprint/agent_utils.py CHANGED
@@ -6,7 +6,7 @@ This module has been updated to remove dependency on swarm.types;
  instead, it now imports Agent from the openai-agents SDK.
  """
 
- from agents.agent import Agent # Updated import
+ from blueprint_agents.agent import Agent # Updated import
  def get_agent_name(agent: Agent) -> str:
      """
 