open-swarm 0.1.1744936380__py3-none-any.whl → 0.1.1744942800__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: open-swarm
- Version: 0.1.1744936380
+ Version: 0.1.1744942800
  Summary: Open Swarm: Orchestrating AI Agent Swarms with Django
  Project-URL: Homepage, https://github.com/yourusername/open-swarm
  Project-URL: Documentation, https://github.com/yourusername/open-swarm/blob/main/README.md
@@ -14,8 +14,9 @@ swarm/util.py,sha256=G4x2hXopHhB7IdGCkUXGoykYWyiICnjxg7wcr-WqL8I,4644
  swarm/wsgi.py,sha256=REM_u4HpMCkO0ddrOUXgtY-ITL-VTbRB1-WHvFJAtAU,408
  swarm/agent/__init__.py,sha256=YESGu_UXEBxrlQwghodUMN0vmXZDwWMU7DclCUvoklA,104
  swarm/blueprints/README.md,sha256=tsngbSB9N0tILcz_m1OGAjyKZQYlGTN-i5e5asq1GbE,8478
- swarm/blueprints/chatbot/blueprint_chatbot.py,sha256=a5-gIyDvRtNgbyfviD9Hua9r5NjOQh1lOafIG2a6kiI,7520
+ swarm/blueprints/chatbot/blueprint_chatbot.py,sha256=XUR9vt3qXSFrvqmjU01_T-R90Q_r7p560sHQ_febssA,7995
  swarm/blueprints/chatbot/templates/chatbot/chatbot.html,sha256=REFnqNg0EHsXxAUfaCJe1YgOKiV_umBXuC6y8veF5CU,1568
+ swarm/blueprints/codey/blueprint_codey.py,sha256=dmKb2mHOeCNlnKrmmkqy9oh_3fJ1-QTg9gjql7RINb4,8850
  swarm/blueprints/digitalbutlers/blueprint_digitalbutlers.py,sha256=bQ0h_oC7TBbwjXhlSDGgjsnePb8cRcCnft2CggZuLY0,11301
  swarm/blueprints/divine_code/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  swarm/blueprints/divine_code/apps.py,sha256=k615JHdfOuo_GwfVbC7ah8X9OblkAL2XWm9aLBjmMyY,306
@@ -34,7 +35,7 @@ swarm/blueprints/family_ties/settings.py,sha256=5zcVsq7ny3GLWcJnOplZW4fMFNtyC3ba
  swarm/blueprints/family_ties/urls.py,sha256=awRZHb1gb1p3I6YZzfKMGSydd6kYPTLgax2jZ1ocS4U,294
  swarm/blueprints/family_ties/views.py,sha256=FbPkDNlFEixtRFbSpkr51IyJ28FRkXa1W5xyO_KeXH0,1081
  swarm/blueprints/flock/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- swarm/blueprints/gaggle/blueprint_gaggle.py,sha256=4H_eJQEzmw56IiadhMa8dYqHeiY9ubJiK5ziffo01Zs,14151
+ swarm/blueprints/gaggle/blueprint_gaggle.py,sha256=rP8uiSz3GLGEjYxXYlo_RCsus7HKBfYkexXoHBCsl7k,14928
  swarm/blueprints/mcp_demo/blueprint_mcp_demo.py,sha256=eUu5_BvLwVSdWiEonXWKuN7YgKsqz04JB_KbMPowryc,6599
  swarm/blueprints/messenger/templates/messenger/messenger.html,sha256=izuFtFn40Gm7M4gSUAUT5CIezjBjmNv2w4_fwSlv7VA,2323
  swarm/blueprints/mission_improbable/blueprint_mission_improbable.py,sha256=N4Tw0laErP4eCJM103XOaVrqbFNKZRUG1Bpze8g79MI,12753
@@ -47,20 +48,21 @@ swarm/blueprints/suggestion/blueprint_suggestion.py,sha256=hB7SWedDcc97LzVK56qEe
  swarm/blueprints/unapologetic_press/blueprint_unapologetic_press.py,sha256=e1-HpSHNp1PJAcb3-6sI1kE7KeJRQ2lGTUJPXYjvaY4,17648
  swarm/blueprints/whiskeytango_foxtrot/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  swarm/blueprints/whiskeytango_foxtrot/apps.py,sha256=V1QKvyb2Vz-EtDNhhNe4tw2W9LYhNDuiaIq_fAU4ilw,334
- swarm/blueprints/whiskeytango_foxtrot/blueprint_whiskeytango_foxtrot.py,sha256=ithMeMjU6bMeX1AeaSBX22yRmu9yizfafGD4sjykoeo,14301
+ swarm/blueprints/whiskeytango_foxtrot/blueprint_whiskeytango_foxtrot.py,sha256=8PjKGDHSTaQ76DXPD3T4MXQ8uLIlm4xmJ5s0i64a_Jw,16179
  swarm/extensions/__init__.py,sha256=SadbzfxckByaaqzuKPfXMvqmj45-dcMlavlfQYhGnzE,56
  swarm/extensions/blueprint/__init__.py,sha256=VHSlq8q3AeclMsp63f8RXc3vhcZyzHH0uEaYV6AW-ZI,1841
  swarm/extensions/blueprint/agent_utils.py,sha256=exKnbJEm1VRL270x6XqQXHtJhqD8ogY3ZBIGZO_tYUE,552
  swarm/extensions/blueprint/blueprint_base.py,sha256=GI1vFcrU2oZpDpqnzWEZNe9O0jJpTfOXuAvmBfWYOcg,15435
  swarm/extensions/blueprint/blueprint_discovery.py,sha256=v9lJeFDvPI919NzFjaCvmFBix5n0ceeL9y2JWGr_uLw,5720
  swarm/extensions/blueprint/blueprint_utils.py,sha256=Ef_pu-RYomqzFjMg6LOSPSdbYFCbYXjEoSvK1OT49Eo,702
- swarm/extensions/blueprint/cli_handler.py,sha256=kbF9G7sR5b5oD_t3rUijILZIog4hVMc-kR_ohWK0Mw0,8338
+ swarm/extensions/blueprint/cli_handler.py,sha256=ZdjnTmBzSimQv8rCTwI3ZDp_2Zrjf9hqWLXYD3jzMck,9091
  swarm/extensions/blueprint/common_utils.py,sha256=jeKcN3lMdrpOYWIpErH3L5am13jHjaImpVvk2b0mps4,462
  swarm/extensions/blueprint/config_loader.py,sha256=ldQGtv4tXeDJzL2GCylDxykZxYBo4ALFY2kS0jZ79Eo,5652
  swarm/extensions/blueprint/django_utils.py,sha256=ObtkmF1JW4H2OEYa7vC6ussUsMBtDsZTTVeHGHI-GOQ,17457
  swarm/extensions/blueprint/interactive_mode.py,sha256=vGmMuAgC93TLjMi2RkXQ2FkWfIUblyOTFGHmVdGKLSQ,4572
  swarm/extensions/blueprint/output_utils.py,sha256=HGpXIujoJNM5nCCzXH0Upog_ctw5BuftmMBiPujh-ZM,7139
  swarm/extensions/blueprint/runnable_blueprint.py,sha256=1MywZ54vUysLVtYmwCbcDYQmQnoZffCHgsArbe-VKe8,1813
+ swarm/extensions/blueprint/slash_commands.py,sha256=5LEO_veo50_eRDmiGPNnFsI-I6-X-C9NvNNmu1187T0,498
  swarm/extensions/blueprint/spinner.py,sha256=9lyjzLnQBdEBy_dXr6N6I7nxx6KfrNp7wf44sQN06GU,3756
  swarm/extensions/blueprint/modes/rest_mode.py,sha256=KZuB_j2NfomER7CmlsLBqRipU3DymKY-9RpoGilMH0I,1357
  swarm/extensions/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -252,8 +254,8 @@ swarm/views/message_views.py,sha256=sDUnXyqKXC8WwIIMAlWf00s2_a2T9c75Na5FvYMJwBM,
  swarm/views/model_views.py,sha256=aAbU4AZmrOTaPeKMWtoKK7FPYHdaN3Zbx55JfKzYTRY,2937
  swarm/views/utils.py,sha256=geX3Z5ZDKFYyXYBMilc-4qgOSjhujK3AfRtvbXgFpXk,3643
  swarm/views/web_views.py,sha256=ExQQeJpZ8CkLZQC_pXKOOmdnEy2qR3wEBP4LLp27DPU,7404
- open_swarm-0.1.1744936380.dist-info/METADATA,sha256=6jAoAMBD-G-pIG9bHtrF_Eh8wcDIoX713LmnnMHF8z0,18813
- open_swarm-0.1.1744936380.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- open_swarm-0.1.1744936380.dist-info/entry_points.txt,sha256=fo28d0_zJrytRsh8QqkdlWQT_9lyAwYUx1WuSTDI3HM,177
- open_swarm-0.1.1744936380.dist-info/licenses/LICENSE,sha256=BU9bwRlnOt_JDIb6OT55Q4leLZx9RArDLTFnlDIrBEI,1062
- open_swarm-0.1.1744936380.dist-info/RECORD,,
+ open_swarm-0.1.1744942800.dist-info/METADATA,sha256=Vl8E9AR5CbvkKFRfC-fIqsf3OBbVnI7rORhySkWdWfw,18813
+ open_swarm-0.1.1744942800.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ open_swarm-0.1.1744942800.dist-info/entry_points.txt,sha256=fo28d0_zJrytRsh8QqkdlWQT_9lyAwYUx1WuSTDI3HM,177
+ open_swarm-0.1.1744942800.dist-info/licenses/LICENSE,sha256=BU9bwRlnOt_JDIb6OT55Q4leLZx9RArDLTFnlDIrBEI,1062
+ open_swarm-0.1.1744942800.dist-info/RECORD,,
@@ -1,7 +1,11 @@
+ import os
+ from dotenv import load_dotenv; load_dotenv(override=True)
+
  import logging
  import os
  import sys
  from typing import Dict, Any, List, ClassVar, Optional
+ import argparse

  # Set logging to WARNING by default unless SWARM_DEBUG=1
  if not os.environ.get("SWARM_DEBUG"):
@@ -22,7 +26,7 @@ try:
      from agents.models.interface import Model
      from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
      from openai import AsyncOpenAI
-     from swarm.extensions.blueprint.blueprint_base import BlueprintBase
+     from swarm.core.blueprint_base import BlueprintBase
  except ImportError as e:
      print(f"ERROR: Import failed in ChatbotBlueprint: {e}. Check dependencies.")
      print(f"sys.path: {sys.path}")
@@ -141,8 +145,14 @@ if __name__ == "__main__":
      src_path = os.path.join(project_root, 'src')
      if src_path not in sys.path:
          sys.path.insert(0, src_path)
-     if '--instruction' in sys.argv:
-         instruction = sys.argv[sys.argv.index('--instruction') + 1]
+     parser = argparse.ArgumentParser(description='Chatbot Blueprint Runner')
+     parser.add_argument('instruction', nargs=argparse.REMAINDER, help='Instruction for Chatbot to process (all args after -- are joined as the prompt)')
+     args = parser.parse_args()
+     instruction_args = args.instruction
+     if instruction_args and instruction_args[0] == '--':
+         instruction_args = instruction_args[1:]
+     instruction = ' '.join(instruction_args).strip() if instruction_args else None
+     if instruction:
          blueprint = ChatbotBlueprint(blueprint_id="chatbot")
          async def runner():
              async for chunk in blueprint._run_non_interactive(instruction):
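For reference, a minimal standalone sketch (not part of the package) of the prompt handling the new chatbot __main__ block uses: positional arguments are captured with argparse.REMAINDER, a leading '--' separator is dropped if argparse leaves it in the list, and the remaining tokens are joined into one instruction string. The helper name parse_prompt is hypothetical, purely for illustration.

import argparse

def parse_prompt(argv):
    # Mirrors the blueprint's __main__ handling: capture trailing tokens,
    # drop a leading '--' separator, and join them into one prompt string.
    parser = argparse.ArgumentParser(description='Chatbot Blueprint Runner')
    parser.add_argument('instruction', nargs=argparse.REMAINDER)
    args = parser.parse_args(argv)
    tokens = args.instruction
    if tokens and tokens[0] == '--':
        tokens = tokens[1:]
    return ' '.join(tokens).strip() if tokens else None

print(parse_prompt(['--', 'write', 'a', 'haiku', 'about', 'swarms']))
# -> write a haiku about swarms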
@@ -0,0 +1,188 @@
+ import os
+ from dotenv import load_dotenv; load_dotenv(override=True)
+
+ import logging
+ from swarm.core.blueprint_base import BlueprintBase
+ from agents import Agent, Tool, function_tool, Runner
+ from agents.mcp import MCPServer
+ from typing import List, Dict, Any, Optional, AsyncGenerator
+ import sys
+ import itertools
+ import threading
+ import time
+ from rich.console import Console
+ import os
+ from swarm.core.blueprint_runner import BlueprintRunner
+ from swarm.core.spinner import Spinner as TerminalSpinner
+
+ # --- Tool Logic Definitions ---
+ def git_status() -> str:
+     return "OK: git status placeholder"
+ def git_diff() -> str:
+     return "OK: git diff placeholder"
+ def git_add() -> str:
+     return "OK: git add placeholder"
+ def git_commit(message: str) -> str:
+     return f"OK: git commit '{message}' placeholder"
+ def git_push() -> str:
+     return "OK: git push placeholder"
+ def run_npm_test(args: str = "") -> str:
+     return "OK: npm test placeholder"
+ def run_pytest(args: str = "") -> str:
+     return "OK: pytest placeholder"
+
+ git_status_tool = function_tool(git_status)
+ git_diff_tool = function_tool(git_diff)
+ git_add_tool = function_tool(git_add)
+ git_commit_tool = function_tool(git_commit)
+ git_push_tool = function_tool(git_push)
+ run_npm_test_tool = function_tool(run_npm_test)
+ run_pytest_tool = function_tool(run_pytest)
+
+ linus_corvalds_instructions = """
+ You are Linus Corvalds, the resolute leader of the Codey creative team.
+
+ Respond directly and naturally to any user prompt that is creative, general, or conversational (for example, if the user asks you to write a poem, haiku, or answer a question, reply in plain language—do NOT invoke any tools or functions).
+
+ Only use your available tools (git_status, git_diff, git_add, git_commit, git_push) if the user specifically requests a git/code operation, or if the request cannot be fulfilled without a tool.
+
+ If you are unsure, prefer a direct response. Never output tool schema, argument names, or placeholders to the user.
+ """
+
+ fiona_instructions = """
+ You are Fiona Flame, the diligent git ops specialist for the Codey team.
+
+ Respond directly and naturally to creative or conversational prompts. Only use your tools (git_status, git_diff, git_add, git_commit, git_push) for explicit git/code requests.
+ """
+
+ sammy_instructions = """
+ You are SammyScript, the test runner and automation specialist.
+
+ For creative or general prompts, reply in natural language. Only use your tools (run_npm_test, run_pytest) for explicit test/code requests.
+ """
+
+ # --- ANSI/Emoji Box Output Helpers ---
+ def ansi_box(title, content, emoji=None, count=None, params=None):
+     box_lines = []
+     header = f"\033[1;36m┏━ {emoji+' ' if emoji else ''}{title} ━{'━'*max(0, 40-len(title))}\033[0m"
+     box_lines.append(header)
+     if params:
+         box_lines.append(f"\033[1;34m┃ Params: {params}\033[0m")
+     if count is not None:
+         box_lines.append(f"\033[1;33m┃ Results: {count}\033[0m")
+     for line in content.split('\n'):
+         box_lines.append(f"┃ {line}")
+     box_lines.append("┗"+"━"*44)
+     return "\n".join(box_lines)
+
+ class CodeyBlueprint(BlueprintBase):
+     def __init__(self, blueprint_id: str, config_path: Optional[str] = None, **kwargs):
+         super().__init__(blueprint_id, config_path, **kwargs)
+         self.logger = logging.getLogger(__name__)
+         self._model_instance_cache = {}
+         self._openai_client_cache = {}
+
+     def create_starting_agent(self, mcp_servers: List[MCPServer]) -> Agent:
+         linus_corvalds = self.make_agent(
+             name="Linus_Corvalds",
+             instructions=linus_corvalds_instructions,
+             tools=[git_status_tool, git_diff_tool],
+             mcp_servers=mcp_servers
+         )
+         fiona_flame = self.make_agent(
+             name="Fiona_Flame",
+             instructions=fiona_instructions,
+             tools=[git_status_tool, git_diff_tool, git_add_tool, git_commit_tool, git_push_tool],
+             mcp_servers=mcp_servers
+         )
+         sammy_script = self.make_agent(
+             name="SammyScript",
+             instructions=sammy_instructions,
+             tools=[run_npm_test_tool, run_pytest_tool],
+             mcp_servers=mcp_servers
+         )
+         linus_corvalds.tools.append(fiona_flame.as_tool(tool_name="Fiona_Flame", tool_description="Delegate git actions to Fiona."))
+         linus_corvalds.tools.append(sammy_script.as_tool(tool_name="SammyScript", tool_description="Delegate testing tasks to Sammy."))
+         return linus_corvalds
+
+     async def run(self, messages: List[dict], **kwargs):
+         self.logger.info("CodeyBlueprint run method called.")
+         instruction = messages[-1].get("content", "") if messages else ""
+         try:
+             mcp_servers = kwargs.get("mcp_servers", [])
+             starting_agent = self.create_starting_agent(mcp_servers=mcp_servers)
+             model_name = os.getenv("LITELLM_MODEL") or os.getenv("DEFAULT_LLM") or "gpt-3.5-turbo"
+             if not starting_agent.model:
+                 yield {"messages": [{"role": "assistant", "content": f"Error: No model instance available for Codey agent. Check your LITELLM_MODEL, OPENAI_API_KEY, or DEFAULT_LLM config."}]}
+                 return
+             if not starting_agent.tools:
+                 yield {"messages": [{"role": "assistant", "content": f"Warning: No tools registered for Codey agent. Only direct LLM output is possible."}]}
+             required_mcps = []
+             if hasattr(self, 'metadata') and self.metadata.get('required_mcp_servers'):
+                 required_mcps = self.metadata['required_mcp_servers']
+             missing_mcps = [m for m in required_mcps if m not in [s.name for s in mcp_servers]]
+             if missing_mcps:
+                 yield {"messages": [{"role": "assistant", "content": f"Warning: Missing required MCP servers: {', '.join(missing_mcps)}. Some features may not work."}]}
+             show_intermediate = kwargs.get("show_intermediate", False)
+             spinner = None
+             if show_intermediate:
+                 spinner = TerminalSpinner(interactive=True, custom_sequence="generating")
+                 spinner.start()
+             try:
+                 async for chunk in BlueprintRunner.run_agent(starting_agent, instruction):
+                     if show_intermediate:
+                         for msg in chunk["messages"]:
+                             print(msg["content"])
+                     yield chunk
+             finally:
+                 if spinner:
+                     spinner.stop()
+         except Exception as e:
+             yield {"messages": [{"role": "assistant", "content": f"Error: {e}"}]}
+
+ if __name__ == "__main__":
+     import argparse
+     import asyncio
+     parser = argparse.ArgumentParser(description="Run the Codey blueprint.")
+     parser.add_argument('instruction', nargs=argparse.REMAINDER, help='Instruction for Codey to process (all args after -- are joined as the prompt)')
+     parser.add_argument('--show-intermediate', action='store_true', help='Show all intermediate outputs (verbose mode)')
+     args = parser.parse_args()
+     # Join all positional arguments as the instruction
+     instruction_args = args.instruction
+     if instruction_args and instruction_args[0] == '--':
+         instruction_args = instruction_args[1:]
+     instruction = ' '.join(instruction_args).strip() if instruction_args else None
+     show_intermediate = args.show_intermediate
+     blueprint = CodeyBlueprint(blueprint_id="codey")
+     if instruction:
+         # Non-interactive mode: run once and exit
+         async def main():
+             messages = [{"role": "user", "content": instruction}]
+             last_assistant_msg = None
+             async for resp in blueprint.run(messages, show_intermediate=show_intermediate):
+                 for msg in resp["messages"]:
+                     if show_intermediate:
+                         print(msg["content"])
+                     elif msg["role"] == "assistant":
+                         last_assistant_msg = msg["content"]
+             if not show_intermediate and last_assistant_msg is not None:
+                 print(last_assistant_msg)
+         asyncio.run(main())
+     else:
+         # Interactive mode: loop and accept follow-ups
+         async def interactive_loop():
+             messages = []
+             while True:
+                 try:
+                     user_input = input("User: ").strip()
+                 except EOFError:
+                     print("Exiting interactive mode.")
+                     break
+                 if not user_input:
+                     print("No input. Exiting.")
+                     break
+                 messages.append({"role": "user", "content": user_input})
+                 async for resp in blueprint.run(messages):
+                     for msg in resp["messages"]:
+                         print(msg["content"])
+         asyncio.run(interactive_loop())
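The ansi_box helper introduced above only assembles a string from ANSI escape codes and box-drawing characters, so it can be exercised on its own. A small demo follows; the function body is copied verbatim from the new blueprint_codey.py so the snippet runs standalone, and the title, emoji, count, and params values are illustrative only.

def ansi_box(title, content, emoji=None, count=None, params=None):
    # Copied verbatim from blueprint_codey.py above for a standalone demo.
    box_lines = []
    header = f"\033[1;36m┏━ {emoji+' ' if emoji else ''}{title} ━{'━'*max(0, 40-len(title))}\033[0m"
    box_lines.append(header)
    if params:
        box_lines.append(f"\033[1;34m┃ Params: {params}\033[0m")
    if count is not None:
        box_lines.append(f"\033[1;33m┃ Results: {count}\033[0m")
    for line in content.split('\n'):
        box_lines.append(f"┃ {line}")
    box_lines.append("┗"+"━"*44)
    return "\n".join(box_lines)

# Renders a bordered box with an optional params line and a results count.
print(ansi_box("git status", "On branch main\nnothing to commit", emoji="🌱", count=2, params="verbose=False"))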
@@ -1,6 +1,8 @@
+ import os
+ from dotenv import load_dotenv; load_dotenv(override=True)
+
  import logging
  logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(name)s: %(message)s')
- import os
  import sys

  # --- Universal Logging Reset ---
@@ -36,8 +38,7 @@ try:
      from agents.models.interface import Model
      from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
      from openai import AsyncOpenAI
-     # Corrected Import: Relative path within the package
-     from swarm.extensions.blueprint.blueprint_base import BlueprintBase
+     from swarm.core.blueprint_base import BlueprintBase
  except ImportError as e:
      print(f"ERROR: Import failed in blueprint_gaggle: {e}. Check 'openai-agents' install and project structure.")
      print(f"sys.path: {sys.path}")
@@ -283,4 +284,20 @@ class GaggleBlueprint(BlueprintBase):


  if __name__ == "__main__":
-     GaggleBlueprint.main()
+     parser = argparse.ArgumentParser(description='Gaggle Story Writing Team')
+     parser.add_argument('instruction', nargs=argparse.REMAINDER, help='Instruction for Gaggle to process (all args after -- are joined as the prompt)')
+     args = parser.parse_args()
+     instruction_args = args.instruction
+     if instruction_args and instruction_args[0] == '--':
+         instruction_args = instruction_args[1:]
+     instruction = ' '.join(instruction_args).strip() if instruction_args else None
+     blueprint = GaggleBlueprint('gaggle')
+     import asyncio
+     if instruction:
+         async def main():
+             async for chunk in blueprint._run_non_interactive(instruction):
+                 print(chunk)
+         asyncio.run(main())
+     else:
+         blueprint.display_splash_screen()
+         blueprint.run_interactive()
@@ -5,9 +5,11 @@ A chaotic spy-themed blueprint with a multi-tiered agent hierarchy for tracking
  Uses BlueprintBase and agent-as-tool delegation.
  """

+ import os
+ from dotenv import load_dotenv; load_dotenv(override=True)
+
  import logging
  import sqlite3
- import os
  import sys
  from pathlib import Path
  from typing import Dict, Any, List, ClassVar, Optional
@@ -23,7 +25,7 @@ try:
      from agents.models.interface import Model
      from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
      from openai import AsyncOpenAI
-     from swarm.extensions.blueprint.blueprint_base import BlueprintBase
+     from swarm.core.blueprint_base import BlueprintBase
  except ImportError as e:
      print(f"ERROR: Import failed in WhiskeyTangoFoxtrotBlueprint: {e}. Check dependencies.")
      print(f"sys.path: {sys.path}")
@@ -192,6 +194,35 @@ class WhiskeyTangoFoxtrotBlueprint(BlueprintBase):
          except Exception as e: raise ValueError(f"Failed to init LLM: {e}") from e


+     async def run(self, messages: List[dict], **kwargs):
+         logger.info("WhiskeyTangoFoxtrotBlueprint run method called.")
+         instruction = messages[-1].get("content", "") if messages else ""
+         try:
+             mcp_servers = kwargs.get("mcp_servers", [])
+             starting_agent = self.create_starting_agent(mcp_servers=mcp_servers)
+             from agents import Runner
+             if not starting_agent.model:
+                 yield {"messages": [{"role": "assistant", "content": f"Error: No model instance available for WTF agent. Check your OPENAI_API_KEY, or LITELLM_MODEL/LITELLM_BASE_URL config."}]}
+                 return
+             if not starting_agent.tools:
+                 yield {"messages": [{"role": "assistant", "content": f"Warning: No tools registered for WTF agent. Only direct LLM output is possible."}]}
+             required_mcps = self.metadata.get('required_mcp_servers', [])
+             missing_mcps = [m for m in required_mcps if m not in [s.name for s in mcp_servers]]
+             if missing_mcps:
+                 yield {"messages": [{"role": "assistant", "content": f"Warning: Missing required MCP servers: {', '.join(missing_mcps)}. Some features may not work."}]}
+             from rich.console import Console
+             console = Console()
+             with console.status("Generating...", spinner="dots") as status:
+                 async for chunk in Runner.run(starting_agent, instruction):
+                     content = chunk.get("content")
+                     if content and ("function call" in content or "args" in content):
+                         continue
+                     yield chunk
+             logger.info("WhiskeyTangoFoxtrotBlueprint run method finished.")
+         except Exception as e:
+             yield {"messages": [{"role": "assistant", "content": f"Error: {e}"}]}
+
+

      def create_starting_agent(self, mcp_servers: List[MCPServer]) -> Agent:
          """Creates the WTF agent hierarchy and returns Valory (Coordinator)."""
          self.initialize_db() # Ensure DB is ready
@@ -253,4 +284,3 @@ class WhiskeyTangoFoxtrotBlueprint(BlueprintBase):
  # Standard Python entry point
  if __name__ == "__main__":
      WhiskeyTangoFoxtrotBlueprint.main()
-
@@ -24,6 +24,7 @@ logger = logging.getLogger("swarm.cli")
  project_root = Path(__file__).parent.parent.parent.parent # /home/chatgpt/open-swarm
  dotenv_path = project_root / ".env"
  load_dotenv(dotenv_path=dotenv_path, override=True)
+ print("[DEBUG] LITELLM_API_KEY:", os.environ.get("LITELLM_API_KEY"))
  # print(f"[DEBUG] Loaded .env from: {dotenv_path}")
  # print(f"[DEBUG] LITELLM_MODEL={os.environ.get('LITELLM_MODEL')}")
  # print(f"[DEBUG] LITELLM_BASE_URL={os.environ.get('LITELLM_BASE_URL')}")
@@ -54,10 +55,12 @@ async def _run_blueprint_async_with_shutdown(blueprint: 'BlueprintBase', instruc
          except Exception as e:
              logger.error(f"Unexpected error setting fallback signal handler for {sig.name}: {e}", exc_info=True)

-
      # Instead of wrapping in a task and awaiting, use async for to support async generators
      try:
-         async for chunk in blueprint._run_non_interactive(instruction):
+         # PATCH: Use blueprint.run instead of blueprint._run_non_interactive
+         async for chunk in blueprint.run([
+             {"role": "user", "content": instruction}
+         ]):
              # Print the full JSON chunk
              print(json.dumps(chunk, ensure_ascii=False))
              # If chunk contains 'messages', print each assistant message's content for CLI/test UX
@@ -90,7 +93,6 @@ def run_blueprint_cli(
          description=metadata.get("description", f"Run {blueprint_cls.__name__}"),
          formatter_class=argparse.RawTextHelpFormatter
      )
-     parser.add_argument("--instruction", type=str, required=True, help="Initial instruction for the blueprint.")
      parser.add_argument("--config-path", type=str, default=None, help=f"Path to swarm_config.json (Default: {default_config_path})")
      parser.add_argument("--config", type=str, metavar="JSON_FILE_OR_STRING", default=None, help="JSON config overrides (file path or string). Merged last.")
      parser.add_argument("--profile", type=str, default=None, help="Configuration profile to use.")
@@ -98,8 +100,22 @@ def run_blueprint_cli(
      parser.add_argument("--quiet", action="store_true", help="Suppress most logs and headers, print only final output.")
      parser.add_argument('--markdown', action=argparse.BooleanOptionalAction, default=None, help="Enable/disable markdown output (--markdown / --no-markdown). Overrides config/default.")
      parser.add_argument("--version", action="version", version=f"%(prog)s (BP: {metadata.get('name', 'N/A')} v{metadata.get('version', 'N/A')}, Core: {swarm_version})")
+     parser.add_argument("instruction", nargs=argparse.REMAINDER, help="Instruction or prompt for the blueprint. All arguments after -- are treated as the prompt.")
      args = parser.parse_args()

+     # Determine instruction string: if '--' is present, treat everything after as prompt
+     instruction_args = args.instruction
+     if instruction_args:
+         # Remove leading '--' if present
+         if instruction_args and instruction_args[0] == '--':
+             instruction_args = instruction_args[1:]
+         instruction = ' '.join(instruction_args).strip()
+     else:
+         instruction = ''
+
+     if not instruction:
+         parser.error("No instruction provided. Pass a prompt after -- or as positional arguments.")
+
      # --- Load CLI Config Overrides ---
      cli_config_overrides = {}
      if args.config:
@@ -142,7 +158,7 @@ def run_blueprint_cli(
          )

          # Run the async part with shutdown handling
-         asyncio.run(_run_blueprint_async_with_shutdown(blueprint_instance, args.instruction))
+         asyncio.run(_run_blueprint_async_with_shutdown(blueprint_instance, instruction))

      except (ValueError, TypeError, FileNotFoundError) as config_err:
          logger.critical(f"[Initialization Error] Configuration problem: {config_err}", exc_info=args.debug)
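A minimal sketch (hypothetical driver code, not part of the package) of the calling convention the patched handler now relies on: blueprint.run() receives a chat-style list of role/content dicts and is consumed as an async generator whose chunks carry a "messages" list, as seen in the Codey and WhiskeyTangoFoxtrot run() implementations above.

import asyncio

async def drive(blueprint, instruction):
    # Same shape as the patched cli_handler: one user message in, chunks out.
    messages = [{"role": "user", "content": instruction}]
    async for chunk in blueprint.run(messages):
        for msg in chunk.get("messages", []):
            if msg.get("role") == "assistant":
                print(msg.get("content", ""))

# Example usage (assumes a concrete blueprint instance, e.g. CodeyBlueprint(blueprint_id="codey")):
# asyncio.run(drive(blueprint_instance, "run the test suite"))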
@@ -0,0 +1,17 @@
+ # Minimal slash_commands.py to restore compatibility
+
+ class SlashCommandRegistry:
+     def __init__(self):
+         self.commands = {}
+     def register(self, command, func=None):
+         if func is None:
+             def decorator(f):
+                 self.commands[command] = f
+                 return f
+             return decorator
+         self.commands[command] = func
+         return func
+     def get(self, command):
+         return self.commands.get(command)
+
+ slash_registry = SlashCommandRegistry()
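A short usage sketch for the new registry module; the import path matches the packaged location listed in RECORD (swarm/extensions/blueprint/slash_commands.py), while the '/help' and '/version' commands and their handlers are hypothetical examples.

from swarm.extensions.blueprint.slash_commands import slash_registry

@slash_registry.register('/help')
def show_help():
    # Registered via the decorator form (register called without func).
    return "Available commands: /help, /version"

# Direct registration also works, since register() accepts an explicit func.
slash_registry.register('/version', lambda: "open-swarm 0.1.1744942800")

print(slash_registry.get('/help')())     # -> Available commands: /help, /version
print(slash_registry.get('/version')())  # -> open-swarm 0.1.1744942800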