open-swarm 0.1.1743070217__py3-none-any.whl → 0.1.1743364176__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- open_swarm-0.1.1743364176.dist-info/METADATA +286 -0
- open_swarm-0.1.1743364176.dist-info/RECORD +260 -0
- {open_swarm-0.1.1743070217.dist-info → open_swarm-0.1.1743364176.dist-info}/WHEEL +1 -2
- open_swarm-0.1.1743364176.dist-info/entry_points.txt +2 -0
- swarm/__init__.py +0 -2
- swarm/auth.py +53 -49
- swarm/blueprints/README.md +67 -0
- swarm/blueprints/burnt_noodles/blueprint_burnt_noodles.py +412 -0
- swarm/blueprints/chatbot/blueprint_chatbot.py +98 -0
- swarm/blueprints/chatbot/templates/chatbot/chatbot.html +33 -0
- swarm/blueprints/digitalbutlers/blueprint_digitalbutlers.py +183 -0
- swarm/blueprints/dilbot_universe/blueprint_dilbot_universe.py +285 -0
- swarm/blueprints/divine_code/__init__.py +0 -0
- swarm/blueprints/divine_code/apps.py +11 -0
- swarm/blueprints/divine_code/blueprint_divine_code.py +219 -0
- swarm/blueprints/django_chat/apps.py +6 -0
- swarm/blueprints/django_chat/blueprint_django_chat.py +84 -0
- swarm/blueprints/django_chat/templates/django_chat/django_chat_webpage.html +37 -0
- swarm/blueprints/django_chat/urls.py +8 -0
- swarm/blueprints/django_chat/views.py +32 -0
- swarm/blueprints/echocraft/blueprint_echocraft.py +44 -0
- swarm/blueprints/family_ties/apps.py +11 -0
- swarm/blueprints/family_ties/blueprint_family_ties.py +152 -0
- swarm/blueprints/family_ties/models.py +19 -0
- swarm/blueprints/family_ties/serializers.py +7 -0
- swarm/blueprints/family_ties/settings.py +16 -0
- swarm/blueprints/family_ties/urls.py +10 -0
- swarm/blueprints/family_ties/views.py +26 -0
- swarm/blueprints/flock/__init__.py +0 -0
- swarm/blueprints/gaggle/blueprint_gaggle.py +184 -0
- swarm/blueprints/gotchaman/blueprint_gotchaman.py +232 -0
- swarm/blueprints/mcp_demo/blueprint_mcp_demo.py +133 -0
- swarm/blueprints/messenger/templates/messenger/messenger.html +46 -0
- swarm/blueprints/mission_improbable/blueprint_mission_improbable.py +234 -0
- swarm/blueprints/monkai_magic/blueprint_monkai_magic.py +248 -0
- swarm/blueprints/nebula_shellz/blueprint_nebula_shellz.py +156 -0
- swarm/blueprints/omniplex/blueprint_omniplex.py +221 -0
- swarm/blueprints/rue_code/__init__.py +0 -0
- swarm/blueprints/rue_code/blueprint_rue_code.py +291 -0
- swarm/blueprints/suggestion/blueprint_suggestion.py +110 -0
- swarm/blueprints/unapologetic_press/blueprint_unapologetic_press.py +298 -0
- swarm/blueprints/whiskeytango_foxtrot/__init__.py +0 -0
- swarm/blueprints/whiskeytango_foxtrot/apps.py +11 -0
- swarm/blueprints/whiskeytango_foxtrot/blueprint_whiskeytango_foxtrot.py +256 -0
- swarm/extensions/blueprint/__init__.py +30 -15
- swarm/extensions/blueprint/agent_utils.py +16 -40
- swarm/extensions/blueprint/blueprint_base.py +141 -543
- swarm/extensions/blueprint/blueprint_discovery.py +112 -98
- swarm/extensions/blueprint/cli_handler.py +185 -0
- swarm/extensions/blueprint/config_loader.py +122 -0
- swarm/extensions/blueprint/django_utils.py +181 -79
- swarm/extensions/blueprint/interactive_mode.py +1 -1
- swarm/extensions/config/config_loader.py +83 -200
- swarm/extensions/launchers/build_swarm_wrapper.py +0 -0
- swarm/extensions/launchers/swarm_cli.py +199 -287
- swarm/llm/chat_completion.py +26 -55
- swarm/management/__init__.py +0 -0
- swarm/management/commands/__init__.py +0 -0
- swarm/management/commands/runserver.py +58 -0
- swarm/permissions.py +38 -0
- swarm/serializers.py +96 -5
- swarm/settings.py +95 -110
- swarm/static/contrib/fonts/fontawesome-webfont.ttf +7 -0
- swarm/static/contrib/fonts/fontawesome-webfont.woff +7 -0
- swarm/static/contrib/fonts/fontawesome-webfont.woff2 +7 -0
- swarm/static/contrib/markedjs/marked.min.js +6 -0
- swarm/static/contrib/tabler-icons/adjustments-horizontal.svg +27 -0
- swarm/static/contrib/tabler-icons/alert-triangle.svg +21 -0
- swarm/static/contrib/tabler-icons/archive.svg +21 -0
- swarm/static/contrib/tabler-icons/artboard.svg +27 -0
- swarm/static/contrib/tabler-icons/automatic-gearbox.svg +23 -0
- swarm/static/contrib/tabler-icons/box-multiple.svg +19 -0
- swarm/static/contrib/tabler-icons/carambola.svg +19 -0
- swarm/static/contrib/tabler-icons/copy.svg +20 -0
- swarm/static/contrib/tabler-icons/download.svg +21 -0
- swarm/static/contrib/tabler-icons/edit.svg +21 -0
- swarm/static/contrib/tabler-icons/filled/carambola.svg +13 -0
- swarm/static/contrib/tabler-icons/filled/paint.svg +13 -0
- swarm/static/contrib/tabler-icons/headset.svg +22 -0
- swarm/static/contrib/tabler-icons/layout-sidebar-left-collapse.svg +21 -0
- swarm/static/contrib/tabler-icons/layout-sidebar-left-expand.svg +21 -0
- swarm/static/contrib/tabler-icons/layout-sidebar-right-collapse.svg +21 -0
- swarm/static/contrib/tabler-icons/layout-sidebar-right-expand.svg +21 -0
- swarm/static/contrib/tabler-icons/message-chatbot.svg +22 -0
- swarm/static/contrib/tabler-icons/message-star.svg +22 -0
- swarm/static/contrib/tabler-icons/message-x.svg +23 -0
- swarm/static/contrib/tabler-icons/message.svg +21 -0
- swarm/static/contrib/tabler-icons/paperclip.svg +18 -0
- swarm/static/contrib/tabler-icons/playlist-add.svg +22 -0
- swarm/static/contrib/tabler-icons/robot.svg +26 -0
- swarm/static/contrib/tabler-icons/search.svg +19 -0
- swarm/static/contrib/tabler-icons/settings.svg +20 -0
- swarm/static/contrib/tabler-icons/thumb-down.svg +19 -0
- swarm/static/contrib/tabler-icons/thumb-up.svg +19 -0
- swarm/static/css/dropdown.css +22 -0
- swarm/static/htmx/htmx.min.js +0 -0
- swarm/static/js/dropdown.js +23 -0
- swarm/static/rest_mode/css/base.css +470 -0
- swarm/static/rest_mode/css/chat-history.css +286 -0
- swarm/static/rest_mode/css/chat.css +251 -0
- swarm/static/rest_mode/css/chatbot.css +74 -0
- swarm/static/rest_mode/css/chatgpt.css +62 -0
- swarm/static/rest_mode/css/colors/corporate.css +74 -0
- swarm/static/rest_mode/css/colors/pastel.css +81 -0
- swarm/static/rest_mode/css/colors/tropical.css +82 -0
- swarm/static/rest_mode/css/general.css +142 -0
- swarm/static/rest_mode/css/layout.css +167 -0
- swarm/static/rest_mode/css/layouts/messenger-layout.css +17 -0
- swarm/static/rest_mode/css/layouts/minimalist-layout.css +57 -0
- swarm/static/rest_mode/css/layouts/mobile-layout.css +8 -0
- swarm/static/rest_mode/css/messages.css +84 -0
- swarm/static/rest_mode/css/messenger.css +135 -0
- swarm/static/rest_mode/css/settings.css +91 -0
- swarm/static/rest_mode/css/simple.css +44 -0
- swarm/static/rest_mode/css/slack.css +58 -0
- swarm/static/rest_mode/css/style.css +156 -0
- swarm/static/rest_mode/css/theme.css +30 -0
- swarm/static/rest_mode/css/toast.css +40 -0
- swarm/static/rest_mode/js/auth.js +9 -0
- swarm/static/rest_mode/js/blueprint.js +41 -0
- swarm/static/rest_mode/js/blueprintUtils.js +12 -0
- swarm/static/rest_mode/js/chatLogic.js +79 -0
- swarm/static/rest_mode/js/debug.js +63 -0
- swarm/static/rest_mode/js/events.js +98 -0
- swarm/static/rest_mode/js/main.js +19 -0
- swarm/static/rest_mode/js/messages.js +264 -0
- swarm/static/rest_mode/js/messengerLogic.js +355 -0
- swarm/static/rest_mode/js/modules/apiService.js +84 -0
- swarm/static/rest_mode/js/modules/blueprintManager.js +162 -0
- swarm/static/rest_mode/js/modules/chatHistory.js +110 -0
- swarm/static/rest_mode/js/modules/debugLogger.js +14 -0
- swarm/static/rest_mode/js/modules/eventHandlers.js +107 -0
- swarm/static/rest_mode/js/modules/messageProcessor.js +120 -0
- swarm/static/rest_mode/js/modules/state.js +7 -0
- swarm/static/rest_mode/js/modules/userInteractions.js +29 -0
- swarm/static/rest_mode/js/modules/validation.js +23 -0
- swarm/static/rest_mode/js/rendering.js +119 -0
- swarm/static/rest_mode/js/settings.js +130 -0
- swarm/static/rest_mode/js/sidebar.js +94 -0
- swarm/static/rest_mode/js/simpleLogic.js +37 -0
- swarm/static/rest_mode/js/slackLogic.js +66 -0
- swarm/static/rest_mode/js/splash.js +76 -0
- swarm/static/rest_mode/js/theme.js +111 -0
- swarm/static/rest_mode/js/toast.js +36 -0
- swarm/static/rest_mode/js/ui.js +265 -0
- swarm/static/rest_mode/js/validation.js +57 -0
- swarm/static/rest_mode/svg/animated_spinner.svg +12 -0
- swarm/static/rest_mode/svg/arrow_down.svg +5 -0
- swarm/static/rest_mode/svg/arrow_left.svg +5 -0
- swarm/static/rest_mode/svg/arrow_right.svg +5 -0
- swarm/static/rest_mode/svg/arrow_up.svg +5 -0
- swarm/static/rest_mode/svg/attach.svg +8 -0
- swarm/static/rest_mode/svg/avatar.svg +7 -0
- swarm/static/rest_mode/svg/canvas.svg +6 -0
- swarm/static/rest_mode/svg/chat_history.svg +4 -0
- swarm/static/rest_mode/svg/close.svg +5 -0
- swarm/static/rest_mode/svg/copy.svg +4 -0
- swarm/static/rest_mode/svg/dark_mode.svg +3 -0
- swarm/static/rest_mode/svg/edit.svg +5 -0
- swarm/static/rest_mode/svg/layout.svg +9 -0
- swarm/static/rest_mode/svg/logo.svg +29 -0
- swarm/static/rest_mode/svg/logout.svg +5 -0
- swarm/static/rest_mode/svg/mobile.svg +5 -0
- swarm/static/rest_mode/svg/new_chat.svg +4 -0
- swarm/static/rest_mode/svg/not_visible.svg +5 -0
- swarm/static/rest_mode/svg/plus.svg +7 -0
- swarm/static/rest_mode/svg/run_code.svg +6 -0
- swarm/static/rest_mode/svg/save.svg +4 -0
- swarm/static/rest_mode/svg/search.svg +6 -0
- swarm/static/rest_mode/svg/settings.svg +4 -0
- swarm/static/rest_mode/svg/speaker.svg +5 -0
- swarm/static/rest_mode/svg/stop.svg +6 -0
- swarm/static/rest_mode/svg/thumbs_down.svg +3 -0
- swarm/static/rest_mode/svg/thumbs_up.svg +3 -0
- swarm/static/rest_mode/svg/toggle_off.svg +6 -0
- swarm/static/rest_mode/svg/toggle_on.svg +6 -0
- swarm/static/rest_mode/svg/trash.svg +10 -0
- swarm/static/rest_mode/svg/undo.svg +3 -0
- swarm/static/rest_mode/svg/visible.svg +8 -0
- swarm/static/rest_mode/svg/voice.svg +10 -0
- swarm/templates/account/login.html +22 -0
- swarm/templates/account/signup.html +32 -0
- swarm/templates/base.html +30 -0
- swarm/templates/chat.html +43 -0
- swarm/templates/index.html +35 -0
- swarm/templates/rest_mode/components/chat_sidebar.html +55 -0
- swarm/templates/rest_mode/components/header.html +45 -0
- swarm/templates/rest_mode/components/main_chat_pane.html +41 -0
- swarm/templates/rest_mode/components/settings_dialog.html +97 -0
- swarm/templates/rest_mode/components/splash_screen.html +7 -0
- swarm/templates/rest_mode/components/top_bar.html +28 -0
- swarm/templates/rest_mode/message_ui.html +50 -0
- swarm/templates/rest_mode/slackbot.html +30 -0
- swarm/templates/simple_blueprint_page.html +24 -0
- swarm/templates/websocket_partials/final_system_message.html +3 -0
- swarm/templates/websocket_partials/system_message.html +4 -0
- swarm/templates/websocket_partials/user_message.html +5 -0
- swarm/urls.py +57 -74
- swarm/utils/log_utils.py +63 -0
- swarm/views/api_views.py +48 -39
- swarm/views/chat_views.py +156 -70
- swarm/views/core_views.py +85 -90
- swarm/views/model_views.py +64 -121
- swarm/views/utils.py +65 -441
- open_swarm-0.1.1743070217.dist-info/METADATA +0 -258
- open_swarm-0.1.1743070217.dist-info/RECORD +0 -89
- open_swarm-0.1.1743070217.dist-info/entry_points.txt +0 -3
- open_swarm-0.1.1743070217.dist-info/top_level.txt +0 -1
- swarm/agent/agent.py +0 -49
- swarm/core.py +0 -326
- swarm/extensions/mcp/__init__.py +0 -1
- swarm/extensions/mcp/cache_utils.py +0 -36
- swarm/extensions/mcp/mcp_client.py +0 -341
- swarm/extensions/mcp/mcp_constants.py +0 -7
- swarm/extensions/mcp/mcp_tool_provider.py +0 -110
- swarm/types.py +0 -126
- {open_swarm-0.1.1743070217.dist-info → open_swarm-0.1.1743364176.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,221 @@
|
|
1
|
+
import logging
import os
import sys
from typing import Dict, Any, List, ClassVar, Optional

# Ensure src is in path for BlueprintBase import
# (two directory levels up from this file, then into 'src').
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
src_path = os.path.join(project_root, 'src')
if src_path not in sys.path: sys.path.insert(0, src_path)

# Hard-exit on import failure: these dependencies are required for this
# blueprint to function at all, so there is no useful degraded mode here.
try:
    from agents import Agent, Tool, function_tool, Runner
    from agents.mcp import MCPServer
    from agents.models.interface import Model
    from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
    from openai import AsyncOpenAI
    from swarm.extensions.blueprint.blueprint_base import BlueprintBase
except ImportError as e:
    print(f"ERROR: Import failed in OmniplexBlueprint: {e}. Check dependencies.")
    print(f"sys.path: {sys.path}")
    sys.exit(1)

# Module-level logger using the standard getLogger(__name__) pattern.
logger = logging.getLogger(__name__)
|
24
|
+
|
25
|
+
# --- Agent Instructions ---
# System prompts for each agent in the Omniplex team. These strings are sent
# to the LLM verbatim, so their content must not be altered casually.

# Amazo: specialist for MCP servers launched via 'npx'.
amazo_instructions = """
You are Amazo, master of 'npx'-based MCP tools.
Receive task instructions from the Coordinator.
Identify the BEST available 'npx' MCP tool from your assigned list to accomplish the task.
Execute the chosen MCP tool with the necessary parameters provided by the Coordinator.
Report the results clearly back to the Coordinator.
"""

# Rogue: specialist for MCP servers launched via 'uvx'.
rogue_instructions = """
You are Rogue, master of 'uvx'-based MCP tools.
Receive task instructions from the Coordinator.
Identify the BEST available 'uvx' MCP tool from your assigned list.
Execute the chosen MCP tool with parameters from the Coordinator.
Report the results clearly back to the Coordinator.
"""

# Sylar: catch-all specialist for every other launcher type.
sylar_instructions = """
You are Sylar, master of miscellaneous MCP tools (non-npx, non-uvx).
Receive task instructions from the Coordinator.
Identify the BEST available MCP tool from your assigned list.
Execute the chosen MCP tool with parameters from the Coordinator.
Report the results clearly back to the Coordinator.
"""

# Coordinator: routes the user request to Amazo/Rogue/Sylar based on the
# launcher type (npx/uvx/other) of the MCP server the task needs.
coordinator_instructions = """
You are the Omniplex Coordinator. Your role is to understand the user request and delegate it to the agent best suited based on the required MCP tool's execution type (npx, uvx, or other).
Team & Tool Categories:
- Amazo (Agent Tool `Amazo`): Handles tasks requiring `npx`-based MCP servers (e.g., @modelcontextprotocol/*, mcp-shell, mcp-flowise). Pass the specific tool name and parameters needed.
- Rogue (Agent Tool `Rogue`): Handles tasks requiring `uvx`-based MCP servers (if any configured). Pass the specific tool name and parameters needed.
- Sylar (Agent Tool `Sylar`): Handles tasks requiring other/miscellaneous MCP servers (e.g., direct python scripts, other executables). Pass the specific tool name and parameters needed.
Analyze the user's request, determine if an `npx`, `uvx`, or `other` tool is likely needed, and delegate using the corresponding agent tool (`Amazo`, `Rogue`, or `Sylar`). Provide the *full context* of the user request to the chosen agent. Synthesize the final response based on the specialist agent's report.
"""
|
59
|
+
|
60
|
+
# --- Define the Blueprint ---
|
61
|
+
class OmniplexBlueprint(BlueprintBase):
    """Dynamically routes tasks to agents based on the execution type (npx, uvx, other) of the required MCP server."""
    metadata: ClassVar[Dict[str, Any]] = {
        "name": "OmniplexBlueprint",
        "title": "Omniplex MCP Orchestrator",
        "description": "Dynamically delegates tasks to agents (Amazo:npx, Rogue:uvx, Sylar:other) based on the command type of available MCP servers.",
        "version": "1.1.0",  # Refactored version
        "author": "Open Swarm Team (Refactored)",
        "tags": ["orchestration", "mcp", "dynamic", "multi-agent"],
        # List common servers - BlueprintBase will try to start them if defined in config.
        # The blueprint logic will then assign the *started* ones.
        "required_mcp_servers": [
            "memory", "filesystem", "mcp-shell", "brave-search", "sqlite",
            "mcp-flowise", "sequential-thinking",  # Add other common ones if needed
        ],
        "env_vars": ["ALLOWED_PATH", "BRAVE_API_KEY", "SQLITE_DB_PATH", "FLOWISE_API_KEY"],  # Informational
    }

    # Caches. NOTE(review): declared at class level but reassigned on `self`
    # in create_starting_agent, so after the first call each instance carries
    # its own cache dicts — confirm this shadowing is intentional.
    _openai_client_cache: Dict[str, AsyncOpenAI] = {}
    _model_instance_cache: Dict[str, Model] = {}

    # --- Model Instantiation Helper --- (Standard helper)
    def _get_model_instance(self, profile_name: str) -> Model:
        """Retrieves or creates an LLM Model instance for the given profile.

        Args:
            profile_name: Name of the LLM profile to look up via
                ``self.get_llm_profile`` (falls back to 'default' semantics
                implemented by that helper).

        Returns:
            A cached or freshly constructed ``OpenAIChatCompletionsModel``.

        Raises:
            ValueError: If the profile is missing, lacks a 'model' key, names
                an unsupported provider, or client/model construction fails.
        """
        if profile_name in self._model_instance_cache:
            logger.debug(f"Using cached Model instance for profile '{profile_name}'.")
            return self._model_instance_cache[profile_name]
        logger.debug(f"Creating new Model instance for profile '{profile_name}'.")
        profile_data = self.get_llm_profile(profile_name)
        if not profile_data:
            logger.critical(f"LLM profile '{profile_name}' (or 'default') not found.")
            raise ValueError(f"Missing LLM profile configuration for '{profile_name}' or 'default'.")
        provider = profile_data.get("provider", "openai").lower()
        model_name = profile_data.get("model")
        if not model_name:
            logger.critical(f"LLM profile '{profile_name}' missing 'model' key.")
            raise ValueError(f"Missing 'model' key in LLM profile '{profile_name}'.")
        if provider != "openai":
            logger.error(f"Unsupported LLM provider '{provider}'.")
            raise ValueError(f"Unsupported LLM provider: {provider}")
        # One AsyncOpenAI client per (provider, base_url) pair.
        client_cache_key = f"{provider}_{profile_data.get('base_url')}"
        if client_cache_key not in self._openai_client_cache:
            client_kwargs = {"api_key": profile_data.get("api_key"), "base_url": profile_data.get("base_url")}
            filtered_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
            # Never log the api_key value.
            log_kwargs = {k: v for k, v in filtered_kwargs.items() if k != 'api_key'}
            logger.debug(f"Creating new AsyncOpenAI client for '{profile_name}': {log_kwargs}")
            try:
                self._openai_client_cache[client_cache_key] = AsyncOpenAI(**filtered_kwargs)
            except Exception as e:
                raise ValueError(f"Failed to init OpenAI client: {e}") from e
        client = self._openai_client_cache[client_cache_key]
        logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') for '{profile_name}'.")
        try:
            model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=client)
            self._model_instance_cache[profile_name] = model_instance
            return model_instance
        except Exception as e:
            raise ValueError(f"Failed to init LLM provider: {e}") from e

    # --- Agent Creation ---
    def create_starting_agent(self, mcp_servers: List[MCPServer]) -> Agent:
        """Creates the Omniplex agent team based on available started MCP servers.

        Servers are bucketed by their launcher command (npx / uvx / other);
        one specialist agent is created per non-empty bucket, and a
        Coordinator agent is returned holding each specialist as a tool.

        Args:
            mcp_servers: The MCP servers that BlueprintBase actually started.

        Returns:
            The Coordinator agent wired with tools for each created specialist.
        """
        # FIX: the original body called shlex.split without importing shlex,
        # raising NameError whenever a server command was configured as a
        # non-empty string. Function-scope import keeps the fix local.
        import shlex

        logger.debug("Dynamically creating agents for OmniplexBlueprint...")
        self._model_instance_cache = {}
        self._openai_client_cache = {}

        default_profile_name = self.config.get("llm_profile", "default")
        logger.debug(f"Using LLM profile '{default_profile_name}' for Omniplex agents.")
        model_instance = self._get_model_instance(default_profile_name)

        # Categorize the *started* MCP servers passed to this method
        npx_started_servers: List[MCPServer] = []
        uvx_started_servers: List[MCPServer] = []  # Assuming 'uvx' might be a command name
        other_started_servers: List[MCPServer] = []

        for server in mcp_servers:
            server_config = self.mcp_server_configs.get(server.name, {})
            command_def = server_config.get("command", "")
            command_name = ""
            if isinstance(command_def, list) and command_def:
                command_name = os.path.basename(command_def[0]).lower()
            elif isinstance(command_def, str):
                # Simple case: command is just the executable name.
                # shlex.split handles quoted paths/arguments correctly.
                command_name = os.path.basename(shlex.split(command_def)[0]).lower() if command_def else ""

            if "npx" in command_name:
                npx_started_servers.append(server)
            elif "uvx" in command_name:  # Placeholder for uvx logic
                uvx_started_servers.append(server)
            else:
                other_started_servers.append(server)

        logger.debug(f"Categorized MCPs - NPX: {[s.name for s in npx_started_servers]}, UVX: {[s.name for s in uvx_started_servers]}, Other: {[s.name for s in other_started_servers]}")

        # Create agents for each category *only if* they have servers assigned
        amazo_agent = rogue_agent = sylar_agent = None
        team_tools: List[Tool] = []

        if npx_started_servers:
            logger.info(f"Creating Amazo for npx servers: {[s.name for s in npx_started_servers]}")
            amazo_agent = Agent(
                name="Amazo",
                model=model_instance,
                instructions=amazo_instructions,
                tools=[],  # Uses MCPs
                mcp_servers=npx_started_servers
            )
            team_tools.append(amazo_agent.as_tool(
                tool_name="Amazo",
                tool_description=f"Delegate tasks requiring npx-based MCP servers (e.g., {', '.join(s.name for s in npx_started_servers)})."
            ))
        else:
            logger.info("No started npx servers found for Amazo.")

        if uvx_started_servers:
            logger.info(f"Creating Rogue for uvx servers: {[s.name for s in uvx_started_servers]}")
            rogue_agent = Agent(
                name="Rogue",
                model=model_instance,
                instructions=rogue_instructions,
                tools=[],  # Uses MCPs
                mcp_servers=uvx_started_servers
            )
            team_tools.append(rogue_agent.as_tool(
                tool_name="Rogue",
                tool_description=f"Delegate tasks requiring uvx-based MCP servers (e.g., {', '.join(s.name for s in uvx_started_servers)})."
            ))
        else:
            logger.info("No started uvx servers found for Rogue.")

        if other_started_servers:
            logger.info(f"Creating Sylar for other servers: {[s.name for s in other_started_servers]}")
            sylar_agent = Agent(
                name="Sylar",
                model=model_instance,
                instructions=sylar_instructions,
                tools=[],  # Uses MCPs
                mcp_servers=other_started_servers
            )
            team_tools.append(sylar_agent.as_tool(
                tool_name="Sylar",
                tool_description=f"Delegate tasks requiring miscellaneous MCP servers (e.g., {', '.join(s.name for s in other_started_servers)})."
            ))
        else:
            logger.info("No other started servers found for Sylar.")

        # Create Coordinator and pass the tools for the agents that were created
        coordinator_agent = Agent(
            name="OmniplexCoordinator",
            model=model_instance,
            instructions=coordinator_instructions,
            tools=team_tools,
            mcp_servers=[]  # Coordinator likely doesn't use MCPs directly
        )

        logger.info(f"Omniplex Coordinator created with tools for: {[t.name for t in team_tools]}")
        return coordinator_agent
|
218
|
+
|
219
|
+
# Standard Python entry point
if __name__ == "__main__":
    OmniplexBlueprint.main()  # main() is inherited — presumably a CLI runner on BlueprintBase; confirm
|
File without changes
|
@@ -0,0 +1,291 @@
|
|
1
|
+
import logging
import os
import sys
import json
import subprocess
from typing import Dict, List, Any, AsyncGenerator, Optional
from pathlib import Path
import re

# Configure logging
logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(asctime)s - %(name)s - %(message)s')
logger = logging.getLogger(__name__)

# Attempt to import BlueprintBase, handle potential ImportError during early setup/testing
try:
    from swarm.extensions.blueprint.blueprint_base import BlueprintBase
except ImportError as e:
    logger.error(f"Import failed: {e}. Check 'openai-agents' install and project structure.")
    # *** REMOVED sys.exit(1) ***
    # Define a dummy class if import fails, allowing module to load for inspection/debugging
    class BlueprintBase:
        # Minimal stand-in mirroring the attributes this module touches.
        metadata = {}
        def __init__(self, *args, **kwargs): pass
        async def run(self, *args, **kwargs): yield {}
|
25
|
+
|
26
|
+
# --- Tool Definitions ---
|
27
|
+
|
28
|
+
def execute_shell_command(command: str) -> str:
    """
    Executes a shell command and returns its stdout and stderr.
    Security Note: Ensure commands are properly sanitized or restricted.

    Args:
        command: The command line to run; passed to the system shell.

    Returns:
        A report string containing the exit code followed by any STDOUT and
        STDERR sections, or an error description if the command timed out or
        could not be launched.
    """
    logger = logging.getLogger(__name__)
    logger.info(f"Executing shell command: {command}")
    try:
        result = subprocess.run(
            command,
            shell=True,
            check=False,  # Don't raise exception on non-zero exit code
            capture_output=True,  # Idiomatic shorthand for stdout=PIPE, stderr=PIPE
            text=True,
            timeout=60  # Add a timeout
        )
        output = f"Exit Code: {result.returncode}\n"
        if result.stdout:
            output += f"STDOUT:\n{result.stdout}\n"
        if result.stderr:
            output += f"STDERR:\n{result.stderr}\n"
        logger.info(f"Command finished. Exit Code: {result.returncode}")
        return output.strip()
    except subprocess.TimeoutExpired:
        logger.error(f"Command timed out: {command}")
        return "Error: Command timed out after 60 seconds."
    except Exception as e:
        logger.error(f"Error executing command '{command}': {e}", exc_info=True)
        return f"Error executing command: {e}"
|
57
|
+
|
58
|
+
def read_file(file_path: str) -> str:
    """Reads the content of a specified file.

    Args:
        file_path: Path of the file to read. Paths containing '..' are
            rejected as a basic traversal guard.

    Returns:
        The file content, truncated with a marker if it exceeds ``max_len``
        characters, or an error message string on failure.
    """
    logger = logging.getLogger(__name__)
    logger.info(f"Reading file: {file_path}")
    try:
        # Basic path traversal check (can be enhanced)
        if ".." in file_path:
            logger.warning(f"Attempted path traversal detected in read_file: {file_path}")
            return "Error: Invalid file path (potential traversal)."
        # Consider restricting base path if needed
        # base_path = Path("/workspace").resolve()
        # target_path = (base_path / file_path).resolve()
        # if not target_path.is_relative_to(base_path):
        #     return "Error: Access denied."

        path = Path(file_path)
        if not path.is_file():
            return f"Error: File not found at {file_path}"
        content = path.read_text(encoding='utf-8')
        logger.info(f"Successfully read {len(content)} characters from {file_path}")
        # Truncate long files so responses stay manageable.
        max_len = 10000
        if len(content) > max_len:
            # FIX: original message hard-coded "65,746 characters", which did
            # not match the actual max_len of 10000.
            logger.warning(f"File {file_path} truncated to {max_len} characters.")
            return content[:max_len] + "\n... [File Truncated]"
        return content
    except Exception as e:
        logger.error(f"Error reading file '{file_path}': {e}", exc_info=True)
        return f"Error reading file: {e}"
|
86
|
+
|
87
|
+
def write_file(file_path: str, content: str) -> str:
    """Writes content to a specified file, creating directories if needed."""
    logger.info(f"Writing to file: {file_path}")
    try:
        # Reject anything that looks like a traversal attempt before touching disk.
        if ".." in file_path:
            logger.warning(f"Attempted path traversal detected in write_file: {file_path}")
            return "Error: Invalid file path (potential traversal)."
        # Consider restricting base path

        target = Path(file_path)
        # Create any missing parent directories, then write atomically via pathlib.
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(content, encoding='utf-8')
        logger.info(f"Successfully wrote {len(content)} characters to {file_path}")
        return f"Successfully wrote to {file_path}"
    except Exception as e:
        logger.error(f"Error writing file '{file_path}': {e}", exc_info=True)
        return f"Error writing file: {e}"
|
105
|
+
|
106
|
+
def list_files(directory_path: str = ".") -> str:
    """Lists files and directories in a specified path."""
    logger.info(f"Listing files in directory: {directory_path}")
    try:
        # Reject anything that looks like a traversal attempt before touching disk.
        if ".." in directory_path:
            logger.warning(f"Attempted path traversal detected in list_files: {directory_path}")
            return "Error: Invalid directory path (potential traversal)."
        # Consider restricting base path

        folder = Path(directory_path)
        if not folder.is_dir():
            return f"Error: Directory not found at {directory_path}"

        # Each entry is tagged 'd' for directories, 'f' for everything else.
        listing = [f"{'d' if item.is_dir() else 'f'} {item.name}" for item in folder.iterdir()]

        logger.info(f"Found {len(listing)} entries in {directory_path}")
        return "\n".join(listing) if listing else "Directory is empty."
    except Exception as e:
        logger.error(f"Error listing files in '{directory_path}': {e}", exc_info=True)
        return f"Error listing files: {e}"
|
130
|
+
|
131
|
+
# --- RueCodeBlueprint Definition ---
|
132
|
+
|
133
|
+
class RueCodeBlueprint(BlueprintBase):
    """
    A blueprint designed for code generation, execution, and file system interaction.

    Uses Jinja2 for templating prompts and provides tools for shell commands and
    file operations. The tool callables referenced in `tool_map` below
    (execute_shell_command, read_file, write_file, list_files) are module-level
    functions defined earlier in this file.
    """
    # Static descriptor consumed by the blueprint framework (name/version/tags).
    metadata = {
        "name": "RueCode",
        "description": "Generates, executes code, and interacts with the file system.",
        "author": "Matthew Hand",
        "version": "0.1.0",
        "tags": ["code", "execution", "filesystem", "developer"],
        "llm_profile": "default_dev" # Example: Suggests a profile suitable for coding
    }

    # Override __init__ if you need specific setup beyond the base class
    # def __init__(self, *args, **kwargs):
    #     super().__init__(*args, **kwargs)
    #     # Add any RueCode specific initialization here
    #     # logger.info("RueCodeBlueprint initialized.")

    async def run(self, messages: List[Dict[str, str]]) -> AsyncGenerator[Dict[str, Any], None]:
        """
        Processes user requests for code generation, execution, or file operations.

        Args:
            messages: Chat history as role/content dicts; the most recent
                'user' message drives the request, earlier ones are context.

        Yields:
            Dicts of the form ``{"messages": [...]}`` containing either streamed
            assistant content deltas (``{"delta": {"content": ...}}``) or a
            complete assistant message (``{"content": ...}``).
        """
        logger.info(f"RueCodeBlueprint run called with {len(messages)} messages.")
        # Scan from the end so the newest user turn is the one acted upon.
        last_user_message = next((m['content'] for m in reversed(messages) if m['role'] == 'user'), None)

        if not last_user_message:
            yield {"messages": [{"role": "assistant", "content": "I need a user message to proceed."}]}
            return

        # 1. Prepare the prompt using Jinja (example)
        # Assuming you have a 'rue_code_prompt.j2' in a 'templates' subdir.
        # NOTE(review): render_prompt is presumably provided by BlueprintBase —
        # confirm its template search path.
        try:
            prompt_context = {
                "user_request": last_user_message,
                "history": messages[:-1], # Provide previous messages for context
                "available_tools": ["execute_shell_command", "read_file", "write_file", "list_files"]
            }
            rendered_prompt = self.render_prompt("rue_code_prompt.j2", prompt_context)
            logger.debug(f"Rendered prompt:\n{rendered_prompt}")
        except Exception as e:
            logger.error(f"Failed to render prompt template: {e}")
            yield {"messages": [{"role": "assistant", "content": f"Internal error: Could not prepare request ({e})."}]}
            return

        # 2. Define available tools for the LLM (OpenAI function-calling schema).
        tools = [
            {"type": "function", "function": {"name": "execute_shell_command", "description": "Executes a shell command.", "parameters": {"type": "object", "properties": {"command": {"type": "string", "description": "The shell command to execute."}}, "required": ["command"]}}},
            {"type": "function", "function": {"name": "read_file", "description": "Reads content from a file.", "parameters": {"type": "object", "properties": {"file_path": {"type": "string", "description": "Path to the file to read."}}, "required": ["file_path"]}}},
            {"type": "function", "function": {"name": "write_file", "description": "Writes content to a file.", "parameters": {"type": "object", "properties": {"file_path": {"type": "string", "description": "Path to the file to write."}, "content": {"type": "string", "description": "Content to write."}}, "required": ["file_path", "content"]}}},
            {"type": "function", "function": {"name": "list_files", "description": "Lists files in a directory.", "parameters": {"type": "object", "properties": {"directory_path": {"type": "string", "description": "Path to the directory (default is current)."}}, "required": []}}}, # directory_path is optional
        ]
        # Dispatch table: tool name requested by the LLM -> local Python callable.
        tool_map = {
            "execute_shell_command": execute_shell_command,
            "read_file": read_file,
            "write_file": write_file,
            "list_files": list_files,
        }

        # 3. Call the LLM (using the base class's llm instance)
        llm_messages = [{"role": "system", "content": rendered_prompt}] # Or construct differently based on template
        # Add user message if not fully incorporated into the system prompt
        # llm_messages.append({"role": "user", "content": last_user_message})

        logger.info(f"Calling LLM profile '{self.llm_profile_name}' with tools.")
        try:
            # Use the configured LLM instance from the base class
            response_stream = self.llm.chat_completion_stream(
                messages=llm_messages,
                tools=tools,
                tool_choice="auto" # Let the model decide
            )

            # 4. Process the streaming response and handle tool calls
            full_response_content = ""
            tool_calls = []
            async for chunk in response_stream:
                delta = chunk.choices[0].delta
                if delta.content:
                    full_response_content += delta.content
                    yield {"messages": [{"role": "assistant", "delta": {"content": delta.content}}]} # Yield content delta

                if delta.tool_calls:
                    # Accumulate tool call information from deltas.
                    # NOTE(review): assumes tc_delta.function.arguments is a str in
                    # every delta; if the stream ever carries None here (common for
                    # the first fragment of some providers) the `+=` below would
                    # raise — confirm against the streaming client's contract.
                    for tc_delta in delta.tool_calls:
                        if tc_delta.index >= len(tool_calls):
                            # Start of a new tool call
                            tool_calls.append({
                                "id": tc_delta.id,
                                "type": "function",
                                "function": {"name": tc_delta.function.name, "arguments": tc_delta.function.arguments}
                            })
                        else:
                            # Append arguments to existing tool call
                            tool_calls[tc_delta.index]["function"]["arguments"] += tc_delta.function.arguments

            logger.info("LLM response received.")
            # If no tool calls, the final response is just the accumulated content
            if not tool_calls and not full_response_content:
                logger.warning("LLM finished without content or tool calls.")
                yield {"messages": [{"role": "assistant", "content": "[No response content or tool call generated]"}]}


            # 5. Execute tool calls if any were made
            if tool_calls:
                logger.info(f"Executing {len(tool_calls)} tool call(s)...")
                tool_messages = [{"role": "assistant", "tool_calls": tool_calls}] # Message for next LLM call

                for tool_call in tool_calls:
                    function_name = tool_call["function"]["name"]
                    tool_call_id = tool_call["id"]
                    logger.debug(f"Processing tool call: {function_name} (ID: {tool_call_id})")

                    if function_name in tool_map:
                        try:
                            arguments = json.loads(tool_call["function"]["arguments"])
                            logger.debug(f"Arguments: {arguments}")
                            tool_function = tool_map[function_name]
                            # Execute the tool function (sync for now, consider async if tools are I/O bound)
                            # NOTE(review): assumes every tool returns a str (true for
                            # list_files visible above; confirm for the others) —
                            # the slice below would fail on non-sliceable returns.
                            tool_output = tool_function(**arguments)
                            logger.debug(f"Tool output: {tool_output[:200]}...") # Log truncated output
                        except json.JSONDecodeError:
                            logger.error(f"Failed to decode arguments for {function_name}: {tool_call['function']['arguments']}")
                            tool_output = f"Error: Invalid arguments format for {function_name}."
                        except Exception as e:
                            logger.error(f"Error executing tool {function_name}: {e}", exc_info=True)
                            tool_output = f"Error executing tool {function_name}: {e}"

                        # Success and failure alike are reported back as a 'tool'
                        # message so the follow-up LLM call can react to errors.
                        tool_messages.append({
                            "tool_call_id": tool_call_id,
                            "role": "tool",
                            "name": function_name,
                            "content": tool_output,
                        })
                    else:
                        logger.warning(f"LLM requested unknown tool: {function_name}")
                        tool_messages.append({
                            "tool_call_id": tool_call_id,
                            "role": "tool",
                            "name": function_name,
                            "content": f"Error: Tool '{function_name}' not found.",
                        })

                # 6. Send tool results back to LLM for final response
                logger.info("Sending tool results back to LLM...")
                final_response_stream = self.llm.chat_completion_stream(
                    messages=llm_messages + tool_messages # Original messages + tool req + tool resp
                )
                async for final_chunk in final_response_stream:
                    if final_chunk.choices[0].delta.content:
                        yield {"messages": [{"role": "assistant", "delta": {"content": final_chunk.choices[0].delta.content}}]}

        except Exception as e:
            # Top-level boundary: surface the failure to the caller as a message
            # rather than letting the generator blow up mid-stream.
            logger.error(f"Error during RueCodeBlueprint run: {e}", exc_info=True)
            yield {"messages": [{"role": "assistant", "content": f"An error occurred: {e}"}]}

        logger.info("RueCodeBlueprint run finished.")
|
291
|
+
|
@@ -0,0 +1,110 @@
|
|
1
|
+
import logging
|
2
|
+
import os
|
3
|
+
import sys
|
4
|
+
from typing import Dict, Any, List, TypedDict, ClassVar, Optional
|
5
|
+
|
6
|
+
# Ensure src is in path for BlueprintBase import
|
7
|
+
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
|
8
|
+
src_path = os.path.join(project_root, 'src')
|
9
|
+
if src_path not in sys.path: sys.path.insert(0, src_path)
|
10
|
+
|
11
|
+
try:
|
12
|
+
from agents import Agent, Tool, function_tool, Runner
|
13
|
+
from agents.mcp import MCPServer
|
14
|
+
from agents.models.interface import Model
|
15
|
+
from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
|
16
|
+
from openai import AsyncOpenAI
|
17
|
+
from swarm.extensions.blueprint.blueprint_base import BlueprintBase
|
18
|
+
except ImportError as e:
|
19
|
+
print(f"ERROR: Failed to import 'agents' or 'BlueprintBase'. Is 'openai-agents' installed and src in PYTHONPATH? Details: {e}")
|
20
|
+
sys.exit(1)
|
21
|
+
|
22
|
+
logger = logging.getLogger(__name__)
|
23
|
+
|
24
|
+
# --- Define the desired output structure ---
|
25
|
+
# Expected structure for the agent's output: a plain dict carrying exactly one
# key, 'suggestions', whose value is a list of suggestion strings.
SuggestionsOutput = TypedDict("SuggestionsOutput", {"suggestions": List[str]})
|
28
|
+
|
29
|
+
# --- Define the Blueprint ---
|
30
|
+
class SuggestionBlueprint(BlueprintBase):
    """A blueprint defining an agent that generates structured JSON suggestions using output_type.

    The agent returns a ``SuggestionsOutput`` TypedDict (a ``suggestions`` list
    of strings) enforced via ``Agent(output_type=...)``.
    """

    metadata: ClassVar[Dict[str, Any]] = {
        "name": "SuggestionBlueprint",
        "title": "Suggestion Blueprint (Structured Output)",
        "description": "An agent that provides structured suggestions using Agent(output_type=...).",
        "version": "1.2.0", # Version bump for refactor
        "author": "Open Swarm Team (Refactored)",
        "tags": ["structured output", "json", "suggestions", "output_type"],
        "required_mcp_servers": [],
        "env_vars": [], # OPENAI_API_KEY is implicitly needed by the model
    }

    # Per-instance caches (populated in __init__). Annotations only: a
    # class-level `= {}` default would be a single dict shared by every
    # instance, leaking clients/models between blueprint instances.
    _openai_client_cache: Dict[str, AsyncOpenAI]
    _model_instance_cache: Dict[str, Model]

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Initialize per-instance caches, then delegate to BlueprintBase.

        Caches are created before calling super().__init__ in case the base
        class triggers agent creation during initialization.
        """
        self._openai_client_cache = {}
        self._model_instance_cache = {}
        super().__init__(*args, **kwargs)

    # --- Model Instantiation Helper --- (Standard helper)
    def _get_model_instance(self, profile_name: str) -> Model:
        """Retrieve or create (and cache) an LLM Model instance for *profile_name*.

        Args:
            profile_name: Key of the LLM profile to resolve via get_llm_profile.

        Returns:
            A cached or freshly constructed OpenAIChatCompletionsModel.

        Raises:
            ValueError: If the profile is missing or incomplete, the provider
                is not 'openai', or client/model construction fails.
        """
        if profile_name in self._model_instance_cache:
            logger.debug(f"Using cached Model instance for profile '{profile_name}'.")
            return self._model_instance_cache[profile_name]

        logger.debug(f"Creating new Model instance for profile '{profile_name}'.")
        profile_data = self.get_llm_profile(profile_name)
        if not profile_data:
            raise ValueError(f"Missing LLM profile '{profile_name}'.")
        provider = profile_data.get("provider", "openai").lower()
        model_name = profile_data.get("model")
        # Ensure a model capable of structured output is used (most recent OpenAI models are)
        if not model_name:
            raise ValueError(f"Missing 'model' in profile '{profile_name}'.")
        if provider != "openai":
            raise ValueError(f"Unsupported provider: {provider}")

        # One AsyncOpenAI client per (provider, base_url) pair.
        client_cache_key = f"{provider}_{profile_data.get('base_url')}"
        if client_cache_key not in self._openai_client_cache:
            client_kwargs = {"api_key": profile_data.get("api_key"), "base_url": profile_data.get("base_url")}
            filtered_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
            # Never log the API key.
            log_kwargs = {k: v for k, v in filtered_kwargs.items() if k != 'api_key'}
            logger.debug(f"Creating new AsyncOpenAI client for '{profile_name}': {log_kwargs}")
            try:
                self._openai_client_cache[client_cache_key] = AsyncOpenAI(**filtered_kwargs)
            except Exception as e:
                raise ValueError(f"Failed to init client: {e}") from e
        client = self._openai_client_cache[client_cache_key]

        logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') for '{profile_name}'.")
        try:
            # Ensure the model selected supports structured output (most recent OpenAI do)
            model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=client)
            self._model_instance_cache[profile_name] = model_instance
            return model_instance
        except Exception as e:
            raise ValueError(f"Failed to init LLM: {e}") from e

    def create_starting_agent(self, mcp_servers: List[MCPServer]) -> Agent:
        """Create the SuggestionAgent with structured-output enforcement.

        Args:
            mcp_servers: MCP servers passed through to the Agent (unused here).

        Returns:
            The configured SuggestionAgent.
        """
        logger.debug("Creating SuggestionAgent...")
        # Reset caches so a fresh agent never reuses stale clients/models.
        self._model_instance_cache = {}
        self._openai_client_cache = {}

        default_profile_name = self.config.get("llm_profile", "default")
        # Verify the chosen profile/model supports structured output if possible, or rely on OpenAI's newer models
        logger.debug(f"Using LLM profile '{default_profile_name}' for SuggestionAgent.")
        model_instance = self._get_model_instance(default_profile_name)

        suggestion_agent_instructions = (
            "You are the SuggestionAgent. Analyze the user's input and generate exactly three relevant, "
            "concise follow-up questions or conversation starters as a JSON object with a single key 'suggestions' "
            "containing a list of strings."
        )

        suggestion_agent = Agent(
            name="SuggestionAgent",
            instructions=suggestion_agent_instructions,
            tools=[], # No function tools needed
            model=model_instance,
            output_type=SuggestionsOutput, # Enforce the TypedDict structure
            mcp_servers=mcp_servers # Pass along, though unused
        )
        logger.debug("SuggestionAgent created with output_type enforcement.")
        return suggestion_agent
|
108
|
+
|
109
|
+
# Script entry point: delegate CLI handling to the blueprint framework.
# (main is presumably a BlueprintBase classmethod that parses args and runs
# the blueprint — confirm against BlueprintBase.)
if __name__ == "__main__":
    SuggestionBlueprint.main()
|