open-swarm 0.1.1744936125__py3-none-any.whl → 0.1.1744936234__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -57,51 +57,41 @@ def execute_shell_command(command: str) -> str:
 
 def read_file(file_path: str) -> str:
     """Reads the content of a specified file."""
-    logger.info(f"Reading file: {file_path}")
+    logger.info(f"📄 Reading file: {file_path}")
     try:
-        # Basic path traversal check (can be enhanced)
         if ".." in file_path:
-            logger.warning(f"Attempted path traversal detected in read_file: {file_path}")
-            return "Error: Invalid file path (potential traversal)."
-        # Consider restricting base path if needed
-        # base_path = Path("/workspace").resolve()
-        # target_path = (base_path / file_path).resolve()
-        # if not target_path.is_relative_to(base_path):
-        #     return "Error: Access denied."
-
+            logger.warning(f"Attempted path traversal detected in read_file: {file_path}")
+            return "\033[91m❌ Error: Invalid file path (potential traversal).\033[0m"
         path = Path(file_path)
         if not path.is_file():
-            return f"Error: File not found at {file_path}"
+            logger.warning(f"File not found: {file_path}")
+            return f"\033[91m❌ Error: File not found at {file_path}\033[0m"
         content = path.read_text(encoding='utf-8')
         logger.info(f"Successfully read {len(content)} characters from {file_path}")
-        # Truncate long files?
         max_len = 10000
         if len(content) > max_len:
-            logger.warning(f"File {file_path} truncated to 59,766 characters.")
-            return content[:max_len] + "\n... [File Truncated]"
-        return content
+            logger.warning(f"File {file_path} truncated to 59,766 characters.")
+            return f"\033[93m⚠️ {content[:max_len]}\n... [File Truncated]\033[0m"
+        return f"\033[92m✅ File read successfully!\033[0m\n\033[94m{content}\033[0m"
     except Exception as e:
         logger.error(f"Error reading file '{file_path}': {e}", exc_info=True)
-        return f"Error reading file: {e}"
+        return f"\033[91m❌ Error reading file: {e}\033[0m"
 
 def write_file(file_path: str, content: str) -> str:
     """Writes content to a specified file, creating directories if needed."""
-    logger.info(f"Writing to file: {file_path}")
+    logger.info(f"✏️ Writing to file: {file_path}")
     try:
-        # Basic path traversal check
         if ".." in file_path:
-            logger.warning(f"Attempted path traversal detected in write_file: {file_path}")
-            return "Error: Invalid file path (potential traversal)."
-        # Consider restricting base path
-
+            logger.warning(f"Attempted path traversal detected in write_file: {file_path}")
+            return "\033[91m❌ Error: Invalid file path (potential traversal).\033[0m"
         path = Path(file_path)
         path.parent.mkdir(parents=True, exist_ok=True)
         path.write_text(content, encoding='utf-8')
         logger.info(f"Successfully wrote {len(content)} characters to {file_path}")
-        return f"Successfully wrote to {file_path}"
+        return f"\033[92m✅ Successfully wrote to {file_path}\033[0m"
     except Exception as e:
         logger.error(f"Error writing file '{file_path}': {e}", exc_info=True)
-        return f"Error writing file: {e}"
+        return f"\033[91m❌ Error writing file: {e}\033[0m"
 
 def list_files(directory_path: str = ".") -> str:
     """Lists files and directories in a specified path."""
@@ -130,6 +120,16 @@ def list_files(directory_path: str = ".") -> str:
 
 # --- RueCodeBlueprint Definition ---
 
+# === OpenAI GPT-4.1 Prompt Engineering Guide ===
+# See: https://github.com/openai/openai-cookbook/blob/main/examples/gpt4-1_prompting_guide.ipynb
+#
+# Agentic System Prompt Example (recommended for code generation/repair agents):
+SYS_PROMPT_AGENTIC = """
+You are an agent - please keep going until the user’s query is completely resolved, before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved.
+If you are not sure about file content or codebase structure pertaining to the user’s request, use your tools to read files and gather the relevant information: do NOT guess or make up an answer.
+You MUST plan extensively before each function call, and reflect extensively on the outcomes of the previous function calls. DO NOT do this entire process by making function calls only, as this can impair your ability to solve the problem and think insightfully.
+"""
+
 class RueCodeBlueprint(BlueprintBase):
     """
     A blueprint designed for code generation, execution, and file system interaction.
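
The diff adds `SYS_PROMPT_AGENTIC` at module level but does not show it being wired into an agent. One plausible use, sketched with the openai-agents `Agent` class (the agent name and the appended instruction text are hypothetical, not from the package):

```python
# Hypothetical wiring, not shown in the diff: prepend the agentic guide to a
# blueprint agent's own instructions when the agent is constructed.
from agents import Agent

rue_code_agent = Agent(
    name="RueCode",  # illustrative name
    instructions=SYS_PROMPT_AGENTIC
    + "\nYou generate, execute, and repair code using your file and shell tools.",
)
```
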
@@ -288,4 +288,3 @@ class RueCodeBlueprint(BlueprintBase):
             yield {"messages": [{"role": "assistant", "content": f"An error occurred: {e}"}]}
 
         logger.info("RueCodeBlueprint run finished.")
-
@@ -27,6 +27,16 @@ class SuggestionsOutput(TypedDict):
     suggestions: List[str]
 
 # --- Define the Blueprint ---
+# === OpenAI GPT-4.1 Prompt Engineering Guide ===
+# See: https://github.com/openai/openai-cookbook/blob/main/examples/gpt4-1_prompting_guide.ipynb
+#
+# Agentic System Prompt Example (recommended for structured output/suggestion agents):
+SYS_PROMPT_AGENTIC = """
+You are an agent - please keep going until the user’s query is completely resolved, before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved.
+If you are not sure about file content or codebase structure pertaining to the user’s request, use your tools to read files and gather the relevant information: do NOT guess or make up an answer.
+You MUST plan extensively before each function call, and reflect extensively on the outcomes of the previous function calls. DO NOT do this entire process by making function calls only, as this can impair your ability to solve the problem and think insightfully.
+"""
+
 class SuggestionBlueprint(BlueprintBase):
     """A blueprint defining an agent that generates structured JSON suggestions using output_type."""
 
@@ -42,7 +52,6 @@ class SuggestionBlueprint(BlueprintBase):
     }
 
     # Caches
-    _openai_client_cache: Dict[str, AsyncOpenAI] = {}
     _model_instance_cache: Dict[str, Model] = {}
 
     # --- Model Instantiation Helper --- (Standard helper)
@@ -61,19 +70,12 @@ class SuggestionBlueprint(BlueprintBase):
         if not model_name: raise ValueError(f"Missing 'model' in profile '{profile_name}'.")
         if provider != "openai": raise ValueError(f"Unsupported provider: {provider}")
 
-        client_cache_key = f"{provider}_{profile_data.get('base_url')}"
-        if client_cache_key not in self._openai_client_cache:
-            client_kwargs = { "api_key": profile_data.get("api_key"), "base_url": profile_data.get("base_url") }
-            filtered_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
-            log_kwargs = {k:v for k,v in filtered_kwargs.items() if k != 'api_key'}
-            logger.debug(f"Creating new AsyncOpenAI client for '{profile_name}': {log_kwargs}")
-            try: self._openai_client_cache[client_cache_key] = AsyncOpenAI(**filtered_kwargs)
-            except Exception as e: raise ValueError(f"Failed to init client: {e}") from e
-        client = self._openai_client_cache[client_cache_key]
+        # Remove redundant client instantiation; rely on framework-level default client
+        # All blueprints now use the default client set at framework init
         logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') for '{profile_name}'.")
         try:
             # Ensure the model selected supports structured output (most recent OpenAI do)
-            model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=client)
+            model_instance = OpenAIChatCompletionsModel(model=model_name)
             self._model_instance_cache[profile_name] = model_instance
             return model_instance
         except Exception as e: raise ValueError(f"Failed to init LLM: {e}") from e
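
This hunk deletes the per-blueprint `AsyncOpenAI` cache and constructs `OpenAIChatCompletionsModel` without an explicit client, leaning on a client configured once at framework startup. A minimal sketch of that pattern using the openai-agents SDK's `set_default_openai_client` hook; where and how open-swarm actually performs this initialization is not visible in this diff:

```python
# Sketch, assuming framework init looks roughly like this; open-swarm's actual
# startup code is not part of this diff.
import os

from openai import AsyncOpenAI
from agents import set_default_openai_client

default_client = AsyncOpenAI(
    api_key=os.getenv("OPENAI_API_KEY"),
    base_url=os.getenv("OPENAI_BASE_URL"),  # e.g. a proxy endpoint; None falls back to api.openai.com
)
set_default_openai_client(default_client)

# Model construction can then omit the client, exactly as the + line above does:
# model_instance = OpenAIChatCompletionsModel(model=model_name)
```
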
@@ -82,7 +84,6 @@ class SuggestionBlueprint(BlueprintBase):
         """Create the SuggestionAgent."""
         logger.debug("Creating SuggestionAgent...")
         self._model_instance_cache = {}
-        self._openai_client_cache = {}
 
         default_profile_name = self.config.get("llm_profile", "default")
         # Verify the chosen profile/model supports structured output if possible, or rely on OpenAI's newer models
@@ -106,5 +107,27 @@ class SuggestionBlueprint(BlueprintBase):
         logger.debug("SuggestionAgent created with output_type enforcement.")
         return suggestion_agent
 
+    async def run(self, messages: List[Dict[str, Any]], **kwargs) -> Any:
+        """Main execution entry point for the Suggestion blueprint."""
+        logger.info("SuggestionBlueprint run method called.")
+        instruction = messages[-1].get("content", "") if messages else ""
+        async for chunk in self._run_non_interactive(instruction, **kwargs):
+            yield chunk
+        logger.info("SuggestionBlueprint run method finished.")
+
+    async def _run_non_interactive(self, instruction: str, **kwargs) -> Any:
+        logger.info(f"Running SuggestionBlueprint non-interactively with instruction: '{instruction[:100]}...'")
+        mcp_servers = kwargs.get("mcp_servers", [])
+        agent = self.create_starting_agent(mcp_servers=mcp_servers)
+        from agents import Runner
+        import os
+        model_name = os.getenv("LITELLM_MODEL") or os.getenv("DEFAULT_LLM") or "gpt-3.5-turbo"
+        try:
+            for chunk in Runner.run(agent, instruction):
+                yield chunk
+        except Exception as e:
+            logger.error(f"Error during non-interactive run: {e}", exc_info=True)
+            yield {"messages": [{"role": "assistant", "content": f"An error occurred: {e}"}]}
+
 if __name__ == "__main__":
     SuggestionBlueprint.main()
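
The new `run` method is an async generator, so callers consume it with `async for`. A minimal driver under that assumption (constructor arguments for `SuggestionBlueprint`, if any, are not shown in this diff):

```python
# Minimal driver for the async-generator run() added above. The shape of
# non-error chunks depends on Runner; the error path yields {"messages": [...]}.
import asyncio

async def main() -> None:
    blueprint = SuggestionBlueprint()  # constructor args, if any, not shown here
    messages = [{"role": "user", "content": "Suggest three follow-up questions."}]
    async for chunk in blueprint.run(messages):
        print(chunk)

asyncio.run(main())
```
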
@@ -1,304 +0,0 @@
-import logging
-import os
-import sys
-import asyncio
-import subprocess
-import shlex # Added for safe command splitting
-import re
-import inspect
-from pathlib import Path # Use pathlib for better path handling
-from typing import Dict, Any, List, Optional, ClassVar, AsyncGenerator
-
-try:
-    # Core imports from openai-agents
-    from agents import Agent, Tool, function_tool, Runner
-    from agents.mcp import MCPServer
-    from agents.models.interface import Model
-    from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
-    from openai import AsyncOpenAI
-
-    # Import our custom base class
-    from swarm.extensions.blueprint.blueprint_base import BlueprintBase
-except ImportError as e:
-    # Provide more helpful error message
-    print(f"ERROR: Import failed in BurntNoodlesBlueprint: {e}. Check 'openai-agents' install and project structure.")
-    print(f"Attempted import from directory: {os.path.dirname(__file__)}")
-    print(f"sys.path: {sys.path}")
-    sys.exit(1)
-
-# Configure logging for this blueprint module
-logger = logging.getLogger(__name__)
-# Logging level is controlled by BlueprintBase based on --debug flag
-
-# --- Tool Logic Definitions (Undecorated) ---
-def _git_status_logic() -> str:
-    """Executes 'git status --porcelain' and returns the current repository status."""
-    logger.info("Executing git status --porcelain")
-    try:
-        result = subprocess.run(["git", "status", "--porcelain"], capture_output=True, text=True, check=True, timeout=30)
-        output = result.stdout.strip()
-        logger.debug(f"Git status raw output:\n{output}")
-        return f"OK: Git Status:\n{output}" if output else "OK: No changes detected in the working directory."
-    except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
-    except subprocess.CalledProcessError as e: logger.error(f"Error executing git status: {e.stderr}"); return f"Error executing git status: {e.stderr}"
-    except subprocess.TimeoutExpired: logger.error("Git status command timed out."); return "Error: Git status command timed out."
-    except Exception as e: logger.error(f"Unexpected error during git status: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git status: {e}"
-
-def _git_diff_logic() -> str:
-    """Executes 'git diff' and returns the differences in the working directory."""
-    logger.info("Executing git diff")
-    try:
-        result = subprocess.run(["git", "diff"], capture_output=True, text=True, check=False, timeout=30)
-        output = result.stdout; stderr = result.stderr.strip()
-        if result.returncode != 0 and stderr: logger.error(f"Error executing git diff (Exit Code {result.returncode}): {stderr}"); return f"Error executing git diff: {stderr}"
-        logger.debug(f"Git diff raw output (Exit Code {result.returncode}):\n{output[:1000]}...")
-        return f"OK: Git Diff Output:\n{output}" if output else "OK: No differences found."
-    except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
-    except subprocess.TimeoutExpired: logger.error("Git diff command timed out."); return "Error: Git diff command timed out."
-    except Exception as e: logger.error(f"Unexpected error during git diff: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git diff: {e}"
-
-def _git_add_logic(file_path: str = ".") -> str:
-    """Executes 'git add' to stage changes for the specified file or all changes (default '.')."""
-    logger.info(f"Executing git add {file_path}")
-    try:
-        result = subprocess.run(["git", "add", file_path], capture_output=True, text=True, check=True, timeout=30)
-        logger.debug(f"Git add '{file_path}' completed successfully.")
-        return f"OK: Staged '{file_path}' successfully."
-    except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
-    except subprocess.CalledProcessError as e: logger.error(f"Error executing git add '{file_path}': {e.stderr}"); return f"Error executing git add '{file_path}': {e.stderr}"
-    except subprocess.TimeoutExpired: logger.error(f"Git add command timed out for '{file_path}'."); return f"Error: Git add command timed out for '{file_path}'."
-    except Exception as e: logger.error(f"Unexpected error during git add '{file_path}': {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git add '{file_path}': {e}"
-
-def _git_commit_logic(message: str) -> str:
-    """Executes 'git commit' with a provided commit message."""
-    logger.info(f"Executing git commit -m '{message[:50]}...'")
-    if not message or not message.strip(): logger.warning("Git commit attempted with empty message."); return "Error: Commit message cannot be empty."
-    try:
-        result = subprocess.run(["git", "commit", "-m", message], capture_output=True, text=True, check=False, timeout=30)
-        output = result.stdout.strip(); stderr = result.stderr.strip()
-        logger.debug(f"Git commit raw output (Exit Code {result.returncode}):\nSTDOUT: {output}\nSTDERR: {stderr}")
-        if "nothing to commit" in output or "nothing added to commit" in output or "no changes added to commit" in output:
-            logger.info("Git commit reported: Nothing to commit."); return "OK: Nothing to commit."
-        if result.returncode == 0: return f"OK: Committed with message '{message}'.\n{output}"
-        else: error_detail = stderr if stderr else output; logger.error(f"Error executing git commit (Exit Code {result.returncode}): {error_detail}"); return f"Error executing git commit: {error_detail}"
-    except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
-    except subprocess.TimeoutExpired: logger.error("Git commit command timed out."); return "Error: Git commit command timed out."
-    except Exception as e: logger.error(f"Unexpected error during git commit: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git commit: {e}"
-
-def _git_push_logic() -> str:
-    """Executes 'git push' to push staged commits to the remote repository."""
-    logger.info("Executing git push")
-    try:
-        result = subprocess.run(["git", "push"], capture_output=True, text=True, check=True, timeout=120)
-        output = result.stdout.strip() + "\n" + result.stderr.strip()
-        logger.debug(f"Git push raw output:\n{output}")
-        return f"OK: Push completed.\n{output.strip()}"
-    except FileNotFoundError: logger.error("Git command not found."); return "Error: git command not found."
-    except subprocess.CalledProcessError as e: error_output = e.stdout.strip() + "\n" + e.stderr.strip(); logger.error(f"Error executing git push: {error_output}"); return f"Error executing git push: {error_output.strip()}"
-    except subprocess.TimeoutExpired: logger.error("Git push command timed out."); return "Error: Git push command timed out."
-    except Exception as e: logger.error(f"Unexpected error during git push: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during git push: {e}"
-
-def _run_npm_test_logic(args: str = "") -> str:
-    """Executes 'npm run test' with optional arguments."""
-    try:
-        cmd_list = ["npm", "run", "test"] + (shlex.split(args) if args else []); cmd_str = ' '.join(cmd_list)
-        logger.info(f"Executing npm test: {cmd_str}")
-        result = subprocess.run(cmd_list, capture_output=True, text=True, check=False, timeout=120)
-        output = f"Exit Code: {result.returncode}\nSTDOUT:\n{result.stdout.strip()}\nSTDERR:\n{result.stderr.strip()}"
-        if result.returncode == 0: logger.debug(f"npm test completed successfully:\n{output}"); return f"OK: npm test finished.\n{output}"
-        else: logger.error(f"npm test failed (Exit Code {result.returncode}):\n{output}"); return f"Error: npm test failed.\n{output}"
-    except FileNotFoundError: logger.error("npm command not found."); return "Error: npm command not found."
-    except subprocess.TimeoutExpired: logger.error("npm test command timed out."); return "Error: npm test command timed out."
-    except Exception as e: logger.error(f"Unexpected error during npm test: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during npm test: {e}"
-
-def _run_pytest_logic(args: str = "") -> str:
-    """Executes 'uv run pytest' with optional arguments."""
-    try:
-        cmd_list = ["uv", "run", "pytest"] + (shlex.split(args) if args else []); cmd_str = ' '.join(cmd_list)
-        logger.info(f"Executing pytest via uv: {cmd_str}")
-        result = subprocess.run(cmd_list, capture_output=True, text=True, check=False, timeout=120)
-        output = f"Exit Code: {result.returncode}\nSTDOUT:\n{result.stdout.strip()}\nSTDERR:\n{result.stderr.strip()}"
-        if result.returncode == 0: logger.debug(f"pytest completed successfully:\n{output}"); return f"OK: pytest finished successfully.\n{output}"
-        else: logger.warning(f"pytest finished with failures (Exit Code {result.returncode}):\n{output}"); return f"OK: Pytest finished with failures (Exit Code {result.returncode}).\n{output}"
-    except FileNotFoundError: logger.error("uv command not found."); return "Error: uv command not found."
-    except subprocess.TimeoutExpired: logger.error("pytest command timed out."); return "Error: pytest command timed out."
-    except Exception as e: logger.error(f"Unexpected error during pytest: {e}", exc_info=logger.level <= logging.DEBUG); return f"Error during pytest: {e}"
-
-# --- Tool Definitions (Decorated - reverted to default naming) ---
-git_status = function_tool(_git_status_logic)
-git_diff = function_tool(_git_diff_logic)
-git_add = function_tool(_git_add_logic)
-git_commit = function_tool(_git_commit_logic)
-git_push = function_tool(_git_push_logic)
-run_npm_test = function_tool(_run_npm_test_logic)
-run_pytest = function_tool(_run_pytest_logic)
-
-# --- Agent Instructions ---
-# (Instructions remain the same)
-michael_instructions = """
-You are Michael Toasted, the resolute leader of the Burnt Noodles creative team.
-Your primary role is to understand the user's request, break it down into actionable steps,
-and delegate tasks appropriately to your team members: Fiona Flame (Git operations) and Sam Ashes (Testing).
-You should only execute simple Git status checks (`git_status`, `git_diff`) yourself. Delegate all other Git actions (add, commit, push) to Fiona. Delegate all testing actions (npm test, pytest) to Sam.
-Synthesize the results from your team and provide the final response to the user.
-Available Function Tools (for you): git_status, git_diff.
-Available Agent Tools (for delegation): Fiona_Flame, Sam_Ashes.
-"""
-fiona_instructions = """
-You are Fiona Flame, the git specialist. Execute git commands precisely as requested using your available function tools:
-`git_status`, `git_diff`, `git_add`, `git_commit`, `git_push`.
-When asked to commit, analyze the diff if necessary and generate concise, informative conventional commit messages (e.g., 'feat: ...', 'fix: ...', 'refactor: ...', 'chore: ...').
-Always stage changes using `git_add` before committing.
-If asked to push, first ask the user (Michael) for confirmation before executing `git_push`.
-If a task involves testing (like running tests after a commit), delegate it to the Sam_Ashes agent tool.
-For tasks outside your Git domain, report back to Michael; do not use the Michael_Toasted tool directly.
-Available Function Tools: git_status, git_diff, git_add, git_commit, git_push.
-Available Agent Tools: Sam_Ashes.
-"""
-sam_instructions = """
-You are Sam Ashes, the meticulous testing operative. Execute test commands using your available function tools: `run_npm_test` or `run_pytest`.
-Interpret the results: Report failures immediately and clearly. If tests pass, consider running with coverage (e.g., using `uv run pytest --cov` via the `run_pytest` tool) if appropriate or requested, and report the coverage summary.
-For tasks outside testing (e.g., needing code changes before testing, or git operations), refer back to Michael; do not use the Michael_Toasted or Fiona_Flame tools directly.
-Available Function Tools: run_npm_test, run_pytest.
-Available Agent Tools: None (Report back to Michael for delegation).
-"""
-
-# --- Blueprint Definition ---
-class BurntNoodlesBlueprint(BlueprintBase):
-    metadata: ClassVar[Dict[str, Any]] = {
-        "name": "BurntNoodlesBlueprint",
-        "title": "Burnt Noodles",
-        "description": "A multi-agent team managing Git operations and code testing.",
-        "version": "1.1.0",
-        "author": "Open Swarm Team (Refactored)",
-        "tags": ["git", "test", "multi-agent", "collaboration", "refactor"],
-        "required_mcp_servers": [],
-    }
-
-    _openai_client_cache: Dict[str, AsyncOpenAI] = {}
-    _model_instance_cache: Dict[str, Model] = {}
-
-    def _get_model_instance(self, profile_name: str) -> Model:
-        if profile_name in self._model_instance_cache:
-            logger.debug(f"Using cached Model instance for profile '{profile_name}'.")
-            return self._model_instance_cache[profile_name]
-
-        logger.debug(f"Creating new Model instance for profile '{profile_name}'.")
-        profile_data = getattr(self, "get_llm_profile", lambda prof: {"provider": "openai", "model": "gpt-mock"})(profile_name)
-        if not profile_data:
-            logger.critical(f"Cannot create Model instance: LLM profile '{profile_name}' (or 'default') not found in configuration.")
-            raise ValueError(f"Missing LLM profile configuration for '{profile_name}' or 'default'.")
-
-        provider = profile_data.get("provider", "openai").lower()
-        model_name = profile_data.get("model")
-        if not model_name:
-            logger.critical(f"LLM profile '{profile_name}' is missing the required 'model' key.")
-            raise ValueError(f"Missing 'model' key in LLM profile '{profile_name}'.")
-
-        if provider != "openai":
-            logger.error(f"Unsupported LLM provider '{provider}' in profile '{profile_name}'. Only 'openai' is supported in this blueprint.")
-            raise ValueError(f"Unsupported LLM provider: {provider}")
-
-        client_cache_key = f"{provider}_{profile_data.get('base_url')}"
-        if client_cache_key not in self._openai_client_cache:
-            client_kwargs = { "api_key": profile_data.get("api_key"), "base_url": profile_data.get("base_url") }
-            filtered_client_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
-            log_client_kwargs = {k:v for k,v in filtered_client_kwargs.items() if k != 'api_key'}
-            logger.debug(f"Creating new AsyncOpenAI client for profile '{profile_name}' with config: {log_client_kwargs}")
-            try:
-                self._openai_client_cache[client_cache_key] = AsyncOpenAI(**filtered_client_kwargs)
-            except Exception as e:
-                logger.error(f"Failed to create AsyncOpenAI client for profile '{profile_name}': {e}", exc_info=True)
-                raise ValueError(f"Failed to initialize OpenAI client for profile '{profile_name}': {e}") from e
-
-        openai_client_instance = self._openai_client_cache[client_cache_key]
-
-        logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') with client instance for profile '{profile_name}'.")
-        try:
-            model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=openai_client_instance)
-            self._model_instance_cache[profile_name] = model_instance
-            return model_instance
-        except Exception as e:
-            logger.error(f"Failed to instantiate OpenAIChatCompletionsModel for profile '{profile_name}': {e}", exc_info=True)
-            raise ValueError(f"Failed to initialize LLM provider for profile '{profile_name}': {e}") from e
-
-    def create_starting_agent(self, mcp_servers: List[MCPServer]) -> Agent:
-        logger.debug("Creating Burnt Noodles agent team...")
-        config = self._load_configuration() if getattr(self, "config", None) is None else self.config
-        self._model_instance_cache = {}
-        self._openai_client_cache = {}
-
-        default_profile_name = config.get("llm_profile", "default")
-        logger.debug(f"Using LLM profile '{default_profile_name}' for all Burnt Noodles agents.")
-        default_model_instance = self._get_model_instance(default_profile_name)
-
-        # --- Use the decorated tool variables ---
-        fiona_flame = Agent(
-            name="Fiona_Flame",
-            model=default_model_instance,
-            instructions=fiona_instructions,
-            tools=[git_status, git_diff, git_add, git_commit, git_push] # Agent tools added later
-        )
-        sam_ashes = Agent(
-            name="Sam_Ashes",
-            model=default_model_instance,
-            instructions=sam_instructions,
-            tools=[run_npm_test, run_pytest] # Agent tools added later
-        )
-        michael_toasted = Agent(
-            name="Michael_Toasted",
-            model=default_model_instance,
-            instructions=michael_instructions,
-            tools=[
-                git_status, # Michael's direct tools
-                git_diff,
-                fiona_flame.as_tool(
-                    tool_name="Fiona_Flame",
-                    tool_description="Delegate Git operations (add, commit, push) or complex status/diff queries to Fiona."
-                ),
-                sam_ashes.as_tool(
-                    tool_name="Sam_Ashes",
-                    tool_description="Delegate testing tasks (npm test, pytest) to Sam."
-                ),
-            ],
-            mcp_servers=mcp_servers
-        )
-        # --- End tool variable usage ---
-
-        fiona_flame.tools.append(
-            sam_ashes.as_tool(tool_name="Sam_Ashes", tool_description="Delegate testing tasks (npm test, pytest) to Sam.")
-        )
-
-        logger.debug("Burnt Noodles agent team created successfully. Michael Toasted is the starting agent.")
-        return michael_toasted
-
-    async def run(self, messages: List[Dict[str, Any]], **kwargs) -> AsyncGenerator[Dict[str, Any], None]:
-        """
-        Main execution entry point for the Burnt Noodles blueprint.
-        Delegates to _run_non_interactive for CLI-like execution.
-        """
-        logger.info("BurntNoodlesBlueprint run method called.")
-        instruction = messages[-1].get("content", "") if messages else ""
-        async for chunk in self._run_non_interactive(instruction, **kwargs):
-            yield chunk
-        logger.info("BurntNoodlesBlueprint run method finished.")
-
-    async def _run_non_interactive(self, instruction: str, **kwargs) -> AsyncGenerator[Dict[str, Any], None]:
-        """Helper to run the agent flow based on an instruction."""
-        logger.info(f"Running Burnt Noodles non-interactively with instruction: '{instruction[:100]}...'")
-        mcp_servers = kwargs.get("mcp_servers", [])
-        starting_agent = self.create_starting_agent(mcp_servers=mcp_servers)
-        runner = Runner(agent=starting_agent)
-        try:
-            final_result = await runner.run(instruction)
-            logger.info(f"Non-interactive run finished. Final Output: {final_result.final_output}")
-            yield { "messages": [ {"role": "assistant", "content": final_result.final_output} ] }
-        except Exception as e:
-            logger.error(f"Error during non-interactive run: {e}", exc_info=True)
-            yield { "messages": [ {"role": "assistant", "content": f"An error occurred: {e}"} ] }
-
-
-# Standard Python entry point for direct script execution
-if __name__ == "__main__":
-    BurntNoodlesBlueprint.main()
-
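
The deleted file's delegation pattern (wrapping one `Agent` as a callable tool of another via `as_tool`) is standard openai-agents usage. A sketch condensed from the removed `create_starting_agent`, with models and most tools omitted:

```python
# Condensed from the deleted BurntNoodlesBlueprint: agent-as-tool delegation.
from agents import Agent

specialist = Agent(
    name="Sam_Ashes",
    instructions="Run the test suite and report results.",
)
lead = Agent(
    name="Michael_Toasted",
    instructions="Delegate testing tasks to Sam_Ashes and synthesize the results.",
    tools=[
        specialist.as_tool(
            tool_name="Sam_Ashes",
            tool_description="Delegate testing tasks (npm test, pytest) to Sam.",
        ),
    ],
)
```
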