open-swarm 0.1.1745017234__py3-none-any.whl → 0.1.1745019858__py3-none-any.whl
This diff shows the content of two publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- {open_swarm-0.1.1745017234.dist-info → open_swarm-0.1.1745019858.dist-info}/METADATA +29 -1
- {open_swarm-0.1.1745017234.dist-info → open_swarm-0.1.1745019858.dist-info}/RECORD +41 -27
- swarm/blueprints/blueprint_audit_status.json +27 -0
- swarm/blueprints/chatbot/blueprint_chatbot.py +93 -28
- swarm/blueprints/codey/CODEY.md +15 -0
- swarm/blueprints/codey/README.md +63 -0
- swarm/blueprints/codey/blueprint_codey.py +179 -108
- swarm/blueprints/codey/instructions.md +17 -0
- swarm/blueprints/divine_code/blueprint_divine_code.py +113 -7
- swarm/blueprints/django_chat/blueprint_django_chat.py +47 -0
- swarm/blueprints/family_ties/blueprint_family_ties.py +43 -10
- swarm/blueprints/geese/blueprint_geese.py +219 -0
- swarm/blueprints/mission_improbable/blueprint_mission_improbable.py +120 -63
- swarm/blueprints/monkai_magic/blueprint_monkai_magic.py +45 -1
- swarm/blueprints/nebula_shellz/blueprint_nebula_shellz.py +43 -27
- swarm/blueprints/omniplex/blueprint_omniplex.py +44 -31
- swarm/blueprints/rue_code/blueprint_rue_code.py +141 -141
- swarm/blueprints/suggestion/blueprint_suggestion.py +8 -17
- swarm/blueprints/unapologetic_press/blueprint_unapologetic_press.py +100 -1
- swarm/blueprints/whiskeytango_foxtrot/blueprint_whiskeytango_foxtrot.py +52 -28
- swarm/core/blueprint_ux.py +19 -21
- swarm/core/cli/__init__.py +1 -0
- swarm/core/cli/commands/__init__.py +1 -0
- swarm/core/cli/commands/blueprint_management.py +7 -0
- swarm/core/cli/interactive_shell.py +14 -0
- swarm/core/cli/main.py +50 -0
- swarm/core/cli/utils/__init__.py +1 -0
- swarm/core/cli/utils/discover_commands.py +18 -0
- swarm/extensions/blueprint/cli_handler.py +19 -0
- swarm/extensions/cli/commands/blueprint_management.py +46 -8
- swarm/extensions/cli/commands/edit_config.py +8 -1
- swarm/extensions/cli/commands/validate_env.py +8 -1
- swarm/extensions/cli/interactive_shell.py +16 -2
- swarm/extensions/cli/utils/__init__.py +1 -0
- swarm/extensions/cli/utils/prompt_user.py +3 -0
- swarm/extensions/launchers/swarm_api.py +12 -0
- swarm/extensions/launchers/swarm_cli.py +12 -0
- swarm/utils/context_utils.py +10 -4
- swarm/blueprints/gaggle/blueprint_gaggle.py +0 -303
- swarm/llm/chat_completion.py +0 -196
- {open_swarm-0.1.1745017234.dist-info → open_swarm-0.1.1745019858.dist-info}/WHEEL +0 -0
- {open_swarm-0.1.1745017234.dist-info → open_swarm-0.1.1745019858.dist-info}/entry_points.txt +0 -0
- {open_swarm-0.1.1745017234.dist-info → open_swarm-0.1.1745019858.dist-info}/licenses/LICENSE +0 -0
swarm/blueprints/gaggle/blueprint_gaggle.py
DELETED
@@ -1,303 +0,0 @@
import os
from dotenv import load_dotenv; load_dotenv(override=True)

import logging
logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(name)s: %(message)s')
import sys

# --- Universal Logging Reset ---
def force_info_logging():
    root = logging.getLogger()
    for handler in root.handlers[:]:
        root.removeHandler(handler)
    loglevel = os.environ.get('LOGLEVEL', None)
    debug_env = os.environ.get('SWARM_DEBUG', '0') == '1'
    debug_arg = '--debug' in sys.argv
    if debug_arg or debug_env or (loglevel and loglevel.upper() == 'DEBUG'):
        level = logging.DEBUG
    else:
        level = logging.INFO
    logging.basicConfig(level=level, format='[%(levelname)s] %(name)s: %(message)s')
    root.setLevel(level)

force_info_logging()

import argparse
from typing import List, Dict, Any, Optional, ClassVar

# Ensure src is in path for BlueprintBase import (if needed, adjust path)
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
src_path = os.path.join(project_root, 'src')
if src_path not in sys.path: sys.path.insert(0, src_path)

from typing import Optional
from pathlib import Path
try:
    from agents import Agent, Tool, function_tool, Runner
    from agents.mcp import MCPServer
    from agents.models.interface import Model
    from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
    from openai import AsyncOpenAI
    from swarm.core.blueprint_base import BlueprintBase
except ImportError as e:
    print(f"ERROR: Import failed in blueprint_gaggle: {e}. Check 'openai-agents' install and project structure.")
    print(f"sys.path: {sys.path}")
    sys.exit(1)

import argparse

# --- Logging Setup ---
def setup_logging():
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--debug', action='store_true', help='Enable debug logging')
    args, _ = parser.parse_known_args()
    loglevel = os.environ.get('LOGLEVEL', None)
    if args.debug or os.environ.get('SWARM_DEBUG', '0') == '1' or (loglevel and loglevel.upper() == 'DEBUG'):
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)
    return args

args = setup_logging()

logger = logging.getLogger(__name__)

# --- Tools ---
def _create_story_outline(topic: str) -> str:
    logger.info(f"Tool: Generating outline for: {topic}")
    outline = f"Story Outline for '{topic}':\n1. Beginning: Introduce characters and setting.\n2. Middle: Develop conflict and rising action.\n3. Climax: The peak of the conflict.\n4. End: Resolution and aftermath."
    logger.debug(f"Generated outline: {outline}")
    return outline

@function_tool
def create_story_outline(topic: str) -> str:
    """Generates a basic story outline based on a topic."""
    return _create_story_outline(topic)

def _write_story_part(part_name: str, outline: str, previous_parts: str) -> str:
    logger.info(f"Tool: Writing story part: {part_name}")
    content = f"## {part_name}\n\nThis is the draft content for the '{part_name}' section. It follows:\n'{previous_parts[:100]}...' \nIt should align with the outline:\n'{outline}'"
    logger.debug(f"Generated content for {part_name}: {content[:100]}...")
    return content

@function_tool
def write_story_part(part_name: str, outline: str, previous_parts: str) -> str:
    """Writes a specific part of the story using the outline and previous context."""
    return _write_story_part(part_name, outline, previous_parts)

def _edit_story(full_story: str, edit_instructions: str) -> str:
    logger.info(f"Tool: Editing story with instructions: {edit_instructions}")
    edited_content = f"*** Edited Story Draft ***\n(Based on instructions: '{edit_instructions}')\n\n{full_story}\n\n[Editor's Notes: Minor tweaks applied for flow.]"
    logger.debug("Editing complete.")
    return edited_content

@function_tool
def edit_story(full_story: str, edit_instructions: str) -> str:
    """Edits the complete story based on instructions."""
    return _edit_story(full_story, edit_instructions)

# --- Blueprint Definition ---
from rich.console import Console
from rich.panel import Panel

class GaggleBlueprint(BlueprintBase):
    def __init__(self, blueprint_id: str, config_path: Optional[Path] = None, **kwargs):
        super().__init__(blueprint_id, config_path=config_path, **kwargs)

    """A multi-agent blueprint using a Coordinator, Planner, Writer, and Editor for collaborative story writing."""
    metadata: ClassVar[Dict[str, Any]] = {
        "name": "GaggleBlueprint",
        "title": "Gaggle Story Writing Team",
        "description": "A multi-agent blueprint for collaborative story writing using Planner, Writer, and Editor roles coordinated by a central agent.",
        "version": "1.2.0", # Updated version
        "author": "Open Swarm Team (Refactored)",
        "tags": ["writing", "collaboration", "multi-agent", "storytelling"],
        "required_mcp_servers": [],
        "env_vars": [],
    }

    # Caches
    _openai_client_cache: Dict[str, AsyncOpenAI] = {}
    _model_instance_cache: Dict[str, Model] = {}

    def display_splash_screen(self, animated: bool = False):
        console = Console()
        splash = r'''
[bold magenta]
 ____ _ _ ____ _ _
/ ___| __ _ _ __ __ _| | ___| |__ / ___|| |_ __ _ _ __| |_ ___
| | _ / _` | '_ \ / _` | |/ _ \ '_ \ \___ \| __/ _` | '__| __/ _ \
| |_| | (_| | | | | (_| | | __/ | | | ___) | || (_| | | | || __/
\____|\__,_|_| |_|\__, |_|\___|_| |_|____/ \__\__,_|_| \__\___|
                   |___/
[/bold magenta]
[white]Collaborative Story Writing Blueprint[/white]
'''
        panel = Panel(splash, title="[bold magenta]Gaggle Blueprint[/]", border_style="magenta", expand=False)
        console.print(panel)
        console.print() # Blank line for spacing

    # --- Model Instantiation Helper --- (Standard helper)
    def _get_model_instance(self, profile_name: str) -> Model:
        """Retrieves or creates an LLM Model instance."""
        # ... (Implementation is the same as in previous refactors, e.g., Dilbot's) ...
        if profile_name in self._model_instance_cache:
            logger.debug(f"Using cached Model instance for profile '{profile_name}'.")
            return self._model_instance_cache[profile_name]
        logger.debug(f"Creating new Model instance for profile '{profile_name}'.")
        profile_data = self.get_llm_profile(profile_name)
        if not profile_data:
            logger.critical(f"LLM profile '{profile_name}' (or 'default') not found.")
            raise ValueError(f"Missing LLM profile configuration for '{profile_name}' or 'default'.")
        provider = profile_data.get("provider", "openai").lower()
        model_name = profile_data.get("model")
        if not model_name:
            logger.critical(f"LLM profile '{profile_name}' missing 'model' key.")
            raise ValueError(f"Missing 'model' key in LLM profile '{profile_name}'.")
        if provider != "openai":
            logger.error(f"Unsupported LLM provider '{provider}'.")
            raise ValueError(f"Unsupported LLM provider: {provider}")
        client_cache_key = f"{provider}_{profile_data.get('base_url')}"
        if client_cache_key not in self._openai_client_cache:
            client_kwargs = { "api_key": profile_data.get("api_key"), "base_url": profile_data.get("base_url") }
            filtered_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
            log_kwargs = {k:v for k,v in filtered_kwargs.items() if k != 'api_key'}
            logger.debug(f"Creating new AsyncOpenAI client for '{profile_name}': {log_kwargs}")
            try: self._openai_client_cache[client_cache_key] = AsyncOpenAI(**filtered_kwargs)
            except Exception as e: raise ValueError(f"Failed to init OpenAI client: {e}") from e
        client = self._openai_client_cache[client_cache_key]
        logger.debug(f"Instantiating OpenAIChatCompletionsModel(model='{model_name}') for '{profile_name}'.")
        try:
            model_instance = OpenAIChatCompletionsModel(model=model_name, openai_client=client)
            self._model_instance_cache[profile_name] = model_instance
            return model_instance
        except Exception as e: raise ValueError(f"Failed to init LLM provider: {e}") from e


    def create_starting_agent(self, mcp_servers: List[MCPServer]) -> Agent:
        """Creates the story writing team and returns the Coordinator."""
        logger.debug("Creating Gaggle Story Writing Team...")
        self._model_instance_cache = {}
        self._openai_client_cache = {}

        default_profile_name = self.config.get("llm_profile", "default")
        logger.debug(f"Using LLM profile '{default_profile_name}' for Gaggle agents.")
        model_instance = self._get_model_instance(default_profile_name)

        # --- Define Agent Instructions ---
        planner_instructions = "You are the Planner. Your goal is to take a user's story topic and create a coherent outline using the 'create_story_outline' tool. Respond ONLY with the generated outline string."
        writer_instructions = "You are a Writer. You receive a story part name (e.g., 'Introduction', 'Climax'), the full outline, and any previously written parts. Write the content for ONLY your assigned part using the 'write_story_part' tool, ensuring it flows logically from previous parts and fits the outline. Respond ONLY with the text generated for your part."
        editor_instructions = "You are the Editor. You receive the complete draft of the story and editing instructions (e.g., 'make it funnier', 'check for consistency'). Use the 'edit_story' tool to revise the text. Respond ONLY with the final, edited story string."
        coordinator_instructions = (
            "You are the Coordinator for a team of writing agents (Planner, Writer, Editor).\n"
            "1. Receive the user's story topic.\n"
            "2. Delegate to the Planner tool to get a story outline.\n"
            "3. Identify the story parts from the outline (e.g., Beginning, Middle, Climax, End).\n"
            "4. Sequentially delegate writing each part to the Writer tool. Provide the part name, the full outline, and all previously written parts as context for the Writer.\n"
            "5. Accumulate the written parts into a full draft.\n"
            "6. Delegate the complete draft to the Editor tool with simple instructions like 'Ensure coherence and flow'.\n"
            "7. Return the final, edited story as the result."
        )

        # Instantiate agents, passing their specific function tools
        planner_agent = Agent(
            name="Planner",
            instructions=planner_instructions,
            model=model_instance,
            tools=[create_story_outline],
            mcp_servers=mcp_servers
        )
        writer_agent = Agent(
            name="Writer",
            instructions=writer_instructions,
            model=model_instance,
            tools=[write_story_part],
            mcp_servers=mcp_servers
        )
        editor_agent = Agent(
            name="Editor",
            instructions=editor_instructions,
            model=model_instance,
            tools=[edit_story],
            mcp_servers=mcp_servers
        )

        # Instantiate Coordinator, giving it the other agents as tools
        coordinator_agent = Agent(
            name="Coordinator",
            instructions=coordinator_instructions,
            model=model_instance, # Coordinator also needs a model
            tools=[
                planner_agent.as_tool(
                    tool_name="Planner",
                    tool_description="Delegate creating a story outline based on a topic."
                ),
                writer_agent.as_tool(
                    tool_name="Writer",
                    tool_description="Delegate writing a specific part of the story. Requires part_name, outline, and previous_parts."
                ),
                editor_agent.as_tool(
                    tool_name="Editor",
                    tool_description="Delegate editing the full story draft. Requires full_story and edit_instructions."
                ),
            ],
            mcp_servers=mcp_servers
        )

        logger.debug("Gaggle Story Writing Team created. Coordinator is the starting agent.")
        return coordinator_agent

    async def run(self, messages: List[Dict[str, str]]):
        """
        Run the Gaggle blueprint agentic workflow.
        Accepts a list of messages (e.g., task prompt from CLI) and yields output chunks.
        """
        # For demonstration, this will run the collaborative story workflow
        topic = None
        for msg in messages:
            if msg.get("role") == "user":
                topic = msg.get("content")
                break
        if not topic:
            yield {"messages": [{"role": "system", "content": "No topic provided."}]}
            return
        # Step 1: Planner creates outline
        outline = _create_story_outline(topic)
        yield {"messages": [{"role": "planner", "content": outline}]}
        # Step 2: Writer writes story parts (simulate parts)
        story_parts = []
        for part in ["Beginning", "Middle", "Climax", "End"]:
            part_text = _write_story_part(part, outline, "\n".join(story_parts))
            story_parts.append(part_text)
            yield {"messages": [{"role": "writer", "content": part_text}]}
        # Step 3: Editor edits the full story
        full_story = "\n\n".join(story_parts)
        edited = _edit_story(full_story, "Polish for flow and clarity.")
        yield {"messages": [{"role": "editor", "content": edited}]}

    async def _run_non_interactive(self, instruction: str, **kwargs):
        """Adapter for CLI non-interactive execution, yields results from the public run method. Accepts **kwargs for compatibility."""
        messages = [{"role": "user", "content": instruction}]
        async for chunk in self.run(messages, **kwargs):
            yield chunk


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Gaggle Story Writing Team')
    parser.add_argument('instruction', nargs=argparse.REMAINDER, help='Instruction for Gaggle to process (all args after -- are joined as the prompt)')
    args = parser.parse_args()
    instruction_args = args.instruction
    if instruction_args and instruction_args[0] == '--':
        instruction_args = instruction_args[1:]
    instruction = ' '.join(instruction_args).strip() if instruction_args else None
    blueprint = GaggleBlueprint('gaggle')
    import asyncio
    if instruction:
        async def main():
            async for chunk in blueprint._run_non_interactive(instruction):
                print(chunk)
        asyncio.run(main())
    else:
        blueprint.display_splash_screen()
        blueprint.run_interactive()
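For context, the removed blueprint exposed an async run() generator that yielded one chunk per stage (planner outline, four writer parts, editor pass). A minimal driver sketch, assuming the previous wheel (0.1.1745017234) is still installed; the topic string is illustrative:

# Hypothetical driver for the removed GaggleBlueprint; assumes the previous
# wheel, where blueprint_gaggle.py still shipped.
import asyncio

from swarm.blueprints.gaggle.blueprint_gaggle import GaggleBlueprint

async def main() -> None:
    blueprint = GaggleBlueprint('gaggle')
    # Illustrative topic; run() takes chat-style messages and yields one
    # chunk per stage: planner outline, four writer parts, editor pass.
    messages = [{"role": "user", "content": "A flock of geese learns to sail"}]
    async for chunk in blueprint.run(messages):
        for msg in chunk["messages"]:
            print(f"[{msg['role']}] {msg['content'][:80]}")

asyncio.run(main())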
swarm/llm/chat_completion.py
DELETED
@@ -1,196 +0,0 @@
"""
Chat Completion Module

This module handles chat completion logic for the Swarm framework, including message preparation,
tool call repair, and interaction with the OpenAI API. Located in llm/ for LLM-specific functionality.
"""

import os
import json
import logging
from typing import List, Optional, Dict, Any, Union, AsyncGenerator
from collections import defaultdict

import asyncio
from openai import AsyncOpenAI, OpenAIError
from ..types import Agent
from ..utils.redact import redact_sensitive_data
from ..utils.general_utils import serialize_datetime
from ..utils.message_utils import filter_duplicate_system_messages, update_null_content
from ..utils.context_utils import get_token_count, truncate_message_history

# Configure module-level logging
logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG) # Keep level controlled by main setup
if not logger.handlers:
    stream_handler = logging.StreamHandler()
    formatter = logging.Formatter("[%(levelname)s] %(asctime)s - %(name)s - %(message)s")
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

# --- PATCH: Suppress OpenAI tracing/telemetry errors if using LiteLLM/custom endpoint ---
import logging
import os
if os.environ.get("LITELLM_BASE_URL") or os.environ.get("OPENAI_BASE_URL"):
    # Silence openai.agents tracing/telemetry errors
    logging.getLogger("openai.agents").setLevel(logging.CRITICAL)
    try:
        import openai.agents.tracing
        openai.agents.tracing.TracingClient = lambda *a, **kw: None
    except Exception:
        pass

# --- PATCH: Enforce custom endpoint, never fallback to OpenAI if custom base_url is set ---
def _enforce_litellm_only(client):
    # If client has a base_url attribute, check it
    base_url = getattr(client, 'base_url', None)
    if base_url and 'openai.com' in base_url:
        return # Using OpenAI, allowed
    if base_url and 'openai.com' not in base_url:
        # If any fallback to OpenAI API is attempted, raise error
        import traceback
        raise RuntimeError(f"Attempted fallback to OpenAI API when custom base_url is set! base_url={base_url}\n{traceback.format_stack()}")


async def get_chat_completion(
    client: AsyncOpenAI,
    agent: Agent,
    history: List[Dict[str, Any]],
    context_variables: dict,
    current_llm_config: Dict[str, Any],
    max_context_tokens: int,
    max_context_messages: int,
    tools: Optional[List[Dict[str, Any]]] = None,
    tool_choice: Optional[str] = "auto",
    model_override: Optional[str] = None,
    stream: bool = False,
    debug: bool = False
) -> Union[Dict[str, Any], AsyncGenerator[Any, None]]:
    _enforce_litellm_only(client)
    """
    Retrieve a chat completion from the LLM for the given agent and history.
    Relies on openai-agents Runner for actual execution, this might become deprecated.
    """
    if not agent:
        logger.error("Cannot generate chat completion: Agent is None")
        raise ValueError("Agent is required")

    logger.debug(f"Generating chat completion for agent '{agent.name}'")
    active_model = model_override or current_llm_config.get("model", "default")
    client_kwargs = {
        "api_key": current_llm_config.get("api_key"),
        "base_url": current_llm_config.get("base_url")
    }
    client_kwargs = {k: v for k, v in client_kwargs.items() if v is not None}
    redacted_kwargs = redact_sensitive_data(client_kwargs, sensitive_keys=["api_key"])
    logger.debug(f"Using client with model='{active_model}', base_url='{client_kwargs.get('base_url', 'default')}', api_key={redacted_kwargs['api_key']}")

    # --- ENFORCE: Disallow fallback to OpenAI if custom base_url is set ---
    if client_kwargs.get("base_url") and "openai.com" not in client_kwargs["base_url"]:
        # If the base_url is set and is not OpenAI, ensure no fallback to OpenAI API
        if "openai.com" in os.environ.get("OPENAI_API_BASE", ""):
            raise RuntimeError(f"[SECURITY] Fallback to OpenAI API attempted with base_url={client_kwargs['base_url']}. Refusing for safety.")

    context_variables = defaultdict(str, context_variables)
    instructions = agent.instructions(context_variables) if callable(agent.instructions) else agent.instructions
    if not isinstance(instructions, str):
        logger.warning(f"Invalid instructions type for '{agent.name}': {type(instructions)}. Converting to string.")
        instructions = str(instructions)

    # --- REMOVED call to repair_message_payload for system message ---
    messages = [{"role": "system", "content": instructions}]

    if not isinstance(history, list):
        logger.error(f"Invalid history type for '{agent.name}': {type(history)}. Expected list.")
        history = []
    seen_ids = set()
    for msg in history:
        msg_id = msg.get("id", hash(json.dumps(msg, sort_keys=True, default=serialize_datetime)))
        if msg_id not in seen_ids:
            seen_ids.add(msg_id)
            if "tool_calls" in msg and msg["tool_calls"] is not None and not isinstance(msg["tool_calls"], list):
                logger.warning(f"Invalid tool_calls in history for '{msg.get('sender', 'unknown')}': {msg['tool_calls']}. Setting to None.")
                msg["tool_calls"] = None
            if "content" in msg and msg["content"] is None:
                msg["content"] = ""
            messages.append(msg)

    messages = filter_duplicate_system_messages(messages)
    messages = truncate_message_history(messages, active_model, max_context_tokens, max_context_messages)
    # --- REMOVED call to repair_message_payload after truncation ---
    messages = update_null_content(messages) # Keep null content update

    logger.debug(f"Prepared {len(messages)} messages for '{agent.name}'")
    if debug:
        logger.debug(f"Messages: {json.dumps(messages, indent=2, default=str)}")

    create_params = {
        "model": active_model,
        "messages": messages,
        "stream": stream,
        "temperature": current_llm_config.get("temperature", 0.7),
        "tools": tools if tools else None,
        "tool_choice": tool_choice if tools else None,
    }
    if getattr(agent, "response_format", None):
        create_params["response_format"] = agent.response_format
    create_params = {k: v for k, v in create_params.items() if v is not None}

    tool_info_log = f", tools_count={len(tools)}" if tools else ", tools=None"
    logger.debug(f"Chat completion params: model='{active_model}', messages_count={len(messages)}, stream={stream}{tool_info_log}, tool_choice={create_params.get('tool_choice')}")

    try:
        logger.debug(f"Calling OpenAI API for '{agent.name}' with model='{active_model}'")
        prev_openai_api_key = os.environ.pop("OPENAI_API_KEY", None)
        try:
            completion = await client.chat.completions.create(**create_params)
            if stream:
                return completion

            if completion.choices and len(completion.choices) > 0 and completion.choices[0].message:
                message_dict = completion.choices[0].message.model_dump(exclude_none=True)
                log_msg = message_dict.get("content", "No content")[:50] if message_dict.get("content") else "No content"
                if message_dict.get("tool_calls"): log_msg += f" (+{len(message_dict['tool_calls'])} tool calls)"
                logger.debug(f"OpenAI completion received for '{agent.name}': {log_msg}...")
                return message_dict
            else:
                logger.warning(f"No valid message in completion for '{agent.name}'")
                return {"role": "assistant", "content": "No response generated"}
        finally:
            if prev_openai_api_key is not None:
                os.environ["OPENAI_API_KEY"] = prev_openai_api_key
    except OpenAIError as e:
        logger.error(f"Chat completion failed for '{agent.name}': {e}")
        raise
    except Exception as e:
        logger.error(f"Unexpected error during chat completion for '{agent.name}': {e}", exc_info=True)
        raise


async def get_chat_completion_message(
    client: AsyncOpenAI,
    agent: Agent,
    history: List[Dict[str, Any]],
    context_variables: dict,
    current_llm_config: Dict[str, Any],
    max_context_tokens: int,
    max_context_messages: int,
    tools: Optional[List[Dict[str, Any]]] = None,
    tool_choice: Optional[str] = "auto",
    model_override: Optional[str] = None,
    stream: bool = False,
    debug: bool = False
) -> Union[Dict[str, Any], AsyncGenerator[Any, None]]:
    _enforce_litellm_only(client)
    """
    Wrapper to retrieve and validate a chat completion message (returns dict or stream).
    Relies on openai-agents Runner for actual execution, this might become deprecated.
    """
    logger.debug(f"Fetching chat completion message for '{agent.name}'")
    completion_result = await get_chat_completion(
        client, agent, history, context_variables, current_llm_config,
        max_context_tokens, max_context_messages,
        tools=tools, tool_choice=tool_choice,
        model_override=model_override, stream=stream, debug=debug
    )
    return completion_result
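Callers of these removed helpers are expected to move to the openai-agents Runner path, which the module's own docstrings anticipated. For reference, a minimal sketch of how get_chat_completion_message was invoked under the previous wheel; the Agent fields, model name, and config values below are illustrative assumptions, not taken from this diff:

# Hypothetical caller for the removed helper; assumes the previous wheel,
# where swarm/llm/chat_completion.py still shipped.
import asyncio

from openai import AsyncOpenAI
from swarm.llm.chat_completion import get_chat_completion_message
from swarm.types import Agent  # Agent field names here are assumptions

async def main() -> None:
    client = AsyncOpenAI()  # reads OPENAI_API_KEY / OPENAI_BASE_URL from the env
    agent = Agent(name="assistant", instructions="You are a terse assistant.")
    message = await get_chat_completion_message(
        client=client,
        agent=agent,
        history=[{"role": "user", "content": "Say hello."}],
        context_variables={},
        current_llm_config={"model": "gpt-4o-mini", "temperature": 0.7},
        max_context_tokens=4096,
        max_context_messages=50,
    )
    # Non-streaming calls return the assistant message as a dict
    print(message.get("content"))

asyncio.run(main())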
{open_swarm-0.1.1745017234.dist-info → open_swarm-0.1.1745019858.dist-info}/WHEEL
RENAMED
File without changes

{open_swarm-0.1.1745017234.dist-info → open_swarm-0.1.1745019858.dist-info}/entry_points.txt
RENAMED
File without changes

{open_swarm-0.1.1745017234.dist-info → open_swarm-0.1.1745019858.dist-info}/licenses/LICENSE
RENAMED
File without changes