swarms 7.6.1-py3-none-any.whl → 7.6.4-py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- swarms/__init__.py +1 -0
- swarms/agents/__init__.py +4 -5
- swarms/agents/flexion_agent.py +2 -1
- swarms/agents/reasoning_agents.py +10 -0
- swarms/client/__init__.py +15 -0
- swarms/prompts/multi_agent_collab_prompt.py +313 -0
- swarms/structs/__init__.py +10 -17
- swarms/structs/agent.py +178 -262
- swarms/structs/base_swarm.py +0 -7
- swarms/structs/concurrent_workflow.py +2 -2
- swarms/structs/conversation.py +16 -2
- swarms/structs/de_hallucination_swarm.py +8 -4
- swarms/structs/dynamic_conversational_swarm.py +226 -0
- swarms/structs/groupchat.py +80 -84
- swarms/structs/hiearchical_swarm.py +1 -1
- swarms/structs/hybrid_hiearchical_peer_swarm.py +256 -0
- swarms/structs/majority_voting.py +1 -1
- swarms/structs/mixture_of_agents.py +1 -1
- swarms/structs/multi_agent_exec.py +63 -139
- swarms/structs/multi_agent_orchestrator.py +1 -1
- swarms/structs/output_types.py +3 -0
- swarms/structs/rearrange.py +66 -205
- swarms/structs/sequential_workflow.py +34 -47
- swarms/structs/swarm_router.py +3 -2
- swarms/telemetry/bootup.py +19 -38
- swarms/telemetry/main.py +62 -22
- swarms/tools/tool_schema_base_model.py +57 -0
- swarms/utils/auto_download_check_packages.py +2 -2
- swarms/utils/disable_logging.py +0 -17
- swarms/utils/history_output_formatter.py +8 -3
- swarms/utils/litellm_wrapper.py +117 -1
- {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/METADATA +1 -5
- {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/RECORD +37 -37
- swarms/structs/agent_security.py +0 -318
- swarms/structs/airflow_swarm.py +0 -430
- swarms/structs/output_type.py +0 -18
- swarms/utils/agent_ops_check.py +0 -26
- swarms/utils/pandas_utils.py +0 -92
- /swarms/{structs/swarms_api.py → client/main.py} +0 -0
- {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/LICENSE +0 -0
- {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/WHEEL +0 -0
- {swarms-7.6.1.dist-info → swarms-7.6.4.dist-info}/entry_points.txt +0 -0
swarms/structs/conversation.py
CHANGED
@@ -119,7 +119,7 @@ class Conversation(BaseStructure):
             content (Union[str, dict, list]): The content of the message to be added.
         """
         now = datetime.datetime.now()
-
+        now.strftime("%Y-%m-%d %H:%M:%S")
 
         # Base message with role
         message = {
@@ -129,8 +129,12 @@ class Conversation(BaseStructure):
         # Handle different content types
         if isinstance(content, dict) or isinstance(content, list):
             message["content"] = content
+        elif self.time_enabled:
+            message["content"] = (
+                f"Time: {now.strftime('%Y-%m-%d %H:%M:%S')} \n {content}"
+            )
         else:
-            message["content"] =
+            message["content"] = content
 
         # Add the message to history immediately without waiting for token count
         self.conversation_history.append(message)
@@ -510,6 +514,16 @@ class Conversation(BaseStructure):
         """
         return f"{self.conversation_history[-1]['role']}: {self.conversation_history[-1]['content']}"
 
+    def get_final_message_content(self):
+        """Return the content of the final message from the conversation history.
+
+        Returns:
+            str: The content of the final message.
+        """
+        output = self.conversation_history[-1]["content"]
+        # print(output)
+        return output
+
 
 # # Example usage
 # # conversation = Conversation()
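Net effect of the Conversation changes: string content is prefixed with a timestamp only when time_enabled is set, dict and list content is stored untouched, and a get_final_message_content() helper returns the last message's content. A minimal usage sketch of the resulting behaviour (the time_enabled constructor argument is assumed from the existing Conversation API rather than shown in this diff):

from swarms.structs.conversation import Conversation

# Assumes Conversation(time_enabled=...) as in earlier releases.
conv = Conversation(time_enabled=True)
conv.add(role="User", content="What changed in 7.6.4?")         # string: timestamp-prefixed
conv.add(role="Assistant", content={"answer": "see the diff"})  # dict: stored as-is
print(conv.get_final_message_content())                         # -> {'answer': 'see the diff'}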
swarms/structs/de_hallucination_swarm.py
CHANGED
@@ -4,7 +4,8 @@ from loguru import logger
 from swarms.structs.agent import Agent
 
 # Prompt templates for different agent roles
-GENERATOR_PROMPT = """
+GENERATOR_PROMPT = """
+You are a knowledgeable assistant tasked with providing accurate information on a wide range of topics.
 
 Your responsibilities:
 1. Provide accurate information based on your training data
@@ -22,7 +23,8 @@ When responding to queries:
 Remember, it's better to acknowledge ignorance than to provide incorrect information.
 """
 
-CRITIC_PROMPT = """
+CRITIC_PROMPT = """
+You are a critical reviewer tasked with identifying potential inaccuracies, hallucinations, or unsupported claims in AI-generated text.
 
 Your responsibilities:
 1. Carefully analyze the provided text for factual errors
@@ -47,7 +49,8 @@ Focus particularly on:
 Be thorough and specific in your critique. Provide actionable feedback for improvement.
 """
 
-REFINER_PROMPT = """
+REFINER_PROMPT = """
+You are a refinement specialist tasked with improving text based on critical feedback.
 
 Your responsibilities:
 1. Carefully review the original text and the critical feedback
@@ -67,7 +70,8 @@ Guidelines for refinement:
 The refined text should be helpful and informative while being scrupulously accurate.
 """
 
-VALIDATOR_PROMPT = """
+VALIDATOR_PROMPT = """
+You are a validation expert tasked with ensuring the highest standards of accuracy in refined AI outputs.
 
 Your responsibilities:
 1. Verify that all critical issues from previous feedback have been properly addressed
swarms/structs/dynamic_conversational_swarm.py
ADDED
@@ -0,0 +1,226 @@
+import json
+import random
+from swarms.structs.agent import Agent
+from typing import List
+from swarms.structs.conversation import Conversation
+from swarms.utils.history_output_formatter import (
+    history_output_formatter,
+)
+from swarms.utils.any_to_str import any_to_str
+
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "select_agent",
+            "description": "Analyzes the input task and selects the most appropriate agent configuration, outputting both the agent name and the formatted response.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "respond_or_no_respond": {
+                        "type": "boolean",
+                        "description": "Whether the agent should respond to the response or not.",
+                    },
+                    "reasoning": {
+                        "type": "string",
+                        "description": "The reasoning behind the selection of the agent and response.",
+                    },
+                    "agent_name": {
+                        "type": "string",
+                        "description": "The name of the selected agent that is most appropriate for handling the given task.",
+                    },
+                    "response": {
+                        "type": "string",
+                        "description": "A clear and structured description of the response for the next agent.",
+                    },
+                },
+                "required": [
+                    "reasoning",
+                    "agent_name",
+                    "response",
+                    "respond_or_no_respond",
+                ],
+            },
+        },
+    },
+]
+
+
+class DynamicConversationalSwarm:
+    def __init__(
+        self,
+        name: str = "Dynamic Conversational Swarm",
+        description: str = "A swarm that uses a dynamic conversational model to solve complex tasks.",
+        agents: List[Agent] = [],
+        max_loops: int = 1,
+        output_type: str = "list",
+        *args,
+        **kwargs,
+    ):
+        self.name = name
+        self.description = description
+        self.agents = agents
+        self.max_loops = max_loops
+        self.output_type = output_type
+
+        self.conversation = Conversation()
+
+        # Agents in the chat
+        agents_in_chat = self.get_agents_info()
+        self.conversation.add(
+            role="Conversation Log", content=agents_in_chat
+        )
+
+        self.inject_tools()
+
+    # Inject tools into the agents
+    def inject_tools(self):
+        for agent in self.agents:
+            agent.tools_list_dictionary = tools
+
+    def parse_json_into_dict(self, json_str: str) -> dict:
+        try:
+            return json.loads(json_str)
+        except json.JSONDecodeError:
+            raise ValueError("Invalid JSON string")
+
+    def find_agent_by_name(self, agent_name: str) -> Agent:
+        for agent in self.agents:
+            if agent.name == agent_name:
+                return agent
+        raise ValueError(f"Agent with name {agent_name} not found")
+
+    def run_agent(self, agent_name: str, task: str) -> str:
+        agent = self.find_agent_by_name(agent_name)
+        return agent.run(task)
+
+    def fetch_random_agent_name(self) -> str:
+        return random.choice(self.agents).agent_name
+
+    def run(self, task: str) -> str:
+        """
+        Run the dynamic conversational swarm for a specified number of loops.
+        Each agent has access to the full conversation history.
+
+        Args:
+            task (str): The initial task/prompt to process
+
+        Returns:
+            str: The final response after all loops are complete
+        """
+        self.conversation.add(
+            role=f"{self.fetch_random_agent_name()}", content=task
+        )
+
+        # for loop in range(self.max_loops):
+        #     # Add loop marker to conversation for clarity
+        #     self.conversation.add(
+        #         role="System",
+        #         content=f"=== Starting Loop {loop + 1}/{self.max_loops} ==="
+        #     )
+
+        #     # First agent interaction
+        #     current_agent = self.randomly_select_agent()
+        #     response = self.run_agent(current_agent.name, self.conversation.get_str())
+        #     self.conversation.add(role=current_agent.name, content=any_to_str(response))
+
+        #     try:
+        #         # Parse response and get next agent
+        #         response_dict = self.parse_json_into_dict(response)
+
+        #         # Check if we should continue or end the loop
+        #         if not response_dict.get("respond_or_no_respond", True):
+        #             break
+
+        #         # Get the task description for the next agent
+        #         next_task = response_dict.get("task_description", self.conversation.get_str())
+
+        #         # Run the next agent with the specific task description
+        #         next_agent = self.find_agent_by_name(response_dict["agent_name"])
+        #         next_response = self.run_agent(next_agent.name, next_task)
+
+        #         # Add both the task description and response to the conversation
+        #         self.conversation.add(
+        #             role="System",
+        #             content=f"Response from {response_dict['agent_name']}: {next_task}"
+        #         )
+        #         self.conversation.add(role=next_agent.name, content=any_to_str(next_response))
+
+        #     except (ValueError, KeyError) as e:
+        #         self.conversation.add(
+        #             role="System",
+        #             content=f"Error in loop {loop + 1}: {str(e)}"
+        #         )
+        #         break
+
+        # Run first agent
+        current_agent = self.randomly_select_agent()
+        response = self.run_agent(
+            current_agent.agent_name, self.conversation.get_str()
+        )
+        self.conversation.add(
+            role=current_agent.agent_name,
+            content=any_to_str(response),
+        )
+
+        # Convert to json
+        response_dict = self.parse_json_into_dict(response)
+
+        # Fetch task
+        respone_two = response_dict["response"]
+        agent_name = response_dict["agent_name"]
+
+        print(f"Response from {agent_name}: {respone_two}")
+
+        # Run next agent
+        next_response = self.run_agent(
+            agent_name, self.conversation.get_str()
+        )
+        self.conversation.add(
+            role=agent_name, content=any_to_str(next_response)
+        )
+
+        # # Get the next agent
+        # response_three = self.parse_json_into_dict(next_response)
+        # agent_name_three = response_three["agent_name"]
+        # respone_four = response_three["response"]
+
+        # print(f"Response from {agent_name_three}: {respone_four}")
+        # # Run the next agent
+        # next_response = self.run_agent(agent_name_three, self.conversation.get_str())
+        # self.conversation.add(role=agent_name_three, content=any_to_str(next_response))
+
+        # Format and return the final conversation history
+        return history_output_formatter(
+            self.conversation, type=self.output_type
+        )
+
+    def randomly_select_agent(self) -> Agent:
+        return random.choice(self.agents)
+
+    def get_agents_info(self) -> str:
+        """
+        Fetches and formats information about all available agents in the system.
+
+        Returns:
+            str: A formatted string containing names and descriptions of all agents.
+        """
+        if not self.agents:
+            return "No agents currently available in the system."
+
+        agents_info = [
+            "Agents In the System:",
+            "",
+        ]  # Empty string for line spacing
+
+        for idx, agent in enumerate(self.agents, 1):
+            agents_info.extend(
+                [
+                    f"[Agent {idx}]",
+                    f"Name: {agent.name}",
+                    f"Description: {agent.description}",
+                    "",  # Empty string for line spacing between agents
+                ]
+            )
+
+        return "\n".join(agents_info).strip()
swarms/structs/groupchat.py
CHANGED
@@ -1,4 +1,5 @@
 import concurrent.futures
+import random
 from datetime import datetime
 from typing import Callable, List
 
@@ -7,6 +8,13 @@ from pydantic import BaseModel, Field
 
 from swarms.structs.agent import Agent
 from swarms.structs.conversation import Conversation
+from swarms.structs.multi_agent_exec import get_agents_info
+from swarms.utils.history_output_formatter import (
+    history_output_formatter,
+)
+from swarms.prompts.multi_agent_collab_prompt import (
+    MULTI_AGENT_COLLAB_PROMPT_TWO,
+)
 
 
 class AgentResponse(BaseModel):
@@ -230,15 +238,23 @@
         speaker_fn: SpeakerFunction = round_robin,
         max_loops: int = 1,
         rules: str = "",
+        output_type: str = "string",
     ):
         self.name = name
         self.description = description
         self.agents = agents
         self.speaker_fn = speaker_fn
         self.max_loops = max_loops
-        self.
+        self.output_type = output_type
         self.rules = rules
 
+        self.conversation = Conversation(
+            time_enabled=False, rules=rules
+        )
+
+        agent_context = f"\n Group Chat Name: {self.name}\nGroup Chat Description: {self.description}\n Agents in your Group Chat: {get_agents_info(self.agents)}"
+        self.conversation.add(role="System", content=agent_context)
+
         self.reliability_check()
 
     def reliability_check(self):
@@ -248,23 +264,24 @@
         Raises:
             ValueError: If any required components are missing or invalid
         """
+
         if len(self.agents) < 2:
             raise ValueError(
                 "At least two agents are required for a group chat"
             )
-
-            raise ValueError("No speaker function provided")
+
         if self.max_loops <= 0:
             raise ValueError("Max loops must be greater than 0")
+
         for agent in self.agents:
-
-            raise ValueError(
-                f"Invalid agent type: {type(agent)}. Must be Agent instance"
-            )
+            agent.system_prompt += MULTI_AGENT_COLLAB_PROMPT_TWO
 
     def run(self, task: str, img: str = None, *args, **kwargs) -> str:
         """
-        Executes a conversation between agents about the given task.
+        Executes a dynamic conversation between agents about the given task.
+
+        Agents are selected randomly to speak, creating a more natural flow
+        with varying conversation lengths.
 
         Args:
             task (str): The task or topic for agents to discuss
@@ -279,106 +296,85 @@
             ValueError: If task is empty or invalid
             Exception: If any error occurs during conversation
         """
+
         if not task or not isinstance(task, str):
             raise ValueError("Task must be a non-empty string")
 
         # Initialize conversation with context
-        agent_context = f"Group Chat Name: {self.name}\nGroup Chat Description: {self.description}\nRules: {self.rules}\n Other agents: {', '.join([a.agent_name for a in self.agents])}"
-        self.conversation.add(role="system", content=agent_context)
         self.conversation.add(role="User", content=task)
 
-        print(
-            f"....... conversation history: \n {self.conversation.return_history_as_string()}"
-        )
-
        try:
             turn = 0
-
-
-
-
-
-
-            # Get agents who should speak this turn
-            speaking_agents = [
-                agent
-                for agent in self.agents
-                if self.speaker_fn(context, agent)
-            ]
-
-            if not speaking_agents:
-                consecutive_silent_turns += 1
-                if consecutive_silent_turns >= max_silent_turns:
-                    logger.debug(
-                        "Multiple silent turns, ending conversation"
-                    )
-                    break
-                continue
+            # Determine a random number of conversation turns
+            target_turns = random.randint(1, 4)
+            logger.debug(
+                f"Planning for approximately {target_turns} conversation turns"
+            )
 
-
-
-            )
+            # Keep track of which agent spoke last to create realistic exchanges
+            last_speaker = None
 
-
-            for agent in speaking_agents:
-                try:
-                    # Build context-aware prompt
-                    prompt = (
-                        f"You're {agent.agent_name} participating in a group chat.\n"
-                        f"Chat Purpose: {self.description}\n"
-                        f"Current Discussion: {task}\n"
-                        f"Chat History:\n{self.conversation.return_history_as_string()}\n"
-                        f"As {agent.agent_name}, please provide your response:"
-                    )
+            while turn < target_turns:
 
-
-
-            )
+                # Select an agent to speak (different from the last speaker if possible)
+                available_agents = self.agents.copy()
 
-
-
-                        img=img,
-                        *args,
-                        **kwargs,
-                    )
+                if last_speaker and len(available_agents) > 1:
+                    available_agents.remove(last_speaker)
 
-
-
-
-
-
+                current_speaker = random.choice(available_agents)
+
+                try:
+                    # Build complete context with conversation history
+                    conversation_history = (
+                        self.conversation.return_history_as_string()
+                    )
+
+                    # Prepare a prompt that explicitly encourages responding to others
+                    if last_speaker:
+                        prompt = f"The previous message was from {last_speaker.agent_name}. As {current_speaker.agent_name}, please respond to what they and others have said about: {task}"
+                    else:
+                        prompt = f"As {current_speaker.agent_name}, please start the discussion about: {task}"
 
+                    # Get the agent's response with full context awareness
+                    message = current_speaker.run(
+                        task=f"{conversation_history} {prompt}",
+                    )
+
+                    # Only add meaningful responses
+                    if message and not message.isspace():
                         self.conversation.add(
-                            role=
+                            role=current_speaker.agent_name,
+                            content=message,
                         )
 
                         logger.info(
-                            f"Turn {turn}, {
+                            f"Turn {turn}, {current_speaker.agent_name} responded"
                         )
 
-
-
-
-                    )
-                    # Continue with other agents instead of crashing
-                    continue
+                    # Update the last speaker
+                    last_speaker = current_speaker
+                    turn += 1
 
-
+                    # Occasionally end early to create natural variation
+                    if (
+                        turn > 3 and random.random() < 0.15
+                    ):  # 15% chance to end after at least 3 turns
+                        logger.debug(
+                            "Random early conversation end"
+                        )
+                        break
 
-
-
-
-                    )
-                    if all(
-                        "conclusion" in msg.lower()
-                        for msg in last_messages
-                    ):
-                        logger.debug(
-                            "Natural conversation conclusion detected"
+                except Exception as e:
+                    logger.error(
+                        f"Error from {current_speaker.agent_name}: {e}"
                    )
-
+                    # Skip this agent and continue conversation
+                    continue
 
-            return
+            return history_output_formatter(
+                self.conversation, self.output_type
+            )
 
         except Exception as e:
             logger.error(f"Error in chat: {e}")
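Taken together, GroupChat in 7.6.4 builds its Conversation up front (seeded with the group description and get_agents_info output), appends MULTI_AGENT_COLLAB_PROMPT_TWO to every agent's system prompt, runs a randomized one-to-four-turn exchange that avoids back-to-back speakers, and returns the history through history_output_formatter via the new output_type parameter. A minimal usage sketch; the Agent constructor arguments are assumptions, not taken from this diff:

from swarms import Agent
from swarms.structs.groupchat import GroupChat

# Hypothetical agents; agent_name/description/model_name are assumed arguments.
analyst = Agent(
    agent_name="Analyst",
    description="Examines the data.",
    model_name="gpt-4o-mini",
    max_loops=1,
)
skeptic = Agent(
    agent_name="Skeptic",
    description="Challenges weak claims.",
    model_name="gpt-4o-mini",
    max_loops=1,
)

chat = GroupChat(
    name="Review Chat",
    description="Short internal review discussion.",
    agents=[analyst, skeptic],
    output_type="string",  # new in 7.6.4; forwarded to history_output_formatter
)

print(chat.run("Should the 7.6.1 -> 7.6.4 upgrade ship this week?"))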
@@ -8,7 +8,7 @@ from pydantic import BaseModel, Field
 from swarms.structs.agent import Agent
 from swarms.structs.base_swarm import BaseSwarm
 from swarms.structs.conversation import Conversation
-from swarms.structs.
+from swarms.structs.output_types import OutputType
 from swarms.utils.formatter import formatter
 
 from swarms.utils.function_caller_model import OpenAIFunctionCaller