swarms 7.7.8__py3-none-any.whl → 7.7.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
swarms/__init__.py CHANGED
@@ -15,4 +15,3 @@ from swarms.structs import * # noqa: E402, F403
  from swarms.telemetry import * # noqa: E402, F403
  from swarms.tools import * # noqa: E402, F403
  from swarms.utils import * # noqa: E402, F403
- from swarms.client import * # noqa: E402, F403
@@ -0,0 +1,206 @@
+ # AI generates an initial response
+ # AI decides how many "thinking rounds" it needs
+ # For each round:
+ #     - Generates 3 alternative responses
+ #     - Evaluates all responses
+ #     - Picks the best one
+ # Final response is the survivor of this AI battle royale
+ from swarms import Agent
+
+
+ # OpenAI function schema for determining thinking rounds
+ thinking_rounds_schema = {
+     "name": "determine_thinking_rounds",
+     "description": "Determines the optimal number of thinking rounds needed for a task",
+     "parameters": {
+         "type": "object",
+         "properties": {
+             "num_rounds": {
+                 "type": "integer",
+                 "description": "The number of thinking rounds needed (1-5)",
+                 "minimum": 1,
+                 "maximum": 5,
+             }
+         },
+         "required": ["num_rounds"],
+     },
+ }
+
+ # System prompt for determining thinking rounds
+ THINKING_ROUNDS_PROMPT = """You are an expert at determining the optimal number of thinking rounds needed for complex tasks. Your role is to analyze the task and determine how many rounds of thinking and evaluation would be most beneficial.
+
+ Consider the following factors when determining the number of rounds:
+ 1. Task Complexity: More complex tasks may require more rounds
+ 2. Potential for Multiple Valid Approaches: Tasks with multiple valid solutions need more rounds
+ 3. Risk of Error: Higher-stakes tasks may benefit from more rounds
+ 4. Time Sensitivity: Balance thoroughness with efficiency
+
+ Guidelines for number of rounds:
+ - 1 round: Simple, straightforward tasks with clear solutions
+ - 2-3 rounds: Moderately complex tasks with some ambiguity
+ - 4-5 rounds: Highly complex tasks with multiple valid approaches or high-stakes decisions
+
+ Your response should be a single number between 1 and 5, representing the optimal number of thinking rounds needed."""
+
+ # Schema for generating alternative responses
+ alternative_responses_schema = {
+     "name": "generate_alternatives",
+     "description": "Generates multiple alternative responses to a task",
+     "parameters": {
+         "type": "object",
+         "properties": {
+             "alternatives": {
+                 "type": "array",
+                 "description": "List of alternative responses",
+                 "items": {
+                     "type": "object",
+                     "properties": {
+                         "response": {
+                             "type": "string",
+                             "description": "The alternative response",
+                         },
+                         "reasoning": {
+                             "type": "string",
+                             "description": "Explanation of why this approach was chosen",
+                         },
+                     },
+                     "required": ["response", "reasoning"],
+                 },
+                 "minItems": 3,
+                 "maxItems": 3,
+             }
+         },
+         "required": ["alternatives"],
+     },
+ }
+
+ # Schema for evaluating responses
+ evaluation_schema = {
+     "name": "evaluate_responses",
+     "description": "Evaluates and ranks alternative responses",
+     "parameters": {
+         "type": "object",
+         "properties": {
+             "evaluation": {
+                 "type": "object",
+                 "properties": {
+                     "best_response": {
+                         "type": "string",
+                         "description": "The selected best response",
+                     },
+                     "ranking": {
+                         "type": "array",
+                         "description": "Ranked list of responses from best to worst",
+                         "items": {
+                             "type": "object",
+                             "properties": {
+                                 "response": {
+                                     "type": "string",
+                                     "description": "The response",
+                                 },
+                                 "score": {
+                                     "type": "number",
+                                     "description": "Score from 0-100",
+                                 },
+                                 "reasoning": {
+                                     "type": "string",
+                                     "description": "Explanation of the score",
+                                 },
+                             },
+                             "required": [
+                                 "response",
+                                 "score",
+                                 "reasoning",
+                             ],
+                         },
+                     },
+                 },
+                 "required": ["best_response", "ranking"],
+             }
+         },
+         "required": ["evaluation"],
+     },
+ }
+
+ # System prompt for generating alternatives
+ ALTERNATIVES_PROMPT = """You are an expert at generating diverse and creative alternative responses to tasks. Your role is to generate 3 distinct approaches to solving the given task.
+
+ For each alternative:
+ 1. Consider a different perspective or approach
+ 2. Provide clear reasoning for why this approach might be effective
+ 3. Ensure alternatives are meaningfully different from each other
+ 4. Maintain high quality and relevance to the task
+
+ Your response should include 3 alternatives, each with its own reasoning."""
+
+ # System prompt for evaluation
+ EVALUATION_PROMPT = """You are an expert at evaluating and comparing different responses to tasks. Your role is to critically analyze each response and determine which is the most effective.
+
+ Consider the following criteria when evaluating:
+ 1. Relevance to the task
+ 2. Completeness of the solution
+ 3. Creativity and innovation
+ 4. Practicality and feasibility
+ 5. Clarity and coherence
+
+ Your response should include:
+ 1. The best response selected
+ 2. A ranked list of all responses with scores and reasoning"""
+
+
+ class CortAgent:
+     def __init__(
+         self,
+         alternative_responses: int = 3,
+     ):
+         self.thinking_rounds = Agent(
+             agent_name="CortAgent",
+             agent_description="CortAgent is a multi-step agent that uses a battle royale approach to determine the best response to a task.",
+             model_name="gpt-4o-mini",
+             max_loops=1,
+             dynamic_temperature_enabled=True,
+             tools_list_dictionary=thinking_rounds_schema,
+             system_prompt=THINKING_ROUNDS_PROMPT,
+         )
+
+         self.alternatives_agent = Agent(
+             agent_name="CortAgentAlternatives",
+             agent_description="Generates multiple alternative responses to a task",
+             model_name="gpt-4o-mini",
+             max_loops=1,
+             dynamic_temperature_enabled=True,
+             tools_list_dictionary=alternative_responses_schema,
+             system_prompt=ALTERNATIVES_PROMPT,
+         )
+
+         self.evaluation_agent = Agent(
+             agent_name="CortAgentEvaluation",
+             agent_description="Evaluates and ranks alternative responses",
+             model_name="gpt-4o-mini",
+             max_loops=1,
+             dynamic_temperature_enabled=True,
+             tools_list_dictionary=evaluation_schema,
+             system_prompt=EVALUATION_PROMPT,
+         )
+
+     def run(self, task: str):
+         # First determine number of thinking rounds
+         num_rounds = self.thinking_rounds.run(task)
+
+         # Initialize with the task
+         current_task = task
+         best_response = None
+
+         # Run the battle royale for the determined number of rounds
+         for round_num in range(num_rounds):
+             # Generate alternatives
+             alternatives = self.alternatives_agent.run(current_task)
+
+             # Evaluate alternatives
+             evaluation = self.evaluation_agent.run(alternatives)
+
+             # Update best response and current task for next round
+             best_response = evaluation["evaluation"]["best_response"]
+             current_task = f"Previous best response: {best_response}\nOriginal task: {task}"
+
+         return best_response
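
A minimal usage sketch of the new CortAgent class (illustrative only, not part of this diff). It assumes credentials for the gpt-4o-mini calls are configured, that the thinking-rounds agent returns an integer usable by range(), and that the evaluation agent returns a dict shaped like evaluation_schema; the task string is a made-up example.

# Illustrative usage sketch (not part of this diff). Assumes gpt-4o-mini
# credentials are configured and that the agents' outputs match the schemas
# above: an integer round count and an {"evaluation": {"best_response": ...}}
# payload.
cort = CortAgent(alternative_responses=3)
best = cort.run("Outline a migration plan from REST to gRPC for an internal service")
print(best)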
@@ -0,0 +1,173 @@
+ from swarms import Agent
+ from typing import List
+
+
+ # System prompt for REACT agent
+ REACT_AGENT_PROMPT = """
+ You are a REACT (Reason, Act, Observe) agent designed to solve tasks through an iterative process of reasoning and action. You maintain memory of previous steps to build upon past actions and observations.
+
+ Your process follows these key components:
+
+ 1. MEMORY: Review and utilize previous steps
+    - Access and analyze previous observations
+    - Build upon past thoughts and plans
+    - Learn from previous actions
+    - Use historical context to make better decisions
+
+ 2. OBSERVE: Analyze current state
+    - Consider both new information and memory
+    - Identify relevant patterns from past steps
+    - Note any changes or progress made
+    - Evaluate success of previous actions
+
+ 3. THINK: Process and reason
+    - Combine new observations with historical knowledge
+    - Consider how past steps influence current decisions
+    - Identify patterns and learning opportunities
+    - Plan improvements based on previous outcomes
+
+ 4. PLAN: Develop next steps
+    - Create strategies that build on previous success
+    - Avoid repeating unsuccessful approaches
+    - Consider long-term goals and progress
+    - Maintain consistency with previous actions
+
+ 5. ACT: Execute with context
+    - Implement actions that progress from previous steps
+    - Build upon successful past actions
+    - Adapt based on learned experiences
+    - Maintain continuity in approach
+
+ For each step, you should:
+ - Reference relevant previous steps
+ - Show how current decisions relate to past actions
+ - Demonstrate learning and adaptation
+ - Maintain coherent progression toward the goal
+
+ Your responses should be structured, logical, and show clear reasoning that builds upon previous steps."""
+
+ # Schema for REACT agent responses
+ react_agent_schema = {
+     "type": "function",
+     "function": {
+         "name": "generate_react_response",
+         "description": "Generates a structured REACT agent response with memory of previous steps",
+         "parameters": {
+             "type": "object",
+             "properties": {
+                 "memory_reflection": {
+                     "type": "string",
+                     "description": "Analysis of previous steps and their influence on current thinking",
+                 },
+                 "observation": {
+                     "type": "string",
+                     "description": "Current state observation incorporating both new information and historical context",
+                 },
+                 "thought": {
+                     "type": "string",
+                     "description": "Reasoning that builds upon previous steps and current observation",
+                 },
+                 "plan": {
+                     "type": "string",
+                     "description": "Structured plan that shows progression from previous actions",
+                 },
+                 "action": {
+                     "type": "string",
+                     "description": "Specific action that builds upon previous steps and advances toward the goal",
+                 },
+             },
+             "required": [
+                 "memory_reflection",
+                 "observation",
+                 "thought",
+                 "plan",
+                 "action",
+             ],
+         },
+     },
+ }
+
+
+ class ReactAgent:
+     def __init__(
+         self,
+         name: str = "react-agent-o1",
+         description: str = "A react agent that uses o1 preview to solve tasks",
+         model_name: str = "openai/gpt-4o",
+         max_loops: int = 1,
+     ):
+         self.name = name
+         self.description = description
+         self.model_name = model_name
+         self.max_loops = max_loops
+
+         self.agent = Agent(
+             agent_name=self.name,
+             agent_description=self.description,
+             model_name=self.model_name,
+             max_loops=1,
+             tools_list_dictionary=[react_agent_schema],
+             output_type="final",
+         )
+
+         # Initialize memory for storing steps
+         self.memory: List[str] = []
+
+     def step(self, task: str) -> str:
+         """Execute a single step of the REACT process.
+
+         Args:
+             task: The task description or current state
+
+         Returns:
+             String response from the agent
+         """
+         response = self.agent.run(task)
+         print(response)
+         return response
+
+     def run(self, task: str, *args, **kwargs) -> List[str]:
+         """Run the REACT agent for multiple steps with memory.
+
+         Args:
+             task: The initial task description
+             *args: Additional positional arguments
+             **kwargs: Additional keyword arguments
+
+         Returns:
+             List of all steps taken as strings
+         """
+         # Reset memory at the start of a new run
+         self.memory = []
+
+         current_task = task
+         for i in range(self.max_loops):
+             print(f"\nExecuting step {i+1}/{self.max_loops}")
+             step_result = self.step(current_task)
+             print(step_result)
+
+             # Store step in memory
+             self.memory.append(step_result)
+
+             # Update task with previous response and memory context
+             memory_context = (
+                 "\n\nMemory of previous steps:\n"
+                 + "\n".join(
+                     f"Step {j+1}:\n{step}"
+                     for j, step in enumerate(self.memory)
+                 )
+             )
+
+             current_task = f"Previous response:\n{step_result}\n{memory_context}\n\nContinue with the original task: {task}"
+
+         return self.memory
+
+
+ # if __name__ == "__main__":
+ #     agent = ReactAgent(
+ #         max_loops=1
+ #     )  # Increased max_loops to see the iteration
+ #     result = agent.run(
+ #         "Write a short story about a robot that can fly."
+ #     )
+ #     print(result)
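
The commented-out __main__ block above indicates the intended entry point. A hedged usage sketch (illustrative, not part of this diff) that runs more than one loop so the memory context actually accumulates; it assumes credentials for the default "openai/gpt-4o" model are configured.

# Illustrative usage sketch (not part of this diff). Assumes credentials for
# the default "openai/gpt-4o" model are configured.
agent = ReactAgent(max_loops=3)
steps = agent.run("Write a short story about a robot that can fly.")
for i, step in enumerate(steps, start=1):
    print(f"--- Step {i} ---\n{step}")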
@@ -0,0 +1,290 @@
+ from abc import ABC, abstractmethod
+ from typing import List, Optional, Union, Dict, Any
+ from enum import Enum
+ from dataclasses import dataclass
+ from pathlib import Path
+
+
+ class MessageType(Enum):
+     """Enum for different types of messages in the conversation."""
+
+     SYSTEM = "system"
+     USER = "user"
+     ASSISTANT = "assistant"
+     FUNCTION = "function"
+     TOOL = "tool"
+
+
+ @dataclass
+ class Message:
+     """Data class representing a message in the conversation."""
+
+     role: str
+     content: Union[str, dict, list]
+     timestamp: Optional[str] = None
+     message_type: Optional[MessageType] = None
+     metadata: Optional[Dict] = None
+     token_count: Optional[int] = None
+
+
+ class BaseCommunication(ABC):
+     """
+     Abstract base class defining the interface for conversation implementations.
+     This class provides the contract that all conversation implementations must follow.
+
+     Attributes:
+         system_prompt (Optional[str]): The system prompt for the conversation.
+         time_enabled (bool): Flag to enable time tracking for messages.
+         autosave (bool): Flag to enable automatic saving of conversation history.
+         save_filepath (str): File path for saving the conversation history.
+         tokenizer (Any): Tokenizer for counting tokens in messages.
+         context_length (int): Maximum number of tokens allowed in the conversation history.
+         rules (str): Rules for the conversation.
+         custom_rules_prompt (str): Custom prompt for rules.
+         user (str): The user identifier for messages.
+         auto_save (bool): Flag to enable auto-saving of conversation history.
+         save_as_yaml (bool): Flag to save conversation history as YAML.
+         save_as_json_bool (bool): Flag to save conversation history as JSON.
+         token_count (bool): Flag to enable token counting for messages.
+         cache_enabled (bool): Flag to enable prompt caching.
+     """
+
+     @staticmethod
+     def get_default_db_path(db_name: str) -> Path:
+         """Calculate the default database path in user's home directory.
+
+         Args:
+             db_name (str): Name of the database file (e.g. 'conversations.db')
+
+         Returns:
+             Path: Path object pointing to the database location
+         """
+         # Get user's home directory
+         home = Path.home()
+
+         # Create .swarms directory if it doesn't exist
+         swarms_dir = home / ".swarms" / "db"
+         swarms_dir.mkdir(parents=True, exist_ok=True)
+
+         return swarms_dir / db_name
+
+     @abstractmethod
+     def __init__(
+         self,
+         system_prompt: Optional[str] = None,
+         time_enabled: bool = False,
+         autosave: bool = False,
+         save_filepath: str = None,
+         tokenizer: Any = None,
+         context_length: int = 8192,
+         rules: str = None,
+         custom_rules_prompt: str = None,
+         user: str = "User:",
+         auto_save: bool = True,
+         save_as_yaml: bool = True,
+         save_as_json_bool: bool = False,
+         token_count: bool = True,
+         cache_enabled: bool = True,
+         *args,
+         **kwargs,
+     ):
+         """Initialize the communication interface."""
+         pass
+
+     @abstractmethod
+     def add(
+         self,
+         role: str,
+         content: Union[str, dict, list],
+         message_type: Optional[MessageType] = None,
+         metadata: Optional[Dict] = None,
+         token_count: Optional[int] = None,
+     ) -> int:
+         """Add a message to the conversation history."""
+         pass
+
+     @abstractmethod
+     def batch_add(self, messages: List[Message]) -> List[int]:
+         """Add multiple messages to the conversation history."""
+         pass
+
+     @abstractmethod
+     def delete(self, index: str):
+         """Delete a message from the conversation history."""
+         pass
+
+     @abstractmethod
+     def update(
+         self, index: str, role: str, content: Union[str, dict]
+     ):
+         """Update a message in the conversation history."""
+         pass
+
+     @abstractmethod
+     def query(self, index: str) -> Dict:
+         """Query a message in the conversation history."""
+         pass
+
+     @abstractmethod
+     def search(self, keyword: str) -> List[Dict]:
+         """Search for messages containing a keyword."""
+         pass
+
+     @abstractmethod
+     def get_str(self) -> str:
+         """Get the conversation history as a string."""
+         pass
+
+     @abstractmethod
+     def display_conversation(self, detailed: bool = False):
+         """Display the conversation history."""
+         pass
+
+     @abstractmethod
+     def export_conversation(self, filename: str):
+         """Export the conversation history to a file."""
+         pass
+
+     @abstractmethod
+     def import_conversation(self, filename: str):
+         """Import a conversation history from a file."""
+         pass
+
+     @abstractmethod
+     def count_messages_by_role(self) -> Dict[str, int]:
+         """Count messages by role."""
+         pass
+
+     @abstractmethod
+     def return_history_as_string(self) -> str:
+         """Return the conversation history as a string."""
+         pass
+
+     @abstractmethod
+     def get_messages(
+         self,
+         limit: Optional[int] = None,
+         offset: Optional[int] = None,
+     ) -> List[Dict]:
+         """Get messages with optional pagination."""
+         pass
+
+     @abstractmethod
+     def clear(self):
+         """Clear the conversation history."""
+         pass
+
+     @abstractmethod
+     def to_dict(self) -> List[Dict]:
+         """Convert the conversation history to a dictionary."""
+         pass
+
+     @abstractmethod
+     def to_json(self) -> str:
+         """Convert the conversation history to a JSON string."""
+         pass
+
+     @abstractmethod
+     def to_yaml(self) -> str:
+         """Convert the conversation history to a YAML string."""
+         pass
+
+     @abstractmethod
+     def save_as_json(self, filename: str):
+         """Save the conversation history as a JSON file."""
+         pass
+
+     @abstractmethod
+     def load_from_json(self, filename: str):
+         """Load the conversation history from a JSON file."""
+         pass
+
+     @abstractmethod
+     def save_as_yaml(self, filename: str):
+         """Save the conversation history as a YAML file."""
+         pass
+
+     @abstractmethod
+     def load_from_yaml(self, filename: str):
+         """Load the conversation history from a YAML file."""
+         pass
+
+     @abstractmethod
+     def get_last_message(self) -> Optional[Dict]:
+         """Get the last message from the conversation history."""
+         pass
+
+     @abstractmethod
+     def get_last_message_as_string(self) -> str:
+         """Get the last message as a formatted string."""
+         pass
+
+     @abstractmethod
+     def get_messages_by_role(self, role: str) -> List[Dict]:
+         """Get all messages from a specific role."""
+         pass
+
+     @abstractmethod
+     def get_conversation_summary(self) -> Dict:
+         """Get a summary of the conversation."""
+         pass
+
+     @abstractmethod
+     def get_statistics(self) -> Dict:
+         """Get statistics about the conversation."""
+         pass
+
+     @abstractmethod
+     def get_conversation_id(self) -> str:
+         """Get the current conversation ID."""
+         pass
+
+     @abstractmethod
+     def start_new_conversation(self) -> str:
+         """Start a new conversation and return its ID."""
+         pass
+
+     @abstractmethod
+     def delete_current_conversation(self) -> bool:
+         """Delete the current conversation."""
+         pass
+
+     @abstractmethod
+     def search_messages(self, query: str) -> List[Dict]:
+         """Search for messages containing specific text."""
+         pass
+
+     @abstractmethod
+     def update_message(
+         self,
+         message_id: int,
+         content: Union[str, dict, list],
+         metadata: Optional[Dict] = None,
+     ) -> bool:
+         """Update an existing message."""
+         pass
+
+     @abstractmethod
+     def get_conversation_metadata_dict(self) -> Dict:
+         """Get detailed metadata about the conversation."""
+         pass
+
+     @abstractmethod
+     def get_conversation_timeline_dict(self) -> Dict[str, List[Dict]]:
+         """Get the conversation organized by timestamps."""
+         pass
+
+     @abstractmethod
+     def get_conversation_by_role_dict(self) -> Dict[str, List[Dict]]:
+         """Get the conversation organized by roles."""
+         pass
+
+     @abstractmethod
+     def get_conversation_as_dict(self) -> Dict:
+         """Get the entire conversation as a dictionary with messages and metadata."""
+         pass
+
+     @abstractmethod
+     def truncate_memory_with_tokenizer(self):
+         """Truncate the conversation history based on token count."""
+         pass
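
Only get_default_db_path and the Message/MessageType helpers are concrete in this interface; every abstract method must be supplied by a subclass before it can be instantiated. A small sketch (illustrative, not part of this diff) exercising the concrete pieces without a subclass:

# Illustrative only (not part of this diff): the concrete pieces of the
# interface can be exercised directly.
msg = Message(role="user", content="Hello", message_type=MessageType.USER)
print(msg)  # Message(role='user', content='Hello', ...)

# Resolves to ~/.swarms/db/conversations.db and creates the directory if needed.
print(BaseCommunication.get_default_db_path("conversations.db"))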