swarms 7.7.2-py3-none-any.whl → 7.7.3-py3-none-any.whl

This diff shows the content of publicly available package versions as published to their respective public registries. It is provided for informational purposes only.
Files changed (34)
  1. swarms/prompts/ag_prompt.py +51 -19
  2. swarms/prompts/agent_system_prompts.py +13 -4
  3. swarms/prompts/multi_agent_collab_prompt.py +18 -0
  4. swarms/prompts/prompt.py +6 -10
  5. swarms/schemas/__init__.py +0 -3
  6. swarms/structs/__init__.py +2 -4
  7. swarms/structs/agent.py +201 -160
  8. swarms/structs/aop.py +8 -1
  9. swarms/structs/auto_swarm_builder.py +271 -210
  10. swarms/structs/conversation.py +22 -65
  11. swarms/structs/hiearchical_swarm.py +93 -122
  12. swarms/structs/ma_utils.py +96 -0
  13. swarms/structs/mixture_of_agents.py +20 -103
  14. swarms/structs/multi_agent_router.py +32 -95
  15. swarms/structs/output_types.py +3 -16
  16. swarms/structs/stopping_conditions.py +30 -0
  17. swarms/structs/swarm_router.py +56 -4
  18. swarms/structs/swarming_architectures.py +576 -185
  19. swarms/telemetry/main.py +1 -7
  20. swarms/tools/mcp_client.py +209 -53
  21. swarms/tools/mcp_integration.py +1 -53
  22. swarms/utils/generate_keys.py +64 -0
  23. swarms/utils/history_output_formatter.py +2 -0
  24. {swarms-7.7.2.dist-info → swarms-7.7.3.dist-info}/METADATA +98 -263
  25. {swarms-7.7.2.dist-info → swarms-7.7.3.dist-info}/RECORD +28 -32
  26. swarms/schemas/agent_input_schema.py +0 -149
  27. swarms/structs/agents_available.py +0 -87
  28. swarms/structs/graph_swarm.py +0 -612
  29. swarms/structs/queue_swarm.py +0 -193
  30. swarms/structs/swarm_builder.py +0 -395
  31. swarms/structs/swarm_output_type.py +0 -23
  32. {swarms-7.7.2.dist-info → swarms-7.7.3.dist-info}/LICENSE +0 -0
  33. {swarms-7.7.2.dist-info → swarms-7.7.3.dist-info}/WHEEL +0 -0
  34. {swarms-7.7.2.dist-info → swarms-7.7.3.dist-info}/entry_points.txt +0 -0
swarms/structs/graph_swarm.py (file removed)
@@ -1,612 +0,0 @@
-import asyncio
-import json
-import time
-from concurrent.futures import ThreadPoolExecutor
-from datetime import datetime
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-
-import networkx as nx
-from loguru import logger
-from pydantic import BaseModel, Field
-
-from swarms.structs.agent import Agent
-from swarms.utils.auto_download_check_packages import (
-    auto_check_and_download_package,
-)
-
-
-class AgentOutput(BaseModel):
-    """Structured output from an agent."""
-
-    agent_name: str
-    timestamp: float = Field(default_factory=time.time)
-    output: Any
-    execution_time: float
-    error: Optional[str] = None
-    metadata: Dict = Field(default_factory=dict)
-
-
-class SwarmOutput(BaseModel):
-    """Structured output from the entire swarm."""
-
-    timestamp: float = Field(default_factory=time.time)
-    outputs: Dict[str, AgentOutput]
-    execution_time: float
-    success: bool
-    error: Optional[str] = None
-    metadata: Dict = Field(default_factory=dict)
-
-
-class SwarmMemory:
-    """Vector-based memory system for GraphSwarm using ChromaDB."""
-
-    def __init__(self, collection_name: str = "swarm_memories"):
-        """Initialize SwarmMemory with ChromaDB."""
-
-        try:
-            import chromadb
-        except ImportError:
-            auto_check_and_download_package(
-                "chromadb", package_manager="pip", upgrade=True
-            )
-            import chromadb
-
-        self.client = chromadb.Client()
-
-        # Get or create collection
-        self.collection = self.client.get_or_create_collection(
-            name=collection_name,
-            metadata={"description": "GraphSwarm execution memories"},
-        )
-
-    def store_execution(self, task: str, result: SwarmOutput):
-        """Store execution results in vector memory."""
-        try:
-            # Create metadata
-            metadata = {
-                "timestamp": datetime.now().isoformat(),
-                "success": result.success,
-                "execution_time": result.execution_time,
-                "agent_sequence": json.dumps(
-                    [name for name in result.outputs.keys()]
-                ),
-                "error": result.error if result.error else "",
-            }
-
-            # Create document from outputs
-            document = {
-                "task": task,
-                "outputs": json.dumps(
-                    {
-                        name: {
-                            "output": str(output.output),
-                            "execution_time": output.execution_time,
-                            "error": output.error,
-                        }
-                        for name, output in result.outputs.items()
-                    }
-                ),
-            }
-
-            # Store in ChromaDB
-            self.collection.add(
-                documents=[json.dumps(document)],
-                metadatas=[metadata],
-                ids=[f"exec_{datetime.now().timestamp()}"],
-            )
-
-            print("added to database")
-
-            logger.info(f"Stored execution in memory: {task}")
-
-        except Exception as e:
-            logger.error(
-                f"Failed to store execution in memory: {str(e)}"
-            )
-
-    def get_similar_executions(self, task: str, limit: int = 5):
-        """Retrieve similar past executions."""
-        try:
-            # Query ChromaDB for similar executions
-            results = self.collection.query(
-                query_texts=[task],
-                n_results=limit,
-                include=["documents", "metadatas"],
-            )
-
-            print(results)
-
-            if not results["documents"]:
-                return []
-
-            # Process results
-            executions = []
-            for doc, metadata in zip(
-                results["documents"][0], results["metadatas"][0]
-            ):
-                doc_dict = json.loads(doc)
-                executions.append(
-                    {
-                        "task": doc_dict["task"],
-                        "outputs": json.loads(doc_dict["outputs"]),
-                        "success": metadata["success"],
-                        "execution_time": metadata["execution_time"],
-                        "agent_sequence": json.loads(
-                            metadata["agent_sequence"]
-                        ),
-                        "timestamp": metadata["timestamp"],
-                    }
-                )
-
-            return executions
-
-        except Exception as e:
-            logger.error(
-                f"Failed to retrieve similar executions: {str(e)}"
-            )
-            return []
-
-    def get_optimal_sequence(self, task: str) -> Optional[List[str]]:
-        """Get the most successful agent sequence for similar tasks."""
-        similar_executions = self.get_similar_executions(task)
-        print(f"similar_executions {similar_executions}")
-
-        if not similar_executions:
-            return None
-
-        # Sort by success and execution time
-        successful_execs = [
-            ex for ex in similar_executions if ex["success"]
-        ]
-
-        if not successful_execs:
-            return None
-
-        # Return sequence from most successful execution
-        return successful_execs[0]["agent_sequence"]
-
-    def clear_memory(self):
-        """Clear all memories."""
-        self.client.delete_collection(self.collection.name)
-        self.collection = self.client.get_or_create_collection(
-            name=self.collection.name
-        )
-
-
-class GraphSwarm:
-    """
-    Enhanced framework for creating and managing swarms of collaborative agents.
-    """
-
-    def __init__(
-        self,
-        name: str = "graph-swarm-01",
-        description: str = "Graph swarm : build your own graph of agents",
-        agents: Union[
-            List[Agent], List[Tuple[Agent, List[str]]], List[Callable]
-        ] = None,
-        max_workers: Optional[int] = None,
-        swarm_name: str = "Collaborative Agent Swarm",
-        memory_collection: str = "swarm_memory",
-        *args,
-        **kwargs,
-    ):
-        """Initialize GraphSwarm."""
-        self.name = name
-        self.description = description
-        self.graph = nx.DiGraph()
-        self.agents: Dict[str, Agent] = {}
-        self.dependencies: Dict[str, List[str]] = {}
-        self.executor = ThreadPoolExecutor(max_workers=max_workers)
-        self.swarm_name = swarm_name
-        self.memory_collection = memory_collection
-        self.memory = SwarmMemory(collection_name=memory_collection)
-
-        if agents:
-            self.initialize_agents(agents)
-
-        logger.info(f"Initialized GraphSwarm: {swarm_name}")
-
-    def initialize_agents(
-        self,
-        agents: Union[List[Agent], List[Tuple[Agent, List[str]]]],
-    ):
-        """Initialize agents and their dependencies."""
-        try:
-            # Handle list of Agents or (Agent, dependencies) tuples
-            for item in agents:
-                if isinstance(item, tuple):
-                    agent, dependencies = item
-                else:
-                    agent, dependencies = item, []
-
-                if not isinstance(agent, Agent):
-                    raise ValueError(
-                        f"Expected Agent object, got {type(agent)}"
-                    )
-
-                self.agents[agent.agent_name] = agent
-                self.dependencies[agent.agent_name] = dependencies
-                self.graph.add_node(agent.agent_name, agent=agent)
-
-                # Add dependencies
-                for dep in dependencies:
-                    if dep not in self.agents:
-                        raise ValueError(
-                            f"Dependency {dep} not found for agent {agent.agent_name}"
-                        )
-                    self.graph.add_edge(dep, agent.agent_name)
-
-            self._validate_graph()
-
-        except Exception as e:
-            logger.error(f"Failed to initialize agents: {str(e)}")
-            raise
-
-    def _validate_graph(self):
-        """Validate the agent dependency graph."""
-        if not self.graph.nodes():
-            raise ValueError("No agents added to swarm")
-
-        if not nx.is_directed_acyclic_graph(self.graph):
-            cycles = list(nx.simple_cycles(self.graph))
-            raise ValueError(
-                f"Agent dependency graph contains cycles: {cycles}"
-            )
-
-    def _get_agent_role_description(self, agent_name: str) -> str:
-        """Generate a description of the agent's role in the swarm."""
-        predecessors = list(self.graph.predecessors(agent_name))
-        successors = list(self.graph.successors(agent_name))
-        position = (
-            "initial"
-            if not predecessors
-            else ("final" if not successors else "intermediate")
-        )
-
-        role = f"""You are {agent_name}, a specialized agent in the {self.swarm_name}.
-Position: {position} agent in the workflow
-
-Your relationships:"""
-
-        if predecessors:
-            role += (
-                f"\nYou receive input from: {', '.join(predecessors)}"
-            )
-        if successors:
-            role += f"\nYour output will be used by: {', '.join(successors)}"
-
-        return role
-
-    def _generate_workflow_context(self) -> str:
-        """Generate a description of the entire workflow."""
-        execution_order = list(nx.topological_sort(self.graph))
-
-        workflow = f"""Workflow Overview of {self.swarm_name}:
-
-Processing Order:
-{' -> '.join(execution_order)}
-
-Agent Roles:
-"""
-
-        for agent_name in execution_order:
-            predecessors = list(self.graph.predecessors(agent_name))
-            successors = list(self.graph.successors(agent_name))
-
-            workflow += f"\n\n{agent_name}:"
-            if predecessors:
-                workflow += (
-                    f"\n- Receives from: {', '.join(predecessors)}"
-                )
-            if successors:
-                workflow += f"\n- Sends to: {', '.join(successors)}"
-            if not predecessors and not successors:
-                workflow += "\n- Independent agent"
-
-        return workflow
-
-    def _build_agent_prompt(
-        self, agent_name: str, task: str, context: Dict = None
-    ) -> str:
-        """Build a comprehensive prompt for the agent including role and context."""
-        prompt_parts = [
-            self._get_agent_role_description(agent_name),
-            "\nWorkflow Context:",
-            self._generate_workflow_context(),
-            "\nYour Task:",
-            task,
-        ]
-
-        if context:
-            prompt_parts.extend(
-                ["\nContext from Previous Agents:", str(context)]
-            )
-
-        prompt_parts.extend(
-            [
-                "\nInstructions:",
-                "1. Process the task according to your role",
-                "2. Consider the input from previous agents when available",
-                "3. Provide clear, structured output",
-                "4. Remember that your output will be used by subsequent agents",
-                "\nResponse Guidelines:",
-                "- Provide clear, well-organized output",
-                "- Include relevant details and insights",
-                "- Highlight key findings",
-                "- Flag any uncertainties or issues",
-            ]
-        )
-
-        return "\n".join(prompt_parts)
-
-    async def _execute_agent(
-        self, agent_name: str, task: str, context: Dict = None
-    ) -> AgentOutput:
-        """Execute a single agent."""
-        start_time = time.time()
-        agent = self.agents[agent_name]
-
-        try:
-            # Build comprehensive prompt
-            full_prompt = self._build_agent_prompt(
-                agent_name, task, context
-            )
-            logger.debug(f"Prompt for {agent_name}:\n{full_prompt}")
-
-            # Execute agent
-            output = await asyncio.to_thread(agent.run, full_prompt)
-
-            return AgentOutput(
-                agent_name=agent_name,
-                output=output,
-                execution_time=time.time() - start_time,
-                metadata={
-                    "task": task,
-                    "context": context,
-                    "position_in_workflow": list(
-                        nx.topological_sort(self.graph)
-                    ).index(agent_name),
-                },
-            )
-
-        except Exception as e:
-            logger.error(
-                f"Error executing agent {agent_name}: {str(e)}"
-            )
-            return AgentOutput(
-                agent_name=agent_name,
-                output=None,
-                execution_time=time.time() - start_time,
-                error=str(e),
-                metadata={"task": task},
-            )
-
-    async def execute(self, task: str) -> SwarmOutput:
-        """
-        Execute the entire swarm of agents with memory integration.
-
-        Args:
-            task: Initial task to execute
-
-        Returns:
-            SwarmOutput: Structured output from all agents
-        """
-        start_time = time.time()
-        outputs = {}
-        success = True
-        error = None
-
-        try:
-            # Get similar past executions
-            similar_executions = self.memory.get_similar_executions(
-                task, limit=3
-            )
-            optimal_sequence = self.memory.get_optimal_sequence(task)
-
-            # Get base execution order
-            base_execution_order = list(
-                nx.topological_sort(self.graph)
-            )
-
-            # Determine final execution order
-            if optimal_sequence and all(
-                agent in base_execution_order
-                for agent in optimal_sequence
-            ):
-                logger.info(
-                    f"Using optimal sequence from memory: {optimal_sequence}"
-                )
-                execution_order = optimal_sequence
-            else:
-                execution_order = base_execution_order
-
-            # Get historical context if available
-            historical_context = {}
-            if similar_executions:
-                best_execution = similar_executions[0]
-                if best_execution["success"]:
-                    historical_context = {
-                        "similar_task": best_execution["task"],
-                        "previous_outputs": best_execution["outputs"],
-                        "execution_time": best_execution[
-                            "execution_time"
-                        ],
-                        "success_patterns": self._extract_success_patterns(
-                            similar_executions
-                        ),
-                    }
-
-            # Execute agents in order
-            for agent_name in execution_order:
-                try:
-                    # Get context from dependencies and history
-                    agent_context = {
-                        "dependencies": {
-                            dep: outputs[dep].output
-                            for dep in self.graph.predecessors(
-                                agent_name
-                            )
-                            if dep in outputs
-                        },
-                        "historical": historical_context,
-                        "position": execution_order.index(agent_name),
-                        "total_agents": len(execution_order),
-                    }
-
-                    # Execute agent with enhanced context
-                    output = await self._execute_agent(
-                        agent_name, task, agent_context
-                    )
-                    outputs[agent_name] = output
-
-                    # Update historical context with current execution
-                    if output.output:
-                        historical_context.update(
-                            {
-                                f"current_{agent_name}_output": output.output
-                            }
-                        )
-
-                    # Check for errors
-                    if output.error:
-                        success = False
-                        error = f"Agent {agent_name} failed: {output.error}"
-
-                        # Try to recover using memory
-                        if similar_executions:
-                            recovery_output = self._attempt_recovery(
-                                agent_name, task, similar_executions
-                            )
-                            if recovery_output:
-                                outputs[agent_name] = recovery_output
-                                success = True
-                                error = None
-                                continue
-                        break
-
-                except Exception as agent_error:
-                    logger.error(
-                        f"Error executing agent {agent_name}: {str(agent_error)}"
-                    )
-                    success = False
-                    error = f"Agent {agent_name} failed: {str(agent_error)}"
-                    break
-
-            # Create result
-            result = SwarmOutput(
-                outputs=outputs,
-                execution_time=time.time() - start_time,
-                success=success,
-                error=error,
-                metadata={
-                    "task": task,
-                    "used_optimal_sequence": optimal_sequence
-                    is not None,
-                    "similar_executions_found": len(
-                        similar_executions
-                    ),
-                    "execution_order": execution_order,
-                    "historical_context_used": bool(
-                        historical_context
-                    ),
-                },
-            )
-
-            # Store execution in memory
-            await self._store_execution_async(task, result)
-
-            return result
-
-        except Exception as e:
-            logger.error(f"Swarm execution failed: {str(e)}")
-            return SwarmOutput(
-                outputs=outputs,
-                execution_time=time.time() - start_time,
-                success=False,
-                error=str(e),
-                metadata={"task": task},
-            )
-
-    def run(self, task: str) -> SwarmOutput:
-        """Synchronous interface to execute the swarm."""
-        return asyncio.run(self.execute(task))
-
-    def _extract_success_patterns(
-        self, similar_executions: List[Dict]
-    ) -> Dict:
-        """Extract success patterns from similar executions."""
-        patterns = {}
-        successful_execs = [
-            ex for ex in similar_executions if ex["success"]
-        ]
-
-        if successful_execs:
-            patterns = {
-                "common_sequences": self._find_common_sequences(
-                    successful_execs
-                ),
-                "avg_execution_time": sum(
-                    ex["execution_time"] for ex in successful_execs
-                )
-                / len(successful_execs),
-                "successful_strategies": self._extract_strategies(
-                    successful_execs
-                ),
-            }
-
-        return patterns
-
-    def _attempt_recovery(
-        self,
-        failed_agent: str,
-        task: str,
-        similar_executions: List[Dict],
-    ) -> Optional[AgentOutput]:
-        """Attempt to recover from failure using memory."""
-        for execution in similar_executions:
-            if (
-                execution["success"]
-                and failed_agent in execution["outputs"]
-            ):
-                historical_output = execution["outputs"][failed_agent]
-
-                return AgentOutput(
-                    agent_name=failed_agent,
-                    output=historical_output["output"],
-                    execution_time=historical_output[
-                        "execution_time"
-                    ],
-                    metadata={
-                        "recovered_from_memory": True,
-                        "original_task": execution["task"],
-                    },
-                )
-        return None
-
-    async def _store_execution_async(
-        self, task: str, result: SwarmOutput
-    ):
-        """Asynchronously store execution in memory."""
-        try:
-            await asyncio.to_thread(
-                self.memory.store_execution, task, result
-            )
-        except Exception as e:
-            logger.error(
-                f"Failed to store execution in memory: {str(e)}"
-            )
-
-    def add_agent(self, agent: Agent, dependencies: List[str] = None):
-        """Add a new agent to the swarm."""
-        dependencies = dependencies or []
-        self.agents[agent.agent_name] = agent
-        self.dependencies[agent.agent_name] = dependencies
-        self.graph.add_node(agent.agent_name, agent=agent)
-
-        for dep in dependencies:
-            if dep not in self.agents:
-                raise ValueError(f"Dependency {dep} not found")
-            self.graph.add_edge(dep, agent.agent_name)
-
-        self._validate_graph()
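
The hunk above removes the GraphSwarm implementation from the package entirely. As a rough, illustrative sketch of the downstream impact (based only on the deleted source above; the Agent keyword arguments and model name are placeholders, not taken from this diff), code of the following shape ran against 7.7.2 but no longer imports on 7.7.3:

# Illustrative sketch only: this matched the 7.7.2 GraphSwarm API shown in the
# deleted source above; the import fails on 7.7.3 because the module is gone.
from swarms.structs.agent import Agent
from swarms.structs.graph_swarm import GraphSwarm  # removed in 7.7.3

# Agent keyword arguments here are placeholders; adjust to your own configuration.
researcher = Agent(agent_name="Researcher", model_name="gpt-4o-mini", max_loops=1)
writer = Agent(agent_name="Writer", model_name="gpt-4o-mini", max_loops=1)

# Agents could be passed as (agent, [dependency names]) tuples; each dependency
# became an edge pointing at the dependent agent, and execution followed a
# topological sort of the resulting graph.
swarm = GraphSwarm(
    agents=[
        (researcher, []),
        (writer, ["Researcher"]),
    ],
    swarm_name="Research Swarm",
)

result = swarm.run("Summarize recent work on multi-agent systems")  # SwarmOutput
print(result.success, result.execution_time)
for name, agent_output in result.outputs.items():
    print(name, agent_output.output)

Downstream code that still relies on GraphSwarm would need to pin an earlier version or migrate to one of the remaining swarm structures.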