swarms 7.7.1-py3-none-any.whl → 7.7.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. swarms/prompts/ag_prompt.py +51 -19
  2. swarms/prompts/agent_system_prompts.py +13 -4
  3. swarms/prompts/multi_agent_collab_prompt.py +18 -0
  4. swarms/prompts/prompt.py +6 -10
  5. swarms/schemas/__init__.py +0 -3
  6. swarms/structs/__init__.py +3 -8
  7. swarms/structs/agent.py +211 -163
  8. swarms/structs/aop.py +8 -1
  9. swarms/structs/auto_swarm_builder.py +271 -210
  10. swarms/structs/conversation.py +23 -56
  11. swarms/structs/hiearchical_swarm.py +93 -122
  12. swarms/structs/ma_utils.py +96 -0
  13. swarms/structs/mixture_of_agents.py +20 -103
  14. swarms/structs/{multi_agent_orchestrator.py → multi_agent_router.py} +32 -95
  15. swarms/structs/output_types.py +3 -16
  16. swarms/structs/stopping_conditions.py +30 -0
  17. swarms/structs/swarm_router.py +57 -5
  18. swarms/structs/swarming_architectures.py +576 -185
  19. swarms/telemetry/main.py +6 -2
  20. swarms/tools/mcp_client.py +209 -53
  21. swarms/tools/mcp_integration.py +1 -53
  22. swarms/utils/formatter.py +15 -1
  23. swarms/utils/generate_keys.py +64 -0
  24. swarms/utils/history_output_formatter.py +2 -0
  25. {swarms-7.7.1.dist-info → swarms-7.7.3.dist-info}/METADATA +98 -263
  26. {swarms-7.7.1.dist-info → swarms-7.7.3.dist-info}/RECORD +29 -38
  27. swarms/schemas/agent_input_schema.py +0 -149
  28. swarms/structs/agents_available.py +0 -87
  29. swarms/structs/async_workflow.py +0 -818
  30. swarms/structs/graph_swarm.py +0 -612
  31. swarms/structs/octotools.py +0 -844
  32. swarms/structs/pulsar_swarm.py +0 -469
  33. swarms/structs/queue_swarm.py +0 -193
  34. swarms/structs/swarm_builder.py +0 -395
  35. swarms/structs/swarm_load_balancer.py +0 -344
  36. swarms/structs/swarm_output_type.py +0 -23
  37. swarms/structs/talk_hier.py +0 -729
  38. {swarms-7.7.1.dist-info → swarms-7.7.3.dist-info}/LICENSE +0 -0
  39. {swarms-7.7.1.dist-info → swarms-7.7.3.dist-info}/WHEEL +0 -0
  40. {swarms-7.7.1.dist-info → swarms-7.7.3.dist-info}/entry_points.txt +0 -0
swarms/structs/agent.py CHANGED
@@ -1,4 +1,3 @@
- import concurrent.futures
  import asyncio
  import json
  import logging
@@ -46,12 +45,7 @@ from swarms.structs.safe_loading import (
  )
  from swarms.telemetry.main import log_agent_data
  from swarms.tools.base_tool import BaseTool
-
- # from swarms.tools.mcp_integration import (
- #     MCPServerSseParams,
- #     batch_mcp_flow,
- #     mcp_flow_get_tool_schema,
- # )
+ from swarms.tools.mcp_integration import MCPServerSseParams
  from swarms.tools.tool_parse_exec import parse_and_execute_json
  from swarms.utils.any_to_str import any_to_str
  from swarms.utils.data_to_text import data_to_text
@@ -59,11 +53,18 @@ from swarms.utils.file_processing import create_file_in_folder
  from swarms.utils.formatter import formatter
  from swarms.utils.history_output_formatter import (
      history_output_formatter,
-     HistoryOutputType,
  )
  from swarms.utils.litellm_tokenizer import count_tokens
  from swarms.utils.litellm_wrapper import LiteLLM
  from swarms.utils.pdf_to_text import pdf_to_text
+ from swarms.structs.output_types import OutputType
+ from swarms.utils.str_to_dict import str_to_dict
+ from swarms.tools.mcp_client import (
+     execute_mcp_tool,
+     list_tools_for_multiple_urls,
+     list_all,
+     find_and_execute_tool,
+ )
 
 
  # Utils
@@ -335,7 +336,7 @@ class Agent:
          # [Tools]
          custom_tools_prompt: Optional[Callable] = None,
          tool_schema: ToolUsageType = None,
-         output_type: HistoryOutputType = "str",
+         output_type: OutputType = "str-all-except-first",
          function_calling_type: str = "json",
          output_cleaner: Optional[Callable] = None,
          function_calling_format_type: Optional[str] = "OpenAI",
@@ -392,7 +393,9 @@ class Agent:
          role: agent_roles = "worker",
          no_print: bool = False,
          tools_list_dictionary: Optional[List[Dict[str, Any]]] = None,
-         # mcp_servers: List[MCPServerSseParams] = [],
+         mcp_servers: MCPServerSseParams = None,
+         mcp_url: str = None,
+         mcp_urls: List[str] = None,
          *args,
          **kwargs,
      ):
@@ -512,105 +515,93 @@ class Agent:
          self.role = role
          self.no_print = no_print
          self.tools_list_dictionary = tools_list_dictionary
-         # self.mcp_servers = mcp_servers
+         self.mcp_servers = mcp_servers
+         self.mcp_url = mcp_url
+         self.mcp_urls = mcp_urls
 
          self._cached_llm = (
              None # Add this line to cache the LLM instance
          )
-         self._default_model = (
-             "gpt-4o-mini" # Move default model name here
-         )
 
+         self.short_memory = self.short_memory_init()
+
+         # Initialize the feedback
+         self.feedback = []
+
+         # self.init_handling()
+         # Define tasks as pairs of (function, condition)
+         # Each task will only run if its condition is True
+         self.setup_config()
+
+         if exists(self.docs_folder):
+             self.get_docs_from_doc_folders()
+
+         if exists(self.tools):
+             self.handle_tool_init()
+
+         if exists(self.tool_schema) or exists(self.list_base_models):
+             self.handle_tool_schema_ops()
+
+         if exists(self.sop) or exists(self.sop_list):
+             self.handle_sop_ops()
+
+         # Run sequential operations after all concurrent tasks are done
+         # self.agent_output = self.agent_output_model()
+         log_agent_data(self.to_dict())
+
+         if self.llm is None:
+             self.llm = self.llm_handling()
+
+         if self.mcp_url or self.mcp_servers is not None:
+             self.add_mcp_tools_to_memory()
+
+     def short_memory_init(self):
          if (
              self.agent_name is not None
              or self.agent_description is not None
          ):
-             prompt = f"Your Name: {self.agent_name} \n\n Your Description: {self.agent_description} \n\n {system_prompt}"
+             prompt = f"\n Your Name: {self.agent_name} \n\n Your Description: {self.agent_description} \n\n {self.system_prompt}"
          else:
-             prompt = system_prompt
+             prompt = self.system_prompt
 
          # Initialize the short term memory
          self.short_memory = Conversation(
              system_prompt=prompt,
              time_enabled=False,
-             user=user_name,
-             rules=rules,
+             user=self.user_name,
+             rules=self.rules,
              token_count=False,
-             *args,
-             **kwargs,
          )
 
-         # Initialize the feedback
-         self.feedback = []
-
-         # Initialize the executor
-         self.executor = ThreadPoolExecutor(
-             max_workers=executor_workers
-         )
-
-         self.init_handling()
+         return self.short_memory
 
      def init_handling(self):
          # Define tasks as pairs of (function, condition)
          # Each task will only run if its condition is True
-         tasks = [
-             (self.setup_config, True), # Always run setup_config
-             (
-                 self.get_docs_from_doc_folders,
-                 exists(self.docs_folder),
-             ),
-             (self.handle_tool_init, True), # Always run tool init
-             (
-                 self.handle_tool_schema_ops,
-                 exists(self.tool_schema)
-                 or exists(self.list_base_models),
-             ),
-             (
-                 self.handle_sop_ops,
-                 exists(self.sop) or exists(self.sop_list),
-             ),
-         ]
-
-         # Filter out tasks whose conditions are False
-         filtered_tasks = [
-             task for task, condition in tasks if condition
-         ]
-
-         # Execute all tasks concurrently
-         with concurrent.futures.ThreadPoolExecutor(
-             max_workers=os.cpu_count() * 4
-         ) as executor:
-             # Map tasks to futures and collect results
-             results = {}
-             future_to_task = {
-                 executor.submit(task): task.__name__
-                 for task in filtered_tasks
-             }
+         self.setup_config()
 
-             # Wait for each future to complete and collect results/exceptions
-             for future in concurrent.futures.as_completed(
-                 future_to_task
-             ):
-                 task_name = future_to_task[future]
-                 try:
-                     result = future.result()
-                     results[task_name] = result
-                     logging.info(
-                         f"Task {task_name} completed successfully"
-                     )
-                 except Exception as e:
-                     results[task_name] = None
-                     logging.error(
-                         f"Task {task_name} failed with error: {e}"
-                     )
+         if exists(self.docs_folder):
+             self.get_docs_from_doc_folders()
+
+         if exists(self.tools):
+             self.handle_tool_init()
+
+         if exists(self.tool_schema) or exists(self.list_base_models):
+             self.handle_tool_schema_ops()
+
+         if exists(self.sop) or exists(self.sop_list):
+             self.handle_sop_ops()
 
          # Run sequential operations after all concurrent tasks are done
-         self.agent_output = self.agent_output_model()
+         # self.agent_output = self.agent_output_model()
          log_agent_data(self.to_dict())
 
          if self.llm is None:
             self.llm = self.llm_handling()
 
+         if self.mcp_url or self.mcp_servers is not None:
+             self.add_mcp_tools_to_memory()
+
      def agent_output_model(self):
          # Many steps
          id = agent_id()
@@ -637,10 +628,7 @@ class Agent:
              return self._cached_llm
 
          if self.model_name is None:
-             logger.warning(
-                 f"Model name is not provided, using {self._default_model}. You can configure any model from litellm if desired."
-             )
-             self.model_name = self._default_model
+             self.model_name = "gpt-4o-mini"
 
          try:
              # Simplify initialization logic
@@ -719,68 +707,122 @@ class Agent:
              tool.__name__: tool for tool in self.tools
          }
 
-     # def mcp_execution_flow(self, response: any):
-     #     """
-     #     Executes the MCP (Model Context Protocol) flow based on the provided response.
-
-     #     This method takes a response, converts it from a string to a dictionary format,
-     #     and checks for the presence of a tool name or a name in the response. If either
-     #     is found, it retrieves the tool name and proceeds to call the batch_mcp_flow
-     #     function to execute the corresponding tool actions.
-
-     #     Args:
-     #         response (any): The response to be processed, which can be in string format
-     #         that represents a dictionary.
-
-     #     Returns:
-     #         The output from the batch_mcp_flow function, which contains the results of
-     #         the tool execution. If an error occurs during processing, it logs the error
-     #         and returns None.
-
-     #     Raises:
-     #         Exception: Logs any exceptions that occur during the execution flow.
-     #     """
-     #     try:
-     #         response = str_to_dict(response)
-
-     #         tool_output = batch_mcp_flow(
-     #             self.mcp_servers,
-     #             function_call=response,
-     #         )
-
-     #         return tool_output
-     #     except Exception as e:
-     #         logger.error(f"Error in mcp_execution_flow: {e}")
-     #         return None
-
-     # def mcp_tool_handling(self):
-     #     """
-     #     Handles the retrieval of tool schemas from the MCP servers.
-
-     #     This method iterates over the list of MCP servers, retrieves the tool schema
-     #     for each server using the mcp_flow_get_tool_schema function, and compiles
-     #     these schemas into a list. The resulting list is stored in the
-     #     tools_list_dictionary attribute.
-
-     #     Returns:
-     #         list: A list of tool schemas retrieved from the MCP servers. If an error
-     #         occurs during the retrieval process, it logs the error and returns None.
-
-     #     Raises:
-     #         Exception: Logs any exceptions that occur during the tool handling process.
-     #     """
-     #     try:
-     #         self.tools_list_dictionary = []
-
-     #         for mcp_server in self.mcp_servers:
-     #             tool_schema = mcp_flow_get_tool_schema(mcp_server)
-     #             self.tools_list_dictionary.append(tool_schema)
-
-     #         print(self.tools_list_dictionary)
-     #         return self.tools_list_dictionary
-     #     except Exception as e:
-     #         logger.error(f"Error in mcp_tool_handling: {e}")
-     #         return None
+     def add_mcp_tools_to_memory(self):
+         """
+         Adds MCP tools to the agent's short-term memory.
+
+         This function checks for either a single MCP URL or multiple MCP URLs and adds the available tools
+         to the agent's memory. The tools are listed in JSON format.
+
+         Raises:
+             Exception: If there's an error accessing the MCP tools
+         """
+         try:
+             if self.mcp_url is not None:
+                 tools_available = list_all(
+                     self.mcp_url, output_type="json"
+                 )
+                 self.short_memory.add(
+                     role="Tools Available",
+                     content=f"\n{tools_available}",
+                 )
+
+             elif (
+                 self.mcp_url is None
+                 and self.mcp_urls is not None
+                 and len(self.mcp_urls) > 1
+             ):
+                 tools_available = list_tools_for_multiple_urls(
+                     urls=self.mcp_urls,
+                     output_type="json",
+                 )
+
+                 self.short_memory.add(
+                     role="Tools Available",
+                     content=f"\n{tools_available}",
+                 )
+         except Exception as e:
+             logger.error(f"Error adding MCP tools to memory: {e}")
+             raise e
+
+     def _single_mcp_tool_handling(self, response: any):
+         """
+         Handles execution of a single MCP tool.
+
+         Args:
+             response (str): The tool response to process
+
+         Raises:
+             Exception: If there's an error executing the tool
+         """
+         try:
+             if isinstance(response, dict):
+                 result = response
+                 print(type(result))
+             else:
+                 result = str_to_dict(response)
+                 print(type(result))
+
+             output = execute_mcp_tool(
+                 url=self.mcp_url,
+                 parameters=result,
+             )
+
+             self.short_memory.add(
+                 role="Tool Executor", content=str(output)
+             )
+         except Exception as e:
+             logger.error(f"Error in single MCP tool handling: {e}")
+             raise e
+
+     def _multiple_mcp_tool_handling(self, response: any):
+         """
+         Handles execution of multiple MCP tools.
+
+         Args:
+             response (any): The tool response to process
+
+         Raises:
+             Exception: If there's an error executing the tools
+         """
+         try:
+             if isinstance(response, str):
+                 response = str_to_dict(response)
+
+             execution = find_and_execute_tool(
+                 self.mcp_urls,
+                 response["name"],
+                 parameters=response,
+             )
+
+             self.short_memory.add(
+                 role="Tool Executor", content=str(execution)
+             )
+         except Exception as e:
+             logger.error(f"Error in multiple MCP tool handling: {e}")
+             raise e
+
+     def mcp_tool_handling(self, response: any):
+         """
+         Main handler for MCP tool execution.
+
+         Args:
+             response (any): The tool response to process
+
+         Raises:
+             ValueError: If no MCP URL or MCP Servers are provided
+             Exception: If there's an error in tool handling
+         """
+         try:
+             # if self.mcp_url is not None:
+             self._single_mcp_tool_handling(response)
+             # elif self.mcp_url is None and len(self.mcp_servers) > 1:
+             #     self._multiple_mcp_tool_handling(response)
+             # else:
+             #     raise ValueError("No MCP URL or MCP Servers provided")
+         except Exception as e:
+             logger.error(f"Error in mcp_tool_handling: {e}")
+             raise e
 
      def setup_config(self):
          # The max_loops will be set dynamically if the dynamic_loop
@@ -1095,6 +1137,15 @@ class Agent:
                  ) as executor:
                      executor.map(lambda f: f(), update_tasks)
 
+                 ####### MCP TOOL HANDLING #######
+                 if (
+                     self.mcp_servers
+                     and self.tools_list_dictionary is not None
+                 ):
+                     self.mcp_tool_handling(response)
+
+                 ####### MCP TOOL HANDLING #######
+
                  # Check and execute tools
                  if self.tools is not None:
                      out = self.parse_and_execute_tools(
@@ -1194,8 +1245,7 @@
              if self.autosave is True:
                  log_agent_data(self.to_dict())
 
-             if self.autosave is True:
-                 self.save()
+                 self.save()
 
              # log_agent_data(self.to_dict())
 
@@ -1223,7 +1273,7 @@
          except KeyboardInterrupt as error:
              self._handle_run_error(error)
 
-     def _handle_run_error(self, error: any):
+     def __handle_run_error(self, error: any):
          log_agent_data(self.to_dict())
 
          if self.autosave is True:
@@ -1234,6 +1284,14 @@
          )
          raise error
 
+     def _handle_run_error(self, error: any):
+         process_thread = threading.Thread(
+             target=self.__handle_run_error,
+             args=(error,),
+             daemon=True,
+         )
+         process_thread.start()
+
      async def arun(
          self,
          task: Optional[str] = None,
@@ -1732,10 +1790,11 @@ class Agent:
          )
 
          # Reinitialize executor if needed
-         if not hasattr(self, "executor") or self.executor is None:
-             self.executor = ThreadPoolExecutor(
-                 max_workers=os.cpu_count()
-             )
+         # if not hasattr(self, "executor") or self.executor is None:
+         with ThreadPoolExecutor(
+             max_workers=os.cpu_count()
+         ) as executor:
+             self.executor = executor
 
          # # Reinitialize tool structure if needed
          # if hasattr(self, 'tools') and (self.tools or getattr(self, 'list_base_models', None)):
@@ -2487,12 +2546,7 @@ class Agent:
          self,
          task: Optional[Union[str, Any]] = None,
         img: Optional[str] = None,
-         device: Optional[str] = "cpu", # gpu
-         device_id: Optional[int] = 0,
-         all_cores: Optional[bool] = True,
          scheduled_run_date: Optional[datetime] = None,
-         do_not_use_cluster_ops: Optional[bool] = True,
-         all_gpus: Optional[bool] = False,
          *args,
          **kwargs,
      ) -> Any:
@@ -2532,7 +2586,6 @@
              ) # Sleep for a short period to avoid busy waiting
 
          try:
-             # If cluster ops disabled, run directly
              output = self._run(
                  task=task,
                  img=img,
@@ -2542,11 +2595,6 @@
 
              return output
 
-             # if self.tools_list_dictionary is not None:
-             #     return str_to_dict(output)
-             # else:
-             #     return output
-
 
          except ValueError as e:
              self._handle_run_error(e)
swarms/structs/aop.py CHANGED
@@ -27,6 +27,7 @@ class AOP:
          name: Optional[str] = None,
          description: Optional[str] = None,
          url: Optional[str] = "http://localhost:8000/sse",
+         urls: Optional[list[str]] = None,
          *args,
          **kwargs,
      ):
@@ -44,7 +45,7 @@
          self.name = name
          self.description = description
          self.url = url
-
+         self.urls = urls
          self.tools = {}
          self.swarms = {}
 
@@ -527,6 +528,12 @@
                  return tool
          return None
 
+     def list_tools_for_multiple_urls(self):
+         out = []
+         for url in self.urls:
+             out.append(self.list_all(url))
+         return out
+
      def search_if_tool_exists(self, name: str):
          out = self.list_all()
          for tool in out:
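
Note on the AOP changes above: the new urls parameter stores multiple MCP endpoints, and list_tools_for_multiple_urls() simply calls list_all() once per configured URL. An illustrative sketch under the same caveats (endpoint URLs and the instance name are placeholders; each URL must point at a running MCP SSE server, and anything beyond what the diff shows is an assumption):

    from swarms.structs.aop import AOP

    aop = AOP(
        name="mcp-tool-inventory",  # hypothetical name
        urls=[
            "http://localhost:8000/sse",
            "http://localhost:8001/sse",
        ],
    )

    # Returns one list_all() result per URL, in the order the URLs were given.
    tools_per_url = aop.list_tools_for_multiple_urls()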