swarms 7.7.9__py3-none-any.whl → 7.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
swarms/structs/agent.py CHANGED
@@ -23,7 +23,6 @@ import yaml
  from loguru import logger
  from pydantic import BaseModel

- from swarms.agents.agent_print import agent_print
  from swarms.agents.ape_agent import auto_generate_prompt
  from swarms.artifacts.main_artifact import Artifact
  from swarms.prompts.agent_system_prompts import AGENT_SYSTEM_PROMPT_3
@@ -31,6 +30,10 @@ from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
  MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1,
  )
  from swarms.prompts.tools import tool_sop_prompt
+ from swarms.schemas.agent_mcp_errors import (
+     AgentMCPConnectionError,
+     AgentMCPToolError,
+ )
  from swarms.schemas.agent_step_schemas import ManySteps, Step
  from swarms.schemas.base_schemas import (
      AgentChatCompletionResponse,
@@ -46,14 +49,9 @@ from swarms.structs.safe_loading import (
  )
  from swarms.telemetry.main import log_agent_data
  from swarms.tools.base_tool import BaseTool
- from swarms.tools.mcp_client import (
-     execute_mcp_tool,
-     find_and_execute_tool,
-     list_all,
-     list_tools_for_multiple_urls,
+ from swarms.tools.py_func_to_openai_func_str import (
+     convert_multiple_functions_to_openai_function_schema,
  )
- from swarms.tools.mcp_integration import MCPServerSseParams
- from swarms.tools.tool_parse_exec import parse_and_execute_json
  from swarms.utils.any_to_str import any_to_str
  from swarms.utils.data_to_text import data_to_text
  from swarms.utils.file_processing import create_file_in_folder
@@ -64,10 +62,22 @@ from swarms.utils.history_output_formatter import (
  from swarms.utils.litellm_tokenizer import count_tokens
  from swarms.utils.litellm_wrapper import LiteLLM
  from swarms.utils.pdf_to_text import pdf_to_text
- from swarms.utils.str_to_dict import str_to_dict
  from swarms.prompts.react_base_prompt import REACT_SYS_PROMPT
  from swarms.prompts.max_loop_prompt import generate_reasoning_prompt
  from swarms.prompts.safety_prompt import SAFETY_PROMPT
+ from swarms.structs.ma_utils import set_random_models_for_agents
+ from swarms.tools.mcp_client_call import (
+     execute_tool_call_simple,
+     get_mcp_tools_sync,
+ )
+ from swarms.schemas.mcp_schemas import (
+     MCPConnection,
+ )
+ from swarms.utils.index import (
+     exists,
+     format_data_structure,
+     format_dict_to_string,
+ )


  # Utils
@@ -89,10 +99,6 @@ def agent_id():
      return uuid.uuid4().hex


- def exists(val):
-     return val is not None
-
-
  # Agent output types
  ToolUsageType = Union[BaseModel, Dict[str, Any]]

@@ -358,9 +364,9 @@ class Agent:
          log_directory: str = None,
          tool_system_prompt: str = tool_sop_prompt(),
          max_tokens: int = 4096,
-         frequency_penalty: float = 0.0,
-         presence_penalty: float = 0.0,
-         temperature: float = 0.1,
+         frequency_penalty: float = 0.8,
+         presence_penalty: float = 0.6,
+         temperature: float = 0.5,
          workspace_dir: str = "agent_workspace",
          timeout: Optional[int] = None,
          # short_memory: Optional[str] = None,
@@ -374,7 +380,6 @@ class Agent:
              "%Y-%m-%d %H:%M:%S", time.localtime()
          ),
          agent_output: ManySteps = None,
-         executor_workers: int = os.cpu_count(),
          data_memory: Optional[Callable] = None,
          load_yaml_path: str = None,
          auto_generate_prompt: bool = False,
@@ -395,11 +400,13 @@ class Agent:
          role: agent_roles = "worker",
          no_print: bool = False,
          tools_list_dictionary: Optional[List[Dict[str, Any]]] = None,
-         mcp_servers: MCPServerSseParams = None,
-         mcp_url: str = None,
+         mcp_url: Optional[Union[str, MCPConnection]] = None,
          mcp_urls: List[str] = None,
          react_on: bool = False,
          safety_prompt_on: bool = False,
+         random_models_on: bool = False,
+         mcp_config: Optional[MCPConnection] = None,
+         top_p: float = 0.90,
          *args,
          **kwargs,
      ):
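
Given the new constructor signature above, a minimal usage sketch follows; the agent name, model, and MCP URL are hypothetical placeholders, not values shipped with the package:

from swarms.structs.agent import Agent

# Sketch only: mcp_url may now be a plain SSE URL string (an MCPConnection
# object from swarms.schemas.mcp_schemas can be passed via mcp_config instead).
agent = Agent(
    agent_name="example-agent",               # hypothetical name
    model_name="gpt-4o-mini",                 # default model per llm_handling()
    mcp_url="http://localhost:8000/sse",      # assumed local MCP server endpoint
    top_p=0.90,
    random_models_on=False,
)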
@@ -416,6 +423,7 @@ class Agent:
          self.stopping_token = stopping_token
          self.interactive = interactive
          self.dashboard = dashboard
+         self.saved_state_path = saved_state_path
          self.return_history = return_history
          self.dynamic_temperature_enabled = dynamic_temperature_enabled
          self.dynamic_loops = dynamic_loops
@@ -518,11 +526,13 @@ class Agent:
          self.role = role
          self.no_print = no_print
          self.tools_list_dictionary = tools_list_dictionary
-         self.mcp_servers = mcp_servers
          self.mcp_url = mcp_url
          self.mcp_urls = mcp_urls
          self.react_on = react_on
          self.safety_prompt_on = safety_prompt_on
+         self.random_models_on = random_models_on
+         self.mcp_config = mcp_config
+         self.top_p = top_p

          self._cached_llm = (
              None  # Add this line to cache the LLM instance
@@ -534,41 +544,58 @@ class Agent:
          self.feedback = []

          # self.init_handling()
-         # Define tasks as pairs of (function, condition)
-         # Each task will only run if its condition is True
          self.setup_config()

          if exists(self.docs_folder):
              self.get_docs_from_doc_folders()

-         if exists(self.tools):
-             self.handle_tool_init()
-
          if exists(self.tool_schema) or exists(self.list_base_models):
              self.handle_tool_schema_ops()

          if exists(self.sop) or exists(self.sop_list):
              self.handle_sop_ops()

+         if self.max_loops >= 2:
+             self.system_prompt += generate_reasoning_prompt(
+                 self.max_loops
+             )
+
+         if self.react_on is True:
+             self.system_prompt += REACT_SYS_PROMPT
+
+         self.short_memory = self.short_memory_init()
+
          # Run sequential operations after all concurrent tasks are done
          # self.agent_output = self.agent_output_model()
          log_agent_data(self.to_dict())

+         if exists(self.tools):
+             self.tool_handling()
+
          if self.llm is None:
              self.llm = self.llm_handling()

-         if self.mcp_url or self.mcp_servers is not None:
-             self.add_mcp_tools_to_memory()
+         if self.random_models_on is True:
+             self.model_name = set_random_models_for_agents()

-         if self.react_on is True:
-             self.system_prompt += REACT_SYS_PROMPT
+     def tool_handling(self):

-         if self.max_loops >= 2:
-             self.system_prompt += generate_reasoning_prompt(
-                 self.max_loops
+         self.tool_struct = BaseTool(
+             tools=self.tools,
+             verbose=self.verbose,
+         )
+
+         # Convert all the tools into a list of dictionaries
+         self.tools_list_dictionary = (
+             convert_multiple_functions_to_openai_function_schema(
+                 self.tools
              )
+         )

-         self.short_memory = self.short_memory_init()
+         self.short_memory.add(
+             role=f"{self.agent_name}",
+             content=f"Tools available: {format_data_structure(self.tools_list_dictionary)}",
+         )

      def short_memory_init(self):
          if (
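
For context, tool_handling() above now derives the OpenAI function schemas directly from the callables passed in tools. A hedged sketch of what that implies for callers; the function and agent name below are illustrative only:

from swarms.structs.agent import Agent

def add_numbers(a: int, b: int) -> int:
    """Add two integers and return the sum."""
    # The docstring and type hints feed the generated function schema.
    return a + b

# Sketch only: the tool list is converted at init time via
# convert_multiple_functions_to_openai_function_schema().
agent = Agent(
    agent_name="calculator-agent",  # hypothetical
    model_name="gpt-4o-mini",
    tools=[add_numbers],
)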
@@ -583,7 +610,7 @@ class Agent:
              prompt += SAFETY_PROMPT

          # Initialize the short term memory
-         self.short_memory = Conversation(
+         memory = Conversation(
              system_prompt=prompt,
              time_enabled=False,
              user=self.user_name,
@@ -591,7 +618,7 @@ class Agent:
              token_count=False,
          )

-         return self.short_memory
+         return memory

      def agent_output_model(self):
          # Many steps
@@ -621,6 +648,11 @@ class Agent:
          if self.model_name is None:
              self.model_name = "gpt-4o-mini"

+         if exists(self.tools) and len(self.tools) >= 2:
+             parallel_tool_calls = True
+         else:
+             parallel_tool_calls = False
+
          try:
              # Simplify initialization logic
              common_args = {
@@ -639,10 +671,16 @@ class Agent:
                      **common_args,
                      tools_list_dictionary=self.tools_list_dictionary,
                      tool_choice="auto",
-                     parallel_tool_calls=len(
-                         self.tools_list_dictionary
-                     )
-                     > 1,
+                     parallel_tool_calls=parallel_tool_calls,
+                 )
+
+             elif self.mcp_url is not None:
+                 self._cached_llm = LiteLLM(
+                     **common_args,
+                     tools_list_dictionary=self.add_mcp_tools_to_memory(),
+                     tool_choice="auto",
+                     parallel_tool_calls=parallel_tool_calls,
+                     mcp_call=True,
                  )
              else:
                  self._cached_llm = LiteLLM(
@@ -656,48 +694,6 @@ class Agent:
              )
              return None

-     def handle_tool_init(self):
-         # Initialize the tool struct
-         if (
-             exists(self.tools)
-             or exists(self.list_base_models)
-             or exists(self.tool_schema)
-         ):
-
-             self.tool_struct = BaseTool(
-                 tools=self.tools,
-                 base_models=self.list_base_models,
-                 tool_system_prompt=self.tool_system_prompt,
-             )
-
-         if self.tools is not None:
-             logger.info(
-                 "Tools provided make sure the functions have documentation ++ type hints, otherwise tool execution won't be reliable."
-             )
-             # Add the tool prompt to the memory
-             self.short_memory.add(
-                 role="system", content=self.tool_system_prompt
-             )
-
-             # Log the tools
-             logger.info(
-                 f"Tools provided: Accessing {len(self.tools)} tools"
-             )
-
-             # Transform the tools into an openai schema
-             # self.convert_tool_into_openai_schema()
-
-             # Transform the tools into an openai schema
-             tool_dict = (
-                 self.tool_struct.convert_tool_into_openai_schema()
-             )
-             self.short_memory.add(role="system", content=tool_dict)
-
-             # Now create a function calling map for every tools
-             self.function_map = {
-                 tool.__name__: tool for tool in self.tools
-             }
-
      def add_mcp_tools_to_memory(self):
          """
          Adds MCP tools to the agent's short-term memory.
@@ -709,110 +705,23 @@ class Agent:
          Exception: If there's an error accessing the MCP tools
          """
          try:
-             if self.mcp_url is not None:
-                 tools_available = list_all(
-                     self.mcp_url, output_type="json"
-                 )
-                 self.short_memory.add(
-                     role="Tools Available",
-                     content=f"\n{tools_available}",
-                 )
-
-             elif (
-                 self.mcp_url is None
-                 and self.mcp_urls is not None
-                 and len(self.mcp_urls) > 1
-             ):
-                 tools_available = list_tools_for_multiple_urls(
-                     urls=self.mcp_urls,
-                     output_type="json",
-                 )
-
-                 self.short_memory.add(
-                     role="Tools Available",
-                     content=f"\n{tools_available}",
-                 )
-         except Exception as e:
-             logger.error(f"Error adding MCP tools to memory: {e}")
-             raise e
-
-     def _single_mcp_tool_handling(self, response: any):
-         """
-         Handles execution of a single MCP tool.
-
-         Args:
-             response (str): The tool response to process
-
-         Raises:
-             Exception: If there's an error executing the tool
-         """
-         try:
-             if isinstance(response, dict):
-                 result = response
-                 print(type(result))
+             if exists(self.mcp_url):
+                 tools = get_mcp_tools_sync(server_path=self.mcp_url)
+             elif exists(self.mcp_config):
+                 tools = get_mcp_tools_sync(connection=self.mcp_config)
+                 logger.info(f"Tools: {tools}")
              else:
-                 result = str_to_dict(response)
-                 print(type(result))
-
-             output = execute_mcp_tool(
-                 url=self.mcp_url,
-                 parameters=result,
-             )
-
-             self.short_memory.add(
-                 role="Tool Executor", content=str(output)
-             )
-         except Exception as e:
-             logger.error(f"Error in single MCP tool handling: {e}")
-             raise e
-
-     def _multiple_mcp_tool_handling(self, response: any):
-         """
-         Handles execution of multiple MCP tools.
-
-         Args:
-             response (any): The tool response to process
-
-         Raises:
-             Exception: If there's an error executing the tools
-         """
-         try:
-             if isinstance(response, str):
-                 response = str_to_dict(response)
-
-             execution = find_and_execute_tool(
-                 self.mcp_urls,
-                 response["name"],
-                 parameters=response,
-             )
-
-             self.short_memory.add(
-                 role="Tool Executor", content=str(execution)
+                 raise AgentMCPConnectionError(
+                     "mcp_url must be either a string URL or MCPConnection object"
+                 )
+             self.pretty_print(
+                 f"✨ [SYSTEM] Successfully integrated {len(tools)} MCP tools into agent: {self.agent_name} | Status: ONLINE | Time: {time.strftime('%H:%M:%S')} ✨",
+                 loop_count=0,
              )
-         except Exception as e:
-             logger.error(f"Error in multiple MCP tool handling: {e}")
-             raise e
-
-     def mcp_tool_handling(self, response: any):
-         """
-         Main handler for MCP tool execution.

-         Args:
-             response (any): The tool response to process
-
-         Raises:
-             ValueError: If no MCP URL or MCP Servers are provided
-             Exception: If there's an error in tool handling
-         """
-         try:
-             # if self.mcp_url is not None:
-             self._single_mcp_tool_handling(response)
-             # elif self.mcp_url is None and len(self.mcp_servers) > 1:
-             #     self._multiple_mcp_tool_handling(response)
-             # else:
-             #     raise ValueError("No MCP URL or MCP Servers provided")
-         except Exception as e:
-             logger.error(f"Error in mcp_tool_handling: {e}")
+             return tools
+         except AgentMCPConnectionError as e:
+             logger.error(f"Error in MCP connection: {e}")
              raise e

      def setup_config(self):
@@ -1096,60 +1005,67 @@ class Agent:
                          *response_args, **kwargs
                      )

-                 # Convert to a str if the response is not a str
+                 if exists(self.tools_list_dictionary):
+                     if isinstance(response, BaseModel):
+                         response = response.model_dump()
+
+                 # # Convert to a str if the response is not a str
+                 # if self.mcp_url is None or self.tools is None:
                  response = self.parse_llm_output(response)

                  self.short_memory.add(
-                     role=self.agent_name, content=response
+                     role=self.agent_name,
+                     content=format_dict_to_string(response),
                  )

                  # Print
                  self.pretty_print(response, loop_count)

-                 # Output Cleaner
-                 self.output_cleaner_op(response)
-
-                 ####### MCP TOOL HANDLING #######
-                 if (
-                     self.mcp_servers
-                     and self.tools_list_dictionary is not None
-                 ):
-                     self.mcp_tool_handling(response)
-
-                 ####### MCP TOOL HANDLING #######
+                 # # Output Cleaner
+                 # self.output_cleaner_op(response)

                  # Check and execute tools
-                 if self.tools is not None:
-                     out = self.parse_and_execute_tools(
-                         response
+                 if exists(self.tools):
+                     # out = self.parse_and_execute_tools(
+                     #     response
+                     # )
+
+                     # self.short_memory.add(
+                     #     role="Tool Executor", content=out
+                     # )
+
+                     # if self.no_print is False:
+                     #     agent_print(
+                     #         f"{self.agent_name} - Tool Executor",
+                     #         out,
+                     #         loop_count,
+                     #         self.streaming_on,
+                     #     )
+
+                     # out = self.call_llm(task=out)
+
+                     # self.short_memory.add(
+                     #     role=self.agent_name, content=out
+                     # )
+
+                     # if self.no_print is False:
+                     #     agent_print(
+                     #         f"{self.agent_name} - Agent Analysis",
+                     #         out,
+                     #         loop_count,
+                     #         self.streaming_on,
+                     #     )
+
+                     self.execute_tools(
+                         response=response,
+                         loop_count=loop_count,
                      )

-                     self.short_memory.add(
-                         role="Tool Executor", content=out
+                 if exists(self.mcp_url):
+                     self.mcp_tool_handling(
+                         response, loop_count
                      )

-                     if self.no_print is False:
-                         agent_print(
-                             f"{self.agent_name} - Tool Executor",
-                             out,
-                             loop_count,
-                             self.streaming_on,
-                         )
-
-                     out = self.call_llm(task=out)
-
-                     self.short_memory.add(
-                         role=self.agent_name, content=out
-                     )
-
-                     if self.no_print is False:
-                         agent_print(
-                             f"{self.agent_name} - Agent Analysis",
-                             out,
-                             loop_count,
-                             self.streaming_on,
-                         )
-
                  self.sentiment_and_evaluator(response)

                  success = True  # Mark as successful to exit the retry loop
@@ -1367,36 +1283,36 @@ class Agent:

      return output.getvalue()

- def parse_and_execute_tools(self, response: str, *args, **kwargs):
-     max_retries = 3  # Maximum number of retries
-     retries = 0
-     while retries < max_retries:
-         try:
-             logger.info("Executing tool...")
-
-             # try to Execute the tool and return a string
-             out = parse_and_execute_json(
-                 functions=self.tools,
-                 json_string=response,
-                 parse_md=True,
-                 *args,
-                 **kwargs,
-             )
-             logger.info(f"Tool Output: {out}")
-             # Add the output to the memory
-             # self.short_memory.add(
-             #     role="Tool Executor",
-             #     content=out,
-             # )
-             return out
-         except Exception as error:
-             retries += 1
-             logger.error(
-                 f"Attempt {retries}: Error executing tool: {error}"
-             )
-             if retries == max_retries:
-                 raise error
-             time.sleep(1)  # Wait for a bit before retrying
+ # def parse_and_execute_tools(self, response: str, *args, **kwargs):
+ #     max_retries = 3  # Maximum number of retries
+ #     retries = 0
+ #     while retries < max_retries:
+ #         try:
+ #             logger.info("Executing tool...")
+
+ #             # try to Execute the tool and return a string
+ #             out = parse_and_execute_json(
+ #                 functions=self.tools,
+ #                 json_string=response,
+ #                 parse_md=True,
+ #                 *args,
+ #                 **kwargs,
+ #             )
+ #             logger.info(f"Tool Output: {out}")
+ #             # Add the output to the memory
+ #             # self.short_memory.add(
+ #             #     role="Tool Executor",
+ #             #     content=out,
+ #             # )
+ #             return out
+ #         except Exception as error:
+ #             retries += 1
+ #             logger.error(
+ #                 f"Attempt {retries}: Error executing tool: {error}"
+ #             )
+ #             if retries == max_retries:
+ #                 raise error
+ #             time.sleep(1)  # Wait for a bit before retrying

  def add_memory(self, message: str):
      """Add a memory to the agent
@@ -2711,7 +2627,7 @@ class Agent:
              f"Agent Name {self.agent_name} [Max Loops: {loop_count} ]",
          )

-     def parse_llm_output(self, response: Any) -> str:
+     def parse_llm_output(self, response: Any):
          """Parse and standardize the output from the LLM.

          Args:
@@ -2724,7 +2640,7 @@ class Agent:
              ValueError: If the response format is unexpected and can't be handled
          """
          try:
-             # Handle dictionary responses
+
              if isinstance(response, dict):
                  if "choices" in response:
                      return response["choices"][0]["message"][
@@ -2734,17 +2650,23 @@ class Agent:
                      response
                  )  # Convert other dicts to string

-             # Handle string responses
-             elif isinstance(response, str):
-                 return response
+             elif isinstance(response, BaseModel):
+                 out = response.model_dump()

-             # Handle list responses (from check_llm_outputs)
-             elif isinstance(response, list):
-                 return "\n".join(response)
+             # Handle List[BaseModel] responses
+             elif (
+                 isinstance(response, list)
+                 and response
+                 and isinstance(response[0], BaseModel)
+             ):
+                 return [item.model_dump() for item in response]

-             # Handle any other type by converting to string
+             elif isinstance(response, list):
+                 out = format_data_structure(response)
              else:
-                 return str(response)
+                 out = str(response)
+
+             return out

          except Exception as e:
              logger.error(f"Error parsing LLM output: {e}")
@@ -2781,3 +2703,124 @@ class Agent:
              role="Output Cleaner",
              content=response,
          )
+
+     def mcp_tool_handling(
+         self, response: any, current_loop: Optional[int] = 0
+     ):
+         try:
+
+             if exists(self.mcp_url):
+                 # Execute the tool call
+                 tool_response = asyncio.run(
+                     execute_tool_call_simple(
+                         response=response,
+                         server_path=self.mcp_url,
+                     )
+                 )
+             elif exists(self.mcp_config):
+                 # Execute the tool call
+                 tool_response = asyncio.run(
+                     execute_tool_call_simple(
+                         response=response,
+                         connection=self.mcp_config,
+                     )
+                 )
+             else:
+                 raise AgentMCPConnectionError(
+                     "mcp_url must be either a string URL or MCPConnection object"
+                 )
+
+             # Get the text content from the tool response
+             text_content = (
+                 tool_response.content[0].text
+                 if tool_response.content
+                 else str(tool_response)
+             )
+
+             # Add to the memory
+             self.short_memory.add(
+                 role="Tool Executor",
+                 content=text_content,
+             )
+
+             # Create a temporary LLM instance without tools for the follow-up call
+             try:
+                 temp_llm = LiteLLM(
+                     model_name=self.model_name,
+                     temperature=self.temperature,
+                     max_tokens=self.max_tokens,
+                     system_prompt=self.system_prompt,
+                     stream=self.streaming_on,
+                 )
+
+                 summary = temp_llm.run(
+                     task=self.short_memory.get_str()
+                 )
+             except Exception as e:
+                 logger.error(
+                     f"Error calling LLM after MCP tool execution: {e}"
+                 )
+                 # Fallback: provide a default summary
+                 summary = "I successfully executed the MCP tool and retrieved the information above."
+
+             self.pretty_print(summary, loop_count=current_loop)
+
+             # Add to the memory
+             self.short_memory.add(
+                 role=self.agent_name, content=summary
+             )
+         except AgentMCPToolError as e:
+             logger.error(f"Error in MCP tool: {e}")
+             raise e
+
+     def execute_tools(self, response: any, loop_count: int):
+
+         output = (
+             self.tool_struct.execute_function_calls_from_api_response(
+                 response
+             )
+         )
+
+         self.short_memory.add(
+             role="Tool Executor",
+             content=format_data_structure(output),
+         )
+
+         self.pretty_print(
+             f"{format_data_structure(output)}",
+             loop_count,
+         )
+
+         # Now run the LLM again without tools - create a temporary LLM instance
+         # instead of modifying the cached one
+         # Create a temporary LLM instance without tools for the follow-up call
+         temp_llm = LiteLLM(
+             model_name=self.model_name,
+             temperature=self.temperature,
+             max_tokens=self.max_tokens,
+             system_prompt=self.system_prompt,
+             stream=self.streaming_on,
+             tools_list_dictionary=None,
+             parallel_tool_calls=False,
+         )
+
+         tool_response = temp_llm.run(
+             f"""
+             Please analyze and summarize the following tool execution output in a clear and concise way.
+             Focus on the key information and insights that would be most relevant to the user's original request.
+             If there are any errors or issues, highlight them prominently.
+
+             Tool Output:
+             {output}
+             """
+         )
+
+         self.short_memory.add(
+             role=self.agent_name,
+             content=tool_response,
+         )
+
+         self.pretty_print(
+             f"{tool_response}",
+             loop_count,
+         )
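
Taken together, the new flow is: the tool-enabled LLM returns function calls, execute_tools() runs them via BaseTool.execute_function_calls_from_api_response(), and a second, tool-free LiteLLM call summarizes the output. A rough standalone sketch of that two-pass pattern, assuming the same LiteLLM keyword arguments shown in the diff; the model name, prompt, and task text are placeholders:

from swarms.utils.litellm_wrapper import LiteLLM

# Sketch of the follow-up call used by execute_tools(): tools_list_dictionary=None
# is passed deliberately so the model cannot issue further tool calls.
summarizer = LiteLLM(
    model_name="gpt-4o-mini",                     # placeholder model
    temperature=0.5,
    max_tokens=4096,
    system_prompt="You summarize tool output.",   # hypothetical prompt
    stream=False,
    tools_list_dictionary=None,
    parallel_tool_calls=False,
)

summary = summarizer.run(
    task="Summarize the following tool output: ..."  # tool output elided
)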