swarms 7.8.4-py3-none-any.whl → 7.8.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swarms/agents/ape_agent.py +5 -22
- swarms/agents/consistency_agent.py +1 -1
- swarms/agents/i_agent.py +1 -1
- swarms/agents/reasoning_agents.py +99 -3
- swarms/agents/reasoning_duo.py +1 -1
- swarms/cli/main.py +1 -1
- swarms/communication/__init__.py +1 -0
- swarms/communication/duckdb_wrap.py +32 -2
- swarms/communication/pulsar_struct.py +45 -19
- swarms/communication/redis_wrap.py +56 -11
- swarms/communication/supabase_wrap.py +1659 -0
- swarms/prompts/prompt.py +0 -3
- swarms/schemas/agent_completion_response.py +71 -0
- swarms/schemas/agent_rag_schema.py +7 -0
- swarms/schemas/conversation_schema.py +9 -0
- swarms/schemas/llm_agent_schema.py +99 -81
- swarms/schemas/swarms_api_schemas.py +164 -0
- swarms/structs/__init__.py +14 -11
- swarms/structs/agent.py +219 -199
- swarms/structs/agent_rag_handler.py +685 -0
- swarms/structs/base_swarm.py +2 -1
- swarms/structs/conversation.py +608 -87
- swarms/structs/csv_to_agent.py +153 -100
- swarms/structs/deep_research_swarm.py +197 -193
- swarms/structs/dynamic_conversational_swarm.py +18 -7
- swarms/structs/hiearchical_swarm.py +1 -1
- swarms/structs/hybrid_hiearchical_peer_swarm.py +2 -18
- swarms/structs/image_batch_processor.py +261 -0
- swarms/structs/interactive_groupchat.py +356 -0
- swarms/structs/ma_blocks.py +75 -0
- swarms/structs/majority_voting.py +1 -1
- swarms/structs/mixture_of_agents.py +1 -1
- swarms/structs/multi_agent_router.py +3 -2
- swarms/structs/rearrange.py +3 -3
- swarms/structs/sequential_workflow.py +3 -3
- swarms/structs/swarm_matcher.py +500 -411
- swarms/structs/swarm_router.py +15 -97
- swarms/structs/swarming_architectures.py +1 -1
- swarms/tools/mcp_client_call.py +3 -0
- swarms/utils/__init__.py +10 -2
- swarms/utils/check_all_model_max_tokens.py +43 -0
- swarms/utils/generate_keys.py +0 -27
- swarms/utils/history_output_formatter.py +5 -20
- swarms/utils/litellm_wrapper.py +208 -60
- swarms/utils/output_types.py +24 -0
- swarms/utils/vllm_wrapper.py +5 -6
- swarms/utils/xml_utils.py +37 -2
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/METADATA +31 -55
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/RECORD +53 -48
- swarms/structs/multi_agent_collab.py +0 -242
- swarms/structs/output_types.py +0 -6
- swarms/utils/markdown_message.py +0 -21
- swarms/utils/visualizer.py +0 -510
- swarms/utils/wrapper_clusterop.py +0 -127
- /swarms/{tools → schemas}/tool_schema_base_model.py +0 -0
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/LICENSE +0 -0
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/WHEEL +0 -0
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/entry_points.txt +0 -0
swarms/structs/agent.py
CHANGED
@@ -40,9 +40,13 @@ from swarms.schemas.base_schemas import (
     ChatCompletionResponseChoice,
     ChatMessageResponse,
 )
+from swarms.schemas.llm_agent_schema import ModelConfigOrigin
+from swarms.structs.agent_rag_handler import (
+    RAGConfig,
+    AgentRAGHandler,
+)
 from swarms.structs.agent_roles import agent_roles
 from swarms.structs.conversation import Conversation
-from swarms.structs.output_types import OutputType
 from swarms.structs.safe_loading import (
     SafeLoaderUtils,
     SafeStateManager,
@@ -56,6 +60,7 @@ from swarms.utils.any_to_str import any_to_str
 from swarms.utils.data_to_text import data_to_text
 from swarms.utils.file_processing import create_file_in_folder
 from swarms.utils.formatter import formatter
+from swarms.utils.generate_keys import generate_api_key
 from swarms.utils.history_output_formatter import (
     history_output_formatter,
 )
@@ -78,10 +83,10 @@ from swarms.utils.index import (
     format_data_structure,
     format_dict_to_string,
 )
+from swarms.schemas.conversation_schema import ConversationSchema
+from swarms.utils.output_types import OutputType
 
 
-# Utils
-# Custom stopping condition
 def stop_when_repeats(response: str) -> bool:
     # Stop if the word stop appears in the response
     return "stop" in response.lower()
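Note for downstream code: `OutputType` now lives in `swarms.utils.output_types`, and the old `swarms/structs/output_types.py` module is deleted in this release (see the file list above). A migration sketch:

```python
# 7.8.4 import path (module removed in 7.8.8):
# from swarms.structs.output_types import OutputType

# 7.8.8 import path:
from swarms.utils.output_types import OutputType
```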
@@ -317,7 +322,7 @@ class Agent:
         pdf_path: Optional[str] = None,
         list_of_pdf: Optional[str] = None,
         tokenizer: Optional[Any] = None,
-        long_term_memory: Optional[Any] = None,
+        long_term_memory: Optional[Union[Callable, Any]] = None,
         preset_stopping_token: Optional[bool] = False,
         traceback: Optional[Any] = None,
         traceback_handlers: Optional[Any] = None,
@@ -406,7 +411,13 @@ class Agent:
         safety_prompt_on: bool = False,
         random_models_on: bool = False,
         mcp_config: Optional[MCPConnection] = None,
-        top_p: float = 0.90,
+        top_p: Optional[float] = 0.90,
+        conversation_schema: Optional[ConversationSchema] = None,
+        aditional_llm_config: Optional[ModelConfigOrigin] = None,
+        llm_base_url: Optional[str] = None,
+        llm_api_key: Optional[str] = None,
+        rag_config: Optional[RAGConfig] = None,
+        tool_call_summary: bool = False,
         *args,
         **kwargs,
     ):
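For callers, the constructor gains endpoint routing, conversation/RAG configuration, and an opt-in tool summary. A minimal sketch of the new parameters; the endpoint URL and key are placeholders, and `RAGConfig()` is assumed to have usable defaults:

```python
from swarms import Agent
from swarms.structs.agent_rag_handler import RAGConfig

agent = Agent(
    agent_name="research-agent",
    model_name="gpt-4o-mini",
    max_loops=1,
    # Route the underlying LiteLLM client at a custom endpoint (placeholders).
    llm_base_url="https://my-gateway.example.com/v1",
    llm_api_key="sk-placeholder",
    # Opt in to the post-tool summarization pass (default is False).
    tool_call_summary=True,
    # RAG tuning; field defaults are assumed to be valid here.
    rag_config=RAGConfig(),
)
```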
@@ -435,7 +446,7 @@ class Agent:
         self.system_prompt = system_prompt
         self.agent_name = agent_name
         self.agent_description = agent_description
-        self.saved_state_path = f"{self.agent_name}_state.json"
+        self.saved_state_path = f"{self.agent_name}_{generate_api_key(prefix='agent-')}_state.json"
         self.autosave = autosave
         self.response_filters = []
         self.self_healing_enabled = self_healing_enabled
@@ -533,10 +544,12 @@ class Agent:
         self.random_models_on = random_models_on
         self.mcp_config = mcp_config
         self.top_p = top_p
-
-        self.
-
-
+        self.conversation_schema = conversation_schema
+        self.aditional_llm_config = aditional_llm_config
+        self.llm_base_url = llm_base_url
+        self.llm_api_key = llm_api_key
+        self.rag_config = rag_config
+        self.tool_call_summary = tool_call_summary
 
         # self.short_memory = self.short_memory_init()
 
@@ -546,6 +559,8 @@ class Agent:
         # self.init_handling()
         self.setup_config()
 
+        self.short_memory = self.short_memory_init()
+
         if exists(self.docs_folder):
             self.get_docs_from_doc_folders()
 
@@ -563,8 +578,6 @@ class Agent:
         if self.react_on is True:
             self.system_prompt += REACT_SYS_PROMPT
 
-        self.short_memory = self.short_memory_init()
-
         # Run sequential operations after all concurrent tasks are done
         # self.agent_output = self.agent_output_model()
         log_agent_data(self.to_dict())
@@ -578,6 +591,22 @@ class Agent:
         if self.random_models_on is True:
             self.model_name = set_random_models_for_agents()
 
+        if self.long_term_memory is not None:
+            self.rag_handler = self.rag_setup_handling()
+
+        if self.dashboard is True:
+            self.print_dashboard()
+
+        self.reliability_check()
+
+    def rag_setup_handling(self):
+        return AgentRAGHandler(
+            long_term_memory=self.long_term_memory,
+            config=self.rag_config,
+            agent_name=self.agent_name,
+            verbose=self.verbose,
+        )
+
     def tool_handling(self):
 
         self.tool_struct = BaseTool(
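Setting `long_term_memory` now wires up an `AgentRAGHandler` automatically during init. A sketch; the vector-store stub and its interface are assumptions, since the memory API the handler expects is not shown in this diff:

```python
from swarms import Agent
from swarms.structs.agent_rag_handler import RAGConfig


class ToyVectorStore:
    """Hypothetical stand-in for a real vector store; the method names
    AgentRAGHandler actually expects are an assumption here."""

    def add(self, text: str) -> None:
        pass

    def query(self, text: str) -> str:
        return ""


agent = Agent(
    agent_name="rag-agent",
    model_name="gpt-4o-mini",
    max_loops=1,
    long_term_memory=ToyVectorStore(),  # triggers rag_setup_handling() in init
    rag_config=RAGConfig(),             # optional; defaults assumed valid
)
```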
@@ -612,10 +641,23 @@ class Agent:
         # Initialize the short term memory
         memory = Conversation(
             system_prompt=prompt,
-            time_enabled=False,
             user=self.user_name,
             rules=self.rules,
-            token_count=
+            token_count=(
+                self.conversation_schema.count_tokens
+                if self.conversation_schema
+                else False
+            ),
+            message_id_on=(
+                self.conversation_schema.message_id_on
+                if self.conversation_schema
+                else False
+            ),
+            time_enabled=(
+                self.conversation_schema.time_enabled
+                if self.conversation_schema
+                else False
+            ),
         )
 
         return memory
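Conversation flags that were previously hard-coded are now driven by the optional `ConversationSchema`. A sketch using the three fields visible in this hunk:

```python
from swarms import Agent
from swarms.schemas.conversation_schema import ConversationSchema

schema = ConversationSchema(
    time_enabled=True,    # timestamp each message
    message_id_on=True,   # attach an ID to each message
    count_tokens=False,   # skip per-message token counting
)

agent = Agent(
    agent_name="chat-agent",
    model_name="gpt-4o-mini",
    max_loops=1,
    conversation_schema=schema,  # all three flags fall back to False if omitted
)
```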
@@ -642,8 +684,8 @@ class Agent:
 
     def llm_handling(self):
         # Use cached instance if available
-        if self.
-            return self.
+        if self.llm is not None:
+            return self.llm
 
         if self.model_name is None:
             self.model_name = "gpt-4o-mini"
@@ -663,11 +705,9 @@ class Agent:
             }
 
             if self.llm_args is not None:
-                self.
-                    **{**common_args, **self.llm_args}
-                )
+                self.llm = LiteLLM(**{**common_args, **self.llm_args})
             elif self.tools_list_dictionary is not None:
-                self.
+                self.llm = LiteLLM(
                     **common_args,
                     tools_list_dictionary=self.tools_list_dictionary,
                     tool_choice="auto",
@@ -675,7 +715,7 @@ class Agent:
                 )
 
             elif self.mcp_url is not None:
-                self.
+                self.llm = LiteLLM(
                     **common_args,
                     tools_list_dictionary=self.add_mcp_tools_to_memory(),
                     tool_choice="auto",
@@ -683,11 +723,14 @@ class Agent:
                     mcp_call=True,
                 )
             else:
-                self.
-
+                # common_args.update(self.aditional_llm_config.model_dump())
+
+                self.llm = LiteLLM(
+                    **common_args,
+                    stream=self.streaming_on,
                 )
 
-            return self.
+            return self.llm
         except AgentLLMInitializationError as e:
             logger.error(
                 f"Error in llm_handling: {e} Your current configuration is not supported. Please check the configuration and parameters."
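`llm_handling` now caches `self.llm`, and the `llm_args` branch merges user overrides over `common_args` via `{**common_args, **self.llm_args}`, so user keys win on conflict. A sketch of the override path; the key names are assumed to mirror `LiteLLM` constructor parameters:

```python
from swarms import Agent

agent = Agent(
    agent_name="tuned-agent",
    model_name="gpt-4o-mini",
    max_loops=1,
    # Merged over common_args, so these win on conflict (names assumed
    # to match LiteLLM's constructor parameters).
    llm_args={"temperature": 0.2, "top_p": 0.95},
)
```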
@@ -770,7 +813,7 @@ class Agent:
                     "No agent details found. Using task as fallback for prompt generation."
                 )
                 self.system_prompt = auto_generate_prompt(
-                    task, self.llm
+                    task=task, model=self.llm
                 )
             else:
                 # Combine all available components
@@ -796,26 +839,6 @@ class Agent:
         self.feedback.append(feedback)
         logging.info(f"Feedback received: {feedback}")
 
-    def agent_initialization(self):
-        try:
-            logger.info(
-                f"Initializing Autonomous Agent {self.agent_name}..."
-            )
-            self.check_parameters()
-            logger.info(
-                f"{self.agent_name} Initialized Successfully."
-            )
-            logger.info(
-                f"Autonomous Agent {self.agent_name} Activated, all systems operational. Executing task..."
-            )
-
-            if self.dashboard is True:
-                self.print_dashboard()
-
-        except ValueError as e:
-            logger.info(f"Error initializing agent: {e}")
-            raise e
-
     def _check_stopping_condition(self, response: str) -> bool:
         """Check if the stopping condition is met."""
         try:
@@ -847,48 +870,38 @@ class Agent:
         )
 
     def print_dashboard(self):
-
-
-            f"Initializing Agent: {self.agent_name}"
-        )
-
-        data = self.to_dict()
-
-        # Beautify the data
-        # data = json.dumps(data, indent=4)
-        # json_data = json.dumps(data, indent=4)
-
+        tools_activated = True if self.tools is not None else False
+        mcp_activated = True if self.mcp_url is not None else False
         formatter.print_panel(
             f"""
-
-
-
-
-
-
-
-
-
-
-
+
+            🤖 Agent {self.agent_name} Dashboard 🚀
+            ════════════════════════════════════════════════════════════
+
+            🎯 Agent {self.agent_name} Status: ONLINE & OPERATIONAL
+            ────────────────────────────────────────────────────────────
+
+            📋 Agent Identity:
+            • 🏷️ Name: {self.agent_name}
+            • 📝 Description: {self.agent_description}
+
+            ⚙️ Technical Specifications:
+            • 🤖 Model: {self.model_name}
+            • 🔄 Internal Loops: {self.max_loops}
+            • 🎯 Max Tokens: {self.max_tokens}
+            • 🌡️ Dynamic Temperature: {self.dynamic_temperature_enabled}
+
+            🔧 System Modules:
+            • 🛠️ Tools Activated: {tools_activated}
+            • 🔗 MCP Activated: {mcp_activated}
+
+            ════════════════════════════════════════════════════════════
+            🚀 Ready for Tasks 🚀
+
+            """,
+            title=f"Agent {self.agent_name} Dashboard",
         )
 
-    # Check parameters
-    def check_parameters(self):
-        if self.llm is None:
-            raise ValueError(
-                "Language model is not provided. Choose a model from the available models in swarm_models or create a class with a run(task: str) method and or a __call__ method."
-            )
-
-        if self.max_loops is None or self.max_loops == 0:
-            raise ValueError("Max loops is not provided")
-
-        if self.max_tokens == 0 or self.max_tokens is None:
-            raise ValueError("Max tokens is not provided")
-
-        if self.context_length == 0 or self.context_length is None:
-            raise ValueError("Context length is not provided")
-
     # Main function
     def _run(
         self,
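With `agent_initialization()` and `check_parameters()` removed, the dashboard is rendered from the init path shown earlier (`if self.dashboard is True: self.print_dashboard()`). Enabling it looks unchanged from a caller's perspective:

```python
from swarms import Agent

# dashboard=True now prints the panel above during __init__,
# just before reliability_check() runs.
agent = Agent(
    agent_name="dash-agent",
    model_name="gpt-4o-mini",
    max_loops=1,
    dashboard=True,
)
```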
@@ -993,16 +1006,20 @@ class Agent:
                     )
                     self.memory_query(task_prompt)
 
-                    # Generate response using LLM
-                    response_args = (
-
-
-
-                    )
+                    # # Generate response using LLM
+                    # response_args = (
+                    #     (task_prompt, *args)
+                    #     if img is None
+                    #     else (task_prompt, img, *args)
+                    # )
+
+                    # # Call the LLM
+                    # response = self.call_llm(
+                    #     *response_args, **kwargs
+                    # )
 
-                    # Call the LLM
                     response = self.call_llm(
-                        *
+                        task=task_prompt, img=img, *args, **kwargs
                     )
 
                     if exists(self.tools_list_dictionary):
@@ -1026,42 +1043,16 @@ class Agent:
 
                     # Check and execute tools
                     if exists(self.tools):
-                        # out = self.parse_and_execute_tools(
-                        #     response
-                        # )
-
-                        # self.short_memory.add(
-                        #     role="Tool Executor", content=out
-                        # )
-
-                        # if self.no_print is False:
-                        #     agent_print(
-                        #         f"{self.agent_name} - Tool Executor",
-                        #         out,
-                        #         loop_count,
-                        #         self.streaming_on,
-                        #     )
-
-                        # out = self.call_llm(task=out)
-
-                        # self.short_memory.add(
-                        #     role=self.agent_name, content=out
-                        # )
-
-                        # if self.no_print is False:
-                        #     agent_print(
-                        #         f"{self.agent_name} - Agent Analysis",
-                        #         out,
-                        #         loop_count,
-                        #         self.streaming_on,
-                        #     )
 
                         self.execute_tools(
                             response=response,
                             loop_count=loop_count,
                         )
 
-
+                    # Handle MCP tools
+                    if exists(self.mcp_url) or exists(
+                        self.mcp_config
+                    ):
                         self.mcp_tool_handling(
                             response, loop_count
                         )
@@ -1139,9 +1130,6 @@ class Agent:
 
             log_agent_data(self.to_dict())
 
-            if self.autosave:
-                self.save()
-
             # Output formatting based on output_type
             return history_output_formatter(
                 self.short_memory, type=self.output_type
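Callers that relied on the end-of-run autosave in this path can persist explicitly; `save()` still exists (see the hunk further down) and takes an optional path. A sketch, assuming no other autosave hook covers this case:

```python
result = agent.run(task="Summarize the quarterly report.")

# 7.8.8 drops the automatic save() at the end of this run path, so
# persist explicitly if you depended on it.
agent.save()  # or: agent.save("checkpoints/research-agent.json")
```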
@@ -1255,33 +1243,12 @@ class Agent:
     def receive_message(
         self, agent_name: str, task: str, *args, **kwargs
     ):
-
-
+        improved_prompt = (
+            f"You have received a message from agent '{agent_name}':\n\n"
+            f'"{task}"\n\n'
+            "Please process this message and respond appropriately."
         )
-
-    def dict_to_csv(self, data: dict) -> str:
-        """
-        Convert a dictionary to a CSV string.
-
-        Args:
-            data (dict): The dictionary to convert.
-
-        Returns:
-            str: The CSV string representation of the dictionary.
-        """
-        import csv
-        import io
-
-        output = io.StringIO()
-        writer = csv.writer(output)
-
-        # Write header
-        writer.writerow(data.keys())
-
-        # Write values
-        writer.writerow(data.values())
-
-        return output.getvalue()
+        return self.run(task=improved_prompt, *args, **kwargs)
 
     # def parse_and_execute_tools(self, response: str, *args, **kwargs):
     #     max_retries = 3  # Maximum number of retries
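`receive_message` now wraps the sender's payload in a structured prompt and delegates to `run()`. A minimal hand-off sketch:

```python
from swarms import Agent

worker = Agent(
    agent_name="worker",
    model_name="gpt-4o-mini",
    max_loops=1,
)

# The sender name and message are folded into a prompt and executed via
# worker.run(); the return value is whatever run() normally returns.
reply = worker.receive_message(
    agent_name="planner",
    task="Draft an outline for the onboarding doc.",
)
print(reply)
```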
@@ -1433,6 +1400,53 @@ class Agent:
             logger.error(f"Error running batched tasks: {error}")
             raise
 
+    def reliability_check(self):
+        from litellm.utils import (
+            supports_function_calling,
+            get_max_tokens,
+        )
+        from litellm import model_list
+
+        if self.system_prompt is None:
+            logger.warning(
+                "The system prompt is not set. Please set a system prompt for the agent to improve reliability."
+            )
+
+        if self.agent_name is None:
+            logger.warning(
+                "The agent name is not set. Please set an agent name to improve reliability."
+            )
+
+        if self.max_loops is None or self.max_loops == 0:
+            raise AgentInitializationError(
+                "Max loops is not provided or is set to 0. Please set max loops to 1 or more."
+            )
+
+        if self.max_tokens is None or self.max_tokens == 0:
+            self.max_tokens = get_max_tokens(self.model_name)
+
+        if self.context_length is None or self.context_length == 0:
+            raise AgentInitializationError(
+                "Context length is not provided. Please set a valid context length."
+            )
+
+        if self.tools_list_dictionary is not None:
+            if not supports_function_calling(self.model_name):
+                raise AgentInitializationError(
+                    f"The model '{self.model_name}' does not support function calling. Please use a model that supports function calling."
+                )
+
+        if self.max_tokens > get_max_tokens(self.model_name):
+            raise AgentInitializationError(
+                f"Max tokens is set to {self.max_tokens}, but the model '{self.model_name}' only supports {get_max_tokens(self.model_name)} tokens. Please set max tokens to {get_max_tokens(self.model_name)} or less."
+            )
+
+
+        if self.model_name not in model_list:
+            logger.warning(
+                f"The model '{self.model_name}' is not supported. Please use a supported model, or override the model name with the 'llm' parameter, which should be a class with a 'run(task: str)' method or a '__call__' method."
+            )
+
     def save(self, file_path: str = None) -> None:
         """
         Save the agent state to a file using SafeStateManager with atomic writing
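Because `reliability_check()` runs during initialization (see the init hunk earlier), misconfiguration now fails fast at construction time. A sketch of the observable behavior; the exception is caught broadly here since `AgentInitializationError`'s import path is not shown in this diff:

```python
from swarms import Agent

try:
    # max_loops=0 trips reliability_check() inside __init__.
    Agent(
        agent_name="misconfigured",
        model_name="gpt-4o-mini",
        max_loops=0,
    )
except Exception as err:  # AgentInitializationError in practice
    print(f"Init rejected: {err}")
```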
@@ -2369,7 +2383,9 @@ class Agent:
 
         return None
 
-    def call_llm(
+    def call_llm(
+        self, task: str, img: Optional[str] = None, *args, **kwargs
+    ) -> str:
         """
         Calls the appropriate method on the `llm` object based on the given task.
 
@@ -2388,17 +2404,14 @@ class Agent:
             TypeError: If task is not a string or llm object is None.
             ValueError: If task is empty.
         """
-        # if not isinstance(task, str):
-        #     task = any_to_str(task)
-
-        # if img is not None:
-        #     kwargs['img'] = img
-
-        # if audio is not None:
-        #     kwargs['audio'] = audio
 
         try:
-
+            if img is not None:
+                out = self.llm.run(
+                    task=task, img=img, *args, **kwargs
+                )
+            else:
+                out = self.llm.run(task=task, *args, **kwargs)
 
             return out
         except AgentLLMError as e:
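`call_llm` now takes an explicit `img` parameter and forwards it to `LiteLLM.run`. Combined with the `_run` hunk above, images should thread straight through from the public entry point. A sketch, assuming `run()` forwards `img` the same way and the chosen model is vision-capable:

```python
from swarms import Agent

vision_agent = Agent(
    agent_name="vision-agent",
    model_name="gpt-4o",  # assumed vision-capable
    max_loops=1,
)

# img threads run() -> _run() -> call_llm() -> LiteLLM.run().
description = vision_agent.run(
    task="Describe what is in this image.",
    img="examples/invoice.png",  # hypothetical local path
)
print(description)
```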
@@ -2731,11 +2744,15 @@ class Agent:
             )
 
             # Get the text content from the tool response
-
-
-
-
-
+            # execute_tool_call_simple returns a string directly, not an object with content attribute
+            text_content = f"MCP Tool Response: \n{json.dumps(tool_response, indent=2)}"
+
+            if self.no_print is False:
+                formatter.print_panel(
+                    text_content,
+                    "MCP Tool Response: 🛠️",
+                    style="green",
+                )
 
             # Add to the memory
             self.short_memory.add(
@@ -2745,13 +2762,7 @@ class Agent:
 
             # Create a temporary LLM instance without tools for the follow-up call
             try:
-                temp_llm =
-                    model_name=self.model_name,
-                    temperature=self.temperature,
-                    max_tokens=self.max_tokens,
-                    system_prompt=self.system_prompt,
-                    stream=self.streaming_on,
-                )
+                temp_llm = self.temp_llm_instance_for_tool_summary()
 
                 summary = temp_llm.run(
                     task=self.short_memory.get_str()
@@ -2773,6 +2784,19 @@ class Agent:
             logger.error(f"Error in MCP tool: {e}")
             raise e
 
+    def temp_llm_instance_for_tool_summary(self):
+        return LiteLLM(
+            model_name=self.model_name,
+            temperature=self.temperature,
+            max_tokens=self.max_tokens,
+            system_prompt=self.system_prompt,
+            stream=self.streaming_on,
+            tools_list_dictionary=None,
+            parallel_tool_calls=False,
+            base_url=self.llm_base_url,
+            api_key=self.llm_api_key,
+        )
+
     def execute_tools(self, response: any, loop_count: int):
 
         output = (
@@ -2794,33 +2818,29 @@ class Agent:
         # Now run the LLM again without tools - create a temporary LLM instance
         # instead of modifying the cached one
         # Create a temporary LLM instance without tools for the follow-up call
-
-
-
-
-
-
-
-
-
+        if self.tool_call_summary is True:
+            temp_llm = self.temp_llm_instance_for_tool_summary()
+
+            tool_response = temp_llm.run(
+                f"""
+                Please analyze and summarize the following tool execution output in a clear and concise way.
+                Focus on the key information and insights that would be most relevant to the user's original request.
+                If there are any errors or issues, highlight them prominently.
+
+                Tool Output:
+                {output}
+                """
+            )
 
-
-
-
-
-                If there are any errors or issues, highlight them prominently.
-
-                Tool Output:
-                {output}
-                """
-            )
+            self.short_memory.add(
+                role=self.agent_name,
+                content=tool_response,
+            )
 
-
-
-
-
+            self.pretty_print(
+                f"{tool_response}",
+                loop_count,
+            )
 
-
-
-                loop_count,
-            )
+    def list_output_types(self):
+        return OutputType
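The follow-up summarization in `execute_tools` is now gated behind `tool_call_summary`, which this diff shows defaulting to `False` in the new constructor signature, so the second LLM pass over tool output is opt-in. A sketch with a toy tool:

```python
from swarms import Agent


def get_weather(city: str) -> str:
    """Toy tool: return a canned weather string."""
    return f"The weather in {city} is sunny, 22°C."


agent = Agent(
    agent_name="weather-agent",
    model_name="gpt-4o-mini",
    max_loops=1,
    tools=[get_weather],
    # Opt in to the post-tool summary; it reuses
    # temp_llm_instance_for_tool_summary() with tools disabled.
    tool_call_summary=True,
)

print(agent.run(task="What's the weather in Lisbon?"))
```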