ag2 0.4.1__py3-none-any.whl → 0.4.2b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ag2 might be problematic; see the package registry's release listing for more details.
- ag2-0.4.2b1.dist-info/METADATA +19 -0
- ag2-0.4.2b1.dist-info/RECORD +6 -0
- ag2-0.4.2b1.dist-info/top_level.txt +1 -0
- ag2-0.4.1.dist-info/METADATA +0 -500
- ag2-0.4.1.dist-info/RECORD +0 -158
- ag2-0.4.1.dist-info/top_level.txt +0 -1
- autogen/__init__.py +0 -17
- autogen/_pydantic.py +0 -116
- autogen/agentchat/__init__.py +0 -42
- autogen/agentchat/agent.py +0 -142
- autogen/agentchat/assistant_agent.py +0 -85
- autogen/agentchat/chat.py +0 -306
- autogen/agentchat/contrib/__init__.py +0 -0
- autogen/agentchat/contrib/agent_builder.py +0 -788
- autogen/agentchat/contrib/agent_eval/agent_eval.py +0 -107
- autogen/agentchat/contrib/agent_eval/criterion.py +0 -47
- autogen/agentchat/contrib/agent_eval/critic_agent.py +0 -47
- autogen/agentchat/contrib/agent_eval/quantifier_agent.py +0 -42
- autogen/agentchat/contrib/agent_eval/subcritic_agent.py +0 -48
- autogen/agentchat/contrib/agent_eval/task.py +0 -43
- autogen/agentchat/contrib/agent_optimizer.py +0 -450
- autogen/agentchat/contrib/capabilities/__init__.py +0 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +0 -21
- autogen/agentchat/contrib/capabilities/generate_images.py +0 -297
- autogen/agentchat/contrib/capabilities/teachability.py +0 -406
- autogen/agentchat/contrib/capabilities/text_compressors.py +0 -72
- autogen/agentchat/contrib/capabilities/transform_messages.py +0 -92
- autogen/agentchat/contrib/capabilities/transforms.py +0 -565
- autogen/agentchat/contrib/capabilities/transforms_util.py +0 -120
- autogen/agentchat/contrib/capabilities/vision_capability.py +0 -217
- autogen/agentchat/contrib/captainagent/tools/__init__.py +0 -0
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_correlation.py +0 -41
- autogen/agentchat/contrib/captainagent/tools/data_analysis/calculate_skewness_and_kurtosis.py +0 -29
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_iqr.py +0 -29
- autogen/agentchat/contrib/captainagent/tools/data_analysis/detect_outlier_zscore.py +0 -29
- autogen/agentchat/contrib/captainagent/tools/data_analysis/explore_csv.py +0 -22
- autogen/agentchat/contrib/captainagent/tools/data_analysis/shapiro_wilk_test.py +0 -31
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_download.py +0 -26
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/arxiv_search.py +0 -55
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_image.py +0 -54
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/extract_pdf_text.py +0 -39
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_wikipedia_text.py +0 -22
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/get_youtube_caption.py +0 -35
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/image_qa.py +0 -61
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/optical_character_recognition.py +0 -62
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/perform_web_search.py +0 -48
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/scrape_wikipedia_tables.py +0 -34
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/transcribe_audio_file.py +0 -22
- autogen/agentchat/contrib/captainagent/tools/information_retrieval/youtube_download.py +0 -36
- autogen/agentchat/contrib/captainagent/tools/math/calculate_circle_area_from_diameter.py +0 -22
- autogen/agentchat/contrib/captainagent/tools/math/calculate_day_of_the_week.py +0 -19
- autogen/agentchat/contrib/captainagent/tools/math/calculate_fraction_sum.py +0 -29
- autogen/agentchat/contrib/captainagent/tools/math/calculate_matrix_power.py +0 -32
- autogen/agentchat/contrib/captainagent/tools/math/calculate_reflected_point.py +0 -17
- autogen/agentchat/contrib/captainagent/tools/math/complex_numbers_product.py +0 -26
- autogen/agentchat/contrib/captainagent/tools/math/compute_currency_conversion.py +0 -24
- autogen/agentchat/contrib/captainagent/tools/math/count_distinct_permutations.py +0 -28
- autogen/agentchat/contrib/captainagent/tools/math/evaluate_expression.py +0 -29
- autogen/agentchat/contrib/captainagent/tools/math/find_continuity_point.py +0 -35
- autogen/agentchat/contrib/captainagent/tools/math/fraction_to_mixed_numbers.py +0 -40
- autogen/agentchat/contrib/captainagent/tools/math/modular_inverse_sum.py +0 -23
- autogen/agentchat/contrib/captainagent/tools/math/simplify_mixed_numbers.py +0 -37
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_digit_factorials.py +0 -16
- autogen/agentchat/contrib/captainagent/tools/math/sum_of_primes_below.py +0 -16
- autogen/agentchat/contrib/captainagent/tools/requirements.txt +0 -10
- autogen/agentchat/contrib/captainagent/tools/tool_description.tsv +0 -34
- autogen/agentchat/contrib/captainagent.py +0 -490
- autogen/agentchat/contrib/gpt_assistant_agent.py +0 -545
- autogen/agentchat/contrib/graph_rag/__init__.py +0 -0
- autogen/agentchat/contrib/graph_rag/document.py +0 -30
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +0 -111
- autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +0 -81
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +0 -56
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +0 -64
- autogen/agentchat/contrib/img_utils.py +0 -390
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +0 -123
- autogen/agentchat/contrib/llava_agent.py +0 -176
- autogen/agentchat/contrib/math_user_proxy_agent.py +0 -471
- autogen/agentchat/contrib/multimodal_conversable_agent.py +0 -128
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +0 -325
- autogen/agentchat/contrib/retrieve_assistant_agent.py +0 -56
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +0 -705
- autogen/agentchat/contrib/society_of_mind_agent.py +0 -203
- autogen/agentchat/contrib/swarm_agent.py +0 -463
- autogen/agentchat/contrib/text_analyzer_agent.py +0 -76
- autogen/agentchat/contrib/tool_retriever.py +0 -120
- autogen/agentchat/contrib/vectordb/__init__.py +0 -0
- autogen/agentchat/contrib/vectordb/base.py +0 -243
- autogen/agentchat/contrib/vectordb/chromadb.py +0 -326
- autogen/agentchat/contrib/vectordb/mongodb.py +0 -559
- autogen/agentchat/contrib/vectordb/pgvectordb.py +0 -958
- autogen/agentchat/contrib/vectordb/qdrant.py +0 -334
- autogen/agentchat/contrib/vectordb/utils.py +0 -126
- autogen/agentchat/contrib/web_surfer.py +0 -305
- autogen/agentchat/conversable_agent.py +0 -2908
- autogen/agentchat/groupchat.py +0 -1668
- autogen/agentchat/user_proxy_agent.py +0 -109
- autogen/agentchat/utils.py +0 -207
- autogen/browser_utils.py +0 -291
- autogen/cache/__init__.py +0 -10
- autogen/cache/abstract_cache_base.py +0 -78
- autogen/cache/cache.py +0 -182
- autogen/cache/cache_factory.py +0 -85
- autogen/cache/cosmos_db_cache.py +0 -150
- autogen/cache/disk_cache.py +0 -109
- autogen/cache/in_memory_cache.py +0 -61
- autogen/cache/redis_cache.py +0 -128
- autogen/code_utils.py +0 -745
- autogen/coding/__init__.py +0 -22
- autogen/coding/base.py +0 -113
- autogen/coding/docker_commandline_code_executor.py +0 -262
- autogen/coding/factory.py +0 -45
- autogen/coding/func_with_reqs.py +0 -203
- autogen/coding/jupyter/__init__.py +0 -22
- autogen/coding/jupyter/base.py +0 -32
- autogen/coding/jupyter/docker_jupyter_server.py +0 -164
- autogen/coding/jupyter/embedded_ipython_code_executor.py +0 -182
- autogen/coding/jupyter/jupyter_client.py +0 -224
- autogen/coding/jupyter/jupyter_code_executor.py +0 -161
- autogen/coding/jupyter/local_jupyter_server.py +0 -168
- autogen/coding/local_commandline_code_executor.py +0 -410
- autogen/coding/markdown_code_extractor.py +0 -44
- autogen/coding/utils.py +0 -57
- autogen/exception_utils.py +0 -46
- autogen/extensions/__init__.py +0 -0
- autogen/formatting_utils.py +0 -76
- autogen/function_utils.py +0 -362
- autogen/graph_utils.py +0 -148
- autogen/io/__init__.py +0 -15
- autogen/io/base.py +0 -105
- autogen/io/console.py +0 -43
- autogen/io/websockets.py +0 -213
- autogen/logger/__init__.py +0 -11
- autogen/logger/base_logger.py +0 -140
- autogen/logger/file_logger.py +0 -287
- autogen/logger/logger_factory.py +0 -29
- autogen/logger/logger_utils.py +0 -42
- autogen/logger/sqlite_logger.py +0 -459
- autogen/math_utils.py +0 -356
- autogen/oai/__init__.py +0 -33
- autogen/oai/anthropic.py +0 -428
- autogen/oai/bedrock.py +0 -606
- autogen/oai/cerebras.py +0 -270
- autogen/oai/client.py +0 -1148
- autogen/oai/client_utils.py +0 -167
- autogen/oai/cohere.py +0 -453
- autogen/oai/completion.py +0 -1216
- autogen/oai/gemini.py +0 -469
- autogen/oai/groq.py +0 -281
- autogen/oai/mistral.py +0 -279
- autogen/oai/ollama.py +0 -582
- autogen/oai/openai_utils.py +0 -811
- autogen/oai/together.py +0 -343
- autogen/retrieve_utils.py +0 -487
- autogen/runtime_logging.py +0 -163
- autogen/token_count_utils.py +0 -259
- autogen/types.py +0 -20
- autogen/version.py +0 -7
- {ag2-0.4.1.dist-info → ag2-0.4.2b1.dist-info}/LICENSE +0 -0
- {ag2-0.4.1.dist-info → ag2-0.4.2b1.dist-info}/NOTICE.md +0 -0
- {ag2-0.4.1.dist-info → ag2-0.4.2b1.dist-info}/WHEEL +0 -0
|
@@ -1,203 +0,0 @@
|
|
|
1
|
-
# Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
|
|
2
|
-
#
|
|
3
|
-
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
-
#
|
|
5
|
-
# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
|
|
6
|
-
# SPDX-License-Identifier: MIT
|
|
7
|
-
# ruff: noqa: E722
|
|
8
|
-
import copy
|
|
9
|
-
import traceback
|
|
10
|
-
from typing import Callable, Dict, List, Literal, Optional, Tuple, Union
|
|
11
|
-
|
|
12
|
-
from autogen import Agent, ConversableAgent, GroupChat, GroupChatManager, OpenAIWrapper
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
class SocietyOfMindAgent(ConversableAgent):
|
|
16
|
-
"""(In preview) A single agent that runs a Group Chat as an inner monologue.
|
|
17
|
-
At the end of the conversation (termination for any reason), the SocietyOfMindAgent
|
|
18
|
-
applies the response_preparer method on the entire inner monologue message history to
|
|
19
|
-
extract a final answer for the reply.
|
|
20
|
-
|
|
21
|
-
Most arguments are inherited from ConversableAgent. New arguments are:
|
|
22
|
-
chat_manager (GroupChatManager): the group chat manager that will be running the inner monologue
|
|
23
|
-
response_preparer (Optional, Callable or String): If response_preparer is a callable function, then
|
|
24
|
-
it should have the signature:
|
|
25
|
-
f( self: SocietyOfMindAgent, messages: List[Dict])
|
|
26
|
-
where `self` is this SocietyOfMindAgent, and `messages` is a list of inner-monologue messages.
|
|
27
|
-
The function should return a string representing the final response (extracted or prepared)
|
|
28
|
-
from that history.
|
|
29
|
-
If response_preparer is a string, then it should be the LLM prompt used to extract the final
|
|
30
|
-
message from the inner chat transcript.
|
|
31
|
-
The default response_preparer depends on if an llm_config is provided. If llm_config is False,
|
|
32
|
-
then the response_preparer deterministically returns the last message in the inner-monolgue. If
|
|
33
|
-
llm_config is set to anything else, then a default LLM prompt is used.
|
|
34
|
-
"""
|
|
35
|
-
|
|
36
|
-
def __init__(
|
|
37
|
-
self,
|
|
38
|
-
name: str,
|
|
39
|
-
chat_manager: GroupChatManager,
|
|
40
|
-
response_preparer: Optional[Union[str, Callable]] = None,
|
|
41
|
-
is_termination_msg: Optional[Callable[[Dict], bool]] = None,
|
|
42
|
-
max_consecutive_auto_reply: Optional[int] = None,
|
|
43
|
-
human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "TERMINATE",
|
|
44
|
-
function_map: Optional[Dict[str, Callable]] = None,
|
|
45
|
-
code_execution_config: Union[Dict, Literal[False]] = False,
|
|
46
|
-
llm_config: Optional[Union[Dict, Literal[False]]] = False,
|
|
47
|
-
default_auto_reply: Optional[Union[str, Dict, None]] = "",
|
|
48
|
-
**kwargs,
|
|
49
|
-
):
|
|
50
|
-
super().__init__(
|
|
51
|
-
name=name,
|
|
52
|
-
system_message="",
|
|
53
|
-
is_termination_msg=is_termination_msg,
|
|
54
|
-
max_consecutive_auto_reply=max_consecutive_auto_reply,
|
|
55
|
-
human_input_mode=human_input_mode,
|
|
56
|
-
function_map=function_map,
|
|
57
|
-
code_execution_config=code_execution_config,
|
|
58
|
-
llm_config=llm_config,
|
|
59
|
-
default_auto_reply=default_auto_reply,
|
|
60
|
-
**kwargs,
|
|
61
|
-
)
|
|
62
|
-
|
|
63
|
-
self.update_chat_manager(chat_manager)
|
|
64
|
-
|
|
65
|
-
# response_preparer default depends on if the llm_config is set, and if a client was created
|
|
66
|
-
if response_preparer is None:
|
|
67
|
-
if self.client is not None:
|
|
68
|
-
response_preparer = "Output a standalone response to the original request, without mentioning any of the intermediate discussion."
|
|
69
|
-
else:
|
|
70
|
-
|
|
71
|
-
def response_preparer(agent, messages):
|
|
72
|
-
return messages[-1]["content"].replace("TERMINATE", "").strip()
|
|
73
|
-
|
|
74
|
-
# Create the response_preparer callable, if given only a prompt string
|
|
75
|
-
if isinstance(response_preparer, str):
|
|
76
|
-
self.response_preparer = lambda agent, messages: agent._llm_response_preparer(response_preparer, messages)
|
|
77
|
-
else:
|
|
78
|
-
self.response_preparer = response_preparer
|
|
79
|
-
|
|
80
|
-
# NOTE: Async reply functions are not yet supported with this contrib agent
|
|
81
|
-
self._reply_func_list = []
|
|
82
|
-
self.register_reply([Agent, None], SocietyOfMindAgent.generate_inner_monologue_reply)
|
|
83
|
-
self.register_reply([Agent, None], ConversableAgent.generate_code_execution_reply)
|
|
84
|
-
self.register_reply([Agent, None], ConversableAgent.generate_function_call_reply)
|
|
85
|
-
self.register_reply([Agent, None], ConversableAgent.check_termination_and_human_reply)
|
|
86
|
-
|
|
87
|
-
def _llm_response_preparer(self, prompt, messages):
|
|
88
|
-
"""Default response_preparer when provided with a string prompt, rather than a callable.
|
|
89
|
-
|
|
90
|
-
Args:
|
|
91
|
-
prompt (str): The prompt used to extract the final response from the transcript.
|
|
92
|
-
messages (list): The messages generated as part of the inner monologue group chat.
|
|
93
|
-
"""
|
|
94
|
-
|
|
95
|
-
_messages = [
|
|
96
|
-
{
|
|
97
|
-
"role": "system",
|
|
98
|
-
"content": """Earlier you were asked to fulfill a request. You and your team worked diligently to address that request. Here is a transcript of that conversation:""",
|
|
99
|
-
}
|
|
100
|
-
]
|
|
101
|
-
|
|
102
|
-
for message in messages:
|
|
103
|
-
message = copy.deepcopy(message)
|
|
104
|
-
message["role"] = "user"
|
|
105
|
-
|
|
106
|
-
# Convert tool and function calls to basic messages to avoid an error on the LLM call
|
|
107
|
-
if "content" not in message:
|
|
108
|
-
message["content"] = ""
|
|
109
|
-
|
|
110
|
-
if "tool_calls" in message:
|
|
111
|
-
del message["tool_calls"]
|
|
112
|
-
if "tool_responses" in message:
|
|
113
|
-
del message["tool_responses"]
|
|
114
|
-
if "function_call" in message:
|
|
115
|
-
if message["content"] == "":
|
|
116
|
-
try:
|
|
117
|
-
message["content"] = (
|
|
118
|
-
message["function_call"]["name"] + "(" + message["function_call"]["arguments"] + ")"
|
|
119
|
-
)
|
|
120
|
-
except KeyError:
|
|
121
|
-
pass
|
|
122
|
-
del message["function_call"]
|
|
123
|
-
|
|
124
|
-
# Add the modified message to the transcript
|
|
125
|
-
_messages.append(message)
|
|
126
|
-
|
|
127
|
-
_messages.append(
|
|
128
|
-
{
|
|
129
|
-
"role": "system",
|
|
130
|
-
"content": prompt,
|
|
131
|
-
}
|
|
132
|
-
)
|
|
133
|
-
|
|
134
|
-
response = self.client.create(context=None, messages=_messages, cache=self.client_cache, agent=self.name)
|
|
135
|
-
extracted_response = self.client.extract_text_or_completion_object(response)[0]
|
|
136
|
-
if not isinstance(extracted_response, str):
|
|
137
|
-
return str(extracted_response.model_dump(mode="dict"))
|
|
138
|
-
else:
|
|
139
|
-
return extracted_response
|
|
140
|
-
|
|
141
|
-
@property
|
|
142
|
-
def chat_manager(self) -> Union[GroupChatManager, None]:
|
|
143
|
-
"""Return the group chat manager."""
|
|
144
|
-
return self._chat_manager
|
|
145
|
-
|
|
146
|
-
def update_chat_manager(self, chat_manager: Union[GroupChatManager, None]):
|
|
147
|
-
"""Update the chat manager.
|
|
148
|
-
|
|
149
|
-
Args:
|
|
150
|
-
chat_manager (GroupChatManager): the group chat manager
|
|
151
|
-
"""
|
|
152
|
-
self._chat_manager = chat_manager
|
|
153
|
-
|
|
154
|
-
# Awkward, but due to object cloning, there's no better way to do this
|
|
155
|
-
# Read the GroupChat object from the callback
|
|
156
|
-
self._group_chat = None
|
|
157
|
-
if self._chat_manager is not None:
|
|
158
|
-
for item in self._chat_manager._reply_func_list:
|
|
159
|
-
if isinstance(item["config"], GroupChat):
|
|
160
|
-
self._group_chat = item["config"]
|
|
161
|
-
break
|
|
162
|
-
|
|
163
|
-
def generate_inner_monologue_reply(
|
|
164
|
-
self,
|
|
165
|
-
messages: Optional[List[Dict]] = None,
|
|
166
|
-
sender: Optional[Agent] = None,
|
|
167
|
-
config: Optional[OpenAIWrapper] = None,
|
|
168
|
-
) -> Tuple[bool, Union[str, Dict, None]]:
|
|
169
|
-
"""Generate a reply by running the group chat"""
|
|
170
|
-
if self.chat_manager is None:
|
|
171
|
-
return False, None
|
|
172
|
-
if messages is None:
|
|
173
|
-
messages = self._oai_messages[sender]
|
|
174
|
-
|
|
175
|
-
# We want to clear the inner monolgue, keeping only the exteranl chat for context.
|
|
176
|
-
# Reset all the counters and histories, then populate agents with necessary context from the external chat
|
|
177
|
-
self.chat_manager.reset()
|
|
178
|
-
self.update_chat_manager(self.chat_manager)
|
|
179
|
-
|
|
180
|
-
external_history = []
|
|
181
|
-
if len(messages) > 1:
|
|
182
|
-
external_history = messages[0 : len(messages) - 1] # All but the current message
|
|
183
|
-
|
|
184
|
-
for agent in self._group_chat.agents:
|
|
185
|
-
agent.reset()
|
|
186
|
-
for message in external_history:
|
|
187
|
-
# Assign each message a name
|
|
188
|
-
attributed_message = message.copy()
|
|
189
|
-
if "name" not in attributed_message:
|
|
190
|
-
if attributed_message["role"] == "assistant":
|
|
191
|
-
attributed_message["name"] = self.name
|
|
192
|
-
else:
|
|
193
|
-
attributed_message["name"] = sender.name
|
|
194
|
-
|
|
195
|
-
self.chat_manager.send(attributed_message, agent, request_reply=False, silent=True)
|
|
196
|
-
|
|
197
|
-
try:
|
|
198
|
-
self.initiate_chat(self.chat_manager, message=messages[-1], clear_history=False)
|
|
199
|
-
except:
|
|
200
|
-
traceback.print_exc()
|
|
201
|
-
|
|
202
|
-
response_preparer = self.response_preparer
|
|
203
|
-
return True, response_preparer(self, self._group_chat.messages)
|
|
@@ -1,463 +0,0 @@
|
|
|
1
|
-
# Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
|
|
2
|
-
#
|
|
3
|
-
# SPDX-License-Identifier: Apache-2.0
|
|
4
|
-
import copy
|
|
5
|
-
import json
|
|
6
|
-
from dataclasses import dataclass
|
|
7
|
-
from enum import Enum
|
|
8
|
-
from inspect import signature
|
|
9
|
-
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
|
|
10
|
-
|
|
11
|
-
from pydantic import BaseModel
|
|
12
|
-
|
|
13
|
-
from autogen.function_utils import get_function_schema
|
|
14
|
-
from autogen.oai import OpenAIWrapper
|
|
15
|
-
|
|
16
|
-
from ..agent import Agent
|
|
17
|
-
from ..chat import ChatResult
|
|
18
|
-
from ..conversable_agent import ConversableAgent
|
|
19
|
-
from ..groupchat import GroupChat, GroupChatManager
|
|
20
|
-
from ..user_proxy_agent import UserProxyAgent
|
|
21
|
-
|
|
22
|
-
# Parameter name for context variables
# Use the value in functions and they will be substituted with the context variables:
# e.g. def my_function(context_variables: Dict[str, Any], my_other_parameters: Any) -> Any:
# Any registered swarm function declaring a parameter with this exact name has
# the shared context dict substituted in at call time.
__CONTEXT_VARIABLES_PARAM_NAME__ = "context_variables"
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
class AfterWorkOption(Enum):
    """Built-in strategies for choosing the next speaker when a swarm agent
    finishes its turn without handing off to another agent.

    TERMINATE: terminate the conversation.
    REVERT_TO_USER: revert to the user agent if one was provided; otherwise terminate.
    STAY: stay with the last speaker.
    """

    TERMINATE = "TERMINATE"
    REVERT_TO_USER = "REVERT_TO_USER"
    STAY = "STAY"
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
@dataclass
class AFTER_WORK:
    """Holds the after-work hand-off target for a swarm agent.

    `agent` may be a SwarmAgent, an AfterWorkOption, the (case-insensitive)
    string name of an AfterWorkOption, or a callable that selects the next agent.
    """

    agent: Union[AfterWorkOption, "SwarmAgent", str, Callable]

    def __post_init__(self):
        # A plain string is shorthand for an AfterWorkOption member name;
        # anything else is kept as-is.
        if not isinstance(self.agent, str):
            return
        self.agent = AfterWorkOption(self.agent.upper())
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
@dataclass
class ON_CONDITION:
    """A conditional hand-off: transfer to `agent` when `condition` applies.

    agent: the target SwarmAgent of the hand-off.
    condition: natural-language description of when to transfer (used as the
        description of the generated transfer function — see register_hand_off).
    """

    agent: "SwarmAgent"
    condition: str = ""

    # Ensure that agent is a SwarmAgent
    def __post_init__(self):
        # NOTE(review): `assert` is stripped under `python -O`, so this
        # validation only runs with assertions enabled.
        assert isinstance(self.agent, SwarmAgent), "Agent must be a SwarmAgent"
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
def initiate_swarm_chat(
    initial_agent: "SwarmAgent",
    messages: Union[List[Dict[str, Any]], str],
    agents: List["SwarmAgent"],
    user_agent: Optional[UserProxyAgent] = None,
    max_rounds: int = 20,
    context_variables: Optional[Dict[str, Any]] = None,
    after_work: Optional[Union[AFTER_WORK, Callable]] = AFTER_WORK(AfterWorkOption.TERMINATE),
) -> Tuple[ChatResult, Dict[str, Any], "SwarmAgent"]:
    """Initialize and run a swarm chat

    Args:
        initial_agent: The first receiving agent of the conversation.
        messages: Initial message(s).
        agents: List of swarm agents.
        user_agent: Optional user proxy agent for falling back to.
        max_rounds: Maximum number of conversation rounds.
        context_variables: Starting context variables.
        after_work: Method to handle conversation continuation when an agent doesn't select the next agent. If no agent is selected and no tool calls are output, we will use this method to determine the next agent.
            Must be a AFTER_WORK instance (which is a dataclass accepting a SwarmAgent, AfterWorkOption, A str (of the AfterWorkOption)) or a callable.
            AfterWorkOption:
                - TERMINATE (Default): Terminate the conversation.
                - REVERT_TO_USER : Revert to the user agent if a user agent is provided. If not provided, terminate the conversation.
                - STAY : Stay with the last speaker.

            Callable: A custom function that takes the current agent, messages, groupchat, and context_variables as arguments and returns the next agent. The function should return None to terminate.
                ```python
                def custom_afterwork_func(last_speaker: SwarmAgent, messages: List[Dict[str, Any]], groupchat: GroupChat, context_variables: Optional[Dict[str, Any]]) -> Optional[SwarmAgent]:
                ```
    Returns:
        ChatResult: Conversations chat history.
        Dict[str, Any]: Updated Context variables.
        SwarmAgent: Last speaker.
    """
    # NOTE(review): asserts are stripped under `python -O`; argument validation
    # here relies on assertions being enabled.
    assert isinstance(initial_agent, SwarmAgent), "initial_agent must be a SwarmAgent"
    assert all(isinstance(agent, SwarmAgent) for agent in agents), "Agents must be a list of SwarmAgents"
    # Ensure all agents in hand-off after-works are in the passed in agents list
    for agent in agents:
        if agent.after_work is not None:
            if isinstance(agent.after_work.agent, SwarmAgent):
                assert agent.after_work.agent in agents, "Agent in hand-off must be in the agents list"

    context_variables = context_variables or {}
    if isinstance(messages, str):
        # A bare string is treated as a single user message.
        messages = [{"role": "user", "content": messages}]

    swarm_agent_names = [agent.name for agent in agents]

    # Internal agent that executes all tool calls on behalf of the swarm agents.
    tool_execution = SwarmAgent(
        name="Tool_Execution",
        system_message="Tool Execution",
    )
    tool_execution._set_to_tool_execution(context_variables=context_variables)

    # Update tool execution agent with all the functions from all the agents
    for agent in agents:
        tool_execution._function_map.update(agent._function_map)

    # Flag so the very first transition always goes to initial_agent.
    INIT_AGENT_USED = False

    def swarm_transition(last_speaker: SwarmAgent, groupchat: GroupChat):
        """Swarm transition function to determine the next agent in the conversation"""
        nonlocal INIT_AGENT_USED
        if not INIT_AGENT_USED:
            INIT_AGENT_USED = True
            return initial_agent

        # Pending tool calls are routed to the internal tool-execution agent.
        if "tool_calls" in groupchat.messages[-1]:
            return tool_execution
        # A tool call may have selected the next agent (hand-off); honor it once.
        if tool_execution._next_agent is not None:
            next_agent = tool_execution._next_agent
            tool_execution._next_agent = None
            return next_agent

        # get the last swarm agent
        last_swarm_speaker = None
        for message in reversed(groupchat.messages):
            if "name" in message and message["name"] in swarm_agent_names:
                agent = groupchat.agent_by_name(name=message["name"])
                if isinstance(agent, SwarmAgent):
                    last_swarm_speaker = agent
                    break
        if last_swarm_speaker is None:
            raise ValueError("No swarm agent found in the message history")

        # If the user last spoke, return to the agent prior
        if (user_agent and last_speaker == user_agent) or groupchat.messages[-1]["role"] == "tool":
            return last_swarm_speaker

        # No agent selected via hand-offs (tool calls)
        # Assume the work is Done
        # override if agent-level after_work is defined, else use the global after_work
        tmp_after_work = last_swarm_speaker.after_work if last_swarm_speaker.after_work is not None else after_work
        if isinstance(tmp_after_work, AFTER_WORK):
            tmp_after_work = tmp_after_work.agent

        if isinstance(tmp_after_work, SwarmAgent):
            return tmp_after_work
        elif isinstance(tmp_after_work, AfterWorkOption):
            # Returning None from a speaker-selection method ends the chat.
            if tmp_after_work == AfterWorkOption.TERMINATE or (
                user_agent is None and tmp_after_work == AfterWorkOption.REVERT_TO_USER
            ):
                return None
            elif tmp_after_work == AfterWorkOption.REVERT_TO_USER:
                return user_agent
            elif tmp_after_work == AfterWorkOption.STAY:
                return last_speaker
        elif isinstance(tmp_after_work, Callable):
            return tmp_after_work(last_speaker, groupchat.messages, groupchat, context_variables)
        else:
            raise ValueError("Invalid After Work condition")

    # If there's only one message and there's no identified swarm agent
    # Start with a user proxy agent, creating one if they haven't passed one in
    if len(messages) == 1 and "name" not in messages[0] and not user_agent:
        temp_user_proxy = [UserProxyAgent(name="_User")]
    else:
        temp_user_proxy = []

    groupchat = GroupChat(
        agents=[tool_execution] + agents + ([user_agent] if user_agent is not None else temp_user_proxy),
        messages=[],  # Set to empty. We will resume the conversation with the messages
        max_round=max_rounds,
        speaker_selection_method=swarm_transition,
    )
    manager = GroupChatManager(groupchat)
    clear_history = True

    if len(messages) > 1:
        # Multiple messages: resume the conversation from the supplied history.
        last_agent, last_message = manager.resume(messages=messages)
        clear_history = False
    else:
        last_message = messages[0]

        if "name" in last_message:
            if last_message["name"] in swarm_agent_names:
                # If there's a name in the message and it's a swarm agent, use that
                last_agent = groupchat.agent_by_name(name=last_message["name"])
            elif user_agent and last_message["name"] == user_agent.name:
                # If the user agent is passed in and is the first message
                last_agent = user_agent
            else:
                raise ValueError(f"Invalid swarm agent name in last message: {last_message['name']}")
        else:
            # No name, so we're using the user proxy to start the conversation
            if user_agent:
                last_agent = user_agent
            else:
                # If no user agent passed in, use our temporary user proxy
                last_agent = temp_user_proxy[0]

    chat_result = last_agent.initiate_chat(
        manager,
        message=last_message,
        clear_history=clear_history,
    )

    # Clear the temporary user proxy's name from messages
    if len(temp_user_proxy) == 1:
        for message in chat_result.chat_history:
            if "name" in message and message["name"] == "_User":
                # delete the name key from the message
                del message["name"]

    return chat_result, context_variables, manager.last_speaker
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
class SwarmResult(BaseModel):
    """
    Encapsulates the possible return values for a swarm agent function.

    Args:
        values (str): The result values as a string.
        agent (SwarmAgent): The swarm agent instance, if applicable.
        context_variables (dict): A dictionary of context variables.
    """

    values: str = ""
    agent: Optional["SwarmAgent"] = None
    context_variables: Dict[str, Any] = {}

    class Config:  # Allow the non-pydantic SwarmAgent type in the `agent` field
        arbitrary_types_allowed = True

    def __str__(self):
        # The string form of a SwarmResult is its `values` payload.
        return self.values
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
class SwarmAgent(ConversableAgent):
|
|
242
|
-
"""Swarm agent for participating in a swarm.
|
|
243
|
-
|
|
244
|
-
SwarmAgent is a subclass of ConversableAgent.
|
|
245
|
-
|
|
246
|
-
Additional args:
|
|
247
|
-
functions (List[Callable]): A list of functions to register with the agent.
|
|
248
|
-
"""
|
|
249
|
-
|
|
250
|
-
def __init__(
    self,
    name: str,
    system_message: Optional[str] = "You are a helpful AI Assistant.",
    llm_config: Optional[Union[Dict, Literal[False]]] = None,
    functions: Union[List[Callable], Callable] = None,
    is_termination_msg: Optional[Callable[[Dict], bool]] = None,
    max_consecutive_auto_reply: Optional[int] = None,
    human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",
    description: Optional[str] = None,
    code_execution_config=False,
    **kwargs,
) -> None:
    """Initialize a SwarmAgent.

    Identical to ConversableAgent.__init__ except for `functions`, a callable
    or list of callables registered with the agent as swarm tools.

    Raises:
        TypeError: if `functions` is neither a callable, a list of callables, nor None.
    """
    super().__init__(
        name,
        system_message,
        is_termination_msg,
        max_consecutive_auto_reply,
        human_input_mode,
        llm_config=llm_config,
        description=description,
        code_execution_config=code_execution_config,
        **kwargs,
    )

    # Register the supplied tool function(s) with this agent.
    if isinstance(functions, list):
        if not all(isinstance(func, Callable) for func in functions):
            raise TypeError("All elements in the functions list must be callable")
        self.add_functions(functions)
    elif isinstance(functions, Callable):
        self.add_single_function(functions)
    elif functions is not None:
        raise TypeError("Functions must be a callable or a list of callables")

    # Agent-level after-work hand-off; None means fall back to the swarm-level default.
    self.after_work = None

    # use in the tool execution agent to transfer to the next agent
    self._context_variables = {}
    self._next_agent = None
|
|
289
|
-
|
|
290
|
-
def _set_to_tool_execution(self, context_variables: Optional[Dict[str, Any]] = None):
|
|
291
|
-
"""Set to a special instance of SwarmAgent that is responsible for executing tool calls from other swarm agents.
|
|
292
|
-
This agent will be used internally and should not be visible to the user.
|
|
293
|
-
|
|
294
|
-
It will execute the tool calls and update the context_variables and next_agent accordingly.
|
|
295
|
-
"""
|
|
296
|
-
self._next_agent = None
|
|
297
|
-
self._context_variables = context_variables or {}
|
|
298
|
-
self._reply_func_list.clear()
|
|
299
|
-
self.register_reply([Agent, None], SwarmAgent.generate_swarm_tool_reply)
|
|
300
|
-
|
|
301
|
-
def __str__(self):
|
|
302
|
-
return f"SwarmAgent --> {self.name}"
|
|
303
|
-
|
|
304
|
-
def register_hand_off(
|
|
305
|
-
self,
|
|
306
|
-
hand_to: Union[List[Union[ON_CONDITION, AFTER_WORK]], ON_CONDITION, AFTER_WORK],
|
|
307
|
-
):
|
|
308
|
-
"""Register a function to hand off to another agent.
|
|
309
|
-
|
|
310
|
-
Args:
|
|
311
|
-
hand_to: A list of ON_CONDITIONs and an, optional, AFTER_WORK condition
|
|
312
|
-
|
|
313
|
-
Hand off template:
|
|
314
|
-
def transfer_to_agent_name() -> SwarmAgent:
|
|
315
|
-
return agent_name
|
|
316
|
-
1. register the function with the agent
|
|
317
|
-
2. register the schema with the agent, description set to the condition
|
|
318
|
-
"""
|
|
319
|
-
# Ensure that hand_to is a list or ON_CONDITION or AFTER_WORK
|
|
320
|
-
if not isinstance(hand_to, (list, ON_CONDITION, AFTER_WORK)):
|
|
321
|
-
raise ValueError("hand_to must be a list of ON_CONDITION or AFTER_WORK")
|
|
322
|
-
|
|
323
|
-
if isinstance(hand_to, (ON_CONDITION, AFTER_WORK)):
|
|
324
|
-
hand_to = [hand_to]
|
|
325
|
-
|
|
326
|
-
for transit in hand_to:
|
|
327
|
-
if isinstance(transit, AFTER_WORK):
|
|
328
|
-
assert isinstance(
|
|
329
|
-
transit.agent, (AfterWorkOption, SwarmAgent, str, Callable)
|
|
330
|
-
), "Invalid After Work value"
|
|
331
|
-
self.after_work = transit
|
|
332
|
-
elif isinstance(transit, ON_CONDITION):
|
|
333
|
-
|
|
334
|
-
# Create closure with current loop transit value
|
|
335
|
-
# to ensure the condition matches the one in the loop
|
|
336
|
-
def make_transfer_function(current_transit):
|
|
337
|
-
def transfer_to_agent() -> "SwarmAgent":
|
|
338
|
-
return current_transit.agent
|
|
339
|
-
|
|
340
|
-
return transfer_to_agent
|
|
341
|
-
|
|
342
|
-
transfer_func = make_transfer_function(transit)
|
|
343
|
-
self.add_single_function(transfer_func, f"transfer_to_{transit.agent.name}", transit.condition)
|
|
344
|
-
else:
|
|
345
|
-
raise ValueError("Invalid hand off condition, must be either ON_CONDITION or AFTER_WORK")
|
|
346
|
-
|
|
347
|
-
def generate_swarm_tool_reply(
|
|
348
|
-
self,
|
|
349
|
-
messages: Optional[List[Dict]] = None,
|
|
350
|
-
sender: Optional[Agent] = None,
|
|
351
|
-
config: Optional[OpenAIWrapper] = None,
|
|
352
|
-
) -> Tuple[bool, dict]:
|
|
353
|
-
"""Pre-processes and generates tool call replies.
|
|
354
|
-
|
|
355
|
-
This function:
|
|
356
|
-
1. Adds context_variables back to the tool call for the function, if necessary.
|
|
357
|
-
2. Generates the tool calls reply.
|
|
358
|
-
3. Updates context_variables and next_agent based on the tool call response."""
|
|
359
|
-
|
|
360
|
-
if config is None:
|
|
361
|
-
config = self
|
|
362
|
-
if messages is None:
|
|
363
|
-
messages = self._oai_messages[sender]
|
|
364
|
-
|
|
365
|
-
message = messages[-1]
|
|
366
|
-
if "tool_calls" in message:
|
|
367
|
-
|
|
368
|
-
tool_call_count = len(message["tool_calls"])
|
|
369
|
-
|
|
370
|
-
# Loop through tool calls individually (so context can be updated after each function call)
|
|
371
|
-
next_agent = None
|
|
372
|
-
tool_responses_inner = []
|
|
373
|
-
contents = []
|
|
374
|
-
for index in range(tool_call_count):
|
|
375
|
-
|
|
376
|
-
# Deep copy to ensure no changes to messages when we insert the context variables
|
|
377
|
-
message_copy = copy.deepcopy(message)
|
|
378
|
-
|
|
379
|
-
# 1. add context_variables to the tool call arguments
|
|
380
|
-
tool_call = message_copy["tool_calls"][index]
|
|
381
|
-
|
|
382
|
-
if tool_call["type"] == "function":
|
|
383
|
-
function_name = tool_call["function"]["name"]
|
|
384
|
-
|
|
385
|
-
# Check if this function exists in our function map
|
|
386
|
-
if function_name in self._function_map:
|
|
387
|
-
func = self._function_map[function_name] # Get the original function
|
|
388
|
-
|
|
389
|
-
# Inject the context variables into the tool call if it has the parameter
|
|
390
|
-
sig = signature(func)
|
|
391
|
-
if __CONTEXT_VARIABLES_PARAM_NAME__ in sig.parameters:
|
|
392
|
-
|
|
393
|
-
current_args = json.loads(tool_call["function"]["arguments"])
|
|
394
|
-
current_args[__CONTEXT_VARIABLES_PARAM_NAME__] = self._context_variables
|
|
395
|
-
tool_call["function"]["arguments"] = json.dumps(current_args)
|
|
396
|
-
|
|
397
|
-
# Ensure we are only executing the one tool at a time
|
|
398
|
-
message_copy["tool_calls"] = [tool_call]
|
|
399
|
-
|
|
400
|
-
# 2. generate tool calls reply
|
|
401
|
-
_, tool_message = self.generate_tool_calls_reply([message_copy])
|
|
402
|
-
|
|
403
|
-
# 3. update context_variables and next_agent, convert content to string
|
|
404
|
-
for tool_response in tool_message["tool_responses"]:
|
|
405
|
-
content = tool_response.get("content")
|
|
406
|
-
if isinstance(content, SwarmResult):
|
|
407
|
-
if content.context_variables != {}:
|
|
408
|
-
self._context_variables.update(content.context_variables)
|
|
409
|
-
if content.agent is not None:
|
|
410
|
-
next_agent = content.agent
|
|
411
|
-
elif isinstance(content, Agent):
|
|
412
|
-
next_agent = content
|
|
413
|
-
|
|
414
|
-
tool_responses_inner.append(tool_response)
|
|
415
|
-
contents.append(str(tool_response["content"]))
|
|
416
|
-
|
|
417
|
-
self._next_agent = next_agent
|
|
418
|
-
|
|
419
|
-
# Put the tool responses and content strings back into the response message
|
|
420
|
-
# Caters for multiple tool calls
|
|
421
|
-
tool_message["tool_responses"] = tool_responses_inner
|
|
422
|
-
tool_message["content"] = "\n".join(contents)
|
|
423
|
-
|
|
424
|
-
return True, tool_message
|
|
425
|
-
return False, None
|
|
426
|
-
|
|
427
|
-
def add_single_function(self, func: Callable, name=None, description=""):
|
|
428
|
-
if name:
|
|
429
|
-
func._name = name
|
|
430
|
-
else:
|
|
431
|
-
func._name = func.__name__
|
|
432
|
-
|
|
433
|
-
if description:
|
|
434
|
-
func._description = description
|
|
435
|
-
else:
|
|
436
|
-
# Use function's docstring, strip whitespace, fall back to empty string
|
|
437
|
-
func._description = (func.__doc__ or "").strip()
|
|
438
|
-
|
|
439
|
-
f = get_function_schema(func, name=func._name, description=func._description)
|
|
440
|
-
|
|
441
|
-
# Remove context_variables parameter from function schema
|
|
442
|
-
f_no_context = f.copy()
|
|
443
|
-
if __CONTEXT_VARIABLES_PARAM_NAME__ in f_no_context["function"]["parameters"]["properties"]:
|
|
444
|
-
del f_no_context["function"]["parameters"]["properties"][__CONTEXT_VARIABLES_PARAM_NAME__]
|
|
445
|
-
if "required" in f_no_context["function"]["parameters"]:
|
|
446
|
-
required = f_no_context["function"]["parameters"]["required"]
|
|
447
|
-
f_no_context["function"]["parameters"]["required"] = [
|
|
448
|
-
param for param in required if param != __CONTEXT_VARIABLES_PARAM_NAME__
|
|
449
|
-
]
|
|
450
|
-
# If required list is empty, remove it
|
|
451
|
-
if not f_no_context["function"]["parameters"]["required"]:
|
|
452
|
-
del f_no_context["function"]["parameters"]["required"]
|
|
453
|
-
|
|
454
|
-
self.update_tool_signature(f_no_context, is_remove=False)
|
|
455
|
-
self.register_function({func._name: func})
|
|
456
|
-
|
|
457
|
-
def add_functions(self, func_list: List[Callable]):
|
|
458
|
-
for func in func_list:
|
|
459
|
-
self.add_single_function(func)
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
# Forward references for SwarmAgent in SwarmResult
# Resolve the "SwarmAgent" forward reference now that the class is defined
# (presumably SwarmResult is a pydantic model declared earlier in this file —
# update_forward_refs is the pydantic v1-style rebuild call; verify against
# the SwarmResult definition).
SwarmResult.update_forward_refs()
|