ag2 0.3.2 (ag2-0.3.2-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ag2 might be problematic.
- ag2-0.3.2.dist-info/LICENSE +201 -0
- ag2-0.3.2.dist-info/METADATA +490 -0
- ag2-0.3.2.dist-info/NOTICE.md +19 -0
- ag2-0.3.2.dist-info/RECORD +112 -0
- ag2-0.3.2.dist-info/WHEEL +5 -0
- ag2-0.3.2.dist-info/top_level.txt +1 -0
- autogen/__init__.py +17 -0
- autogen/_pydantic.py +116 -0
- autogen/agentchat/__init__.py +26 -0
- autogen/agentchat/agent.py +142 -0
- autogen/agentchat/assistant_agent.py +85 -0
- autogen/agentchat/chat.py +306 -0
- autogen/agentchat/contrib/__init__.py +0 -0
- autogen/agentchat/contrib/agent_builder.py +785 -0
- autogen/agentchat/contrib/agent_optimizer.py +450 -0
- autogen/agentchat/contrib/capabilities/__init__.py +0 -0
- autogen/agentchat/contrib/capabilities/agent_capability.py +21 -0
- autogen/agentchat/contrib/capabilities/generate_images.py +297 -0
- autogen/agentchat/contrib/capabilities/teachability.py +406 -0
- autogen/agentchat/contrib/capabilities/text_compressors.py +72 -0
- autogen/agentchat/contrib/capabilities/transform_messages.py +92 -0
- autogen/agentchat/contrib/capabilities/transforms.py +565 -0
- autogen/agentchat/contrib/capabilities/transforms_util.py +120 -0
- autogen/agentchat/contrib/capabilities/vision_capability.py +217 -0
- autogen/agentchat/contrib/gpt_assistant_agent.py +545 -0
- autogen/agentchat/contrib/graph_rag/__init__.py +0 -0
- autogen/agentchat/contrib/graph_rag/document.py +24 -0
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +76 -0
- autogen/agentchat/contrib/graph_rag/graph_query_engine.py +50 -0
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +56 -0
- autogen/agentchat/contrib/img_utils.py +390 -0
- autogen/agentchat/contrib/llamaindex_conversable_agent.py +114 -0
- autogen/agentchat/contrib/llava_agent.py +176 -0
- autogen/agentchat/contrib/math_user_proxy_agent.py +471 -0
- autogen/agentchat/contrib/multimodal_conversable_agent.py +128 -0
- autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +325 -0
- autogen/agentchat/contrib/retrieve_assistant_agent.py +56 -0
- autogen/agentchat/contrib/retrieve_user_proxy_agent.py +701 -0
- autogen/agentchat/contrib/society_of_mind_agent.py +203 -0
- autogen/agentchat/contrib/text_analyzer_agent.py +76 -0
- autogen/agentchat/contrib/vectordb/__init__.py +0 -0
- autogen/agentchat/contrib/vectordb/base.py +243 -0
- autogen/agentchat/contrib/vectordb/chromadb.py +326 -0
- autogen/agentchat/contrib/vectordb/mongodb.py +559 -0
- autogen/agentchat/contrib/vectordb/pgvectordb.py +958 -0
- autogen/agentchat/contrib/vectordb/qdrant.py +334 -0
- autogen/agentchat/contrib/vectordb/utils.py +126 -0
- autogen/agentchat/contrib/web_surfer.py +305 -0
- autogen/agentchat/conversable_agent.py +2904 -0
- autogen/agentchat/groupchat.py +1666 -0
- autogen/agentchat/user_proxy_agent.py +109 -0
- autogen/agentchat/utils.py +207 -0
- autogen/browser_utils.py +291 -0
- autogen/cache/__init__.py +10 -0
- autogen/cache/abstract_cache_base.py +78 -0
- autogen/cache/cache.py +182 -0
- autogen/cache/cache_factory.py +85 -0
- autogen/cache/cosmos_db_cache.py +150 -0
- autogen/cache/disk_cache.py +109 -0
- autogen/cache/in_memory_cache.py +61 -0
- autogen/cache/redis_cache.py +128 -0
- autogen/code_utils.py +745 -0
- autogen/coding/__init__.py +22 -0
- autogen/coding/base.py +113 -0
- autogen/coding/docker_commandline_code_executor.py +262 -0
- autogen/coding/factory.py +45 -0
- autogen/coding/func_with_reqs.py +203 -0
- autogen/coding/jupyter/__init__.py +22 -0
- autogen/coding/jupyter/base.py +32 -0
- autogen/coding/jupyter/docker_jupyter_server.py +164 -0
- autogen/coding/jupyter/embedded_ipython_code_executor.py +182 -0
- autogen/coding/jupyter/jupyter_client.py +224 -0
- autogen/coding/jupyter/jupyter_code_executor.py +161 -0
- autogen/coding/jupyter/local_jupyter_server.py +168 -0
- autogen/coding/local_commandline_code_executor.py +410 -0
- autogen/coding/markdown_code_extractor.py +44 -0
- autogen/coding/utils.py +57 -0
- autogen/exception_utils.py +46 -0
- autogen/extensions/__init__.py +0 -0
- autogen/formatting_utils.py +76 -0
- autogen/function_utils.py +362 -0
- autogen/graph_utils.py +148 -0
- autogen/io/__init__.py +15 -0
- autogen/io/base.py +105 -0
- autogen/io/console.py +43 -0
- autogen/io/websockets.py +213 -0
- autogen/logger/__init__.py +11 -0
- autogen/logger/base_logger.py +140 -0
- autogen/logger/file_logger.py +287 -0
- autogen/logger/logger_factory.py +29 -0
- autogen/logger/logger_utils.py +42 -0
- autogen/logger/sqlite_logger.py +459 -0
- autogen/math_utils.py +356 -0
- autogen/oai/__init__.py +33 -0
- autogen/oai/anthropic.py +428 -0
- autogen/oai/bedrock.py +600 -0
- autogen/oai/cerebras.py +264 -0
- autogen/oai/client.py +1148 -0
- autogen/oai/client_utils.py +167 -0
- autogen/oai/cohere.py +453 -0
- autogen/oai/completion.py +1216 -0
- autogen/oai/gemini.py +469 -0
- autogen/oai/groq.py +281 -0
- autogen/oai/mistral.py +279 -0
- autogen/oai/ollama.py +576 -0
- autogen/oai/openai_utils.py +810 -0
- autogen/oai/together.py +343 -0
- autogen/retrieve_utils.py +487 -0
- autogen/runtime_logging.py +163 -0
- autogen/token_count_utils.py +257 -0
- autogen/types.py +20 -0
- autogen/version.py +7 -0
autogen/_pydantic.py
ADDED
@@ -0,0 +1,116 @@
+# Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
+# SPDX-License-Identifier: MIT
+from typing import Any, Dict, Optional, Tuple, Type, Union, get_args
+
+from pydantic import BaseModel
+from pydantic.version import VERSION as PYDANTIC_VERSION
+from typing_extensions import get_origin
+
+__all__ = ("JsonSchemaValue", "model_dump", "model_dump_json", "type2schema", "evaluate_forwardref")
+
+PYDANTIC_V1 = PYDANTIC_VERSION.startswith("1.")
+
+if not PYDANTIC_V1:
+    from pydantic import TypeAdapter
+    from pydantic._internal._typing_extra import eval_type_lenient as evaluate_forwardref
+    from pydantic.json_schema import JsonSchemaValue
+
+    def type2schema(t: Any) -> JsonSchemaValue:
+        """Convert a type to a JSON schema
+
+        Args:
+            t (Type): The type to convert
+
+        Returns:
+            JsonSchemaValue: The JSON schema
+        """
+        return TypeAdapter(t).json_schema()
+
+    def model_dump(model: BaseModel) -> Dict[str, Any]:
+        """Convert a pydantic model to a dict
+
+        Args:
+            model (BaseModel): The model to convert
+
+        Returns:
+            Dict[str, Any]: The dict representation of the model
+
+        """
+        return model.model_dump()
+
+    def model_dump_json(model: BaseModel) -> str:
+        """Convert a pydantic model to a JSON string
+
+        Args:
+            model (BaseModel): The model to convert
+
+        Returns:
+            str: The JSON string representation of the model
+        """
+        return model.model_dump_json()
+
+
+# Remove this once we drop support for pydantic 1.x
+else:  # pragma: no cover
+    from pydantic import schema_of
+    from pydantic.typing import evaluate_forwardref as evaluate_forwardref  # type: ignore[no-redef]
+
+    JsonSchemaValue = Dict[str, Any]  # type: ignore[misc]
+
+    def type2schema(t: Any) -> JsonSchemaValue:
+        """Convert a type to a JSON schema
+
+        Args:
+            t (Type): The type to convert
+
+        Returns:
+            JsonSchemaValue: The JSON schema
+        """
+
+        if t is None:
+            return {"type": "null"}
+        elif get_origin(t) is Union:
+            return {"anyOf": [type2schema(tt) for tt in get_args(t)]}
+        elif get_origin(t) in [Tuple, tuple]:
+            prefixItems = [type2schema(tt) for tt in get_args(t)]
+            return {
+                "maxItems": len(prefixItems),
+                "minItems": len(prefixItems),
+                "prefixItems": prefixItems,
+                "type": "array",
+            }
+        else:
+            d = schema_of(t)
+            if "title" in d:
+                d.pop("title")
+            if "description" in d:
+                d.pop("description")
+
+            return d
+
+    def model_dump(model: BaseModel) -> Dict[str, Any]:
+        """Convert a pydantic model to a dict
+
+        Args:
+            model (BaseModel): The model to convert
+
+        Returns:
+            Dict[str, Any]: The dict representation of the model
+
+        """
+        return model.dict()
+
+    def model_dump_json(model: BaseModel) -> str:
+        """Convert a pydantic model to a JSON string
+
+        Args:
+            model (BaseModel): The model to convert
+
+        Returns:
+            str: The JSON string representation of the model
+        """
+        return model.json()
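For orientation, here is a minimal usage sketch of the compatibility shim above. This snippet is hypothetical and not part of the package; the printed schemas assume pydantic v2 is installed, where `type2schema` delegates to `TypeAdapter`.

from typing import List, Optional

from pydantic import BaseModel

from autogen._pydantic import model_dump, model_dump_json, type2schema


class Point(BaseModel):
    x: int
    y: int


# Works on plain annotations as well as BaseModel subclasses.
print(type2schema(Optional[int]))  # {'anyOf': [{'type': 'integer'}, {'type': 'null'}]}
print(type2schema(List[str]))      # {'items': {'type': 'string'}, 'type': 'array'}

# model_dump / model_dump_json hide the v1 (.dict/.json) vs v2
# (.model_dump/.model_dump_json) method rename from callers.
p = Point(x=1, y=2)
print(model_dump(p))       # {'x': 1, 'y': 2}
print(model_dump_json(p))  # {"x":1,"y":2}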
autogen/agentchat/__init__.py
ADDED
@@ -0,0 +1,26 @@
+# Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
+# SPDX-License-Identifier: MIT
+from .agent import Agent
+from .assistant_agent import AssistantAgent
+from .chat import ChatResult, initiate_chats
+from .conversable_agent import ConversableAgent, register_function
+from .groupchat import GroupChat, GroupChatManager
+from .user_proxy_agent import UserProxyAgent
+from .utils import gather_usage_summary
+
+__all__ = (
+    "Agent",
+    "ConversableAgent",
+    "AssistantAgent",
+    "UserProxyAgent",
+    "GroupChat",
+    "GroupChatManager",
+    "register_function",
+    "initiate_chats",
+    "gather_usage_summary",
+    "ChatResult",
+)
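The `__all__` tuple above defines the subpackage's public surface, so (assuming ag2 0.3.2 is installed) the exported names can be imported directly:

from autogen.agentchat import AssistantAgent, UserProxyAgent, initiate_chats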
autogen/agentchat/agent.py
ADDED
@@ -0,0 +1,142 @@
+# Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
+# SPDX-License-Identifier: MIT
+from typing import Any, Dict, List, Optional, Protocol, Union, runtime_checkable
+
+
+@runtime_checkable
+class Agent(Protocol):
+    """(In preview) A protocol for Agent.
+
+    An agent can communicate with other agents and perform actions.
+    Different agents can differ in what actions they perform in the `receive` method.
+    """
+
+    @property
+    def name(self) -> str:
+        """The name of the agent."""
+        ...
+
+    @property
+    def description(self) -> str:
+        """The description of the agent. Used for the agent's introduction in
+        a group chat setting."""
+        ...
+
+    def send(
+        self,
+        message: Union[Dict[str, Any], str],
+        recipient: "Agent",
+        request_reply: Optional[bool] = None,
+    ) -> None:
+        """Send a message to another agent.
+
+        Args:
+            message (dict or str): the message to send. If a dict, it should be
+                a JSON-serializable and follows the OpenAI's ChatCompletion schema.
+            recipient (Agent): the recipient of the message.
+            request_reply (bool): whether to request a reply from the recipient.
+        """
+        ...
+
+    async def a_send(
+        self,
+        message: Union[Dict[str, Any], str],
+        recipient: "Agent",
+        request_reply: Optional[bool] = None,
+    ) -> None:
+        """(Async) Send a message to another agent.
+
+        Args:
+            message (dict or str): the message to send. If a dict, it should be
+                a JSON-serializable and follows the OpenAI's ChatCompletion schema.
+            recipient (Agent): the recipient of the message.
+            request_reply (bool): whether to request a reply from the recipient.
+        """
+        ...
+
+    def receive(
+        self,
+        message: Union[Dict[str, Any], str],
+        sender: "Agent",
+        request_reply: Optional[bool] = None,
+    ) -> None:
+        """Receive a message from another agent.
+
+        Args:
+            message (dict or str): the message received. If a dict, it should be
+                a JSON-serializable and follows the OpenAI's ChatCompletion schema.
+            sender (Agent): the sender of the message.
+            request_reply (bool): whether the sender requests a reply.
+        """
+
+    async def a_receive(
+        self,
+        message: Union[Dict[str, Any], str],
+        sender: "Agent",
+        request_reply: Optional[bool] = None,
+    ) -> None:
+        """(Async) Receive a message from another agent.
+
+        Args:
+            message (dict or str): the message received. If a dict, it should be
+                a JSON-serializable and follows the OpenAI's ChatCompletion schema.
+            sender (Agent): the sender of the message.
+            request_reply (bool): whether the sender requests a reply.
+        """
+        ...
+
+    def generate_reply(
+        self,
+        messages: Optional[List[Dict[str, Any]]] = None,
+        sender: Optional["Agent"] = None,
+        **kwargs: Any,
+    ) -> Union[str, Dict[str, Any], None]:
+        """Generate a reply based on the received messages.
+
+        Args:
+            messages (list[dict]): a list of messages received from other agents.
+                The messages are dictionaries that are JSON-serializable and
+                follows the OpenAI's ChatCompletion schema.
+            sender: sender of an Agent instance.
+
+        Returns:
+            str or dict or None: the generated reply. If None, no reply is generated.
+        """
+
+    async def a_generate_reply(
+        self,
+        messages: Optional[List[Dict[str, Any]]] = None,
+        sender: Optional["Agent"] = None,
+        **kwargs: Any,
+    ) -> Union[str, Dict[str, Any], None]:
+        """(Async) Generate a reply based on the received messages.
+
+        Args:
+            messages (list[dict]): a list of messages received from other agents.
+                The messages are dictionaries that are JSON-serializable and
+                follows the OpenAI's ChatCompletion schema.
+            sender: sender of an Agent instance.
+
+        Returns:
+            str or dict or None: the generated reply. If None, no reply is generated.
+        """
+
+
+@runtime_checkable
+class LLMAgent(Agent, Protocol):
+    """(In preview) A protocol for an LLM agent."""
+
+    @property
+    def system_message(self) -> str:
+        """The system message of this agent."""
+
+    def update_system_message(self, system_message: str) -> None:
+        """Update this agent's system message.
+
+        Args:
+            system_message (str): system message for inference.
+        """
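Because `Agent` is declared `@runtime_checkable`, conformance is structural: any object exposing the listed attributes passes an `isinstance` check, which verifies attribute presence only, not call signatures. A minimal sketch with a hypothetical `EchoAgent` class (not part of the package):

from autogen.agentchat.agent import Agent


class EchoAgent:
    """Hypothetical agent used only to illustrate the protocol."""

    def __init__(self, name: str):
        self._name = name

    @property
    def name(self) -> str:
        return self._name

    @property
    def description(self) -> str:
        return f"{self._name} prints whatever it receives."

    def send(self, message, recipient, request_reply=None) -> None:
        recipient.receive(message, self, request_reply)

    async def a_send(self, message, recipient, request_reply=None) -> None:
        await recipient.a_receive(message, self, request_reply)

    def receive(self, message, sender, request_reply=None) -> None:
        print(f"[{self._name}] from {sender.name}: {message}")

    async def a_receive(self, message, sender, request_reply=None) -> None:
        self.receive(message, sender, request_reply)

    def generate_reply(self, messages=None, sender=None, **kwargs):
        # Echo the last message back; returning None would mean "no reply".
        return messages[-1]["content"] if messages else None

    async def a_generate_reply(self, messages=None, sender=None, **kwargs):
        return self.generate_reply(messages, sender, **kwargs)


# Structural check: attribute presence only, not signatures.
assert isinstance(EchoAgent("echo"), Agent)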
autogen/agentchat/assistant_agent.py
ADDED
@@ -0,0 +1,85 @@
+# Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
+# SPDX-License-Identifier: MIT
+from typing import Callable, Dict, Literal, Optional, Union
+
+from autogen.runtime_logging import log_new_agent, logging_enabled
+
+from .conversable_agent import ConversableAgent
+
+
+class AssistantAgent(ConversableAgent):
+    """(In preview) Assistant agent, designed to solve a task with LLM.
+
+    AssistantAgent is a subclass of ConversableAgent configured with a default system message.
+    The default system message is designed to solve a task with LLM,
+    including suggesting python code blocks and debugging.
+    `human_input_mode` is default to "NEVER"
+    and `code_execution_config` is default to False.
+    This agent doesn't execute code by default, and expects the user to execute the code.
+    """
+
+    DEFAULT_SYSTEM_MESSAGE = """You are a helpful AI assistant.
+Solve tasks using your coding and language skills.
+In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.
+    1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.
+    2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.
+Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.
+When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.
+If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.
+If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
+When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
+Reply "TERMINATE" in the end when everything is done.
+    """
+
+    DEFAULT_DESCRIPTION = "A helpful and general-purpose AI assistant that has strong language skills, Python skills, and Linux command line skills."
+
+    def __init__(
+        self,
+        name: str,
+        system_message: Optional[str] = DEFAULT_SYSTEM_MESSAGE,
+        llm_config: Optional[Union[Dict, Literal[False]]] = None,
+        is_termination_msg: Optional[Callable[[Dict], bool]] = None,
+        max_consecutive_auto_reply: Optional[int] = None,
+        human_input_mode: Literal["ALWAYS", "NEVER", "TERMINATE"] = "NEVER",
+        description: Optional[str] = None,
+        **kwargs,
+    ):
+        """
+        Args:
+            name (str): agent name.
+            system_message (str): system message for the ChatCompletion inference.
+                Please override this attribute if you want to reprogram the agent.
+            llm_config (dict or False or None): llm inference configuration.
+                Please refer to [OpenAIWrapper.create](/docs/reference/oai/client#create)
+                for available options.
+            is_termination_msg (function): a function that takes a message in the form of a dictionary
+                and returns a boolean value indicating if this received message is a termination message.
+                The dict can contain the following keys: "content", "role", "name", "function_call".
+            max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
+                default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
+                The limit only plays a role when human_input_mode is not "ALWAYS".
+            **kwargs (dict): Please refer to other kwargs in
+                [ConversableAgent](conversable_agent#__init__).
+        """
+        super().__init__(
+            name,
+            system_message,
+            is_termination_msg,
+            max_consecutive_auto_reply,
+            human_input_mode,
+            llm_config=llm_config,
+            description=description,
+            **kwargs,
+        )
+        if logging_enabled():
+            log_new_agent(self, locals())
+
+        # Update the provided description if None, and we are using the default system_message,
+        # then use the default description.
+        if description is None:
+            if system_message == self.DEFAULT_SYSTEM_MESSAGE:
+                self.description = self.DEFAULT_DESCRIPTION
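A hedged construction sketch follows; the `llm_config` contents and the environment variable name are illustrative assumptions, not prescribed by this file (see `OpenAIWrapper.create` for the options actually accepted):

import os

from autogen.agentchat import AssistantAgent

# Assumed config shape; adapt to your provider.
assistant = AssistantAgent(
    name="assistant",
    llm_config={"model": "gpt-4", "api_key": os.environ["OPENAI_API_KEY"]},
)

# No description was passed and the default system message is in use, so the
# __init__ logic above falls back to DEFAULT_DESCRIPTION.
print(assistant.description)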
autogen/agentchat/chat.py
ADDED
@@ -0,0 +1,306 @@
+# Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
+# SPDX-License-Identifier: MIT
+import asyncio
+import datetime
+import logging
+import warnings
+from collections import abc, defaultdict
+from dataclasses import dataclass
+from functools import partial
+from typing import Any, Dict, List, Set, Tuple
+
+from ..formatting_utils import colored
+from ..io.base import IOStream
+from .utils import consolidate_chat_info
+
+logger = logging.getLogger(__name__)
+Prerequisite = Tuple[int, int]
+
+
+@dataclass
+class ChatResult:
+    """(Experimental) The result of a chat. Almost certain to be changed."""
+
+    chat_id: int = None
+    """chat id"""
+    chat_history: List[Dict[str, Any]] = None
+    """The chat history."""
+    summary: str = None
+    """A summary obtained from the chat."""
+    cost: Dict[str, dict] = None  # keys: "usage_including_cached_inference", "usage_excluding_cached_inference"
+    """The cost of the chat.
+    The value for each usage type is a dictionary containing cost information for that specific type.
+    - "usage_including_cached_inference": Cost information on the total usage, including the tokens in cached inference.
+    - "usage_excluding_cached_inference": Cost information on the usage of tokens, excluding the tokens in cache. No larger than "usage_including_cached_inference".
+    """
+    human_input: List[str] = None
+    """A list of human input solicited during the chat."""
+
+
+def _validate_recipients(chat_queue: List[Dict[str, Any]]) -> None:
+    """
+    Validate recipients exits and warn repetitive recipients.
+    """
+    receipts_set = set()
+    for chat_info in chat_queue:
+        assert "recipient" in chat_info, "recipient must be provided."
+        receipts_set.add(chat_info["recipient"])
+    if len(receipts_set) < len(chat_queue):
+        warnings.warn(
+            "Repetitive recipients detected: The chat history will be cleared by default if a recipient appears more than once. To retain the chat history, please set 'clear_history=False' in the configuration of the repeating agent.",
+            UserWarning,
+        )
+
+
+def __create_async_prerequisites(chat_queue: List[Dict[str, Any]]) -> List[Prerequisite]:
+    """
+    Create list of Prerequisite (prerequisite_chat_id, chat_id)
+    """
+    prerequisites = []
+    for chat_info in chat_queue:
+        if "chat_id" not in chat_info:
+            raise ValueError("Each chat must have a unique id for async multi-chat execution.")
+        chat_id = chat_info["chat_id"]
+        pre_chats = chat_info.get("prerequisites", [])
+        for pre_chat_id in pre_chats:
+            if not isinstance(pre_chat_id, int):
+                raise ValueError("Prerequisite chat id is not int.")
+            prerequisites.append((chat_id, pre_chat_id))
+    return prerequisites
+
+
+def __find_async_chat_order(chat_ids: Set[int], prerequisites: List[Prerequisite]) -> List[int]:
+    """Find chat order for async execution based on the prerequisite chats
+
+    args:
+        num_chats: number of chats
+        prerequisites: List of Prerequisite (prerequisite_chat_id, chat_id)
+
+    returns:
+        list: a list of chat_id in order.
+    """
+    edges = defaultdict(set)
+    indegree = defaultdict(int)
+    for pair in prerequisites:
+        chat, pre = pair[0], pair[1]
+        if chat not in edges[pre]:
+            indegree[chat] += 1
+            edges[pre].add(chat)
+    bfs = [i for i in chat_ids if i not in indegree]
+    chat_order = []
+    steps = len(indegree)
+    for _ in range(steps + 1):
+        if not bfs:
+            break
+        chat_order.extend(bfs)
+        nxt = []
+        for node in bfs:
+            if node in edges:
+                for course in edges[node]:
+                    indegree[course] -= 1
+                    if indegree[course] == 0:
+                        nxt.append(course)
+                        indegree.pop(course)
+                edges.pop(node)
+        bfs = nxt
+
+    if indegree:
+        return []
+    return chat_order
+
+
+def _post_process_carryover_item(carryover_item):
+    if isinstance(carryover_item, str):
+        return carryover_item
+    elif isinstance(carryover_item, dict) and "content" in carryover_item:
+        return str(carryover_item["content"])
+    else:
+        return str(carryover_item)
+
+
+def __post_carryover_processing(chat_info: Dict[str, Any]) -> None:
+    iostream = IOStream.get_default()
+
+    if "message" not in chat_info:
+        warnings.warn(
+            "message is not provided in a chat_queue entry. input() will be called to get the initial message.",
+            UserWarning,
+        )
+    print_carryover = (
+        ("\n").join([_post_process_carryover_item(t) for t in chat_info["carryover"]])
+        if isinstance(chat_info["carryover"], list)
+        else chat_info["carryover"]
+    )
+    message = chat_info.get("message")
+    if isinstance(message, str):
+        print_message = message
+    elif callable(message):
+        print_message = "Callable: " + message.__name__
+    elif isinstance(message, dict):
+        print_message = "Dict: " + str(message)
+    elif message is None:
+        print_message = "None"
+    iostream.print(colored("\n" + "*" * 80, "blue"), flush=True, sep="")
+    iostream.print(
+        colored(
+            "Starting a new chat....",
+            "blue",
+        ),
+        flush=True,
+    )
+    if chat_info.get("verbose", False):
+        iostream.print(colored("Message:\n" + print_message, "blue"), flush=True)
+        iostream.print(colored("Carryover:\n" + print_carryover, "blue"), flush=True)
+    iostream.print(colored("\n" + "*" * 80, "blue"), flush=True, sep="")
+
+
+def initiate_chats(chat_queue: List[Dict[str, Any]]) -> List[ChatResult]:
+    """Initiate a list of chats.
+    Args:
+        chat_queue (List[Dict]): A list of dictionaries containing the information about the chats.
+
+            Each dictionary should contain the input arguments for
+            [`ConversableAgent.initiate_chat`](/docs/reference/agentchat/conversable_agent#initiate_chat).
+            For example:
+                - `"sender"` - the sender agent.
+                - `"recipient"` - the recipient agent.
+                - `"clear_history"` (bool) - whether to clear the chat history with the agent.
+                    Default is True.
+                - `"silent"` (bool or None) - (Experimental) whether to print the messages in this
+                    conversation. Default is False.
+                - `"cache"` (Cache or None) - the cache client to use for this conversation.
+                    Default is None.
+                - `"max_turns"` (int or None) - maximum number of turns for the chat. If None, the chat
+                    will continue until a termination condition is met. Default is None.
+                - `"summary_method"` (str or callable) - a string or callable specifying the method to get
+                    a summary from the chat. Default is DEFAULT_summary_method, i.e., "last_msg".
+                - `"summary_args"` (dict) - a dictionary of arguments to be passed to the summary_method.
+                    Default is {}.
+                - `"message"` (str, callable or None) - if None, input() will be called to get the
+                    initial message.
+                - `**context` - additional context information to be passed to the chat.
+                    - `"carryover"` - It can be used to specify the carryover information to be passed
+                        to this chat. If provided, we will combine this carryover with the "message" content when
+                        generating the initial chat message in `generate_init_message`.
+                    - `"finished_chat_indexes_to_exclude_from_carryover"` - It can be used by specifying a list of indexes of the finished_chats list,
+                        from which to exclude the summaries for carryover. If 'finished_chat_indexes_to_exclude_from_carryover' is not provided or an empty list,
+                        then summary from all the finished chats will be taken.
+    Returns:
+        (list): a list of ChatResult objects corresponding to the finished chats in the chat_queue.
+    """
+
+    consolidate_chat_info(chat_queue)
+    _validate_recipients(chat_queue)
+    current_chat_queue = chat_queue.copy()
+    finished_chats = []
+    while current_chat_queue:
+        chat_info = current_chat_queue.pop(0)
+        _chat_carryover = chat_info.get("carryover", [])
+        finished_chat_indexes_to_exclude_from_carryover = chat_info.get(
+            "finished_chat_indexes_to_exclude_from_carryover", []
+        )
+
+        if isinstance(_chat_carryover, str):
+            _chat_carryover = [_chat_carryover]
+        chat_info["carryover"] = _chat_carryover + [
+            r.summary for i, r in enumerate(finished_chats) if i not in finished_chat_indexes_to_exclude_from_carryover
+        ]
+
+        if not chat_info.get("silent", False):
+            __post_carryover_processing(chat_info)
+
+        sender = chat_info["sender"]
+        chat_res = sender.initiate_chat(**chat_info)
+        finished_chats.append(chat_res)
+    return finished_chats
+
+
+def __system_now_str():
+    ct = datetime.datetime.now()
+    return f" System time at {ct}. "
+
+
+def _on_chat_future_done(chat_future: asyncio.Future, chat_id: int):
+    """
+    Update ChatResult when async Task for Chat is completed.
+    """
+    logger.debug(f"Update chat {chat_id} result on task completion." + __system_now_str())
+    chat_result = chat_future.result()
+    chat_result.chat_id = chat_id
+
+
+async def _dependent_chat_future(
+    chat_id: int, chat_info: Dict[str, Any], prerequisite_chat_futures: Dict[int, asyncio.Future]
+) -> asyncio.Task:
+    """
+    Create an async Task for each chat.
+    """
+    logger.debug(f"Create Task for chat {chat_id}." + __system_now_str())
+    _chat_carryover = chat_info.get("carryover", [])
+    finished_chat_indexes_to_exclude_from_carryover = chat_info.get(
+        "finished_chat_indexes_to_exclude_from_carryover", []
+    )
+    finished_chats = dict()
+    for chat in prerequisite_chat_futures:
+        chat_future = prerequisite_chat_futures[chat]
+        if chat_future.cancelled():
+            raise RuntimeError(f"Chat {chat} is cancelled.")
+
+        # wait for prerequisite chat results for the new chat carryover
+        finished_chats[chat] = await chat_future
+
+    if isinstance(_chat_carryover, str):
+        _chat_carryover = [_chat_carryover]
+    data = [
+        chat_result.summary
+        for chat_id, chat_result in finished_chats.items()
+        if chat_id not in finished_chat_indexes_to_exclude_from_carryover
+    ]
+    chat_info["carryover"] = _chat_carryover + data
+    if not chat_info.get("silent", False):
+        __post_carryover_processing(chat_info)
+
+    sender = chat_info["sender"]
+    chat_res_future = asyncio.create_task(sender.a_initiate_chat(**chat_info))
+    call_back_with_args = partial(_on_chat_future_done, chat_id=chat_id)
+    chat_res_future.add_done_callback(call_back_with_args)
+    logger.debug(f"Task for chat {chat_id} created." + __system_now_str())
+    return chat_res_future
+
+
+async def a_initiate_chats(chat_queue: List[Dict[str, Any]]) -> Dict[int, ChatResult]:
+    """(async) Initiate a list of chats.
+
+    args:
+        - Please refer to `initiate_chats`.
+
+
+    returns:
+        - (Dict): a dict of ChatId: ChatResult corresponding to the finished chats in the chat_queue.
+    """
+    consolidate_chat_info(chat_queue)
+    _validate_recipients(chat_queue)
+    chat_book = {chat_info["chat_id"]: chat_info for chat_info in chat_queue}
+    num_chats = chat_book.keys()
+    prerequisites = __create_async_prerequisites(chat_queue)
+    chat_order_by_id = __find_async_chat_order(num_chats, prerequisites)
+    finished_chat_futures = dict()
+    for chat_id in chat_order_by_id:
+        chat_info = chat_book[chat_id]
+        prerequisite_chat_ids = chat_info.get("prerequisites", [])
+        pre_chat_futures = dict()
+        for pre_chat_id in prerequisite_chat_ids:
+            pre_chat_future = finished_chat_futures[pre_chat_id]
+            pre_chat_futures[pre_chat_id] = pre_chat_future
+        current_chat_future = await _dependent_chat_future(chat_id, chat_info, pre_chat_futures)
+        finished_chat_futures[chat_id] = current_chat_future
+    await asyncio.gather(*list(finished_chat_futures.values()))
+    finished_chats = dict()
+    for chat in finished_chat_futures:
+        chat_result = finished_chat_futures[chat].result()
+        finished_chats[chat] = chat_result
+    return finished_chats
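To make the queue format concrete, here is a hedged sketch of a two-chat pipeline. The agents and messages are hypothetical; `llm_config=False` only keeps the snippet self-contained, and a real run would configure an LLM backend.

from autogen.agentchat import AssistantAgent, UserProxyAgent, initiate_chats

user = UserProxyAgent(name="user", human_input_mode="NEVER", code_execution_config=False)
researcher = AssistantAgent(name="researcher", llm_config=False)
writer = AssistantAgent(name="writer", llm_config=False)

results = initiate_chats(
    [
        {
            "sender": user,
            "recipient": researcher,
            "message": "Collect three facts about topic X.",
            "max_turns": 1,
            "summary_method": "last_msg",
        },
        {
            # carryover is extended automatically with the summary of the
            # finished chat above (see the while-loop in initiate_chats).
            "sender": user,
            "recipient": writer,
            "message": "Write a short paragraph from the collected facts.",
            "max_turns": 1,
        },
    ]
)
print(results[0].summary)

For `a_initiate_chats`, each queue entry additionally needs a unique `chat_id` and may declare `prerequisites` (a list of chat ids); the Kahn-style topological sort in `__find_async_chat_order` turns those into a schedulable order, returning an empty list if the dependencies form a cycle.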