ag2 0.9.9__py3-none-any.whl → 0.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (113)
  1. {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/METADATA +243 -214
  2. {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/RECORD +113 -87
  3. autogen/_website/generate_mkdocs.py +3 -3
  4. autogen/_website/notebook_processor.py +1 -1
  5. autogen/_website/utils.py +1 -1
  6. autogen/a2a/__init__.py +36 -0
  7. autogen/a2a/agent_executor.py +105 -0
  8. autogen/a2a/client.py +280 -0
  9. autogen/a2a/errors.py +18 -0
  10. autogen/a2a/httpx_client_factory.py +79 -0
  11. autogen/a2a/server.py +221 -0
  12. autogen/a2a/utils.py +165 -0
  13. autogen/agentchat/__init__.py +3 -0
  14. autogen/agentchat/agent.py +0 -2
  15. autogen/agentchat/assistant_agent.py +15 -15
  16. autogen/agentchat/chat.py +57 -41
  17. autogen/agentchat/contrib/agent_eval/criterion.py +1 -1
  18. autogen/agentchat/contrib/capabilities/text_compressors.py +5 -5
  19. autogen/agentchat/contrib/capabilities/tools_capability.py +1 -1
  20. autogen/agentchat/contrib/capabilities/transforms.py +1 -1
  21. autogen/agentchat/contrib/captainagent/agent_builder.py +1 -1
  22. autogen/agentchat/contrib/captainagent/captainagent.py +20 -19
  23. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +2 -5
  24. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +5 -5
  25. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +18 -17
  26. autogen/agentchat/contrib/llava_agent.py +1 -13
  27. autogen/agentchat/contrib/rag/mongodb_query_engine.py +2 -2
  28. autogen/agentchat/contrib/rag/query_engine.py +11 -11
  29. autogen/agentchat/contrib/retrieve_assistant_agent.py +3 -0
  30. autogen/agentchat/contrib/swarm_agent.py +3 -2
  31. autogen/agentchat/contrib/vectordb/couchbase.py +1 -1
  32. autogen/agentchat/contrib/vectordb/mongodb.py +1 -1
  33. autogen/agentchat/contrib/web_surfer.py +1 -1
  34. autogen/agentchat/conversable_agent.py +359 -150
  35. autogen/agentchat/group/context_expression.py +21 -21
  36. autogen/agentchat/group/group_tool_executor.py +46 -15
  37. autogen/agentchat/group/guardrails.py +41 -33
  38. autogen/agentchat/group/handoffs.py +11 -11
  39. autogen/agentchat/group/multi_agent_chat.py +56 -2
  40. autogen/agentchat/group/on_condition.py +11 -11
  41. autogen/agentchat/group/safeguards/__init__.py +21 -0
  42. autogen/agentchat/group/safeguards/api.py +241 -0
  43. autogen/agentchat/group/safeguards/enforcer.py +1158 -0
  44. autogen/agentchat/group/safeguards/events.py +119 -0
  45. autogen/agentchat/group/safeguards/validator.py +435 -0
  46. autogen/agentchat/groupchat.py +102 -49
  47. autogen/agentchat/realtime/experimental/clients/realtime_client.py +2 -2
  48. autogen/agentchat/realtime/experimental/function_observer.py +2 -3
  49. autogen/agentchat/realtime/experimental/realtime_agent.py +2 -3
  50. autogen/agentchat/realtime/experimental/realtime_swarm.py +22 -13
  51. autogen/agentchat/user_proxy_agent.py +55 -53
  52. autogen/agents/experimental/document_agent/document_agent.py +1 -10
  53. autogen/agents/experimental/document_agent/parser_utils.py +5 -1
  54. autogen/browser_utils.py +4 -4
  55. autogen/cache/abstract_cache_base.py +2 -6
  56. autogen/cache/disk_cache.py +1 -6
  57. autogen/cache/in_memory_cache.py +2 -6
  58. autogen/cache/redis_cache.py +1 -5
  59. autogen/coding/__init__.py +10 -2
  60. autogen/coding/base.py +2 -1
  61. autogen/coding/docker_commandline_code_executor.py +1 -6
  62. autogen/coding/factory.py +9 -0
  63. autogen/coding/jupyter/docker_jupyter_server.py +1 -7
  64. autogen/coding/jupyter/jupyter_client.py +2 -9
  65. autogen/coding/jupyter/jupyter_code_executor.py +2 -7
  66. autogen/coding/jupyter/local_jupyter_server.py +2 -6
  67. autogen/coding/local_commandline_code_executor.py +0 -65
  68. autogen/coding/yepcode_code_executor.py +197 -0
  69. autogen/environments/docker_python_environment.py +3 -3
  70. autogen/environments/system_python_environment.py +5 -5
  71. autogen/environments/venv_python_environment.py +5 -5
  72. autogen/events/agent_events.py +1 -1
  73. autogen/events/client_events.py +1 -1
  74. autogen/fast_depends/utils.py +10 -0
  75. autogen/graph_utils.py +5 -7
  76. autogen/import_utils.py +3 -1
  77. autogen/interop/pydantic_ai/pydantic_ai.py +8 -5
  78. autogen/io/processors/console_event_processor.py +8 -3
  79. autogen/llm_config/client.py +3 -2
  80. autogen/llm_config/config.py +168 -91
  81. autogen/llm_config/entry.py +38 -26
  82. autogen/llm_config/types.py +35 -0
  83. autogen/llm_config/utils.py +223 -0
  84. autogen/mcp/mcp_proxy/operation_grouping.py +48 -39
  85. autogen/messages/agent_messages.py +1 -1
  86. autogen/messages/client_messages.py +1 -1
  87. autogen/oai/__init__.py +8 -1
  88. autogen/oai/bedrock.py +0 -13
  89. autogen/oai/client.py +25 -11
  90. autogen/oai/client_utils.py +31 -1
  91. autogen/oai/cohere.py +4 -14
  92. autogen/oai/gemini.py +4 -6
  93. autogen/oai/gemini_types.py +1 -0
  94. autogen/oai/openai_utils.py +44 -115
  95. autogen/remote/__init__.py +18 -0
  96. autogen/remote/agent.py +199 -0
  97. autogen/remote/agent_service.py +142 -0
  98. autogen/remote/errors.py +17 -0
  99. autogen/remote/httpx_client_factory.py +131 -0
  100. autogen/remote/protocol.py +37 -0
  101. autogen/remote/retry.py +102 -0
  102. autogen/remote/runtime.py +96 -0
  103. autogen/testing/__init__.py +12 -0
  104. autogen/testing/messages.py +45 -0
  105. autogen/testing/test_agent.py +111 -0
  106. autogen/tools/dependency_injection.py +4 -8
  107. autogen/tools/experimental/reliable/reliable.py +3 -2
  108. autogen/tools/experimental/web_search_preview/web_search_preview.py +1 -1
  109. autogen/tools/function_utils.py +2 -1
  110. autogen/version.py +1 -1
  111. {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/WHEEL +0 -0
  112. {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/licenses/LICENSE +0 -0
  113. {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/licenses/NOTICE.md +0 -0
autogen/a2a/utils.py ADDED
@@ -0,0 +1,165 @@
+ # Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+ #
+ # SPDX-License-Identifier: Apache-2.0
+
+ from typing import Any, cast
+ from uuid import uuid4
+
+ from a2a.types import Artifact, DataPart, Message, Part, Role, TextPart
+ from a2a.utils import new_agent_parts_message, new_artifact
+
+ from autogen.remote.protocol import RequestMessage, ResponseMessage
+
+ CLIENT_TOOLS_KEY = "ag2_client_tools"
+ CONTEXT_KEY = "ag2_context_update"
+
+
+ def request_message_to_a2a(
+     request_message: RequestMessage,
+     context_id: str,
+ ) -> Message:
+     metadata: dict[str, Any] = {}
+     if request_message.client_tools:
+         metadata[CLIENT_TOOLS_KEY] = request_message.client_tools
+     if request_message.context:
+         metadata[CONTEXT_KEY] = request_message.context
+
+     return Message(
+         role=Role.user,
+         parts=[message_to_part(message) for message in request_message.messages],
+         message_id=uuid4().hex,
+         context_id=context_id,
+         metadata=metadata,
+     )
+
+
+ def request_message_from_a2a(message: Message) -> RequestMessage:
+     metadata = message.metadata or {}
+     return RequestMessage(
+         messages=[message_from_part(part) for part in message.parts],
+         context=metadata.get(CONTEXT_KEY),
+         client_tools=metadata.get(CLIENT_TOOLS_KEY, []),
+     )
+
+
+ def response_message_from_a2a_artifacts(artifacts: list[Artifact] | None) -> ResponseMessage | None:
+     if not artifacts:
+         return None
+
+     if len(artifacts) > 1:
+         raise NotImplementedError("Multiple artifacts are not supported")
+
+     artifact = artifacts[-1]
+
+     if not artifact.parts:
+         return None
+
+     if len(artifact.parts) > 1:
+         raise NotImplementedError("Multiple parts are not supported")
+
+     return ResponseMessage(
+         messages=[message_from_part(artifact.parts[-1])],
+         context=(artifact.metadata or {}).get(CONTEXT_KEY),
+     )
+
+
+ def response_message_from_a2a_message(message: Message) -> ResponseMessage | None:
+     text_parts: list[Part] = []
+     data_parts: list[Part] = []
+     for part in message.parts:
+         if isinstance(part.root, TextPart):
+             text_parts.append(part)
+         elif isinstance(part.root, DataPart):
+             data_parts.append(part)
+         else:
+             raise NotImplementedError(f"Unsupported part type: {type(part.root)}")
+
+     tpn = len(text_parts)
+     if dpn := len(data_parts):
+         if dpn > 1:
+             raise NotImplementedError("Multiple data parts are not supported")
+
+         if tpn:
+             raise NotImplementedError("Data parts and text parts are not supported together")
+
+         messages = [message_from_part(data_parts[0])]
+     elif tpn == 1:
+         messages = [message_from_part(text_parts[0])]
+     else:
+         messages = [{"content": "\n".join(cast(TextPart, t.root).text for t in text_parts)}]
+
+     return ResponseMessage(
+         messages=messages,
+         context=(message.metadata or {}).get(CONTEXT_KEY),
+     )
+
+
+ def response_message_to_a2a(
+     result: ResponseMessage | None,
+     context_id: str | None,
+     task_id: str | None,
+ ) -> tuple[Artifact, list[Message]]:
+     # mypy ignores could be removed after
+     # https://github.com/a2aproject/a2a-python/pull/503
+
+     if not result:
+         return new_artifact(
+             name="result",
+             parts=[],
+             description=None,  # type: ignore[arg-type]
+         ), []
+
+     artifact = new_artifact(
+         name="result",
+         parts=[message_to_part(result.messages[-1])],
+         description=None,  # type: ignore[arg-type]
+     )
+
+     if result.context:
+         artifact.metadata = {CONTEXT_KEY: result.context}
+
+     return (
+         artifact,
+         [
+             new_agent_parts_message(
+                 parts=[message_to_part(m) for m in result.messages],
+                 context_id=context_id,
+                 task_id=task_id,
+             ),
+         ],
+     )
+
+
+ def message_to_part(message: dict[str, Any]) -> Part:
+     message = message.copy()
+     text = message.pop("content", "") or ""
+     return Part(
+         root=TextPart(
+             text=text,
+             metadata=message or None,
+         )
+     )
+
+
+ def message_from_part(part: Part) -> dict[str, Any]:
+     root = part.root
+
+     if isinstance(root, TextPart):
+         return {
+             **(root.metadata or {}),
+             "content": root.text,
+         }
+
+     elif isinstance(root, DataPart):
+         if (  # pydantic-ai specific
+             set(root.data.keys()) == {"result"}
+             and root.metadata
+             and "json_schema" in root.metadata
+             and isinstance(data := root.data["result"], dict)
+         ):
+             return data
+
+         return root.data
+
+     else:
+         raise NotImplementedError(f"Unsupported part type: {type(part.root)}")
autogen/agentchat/__init__.py CHANGED
@@ -15,6 +15,7 @@ from .contrib.swarm_agent import (
      run_swarm,
  )
  from .conversable_agent import ConversableAgent, UpdateSystemMessage, register_function
+ from .group import ContextVariables, ReplyResult
  from .group.multi_agent_chat import a_initiate_group_chat, a_run_group_chat, initiate_group_chat, run_group_chat
  from .groupchat import GroupChat, GroupChatManager
  from .user_proxy_agent import UserProxyAgent
@@ -24,10 +25,12 @@ __all__ = [
      "Agent",
      "AssistantAgent",
      "ChatResult",
+     "ContextVariables",
      "ConversableAgent",
      "GroupChat",
      "GroupChatManager",
      "LLMAgent",
+     "ReplyResult",
      "UpdateSystemMessage",
      "UserProxyAgent",
      "a_initiate_chats",
autogen/agentchat/agent.py CHANGED
@@ -105,7 +105,6 @@ class Agent(Protocol):
          self,
          messages: list[dict[str, Any]] | None = None,
          sender: Optional["Agent"] = None,
-         **kwargs: Any,
      ) -> str | dict[str, Any] | None:
          """Generate a reply based on the received messages.

@@ -124,7 +123,6 @@
          self,
          messages: list[dict[str, Any]] | None = None,
          sender: Optional["Agent"] = None,
-         **kwargs: Any,
      ) -> str | dict[str, Any] | None:
          """(Async) Generate a reply based on the received messages.

autogen/agentchat/assistant_agent.py CHANGED
@@ -19,10 +19,10 @@ class AssistantAgent(ConversableAgent):

  AssistantAgent is a subclass of ConversableAgent configured with a default system message.
  The default system message is designed to solve a task with LLM,
- including suggesting python code blocks and debugging.
- `human_input_mode` is default to "NEVER"
- and `code_execution_config` is default to False.
- This agent doesn't execute code by default, and expects the user to execute the code.
+ including suggesting python code blocks and debugging. \n
+ `human_input_mode` is default to "NEVER" \n
+ and `code_execution_config` is default to False. \n
+ This agent doesn't execute code by default, and expects the user to execute the code. \n
  """

  DEFAULT_SYSTEM_MESSAGE = """You are a helpful AI assistant.
@@ -52,20 +52,20 @@ Reply "TERMINATE" in the end when everything is done.
  **kwargs: Any,
  ):
  """Args:
- name (str): agent name.
- system_message (str): system message for the ChatCompletion inference.
+ - name (str): agent name. \n
+ - system_message (str): system message for the ChatCompletion inference. \n
  Please override this attribute if you want to reprogram the agent.
- llm_config (dict or False or None): llm inference configuration.
- Please refer to [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create)
- for available options.
- is_termination_msg (function): a function that takes a message in the form of a dictionary
+ - llm_config (dict or False or None): llm inference configuration. \n
+ Please refer to [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create) \n
+ for available options. \n
+ - is_termination_msg (function): a function that takes a message in the form of a dictionary
  and returns a boolean value indicating if this received message is a termination message.
- The dict can contain the following keys: "content", "role", "name", "function_call".
- max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
+ The dict can contain the following keys: "content", "role", "name", "function_call". \n
+ - max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
  default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
- The limit only plays a role when human_input_mode is not "ALWAYS".
- **kwargs (dict): Please refer to other kwargs in
- [ConversableAgent](https://docs.ag2.ai/latest/docs/api-reference/autogen/ConversableAgent).
+ The limit only plays a role when human_input_mode is not "ALWAYS". \n
+ - **kwargs (dict): Please refer to other kwargs in
+ [ConversableAgent](https://docs.ag2.ai/latest/docs/api-reference/autogen/ConversableAgent). \n
  """
  super().__init__(
  name,
autogen/agentchat/chat.py CHANGED
@@ -7,12 +7,14 @@
  import asyncio
  import datetime
  import logging
+ import uuid
  import warnings
  from collections import defaultdict
- from dataclasses import dataclass
+ from dataclasses import dataclass, field
  from functools import partial
- from typing import Any
+ from typing import Any, TypedDict

+ from ..code_utils import content_str
  from ..doc_utils import export_module
  from ..events.agent_events import PostCarryoverProcessingEvent
  from ..io.base import IOStream
@@ -24,26 +26,38 @@ Prerequisite = tuple[int, int]
  __all__ = ["ChatResult", "a_initiate_chats", "initiate_chats"]


+ class CostDict(TypedDict):
+     usage_including_cached_inference: dict[str, Any]
+     usage_excluding_cached_inference: dict[str, Any]
+
+
  @dataclass
  @export_module("autogen")
  class ChatResult:
      """(Experimental) The result of a chat. Almost certain to be changed."""

-     chat_id: int = None
+     chat_id: int = field(default_factory=lambda: uuid.uuid4().int)
      """chat id"""
-     chat_history: list[dict[str, Any]] = None
+
+     chat_history: list[dict[str, Any]] = field(default_factory=list)
      """The chat history."""
-     summary: str = None
+
+     summary: str = ""
      """A summary obtained from the chat."""
-     cost: dict[str, dict[str, Any]] = (
-         None  # keys: "usage_including_cached_inference", "usage_excluding_cached_inference"
+
+     cost: CostDict = field(
+         default_factory=lambda: {
+             "usage_including_cached_inference": {},
+             "usage_excluding_cached_inference": {},
+         }
      )
      """The cost of the chat.
      The value for each usage type is a dictionary containing cost information for that specific type.
      - "usage_including_cached_inference": Cost information on the total usage, including the tokens in cached inference.
      - "usage_excluding_cached_inference": Cost information on the usage of tokens, excluding the tokens in cache. No larger than "usage_including_cached_inference".
      """
-     human_input: list[str] = None
+
+     human_input: list[str] = field(default_factory=list)
      """A list of human input solicited during the chat."""

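The hunk above replaces the `None` field defaults with populated defaults, so a bare `ChatResult()` no longer requires per-field `None` checks. A small sketch of the new behavior (the asserts follow directly from the default factories shown above):

```python
from autogen.agentchat.chat import ChatResult

result = ChatResult()
assert result.chat_history == []   # was None in 0.9.9
assert result.summary == ""        # was None in 0.9.9
assert result.human_input == []    # was None in 0.9.9
assert set(result.cost) == {"usage_including_cached_inference", "usage_excluding_cached_inference"}
print(result.chat_id)              # now a generated uuid4 integer rather than None
```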
@@ -119,7 +133,10 @@ def _post_process_carryover_item(carryover_item):
      if isinstance(carryover_item, str):
          return carryover_item
      elif isinstance(carryover_item, dict) and "content" in carryover_item:
-         return str(carryover_item["content"])
+         content_value = carryover_item.get("content")
+         if isinstance(content_value, (str, list)) or content_value is None:
+             return content_str(content_value)
+         return str(content_value)
      else:
          return str(carryover_item)

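The carryover change routes `str`, `list`, and `None` content through `content_str`, so multimodal content lists are flattened to text instead of being stringified as a Python list. A hedged sketch using the private helper changed above (the exact rendering of non-text items is up to `content_str`):

```python
from autogen.agentchat.chat import _post_process_carryover_item

carryover_item = {
    "content": [{"type": "text", "text": "Summarize the quarterly report"}],
    "role": "user",
}

# 0.10.0: the content list is flattened by content_str into plain text.
# 0.9.9: str(...) produced the Python repr of the list instead.
print(_post_process_carryover_item(carryover_item))
```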
@@ -141,37 +158,36 @@ def initiate_chats(chat_queue: list[dict[str, Any]]) -> list[ChatResult]:
  """Initiate a list of chats.

  Args:
- chat_queue (List[Dict]): A list of dictionaries containing the information about the chats.
-
+ chat_queue (List[Dict]): A list of dictionaries containing the information about the chats.\n
  Each dictionary should contain the input arguments for
- [`ConversableAgent.initiate_chat`](../ConversableAgent#initiate-chat).
- For example:
- - `"sender"` - the sender agent.
- - `"recipient"` - the recipient agent.
- - `"clear_history"` (bool) - whether to clear the chat history with the agent.
- Default is True.
- - `"silent"` (bool or None) - (Experimental) whether to print the messages in this
- conversation. Default is False.
- - `"cache"` (Cache or None) - the cache client to use for this conversation.
- Default is None.
- - `"max_turns"` (int or None) - maximum number of turns for the chat. If None, the chat
- will continue until a termination condition is met. Default is None.
- - `"summary_method"` (str or callable) - a string or callable specifying the method to get
- a summary from the chat. Default is DEFAULT_summary_method, i.e., "last_msg".
- - `"summary_args"` (dict) - a dictionary of arguments to be passed to the summary_method.
- Default is {}.
- - `"message"` (str, callable or None) - if None, input() will be called to get the
- initial message.
- - `**context` - additional context information to be passed to the chat.
- - `"carryover"` - It can be used to specify the carryover information to be passed
- to this chat. If provided, we will combine this carryover with the "message" content when
- generating the initial chat message in `generate_init_message`.
- - `"finished_chat_indexes_to_exclude_from_carryover"` - It can be used by specifying a list of indexes of the finished_chats list,
- from which to exclude the summaries for carryover. If 'finished_chat_indexes_to_exclude_from_carryover' is not provided or an empty list,
- then summary from all the finished chats will be taken.
+ [`ConversableAgent.initiate_chat`](../ConversableAgent#initiate-chat).\n
+ For example:\n
+ - `"sender"` - the sender agent.\n
+ - `"recipient"` - the recipient agent.\n
+ - `"clear_history"` (bool) - whether to clear the chat history with the agent.\n
+ Default is True.\n
+ - `"silent"` (bool or None) - (Experimental) whether to print the messages in this\n
+ conversation. Default is False.\n
+ - `"cache"` (Cache or None) - the cache client to use for this conversation.\n
+ Default is None.\n
+ - `"max_turns"` (int or None) - maximum number of turns for the chat. If None, the chat\n
+ will continue until a termination condition is met. Default is None.\n
+ - `"summary_method"` (str or callable) - a string or callable specifying the method to get\n
+ a summary from the chat. Default is DEFAULT_summary_method, i.e., "last_msg".\n
+ - `"summary_args"` (dict) - a dictionary of arguments to be passed to the summary_method.\n
+ Default is {}.\n
+ - `"message"` (str, callable or None) - if None, input() will be called to get the\n
+ initial message.\n
+ - `**context` - additional context information to be passed to the chat.\n
+ - `"carryover"` - It can be used to specify the carryover information to be passed\n
+ to this chat. If provided, we will combine this carryover with the "message" content when\n
+ generating the initial chat message in `generate_init_message`.\n
+ - `"finished_chat_indexes_to_exclude_from_carryover"` - It can be used by specifying a list of indexes of the finished_chats list,\n
+ from which to exclude the summaries for carryover. If 'finished_chat_indexes_to_exclude_from_carryover' is not provided or an empty list,\n
+ then summary from all the finished chats will be taken.\n

  Returns:
- (list): a list of ChatResult objects corresponding to the finished chats in the chat_queue.
+ (list): a list of ChatResult objects corresponding to the finished chats in the chat_queue.\n
  """
  consolidate_chat_info(chat_queue)
  _validate_recipients(chat_queue)
@@ -220,7 +236,7 @@ async def _dependent_chat_future(
      finished_chat_indexes_to_exclude_from_carryover = chat_info.get(
          "finished_chat_indexes_to_exclude_from_carryover", []
      )
-     finished_chats = dict()
+     finished_chats = {}
      for chat in prerequisite_chat_futures:
          chat_future = prerequisite_chat_futures[chat]
          if chat_future.cancelled():
@@ -291,18 +307,18 @@ async def a_initiate_chats(chat_queue: list[dict[str, Any]]) -> dict[int, ChatRe
      num_chats = chat_book.keys()
      prerequisites = __create_async_prerequisites(chat_queue)
      chat_order_by_id = __find_async_chat_order(num_chats, prerequisites)
-     finished_chat_futures = dict()
+     finished_chat_futures = {}
      for chat_id in chat_order_by_id:
          chat_info = chat_book[chat_id]
          prerequisite_chat_ids = chat_info.get("prerequisites", [])
-         pre_chat_futures = dict()
+         pre_chat_futures = {}
          for pre_chat_id in prerequisite_chat_ids:
              pre_chat_future = finished_chat_futures[pre_chat_id]
              pre_chat_futures[pre_chat_id] = pre_chat_future
          current_chat_future = await _dependent_chat_future(chat_id, chat_info, pre_chat_futures)
          finished_chat_futures[chat_id] = current_chat_future
      await asyncio.gather(*list(finished_chat_futures.values()))
-     finished_chats = dict()
+     finished_chats = {}
      for chat in finished_chat_futures:
          chat_result = finished_chat_futures[chat].result()
          finished_chats[chat] = chat_result
autogen/agentchat/contrib/agent_eval/criterion.py CHANGED
@@ -17,7 +17,7 @@ class Criterion(BaseModel):
      name: str
      description: str
      accepted_values: list[str]
-     sub_criteria: list[Criterion] = list()
+     sub_criteria: list[Criterion] = []

      @staticmethod
      def parse_json_str(criteria: str):
autogen/agentchat/contrib/capabilities/text_compressors.py CHANGED
@@ -34,11 +34,11 @@ class LLMLingua:

      def __init__(
          self,
-         prompt_compressor_kwargs: dict = dict(
-             model_name="microsoft/llmlingua-2-bert-base-multilingual-cased-meetingbank",
-             use_llmlingua2=True,
-             device_map="cpu",
-         ),
+         prompt_compressor_kwargs: dict = {
+             "model_name": "microsoft/llmlingua-2-bert-base-multilingual-cased-meetingbank",
+             "use_llmlingua2": True,
+             "device_map": "cpu",
+         },
          structured_compression: bool = False,
      ) -> None:
          """Args:
autogen/agentchat/contrib/capabilities/tools_capability.py CHANGED
@@ -14,7 +14,7 @@ class ToolsCapability:
      """

      def __init__(self, tool_list: list[Tool]):
-         self.tools = [tool for tool in tool_list]
+         self.tools = list(tool_list)

      def add_to_agent(self, agent: ConversableAgent):
          """Add tools to the given agent."""
autogen/agentchat/contrib/capabilities/transforms.py CHANGED
@@ -337,7 +337,7 @@ class TextMessageCompressor:
          self,
          text_compressor: TextCompressor | None = None,
          min_tokens: int | None = None,
-         compression_params: dict = dict(),
+         compression_params: dict = {},
          cache: AbstractCache | None = None,
          filter_dict: dict[str, Any] | None = None,
          exclude_filter: bool = True,
autogen/agentchat/contrib/captainagent/agent_builder.py CHANGED
@@ -380,7 +380,7 @@ Match roles in the role set to each expert in expert set.

      def clear_all_agents(self, recycle_endpoint: bool | None = True):
          """Clear all cached agents."""
-         for agent_name in [agent_name for agent_name in self.agent_procs_assign]:
+         for agent_name in list(self.agent_procs_assign):
              self.clear_agent(agent_name, recycle_endpoint)
          print(colored("All agents have been cleared.", "yellow"), flush=True)

autogen/agentchat/contrib/captainagent/captainagent.py CHANGED
@@ -149,25 +149,26 @@ Note that the previous experts will forget everything after you obtain the respo
  description: str | None = DEFAULT_DESCRIPTION,
  **kwargs: Any,
  ):
- """Args:
- name (str): agent name.
- system_message (str): system message for the ChatCompletion inference.
- Please override this attribute if you want to reprogram the agent.
- llm_config (LLMConfig or dict or False): llm inference configuration.
- Please refer to [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create) for available options.
- is_termination_msg (function): a function that takes a message in the form of a dictionary
- and returns a boolean value indicating if this received message is a termination message.
- The dict can contain the following keys: "content", "role", "name", "function_call".
- max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.
- default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).
- The limit only plays a role when human_input_mode is not "ALWAYS".
- agent_lib (str): the path or a JSON file of the agent library for retrieving the nested chat instantiated by CaptainAgent.
- tool_lib (str): the path to the tool library for retrieving the tools used in the nested chat instantiated by CaptainAgent.
- nested_config (dict): the configuration for the nested chat instantiated by CaptainAgent.
- A full list of keys and their functionalities can be found in [docs](https://docs.ag2.ai/latest/docs/user-guide/reference-agents/captainagent).
- agent_config_save_path (str): the path to save the generated or retrieved agent configuration.
- **kwargs (dict): Please refer to other kwargs in
- [ConversableAgent](https://github.com/ag2ai/ag2/blob/main/autogen/agentchat/conversable_agent.py#L74).
+ """
+ Args:\n
+ name (str): agent name.\n
+ system_message (str): system message for the ChatCompletion inference.\n
+ Please override this attribute if you want to reprogram the agent.\n
+ llm_config (LLMConfig or dict or False): llm inference configuration.\n
+ Please refer to [OpenAIWrapper.create](https://docs.ag2.ai/latest/docs/api-reference/autogen/OpenAIWrapper/#autogen.OpenAIWrapper.create) for available options.\n
+ is_termination_msg (function): a function that takes a message in the form of a dictionary\n
+ and returns a boolean value indicating if this received message is a termination message.\n
+ The dict can contain the following keys: "content", "role", "name", "function_call".\n
+ max_consecutive_auto_reply (int): the maximum number of consecutive auto replies.\n
+ default to None (no limit provided, class attribute MAX_CONSECUTIVE_AUTO_REPLY will be used as the limit in this case).\n
+ The limit only plays a role when human_input_mode is not "ALWAYS".\n
+ agent_lib (str): the path or a JSON file of the agent library for retrieving the nested chat instantiated by CaptainAgent.\n
+ tool_lib (str): the path to the tool library for retrieving the tools used in the nested chat instantiated by CaptainAgent.\n
+ nested_config (dict): the configuration for the nested chat instantiated by CaptainAgent.\n
+ A full list of keys and their functionalities can be found in [docs](https://docs.ag2.ai/latest/docs/user-guide/reference-agents/captainagent).\n
+ agent_config_save_path (str): the path to save the generated or retrieved agent configuration.\n
+ **kwargs (dict): Please refer to other kwargs in\n
+ [ConversableAgent](https://github.com/ag2ai/ag2/blob/main/autogen/agentchat/conversable_agent.py#L74).\n
  """
  super().__init__(
  name,
autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py CHANGED
@@ -55,7 +55,7 @@ class FalkorGraphQueryEngine:
          self.username = username
          self.password = password
          self.model = model or OpenAiGenerativeModel("gpt-4o")
-         self.model_config = KnowledgeGraphModelConfig.with_model(model)
+         self.model_config = KnowledgeGraphModelConfig.with_model(self.model)
          self.ontology = ontology
          self.knowledge_graph: KnowledgeGraph | None = None  # type: ignore[no-any-unimported]
          self.falkordb = FalkorDB(host=self.host, port=self.port, username=self.username, password=self.password)
@@ -139,9 +139,6 @@ class FalkorGraphQueryEngine:

          response = self._chat_session.send_message(question)

-         # History will be considered when querying by setting the last_answer
-         self._chat_session.last_answer = response["response"]
-
          return GraphStoreQueryResult(answer=response["response"], results=[])

      def delete(self) -> bool:
@@ -167,4 +164,4 @@ class FalkorGraphQueryEngine:
          if self.ontology_table_name not in self.falkordb.list_graphs():
              raise ValueError(f"Knowledge graph {self.name} has not been created.")
          graph = self.__get_ontology_storage_graph()
-         return Ontology.from_graph(graph)
+         return Ontology.from_schema_graph(graph)
autogen/agentchat/contrib/graph_rag/graph_rag_capability.py CHANGED
@@ -14,10 +14,10 @@ __all__ = ["GraphRagCapability"]
  class GraphRagCapability(AgentCapability):
  """A graph-based RAG capability uses a graph query engine to give a conversable agent the graph-based RAG ability.

- An agent class with graph-based RAG capability could
- 1. create a graph in the underlying database with input documents.
- 2. retrieved relevant information based on messages received by the agent.
- 3. generate answers from retrieved information and send messages back.
+ An agent class with graph-based RAG capability could:\n
+ 1. create a graph in the underlying database with input documents.\n
+ 2. retrieved relevant information based on messages received by the agent.\n
+ 3. generate answers from retrieved information and send messages back.\n

  For example,
  ```python
@@ -41,7 +41,7 @@ class GraphRagCapability(AgentCapability):
  user_proxy.initiate_chat(graph_rag_agent, message="Name a few actors who've played in 'The Matrix'")

  # ChatResult(
- # chat_id=None,
+ # chat_id=uuid.uuid4().int,
  # chat_history=[
  # {'content': 'Name a few actors who've played in \'The Matrix\'', 'role': 'graph_rag_agent'},
  # {'content': 'A few actors who have played in The Matrix are:
autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py CHANGED
@@ -28,23 +28,24 @@ with optional_import_block():

  @require_optional_import("llama_index", "neo4j")
  class Neo4jGraphQueryEngine:
- """This class serves as a wrapper for a property graph query engine backed by LlamaIndex and Neo4j,
- facilitating the creating, connecting, updating, and querying of LlamaIndex property graphs.
-
- It builds a property graph Index from input documents,
- storing and retrieving data from the property graph in the Neo4j database.
-
- It extracts triplets, i.e., [entity] -> [relationship] -> [entity] sets,
- from the input documents using llamIndex extractors.
-
- Users can provide custom entities, relationships, and schema to guide the extraction process.
-
- If strict is True, the engine will extract triplets following the schema
- of allowed relationships for each entity specified in the schema.
-
- It also leverages LlamaIndex's chat engine which has a conversation history internally to provide context-aware responses.
-
- For usage, please refer to example notebook/agentchat_graph_rag_neo4j.ipynb
+ """
+ This class serves as a wrapper for a property graph query engine backed by LlamaIndex and Neo4j,\n
+ facilitating the creating, connecting, updating, and querying of LlamaIndex property graphs.\n
+ \n
+ It builds a property graph Index from input documents,\n
+ storing and retrieving data from the property graph in the Neo4j database.\n
+ \n
+ It extracts triplets, i.e., [entity] -> [relationship] -> [entity] sets,\n
+ from the input documents using llamIndex extractors.\n
+ \n
+ Users can provide custom entities, relationships, and schema to guide the extraction process.\n
+ \n
+ If strict is True, the engine will extract triplets following the schema\n
+ of allowed relationships for each entity specified in the schema.\n
+ \n
+ It also leverages LlamaIndex's chat engine which has a conversation history internally to provide context-aware responses.\n
+ \n
+ For usage, please refer to example notebook/agentchat_graph_rag_neo4j.ipynb\n
  """

  def __init__(  # type: ignore[no-any-unimported]