ag2 0.9.7__py3-none-any.whl → 0.9.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ag2 has been flagged by the registry; consult the registry's advisory page for details.

Files changed (236)
  1. {ag2-0.9.7.dist-info → ag2-0.9.9.dist-info}/METADATA +102 -75
  2. ag2-0.9.9.dist-info/RECORD +387 -0
  3. autogen/__init__.py +1 -2
  4. autogen/_website/generate_api_references.py +4 -5
  5. autogen/_website/generate_mkdocs.py +9 -15
  6. autogen/_website/notebook_processor.py +13 -14
  7. autogen/_website/process_notebooks.py +10 -10
  8. autogen/_website/utils.py +5 -4
  9. autogen/agentchat/agent.py +13 -13
  10. autogen/agentchat/assistant_agent.py +7 -6
  11. autogen/agentchat/contrib/agent_eval/agent_eval.py +3 -3
  12. autogen/agentchat/contrib/agent_eval/critic_agent.py +3 -3
  13. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +3 -3
  14. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +3 -3
  15. autogen/agentchat/contrib/agent_optimizer.py +3 -3
  16. autogen/agentchat/contrib/capabilities/generate_images.py +11 -11
  17. autogen/agentchat/contrib/capabilities/teachability.py +15 -15
  18. autogen/agentchat/contrib/capabilities/transforms.py +17 -18
  19. autogen/agentchat/contrib/capabilities/transforms_util.py +5 -5
  20. autogen/agentchat/contrib/capabilities/vision_capability.py +4 -3
  21. autogen/agentchat/contrib/captainagent/agent_builder.py +30 -30
  22. autogen/agentchat/contrib/captainagent/captainagent.py +22 -21
  23. autogen/agentchat/contrib/captainagent/tool_retriever.py +2 -3
  24. autogen/agentchat/contrib/gpt_assistant_agent.py +9 -9
  25. autogen/agentchat/contrib/graph_rag/document.py +3 -3
  26. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +3 -3
  27. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +6 -6
  28. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +3 -3
  29. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +5 -11
  30. autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +6 -6
  31. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +7 -7
  32. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +6 -6
  33. autogen/agentchat/contrib/img_utils.py +1 -1
  34. autogen/agentchat/contrib/llamaindex_conversable_agent.py +11 -11
  35. autogen/agentchat/contrib/llava_agent.py +18 -4
  36. autogen/agentchat/contrib/math_user_proxy_agent.py +11 -11
  37. autogen/agentchat/contrib/multimodal_conversable_agent.py +8 -8
  38. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +6 -5
  39. autogen/agentchat/contrib/rag/chromadb_query_engine.py +22 -26
  40. autogen/agentchat/contrib/rag/llamaindex_query_engine.py +14 -17
  41. autogen/agentchat/contrib/rag/mongodb_query_engine.py +27 -37
  42. autogen/agentchat/contrib/rag/query_engine.py +7 -5
  43. autogen/agentchat/contrib/retrieve_assistant_agent.py +5 -5
  44. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +8 -7
  45. autogen/agentchat/contrib/society_of_mind_agent.py +15 -14
  46. autogen/agentchat/contrib/swarm_agent.py +76 -98
  47. autogen/agentchat/contrib/text_analyzer_agent.py +7 -7
  48. autogen/agentchat/contrib/vectordb/base.py +10 -18
  49. autogen/agentchat/contrib/vectordb/chromadb.py +2 -1
  50. autogen/agentchat/contrib/vectordb/couchbase.py +18 -20
  51. autogen/agentchat/contrib/vectordb/mongodb.py +6 -5
  52. autogen/agentchat/contrib/vectordb/pgvectordb.py +40 -41
  53. autogen/agentchat/contrib/vectordb/qdrant.py +5 -5
  54. autogen/agentchat/contrib/web_surfer.py +20 -19
  55. autogen/agentchat/conversable_agent.py +292 -290
  56. autogen/agentchat/group/context_str.py +1 -3
  57. autogen/agentchat/group/context_variables.py +15 -25
  58. autogen/agentchat/group/group_tool_executor.py +10 -10
  59. autogen/agentchat/group/group_utils.py +15 -15
  60. autogen/agentchat/group/guardrails.py +7 -7
  61. autogen/agentchat/group/handoffs.py +19 -36
  62. autogen/agentchat/group/multi_agent_chat.py +7 -7
  63. autogen/agentchat/group/on_condition.py +4 -7
  64. autogen/agentchat/group/on_context_condition.py +4 -7
  65. autogen/agentchat/group/patterns/auto.py +8 -7
  66. autogen/agentchat/group/patterns/manual.py +7 -6
  67. autogen/agentchat/group/patterns/pattern.py +13 -12
  68. autogen/agentchat/group/patterns/random.py +3 -3
  69. autogen/agentchat/group/patterns/round_robin.py +3 -3
  70. autogen/agentchat/group/reply_result.py +2 -4
  71. autogen/agentchat/group/speaker_selection_result.py +5 -5
  72. autogen/agentchat/group/targets/group_chat_target.py +7 -6
  73. autogen/agentchat/group/targets/group_manager_target.py +4 -4
  74. autogen/agentchat/group/targets/transition_target.py +2 -1
  75. autogen/agentchat/groupchat.py +60 -63
  76. autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +4 -4
  77. autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +4 -4
  78. autogen/agentchat/realtime/experimental/clients/gemini/client.py +7 -7
  79. autogen/agentchat/realtime/experimental/clients/oai/base_client.py +8 -8
  80. autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +6 -6
  81. autogen/agentchat/realtime/experimental/clients/realtime_client.py +10 -9
  82. autogen/agentchat/realtime/experimental/realtime_agent.py +10 -9
  83. autogen/agentchat/realtime/experimental/realtime_observer.py +3 -3
  84. autogen/agentchat/realtime/experimental/realtime_swarm.py +44 -44
  85. autogen/agentchat/user_proxy_agent.py +10 -9
  86. autogen/agentchat/utils.py +3 -3
  87. autogen/agents/contrib/time/time_reply_agent.py +6 -5
  88. autogen/agents/contrib/time/time_tool_agent.py +2 -1
  89. autogen/agents/experimental/deep_research/deep_research.py +3 -3
  90. autogen/agents/experimental/discord/discord.py +2 -2
  91. autogen/agents/experimental/document_agent/chroma_query_engine.py +29 -44
  92. autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +9 -14
  93. autogen/agents/experimental/document_agent/document_agent.py +15 -16
  94. autogen/agents/experimental/document_agent/document_conditions.py +3 -3
  95. autogen/agents/experimental/document_agent/document_utils.py +5 -9
  96. autogen/agents/experimental/document_agent/inmemory_query_engine.py +14 -20
  97. autogen/agents/experimental/document_agent/parser_utils.py +4 -4
  98. autogen/agents/experimental/document_agent/url_utils.py +14 -23
  99. autogen/agents/experimental/reasoning/reasoning_agent.py +33 -33
  100. autogen/agents/experimental/slack/slack.py +2 -2
  101. autogen/agents/experimental/telegram/telegram.py +2 -3
  102. autogen/agents/experimental/websurfer/websurfer.py +4 -4
  103. autogen/agents/experimental/wikipedia/wikipedia.py +5 -7
  104. autogen/browser_utils.py +8 -8
  105. autogen/cache/abstract_cache_base.py +5 -5
  106. autogen/cache/cache.py +12 -12
  107. autogen/cache/cache_factory.py +4 -4
  108. autogen/cache/cosmos_db_cache.py +9 -9
  109. autogen/cache/disk_cache.py +6 -6
  110. autogen/cache/in_memory_cache.py +4 -4
  111. autogen/cache/redis_cache.py +4 -4
  112. autogen/code_utils.py +18 -18
  113. autogen/coding/base.py +6 -6
  114. autogen/coding/docker_commandline_code_executor.py +9 -9
  115. autogen/coding/func_with_reqs.py +7 -6
  116. autogen/coding/jupyter/base.py +3 -3
  117. autogen/coding/jupyter/docker_jupyter_server.py +3 -4
  118. autogen/coding/jupyter/import_utils.py +3 -3
  119. autogen/coding/jupyter/jupyter_client.py +5 -5
  120. autogen/coding/jupyter/jupyter_code_executor.py +3 -4
  121. autogen/coding/jupyter/local_jupyter_server.py +2 -6
  122. autogen/coding/local_commandline_code_executor.py +8 -7
  123. autogen/coding/markdown_code_extractor.py +1 -2
  124. autogen/coding/utils.py +1 -2
  125. autogen/doc_utils.py +3 -2
  126. autogen/environments/docker_python_environment.py +19 -29
  127. autogen/environments/python_environment.py +8 -17
  128. autogen/environments/system_python_environment.py +3 -4
  129. autogen/environments/venv_python_environment.py +8 -12
  130. autogen/environments/working_directory.py +1 -2
  131. autogen/events/agent_events.py +106 -109
  132. autogen/events/base_event.py +6 -5
  133. autogen/events/client_events.py +15 -14
  134. autogen/events/helpers.py +1 -1
  135. autogen/events/print_event.py +4 -5
  136. autogen/fast_depends/_compat.py +10 -15
  137. autogen/fast_depends/core/build.py +17 -36
  138. autogen/fast_depends/core/model.py +64 -113
  139. autogen/fast_depends/dependencies/model.py +2 -1
  140. autogen/fast_depends/dependencies/provider.py +3 -2
  141. autogen/fast_depends/library/model.py +4 -4
  142. autogen/fast_depends/schema.py +7 -7
  143. autogen/fast_depends/use.py +17 -25
  144. autogen/fast_depends/utils.py +10 -30
  145. autogen/formatting_utils.py +6 -6
  146. autogen/graph_utils.py +1 -4
  147. autogen/import_utils.py +38 -27
  148. autogen/interop/crewai/crewai.py +2 -2
  149. autogen/interop/interoperable.py +2 -2
  150. autogen/interop/langchain/langchain_chat_model_factory.py +3 -2
  151. autogen/interop/langchain/langchain_tool.py +2 -6
  152. autogen/interop/litellm/litellm_config_factory.py +6 -7
  153. autogen/interop/pydantic_ai/pydantic_ai.py +4 -7
  154. autogen/interop/registry.py +2 -1
  155. autogen/io/base.py +5 -5
  156. autogen/io/run_response.py +33 -32
  157. autogen/io/websockets.py +6 -5
  158. autogen/json_utils.py +1 -2
  159. autogen/llm_config/__init__.py +11 -0
  160. autogen/llm_config/client.py +58 -0
  161. autogen/llm_config/config.py +384 -0
  162. autogen/llm_config/entry.py +154 -0
  163. autogen/logger/base_logger.py +4 -3
  164. autogen/logger/file_logger.py +2 -1
  165. autogen/logger/logger_factory.py +2 -2
  166. autogen/logger/logger_utils.py +2 -2
  167. autogen/logger/sqlite_logger.py +2 -1
  168. autogen/math_utils.py +4 -5
  169. autogen/mcp/__main__.py +6 -6
  170. autogen/mcp/helpers.py +4 -4
  171. autogen/mcp/mcp_client.py +170 -29
  172. autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +3 -4
  173. autogen/mcp/mcp_proxy/mcp_proxy.py +23 -26
  174. autogen/mcp/mcp_proxy/operation_grouping.py +4 -5
  175. autogen/mcp/mcp_proxy/operation_renaming.py +6 -10
  176. autogen/mcp/mcp_proxy/security.py +2 -3
  177. autogen/messages/agent_messages.py +96 -98
  178. autogen/messages/base_message.py +6 -5
  179. autogen/messages/client_messages.py +15 -14
  180. autogen/messages/print_message.py +4 -5
  181. autogen/oai/__init__.py +1 -2
  182. autogen/oai/anthropic.py +42 -41
  183. autogen/oai/bedrock.py +68 -57
  184. autogen/oai/cerebras.py +26 -25
  185. autogen/oai/client.py +113 -139
  186. autogen/oai/client_utils.py +3 -3
  187. autogen/oai/cohere.py +34 -11
  188. autogen/oai/gemini.py +39 -17
  189. autogen/oai/gemini_types.py +11 -12
  190. autogen/oai/groq.py +22 -10
  191. autogen/oai/mistral.py +17 -11
  192. autogen/oai/oai_models/__init__.py +14 -2
  193. autogen/oai/oai_models/_models.py +2 -2
  194. autogen/oai/oai_models/chat_completion.py +13 -14
  195. autogen/oai/oai_models/chat_completion_message.py +11 -9
  196. autogen/oai/oai_models/chat_completion_message_tool_call.py +26 -3
  197. autogen/oai/oai_models/chat_completion_token_logprob.py +3 -4
  198. autogen/oai/oai_models/completion_usage.py +8 -9
  199. autogen/oai/ollama.py +19 -9
  200. autogen/oai/openai_responses.py +40 -17
  201. autogen/oai/openai_utils.py +48 -38
  202. autogen/oai/together.py +29 -14
  203. autogen/retrieve_utils.py +6 -7
  204. autogen/runtime_logging.py +5 -4
  205. autogen/token_count_utils.py +7 -4
  206. autogen/tools/contrib/time/time.py +0 -1
  207. autogen/tools/dependency_injection.py +5 -6
  208. autogen/tools/experimental/browser_use/browser_use.py +10 -10
  209. autogen/tools/experimental/code_execution/python_code_execution.py +5 -7
  210. autogen/tools/experimental/crawl4ai/crawl4ai.py +12 -15
  211. autogen/tools/experimental/deep_research/deep_research.py +9 -8
  212. autogen/tools/experimental/duckduckgo/duckduckgo_search.py +5 -11
  213. autogen/tools/experimental/firecrawl/firecrawl_tool.py +98 -115
  214. autogen/tools/experimental/google/authentication/credentials_local_provider.py +1 -1
  215. autogen/tools/experimental/google/drive/drive_functions.py +4 -4
  216. autogen/tools/experimental/google/drive/toolkit.py +5 -5
  217. autogen/tools/experimental/google_search/google_search.py +5 -5
  218. autogen/tools/experimental/google_search/youtube_search.py +5 -5
  219. autogen/tools/experimental/messageplatform/discord/discord.py +8 -12
  220. autogen/tools/experimental/messageplatform/slack/slack.py +14 -20
  221. autogen/tools/experimental/messageplatform/telegram/telegram.py +8 -12
  222. autogen/tools/experimental/perplexity/perplexity_search.py +18 -29
  223. autogen/tools/experimental/reliable/reliable.py +68 -74
  224. autogen/tools/experimental/searxng/searxng_search.py +20 -19
  225. autogen/tools/experimental/tavily/tavily_search.py +12 -19
  226. autogen/tools/experimental/web_search_preview/web_search_preview.py +13 -7
  227. autogen/tools/experimental/wikipedia/wikipedia.py +7 -10
  228. autogen/tools/function_utils.py +7 -7
  229. autogen/tools/tool.py +8 -6
  230. autogen/types.py +2 -2
  231. autogen/version.py +1 -1
  232. ag2-0.9.7.dist-info/RECORD +0 -421
  233. autogen/llm_config.py +0 -385
  234. {ag2-0.9.7.dist-info → ag2-0.9.9.dist-info}/WHEEL +0 -0
  235. {ag2-0.9.7.dist-info → ag2-0.9.9.dist-info}/licenses/LICENSE +0 -0
  236. {ag2-0.9.7.dist-info → ag2-0.9.9.dist-info}/licenses/NOTICE.md +0 -0
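
Most of the churn in the per-file diffs that follow is a single mechanical modernization of type hints: typing.Optional[X] becomes X | None and typing.Union[A, B] becomes A | B (PEP 604), while Callable imports move from typing to collections.abc (PEP 585). A minimal sketch of the equivalence, with illustrative names that are not from the package:

    # Sketch only: the two annotation styles are identical to a type checker.
    # PEP 604 syntax ("X | None") is evaluated at runtime on Python 3.10+.
    from typing import Optional, Union

    def old_style(limit: Optional[int] = None, payload: Union[str, dict] = "") -> Optional[str]:
        return None

    def new_style(limit: int | None = None, payload: str | dict = "") -> str | None:
        return None  # same contract, no typing imports required

Dropping the pre-3.10 fallbacks (see the TypeAlias change in neo4j_graph_query_engine.py below) suggests this release assumes Python 3.10 or newer.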
--- a/autogen/agentchat/contrib/capabilities/transforms.py
+++ b/autogen/agentchat/contrib/capabilities/transforms.py
@@ -6,7 +6,7 @@
 # SPDX-License-Identifier: MIT
 import copy
 import sys
-from typing import Any, Optional, Protocol, Union
+from typing import Any, Protocol
 
 import tiktoken
 from termcolor import colored
@@ -62,9 +62,9 @@ class MessageHistoryLimiter:
 
     def __init__(
         self,
-        max_messages: Optional[int] = None,
+        max_messages: int | None = None,
         keep_first_message: bool = False,
-        exclude_names: Optional[list[str]] = None,
+        exclude_names: list[str] | None = None,
     ):
         """Args:
             max_messages Optional[int]: Maximum number of messages to keep in the context. Must be greater than 0 if not None.
@@ -91,7 +91,6 @@ class MessageHistoryLimiter:
         Returns:
             List[Dict]: A new list containing the most recent messages up to the specified maximum.
         """
-
         exclude_names = getattr(self, "_exclude_names", None)
 
         filtered = [msg for msg in messages if msg.get("name") not in exclude_names] if exclude_names else messages
@@ -136,7 +135,7 @@ class MessageHistoryLimiter:
             return logs_str, True
         return "No messages were removed.", False
 
-    def _validate_max_messages(self, max_messages: Optional[int]):
+    def _validate_max_messages(self, max_messages: int | None):
         if max_messages is not None and max_messages < 1:
             raise ValueError("max_messages must be None or greater than 1")
 
@@ -171,11 +170,11 @@ class MessageTokenLimiter:
 
     def __init__(
         self,
-        max_tokens_per_message: Optional[int] = None,
-        max_tokens: Optional[int] = None,
-        min_tokens: Optional[int] = None,
+        max_tokens_per_message: int | None = None,
+        max_tokens: int | None = None,
+        min_tokens: int | None = None,
         model: str = "gpt-3.5-turbo-0613",
-        filter_dict: Optional[dict[str, Any]] = None,
+        filter_dict: dict[str, Any] | None = None,
         exclude_filter: bool = True,
     ):
         """Args:
@@ -268,7 +267,7 @@ class MessageTokenLimiter:
             return logs_str, True
         return "No tokens were truncated.", False
 
-    def _truncate_str_to_tokens(self, contents: Union[str, list], n_tokens: int) -> Union[str, list]:
+    def _truncate_str_to_tokens(self, contents: str | list, n_tokens: int) -> str | list:
         if isinstance(contents, str):
             return self._truncate_tokens(contents, n_tokens)
         elif isinstance(contents, list):
@@ -296,7 +295,7 @@ class MessageTokenLimiter:
 
         return truncated_text
 
-    def _validate_max_tokens(self, max_tokens: Optional[int] = None) -> Optional[int]:
+    def _validate_max_tokens(self, max_tokens: int | None = None) -> int | None:
         if max_tokens is not None and max_tokens < 0:
             raise ValueError("max_tokens and max_tokens_per_message must be None or greater than or equal to 0")
 
@@ -317,7 +316,7 @@ class MessageTokenLimiter:
 
         return max_tokens if max_tokens is not None else sys.maxsize
 
-    def _validate_min_tokens(self, min_tokens: Optional[int], max_tokens: Optional[int]) -> int:
+    def _validate_min_tokens(self, min_tokens: int | None, max_tokens: int | None) -> int:
         if min_tokens is None:
             return 0
         if min_tokens < 0:
@@ -336,11 +335,11 @@ class TextMessageCompressor:
 
     def __init__(
         self,
-        text_compressor: Optional[TextCompressor] = None,
-        min_tokens: Optional[int] = None,
+        text_compressor: TextCompressor | None = None,
+        min_tokens: int | None = None,
         compression_params: dict = dict(),
-        cache: Optional[AbstractCache] = None,
-        filter_dict: Optional[dict[str, Any]] = None,
+        cache: AbstractCache | None = None,
+        filter_dict: dict[str, Any] | None = None,
         exclude_filter: bool = True,
     ):
         """Args:
@@ -466,7 +465,7 @@ class TextMessageCompressor:
 
         return compressed_text["compressed_prompt"], savings
 
-    def _validate_min_tokens(self, min_tokens: Optional[int]):
+    def _validate_min_tokens(self, min_tokens: int | None):
         if min_tokens is not None and min_tokens <= 0:
             raise ValueError("min_tokens must be greater than 0 or None")
 
@@ -497,7 +496,7 @@ class TextMessageContentName:
         position: str = "start",
         format_string: str = "{name}:\n",
         deduplicate: bool = True,
-        filter_dict: Optional[dict[str, Any]] = None,
+        filter_dict: dict[str, Any] | None = None,
         exclude_filter: bool = True,
     ):
         """Args:
--- a/autogen/agentchat/contrib/capabilities/transforms_util.py
+++ b/autogen/agentchat/contrib/capabilities/transforms_util.py
@@ -5,7 +5,7 @@
 # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
 # SPDX-License-Identifier: MIT
 from collections.abc import Hashable
-from typing import Any, Optional
+from typing import Any
 
 from .... import token_count_utils
 from ....cache.abstract_cache_base import AbstractCache
@@ -24,7 +24,7 @@ def cache_key(content: MessageContentType, *args: Hashable) -> str:
     return "".join(str_keys)
 
 
-def cache_content_get(cache: Optional[AbstractCache], key: str) -> Optional[tuple[MessageContentType, ...]]:
+def cache_content_get(cache: AbstractCache | None, key: str) -> tuple[MessageContentType, ...] | None:
    """Retrieves cached content from the cache.
 
    Args:
@@ -37,7 +37,7 @@ def cache_content_get(cache: Optional[AbstractCache], key: str) -> Optional[tupl
        return cached_value
 
 
-def cache_content_set(cache: Optional[AbstractCache], key: str, content: MessageContentType, *extra_values):
+def cache_content_set(cache: AbstractCache | None, key: str, content: MessageContentType, *extra_values):
    """Sets content into the cache.
 
    Args:
@@ -51,7 +51,7 @@ def cache_content_set(cache: Optional[AbstractCache], key: str, content: Message
    cache.set(key, cache_value)
 
 
-def min_tokens_reached(messages: list[dict[str, Any]], min_tokens: Optional[int]) -> bool:
+def min_tokens_reached(messages: list[dict[str, Any]], min_tokens: int | None) -> bool:
    """Returns True if the total number of tokens in the messages is greater than or equal to the specified value.
 
    Args:
@@ -108,7 +108,7 @@ def is_content_text_empty(content: MessageContentType) -> bool:
    return True
 
 
-def should_transform_message(message: dict[str, Any], filter_dict: Optional[dict[str, Any]], exclude: bool) -> bool:
+def should_transform_message(message: dict[str, Any], filter_dict: dict[str, Any] | None, exclude: bool) -> bool:
    """Validates whether the transform should be applied according to the filter dictionary.
 
    Args:
--- a/autogen/agentchat/contrib/capabilities/vision_capability.py
+++ b/autogen/agentchat/contrib/capabilities/vision_capability.py
@@ -5,7 +5,8 @@
 # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
 # SPDX-License-Identifier: MIT
 import copy
-from typing import Any, Callable, Optional, Union
+from collections.abc import Callable
+from typing import Any
 
 from ....code_utils import content_str
 from ....oai.client import OpenAIWrapper
@@ -47,7 +48,7 @@ class VisionCapability(AgentCapability):
     def __init__(
         self,
         lmm_config: dict[str, Any],
-        description_prompt: Optional[str] = DEFAULT_DESCRIPTION_PROMPT,
+        description_prompt: str | None = DEFAULT_DESCRIPTION_PROMPT,
         custom_caption_func: Callable = None,
     ) -> None:
         """Initializes a new instance, setting up the configuration for interacting with
@@ -101,7 +102,7 @@ class VisionCapability(AgentCapability):
         # Register a hook for processing the last message.
         agent.register_hook(hookable_method="process_last_received_message", hook=self.process_last_received_message)
 
-    def process_last_received_message(self, content: Union[str, list[dict[str, Any]]]) -> str:
+    def process_last_received_message(self, content: str | list[dict[str, Any]]) -> str:
         """Processes the last received message content by normalizing and augmenting it
         with descriptions of any included images. The function supports input content
         as either a string or a list of dictionaries, where each dictionary represents
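
The Callable change in this file (and in captainagent.py below) follows PEP 585: since Python 3.9, collections.abc.Callable is subscriptable in annotations and typing.Callable is a deprecated alias for it. A small sketch of the preferred spelling; the function and parameter names are illustrative, not from the package:

    from collections.abc import Callable

    # A callback that turns an image path into a caption string, or None to disable it.
    def set_caption_func(func: Callable[[str], str] | None = None) -> None:
        if func is not None:
            print(func("figure.png"))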
--- a/autogen/agentchat/contrib/captainagent/agent_builder.py
+++ b/autogen/agentchat/contrib/captainagent/agent_builder.py
@@ -11,7 +11,7 @@ import logging
 import re
 import subprocess as sp
 import time
-from typing import Any, Optional, Union
+from typing import Any
 
 from termcolor import colored
 
@@ -184,14 +184,14 @@ Match roles in the role set to each expert in expert set.
 
     def __init__(
         self,
-        config_file_or_env: Optional[str] = "OAI_CONFIG_LIST",
-        config_file_location: Optional[str] = "",
-        llm_config: Optional[Union[LLMConfig, dict[str, Any]]] = None,
-        builder_model: Optional[Union[str, list]] = [],
-        agent_model: Optional[Union[str, list]] = [],
-        builder_model_tags: Optional[list] = [],
-        agent_model_tags: Optional[list] = [],
-        max_agents: Optional[int] = 5,
+        config_file_or_env: str | None = "OAI_CONFIG_LIST",
+        config_file_location: str | None = "",
+        llm_config: LLMConfig | dict[str, Any] | None = None,
+        builder_model: str | list | None = [],
+        agent_model: str | list | None = [],
+        builder_model_tags: list | None = [],
+        agent_model_tags: list | None = [],
+        max_agents: int | None = 5,
     ):
         """(These APIs are experimental and may change in the future.)
 
@@ -259,8 +259,8 @@ Match roles in the role set to each expert in expert set.
         self,
         agent_config: dict[str, Any],
         member_name: list[str],
-        llm_config: Union[LLMConfig, dict[str, Any]],
-        use_oai_assistant: Optional[bool] = False,
+        llm_config: LLMConfig | dict[str, Any],
+        use_oai_assistant: bool | None = False,
     ) -> AssistantAgent:
         """Create a group chat participant agent.
 
@@ -357,7 +357,7 @@ Match roles in the role set to each expert in expert set.
         self.agent_procs_assign[agent_name] = (agent, server_id)
         return agent
 
-    def clear_agent(self, agent_name: str, recycle_endpoint: Optional[bool] = True):
+    def clear_agent(self, agent_name: str, recycle_endpoint: bool | None = True):
         """Clear a specific agent by name.
 
         Args:
@@ -378,7 +378,7 @@ Match roles in the role set to each expert in expert set.
             self.open_ports.append(server_id.split("_")[-1])
         print(colored(f"Agent {agent_name} has been cleared.", "yellow"), flush=True)
 
-    def clear_all_agents(self, recycle_endpoint: Optional[bool] = True):
+    def clear_all_agents(self, recycle_endpoint: bool | None = True):
         """Clear all cached agents."""
         for agent_name in [agent_name for agent_name in self.agent_procs_assign]:
             self.clear_agent(agent_name, recycle_endpoint)
@@ -387,12 +387,12 @@ Match roles in the role set to each expert in expert set.
     def build(
         self,
         building_task: str,
-        default_llm_config: Union[LLMConfig, dict[str, Any]],
-        coding: Optional[bool] = None,
-        code_execution_config: Optional[dict[str, Any]] = None,
-        use_oai_assistant: Optional[bool] = False,
-        user_proxy: Optional[ConversableAgent] = None,
-        max_agents: Optional[int] = None,
+        default_llm_config: LLMConfig | dict[str, Any],
+        coding: bool | None = None,
+        code_execution_config: dict[str, Any] | None = None,
+        use_oai_assistant: bool | None = False,
+        user_proxy: ConversableAgent | None = None,
+        max_agents: int | None = None,
         **kwargs: Any,
     ) -> tuple[list[ConversableAgent], dict[str, Any]]:
         """Auto build agents based on the building task.
@@ -515,13 +515,13 @@ Match roles in the role set to each expert in expert set.
         self,
         building_task: str,
         library_path_or_json: str,
-        default_llm_config: Union[LLMConfig, dict[str, Any]],
+        default_llm_config: LLMConfig | dict[str, Any],
         top_k: int = 3,
-        coding: Optional[bool] = None,
-        code_execution_config: Optional[dict[str, Any]] = None,
-        use_oai_assistant: Optional[bool] = False,
-        embedding_model: Optional[str] = "all-mpnet-base-v2",
-        user_proxy: Optional[ConversableAgent] = None,
+        coding: bool | None = None,
+        code_execution_config: dict[str, Any] | None = None,
+        use_oai_assistant: bool | None = False,
+        embedding_model: str | None = "all-mpnet-base-v2",
+        user_proxy: ConversableAgent | None = None,
         **kwargs: Any,
     ) -> tuple[list[ConversableAgent], dict[str, Any]]:
         """Build agents from a library.
@@ -668,7 +668,7 @@ Match roles in the role set to each expert in expert set.
         return self._build_agents(use_oai_assistant, user_proxy=user_proxy, **kwargs)
 
     def _build_agents(
-        self, use_oai_assistant: Optional[bool] = False, user_proxy: Optional[ConversableAgent] = None, **kwargs
+        self, use_oai_assistant: bool | None = False, user_proxy: ConversableAgent | None = None, **kwargs
     ) -> tuple[list[ConversableAgent], dict[str, Any]]:
         """Build agents with generated configs.
 
@@ -711,7 +711,7 @@ Match roles in the role set to each expert in expert set.
 
         return agent_list, self.cached_configs.copy()
 
-    def save(self, filepath: Optional[str] = None) -> str:
+    def save(self, filepath: str | None = None) -> str:
         """Save building configs. If the filepath is not specific, this function will create a filename by encrypt the
         building_task string by md5 with "save_config_" prefix, and save config to the local path.
 
@@ -731,9 +731,9 @@ Match roles in the role set to each expert in expert set.
 
     def load(
         self,
-        filepath: Optional[str] = None,
-        config_json: Optional[str] = None,
-        use_oai_assistant: Optional[bool] = False,
+        filepath: str | None = None,
+        config_json: str | None = None,
+        use_oai_assistant: bool | None = False,
         **kwargs: Any,
     ) -> tuple[list[ConversableAgent], dict[str, Any]]:
         """Load building configs and call the build function to complete building without calling online LLMs' api.
--- a/autogen/agentchat/contrib/captainagent/captainagent.py
+++ b/autogen/agentchat/contrib/captainagent/captainagent.py
@@ -4,7 +4,8 @@
 import hashlib
 import json
 import os
-from typing import Any, Callable, Literal, Optional, Union
+from collections.abc import Callable
+from typing import Any, Literal
 
 from termcolor import colored
 
@@ -135,17 +136,17 @@ Note that the previous experts will forget everything after you obtain the respo
     def __init__(
         self,
         name: str,
-        system_message: Optional[str] = None,
-        llm_config: Optional[Union[LLMConfig, dict[str, Any], Literal[False]]] = None,
-        is_termination_msg: Optional[Callable[[dict[str, Any]], bool]] = None,
-        max_consecutive_auto_reply: Optional[int] = None,
-        human_input_mode: Optional[str] = "NEVER",
-        code_execution_config: Optional[Union[dict[str, Any], Literal[False]]] = False,
-        nested_config: Optional[dict[str, Any]] = None,
-        agent_lib: Optional[str] = None,
-        tool_lib: Optional[str] = None,
-        agent_config_save_path: Optional[str] = None,
-        description: Optional[str] = DEFAULT_DESCRIPTION,
+        system_message: str | None = None,
+        llm_config: LLMConfig | dict[str, Any] | Literal[False] | None = None,
+        is_termination_msg: Callable[[dict[str, Any]], bool] | None = None,
+        max_consecutive_auto_reply: int | None = None,
+        human_input_mode: str | None = "NEVER",
+        code_execution_config: dict[str, Any] | Literal[False] | None = False,
+        nested_config: dict[str, Any] | None = None,
+        agent_lib: str | None = None,
+        tool_lib: str | None = None,
+        agent_config_save_path: str | None = None,
+        description: str | None = DEFAULT_DESCRIPTION,
         **kwargs: Any,
     ):
         """Args:
@@ -227,7 +228,7 @@ Note that the previous experts will forget everything after you obtain the respo
         )
 
     @staticmethod
-    def _update_config(default_dict: dict[str, Any], update_dict: Optional[dict[str, Any]]) -> dict[str, Any]:
+    def _update_config(default_dict: dict[str, Any], update_dict: dict[str, Any] | None) -> dict[str, Any]:
         """Recursively updates the default_dict with values from update_dict."""
         if update_dict is None:
             return default_dict
@@ -297,14 +298,14 @@ Collect information from the general task, follow the suggestions from manager t
         name: str,
         nested_config: dict[str, Any],
         agent_config_save_path: str = None,
-        is_termination_msg: Optional[Callable[[dict[str, Any]], bool]] = None,
-        max_consecutive_auto_reply: Optional[int] = None,
-        human_input_mode: Optional[str] = "NEVER",
-        code_execution_config: Optional[Union[dict[str, Any], Literal[False]]] = None,
-        default_auto_reply: Optional[Union[str, dict[str, Any]]] = DEFAULT_AUTO_REPLY,
-        llm_config: Optional[Union[LLMConfig, dict[str, Any], Literal[False]]] = False,
-        system_message: Optional[Union[str, list]] = "",
-        description: Optional[str] = None,
+        is_termination_msg: Callable[[dict[str, Any]], bool] | None = None,
+        max_consecutive_auto_reply: int | None = None,
+        human_input_mode: str | None = "NEVER",
+        code_execution_config: dict[str, Any] | Literal[False] | None = None,
+        default_auto_reply: str | dict[str, Any] | None = DEFAULT_AUTO_REPLY,
+        llm_config: LLMConfig | dict[str, Any] | Literal[False] | None = False,
+        system_message: str | list | None = "",
+        description: str | None = None,
     ):
         """Args:
             name (str): name of the agent.
--- a/autogen/agentchat/contrib/captainagent/tool_retriever.py
+++ b/autogen/agentchat/contrib/captainagent/tool_retriever.py
@@ -14,7 +14,6 @@ import traceback
 from hashlib import md5
 from pathlib import Path
 from textwrap import dedent, indent
-from typing import Optional, Union
 
 from .... import AssistantAgent, UserProxyAgent
 from ....coding import CodeExecutor, CodeExtractor, LocalCommandLineCodeExecutor, MarkdownCodeExtractor
@@ -76,7 +75,7 @@ You have access to the following functions. You can write python code to call th
         agent.update_system_message(sys_message)
         return
 
-    def bind_user_proxy(self, agent: UserProxyAgent, tool_root: Union[str, list]):
+    def bind_user_proxy(self, agent: UserProxyAgent, tool_root: str | list):
         """Updates user proxy agent with a executor so that code executor can successfully execute function-related code.
         Returns an updated user proxy.
         """
@@ -164,7 +163,7 @@ class LocalExecutorWithTools(CodeExecutor):
         """(Experimental) Export a code extractor that can be used by an agent."""
         return MarkdownCodeExtractor()
 
-    def __init__(self, tools: Optional[list[Tool]] = None, work_dir: Union[Path, str] = Path()):
+    def __init__(self, tools: list[Tool] | None = None, work_dir: Path | str = Path()):
         self.tools = tools if tools is not None else []
         self.work_dir = work_dir
         if not os.path.exists(work_dir):
--- a/autogen/agentchat/contrib/gpt_assistant_agent.py
+++ b/autogen/agentchat/contrib/gpt_assistant_agent.py
@@ -9,7 +9,7 @@ import json
 import logging
 import time
 from collections import defaultdict
-from typing import Any, Optional, Union
+from typing import Any
 
 from ... import OpenAIWrapper
 from ...llm_config import LLMConfig
@@ -31,9 +31,9 @@ class GPTAssistantAgent(ConversableAgent):
     def __init__(
         self,
         name="GPT Assistant",
-        instructions: Optional[str] = None,
-        llm_config: Optional[Union[LLMConfig, dict[str, Any], bool]] = None,
-        assistant_config: Optional[dict[str, Any]] = None,
+        instructions: str | None = None,
+        llm_config: LLMConfig | dict[str, Any] | bool | None = None,
+        assistant_config: dict[str, Any] | None = None,
         overwrite_instructions: bool = False,
         overwrite_tools: bool = False,
         **kwargs: Any,
@@ -182,10 +182,10 @@ class GPTAssistantAgent(ConversableAgent):
 
     def _invoke_assistant(
         self,
-        messages: Optional[list[dict[str, Any]]] = None,
-        sender: Optional[Agent] = None,
-        config: Optional[Any] = None,
-    ) -> tuple[bool, Optional[Union[str, dict[str, Any]]]]:
+        messages: list[dict[str, Any]] | None = None,
+        sender: Agent | None = None,
+        config: Any | None = None,
+    ) -> tuple[bool, str | dict[str, Any] | None]:
         """Invokes the OpenAI assistant to generate a reply based on the given messages.
 
         Args:
@@ -392,7 +392,7 @@ class GPTAssistantAgent(ConversableAgent):
         # Clear the record of unread messages
         self._unread_index.clear()
 
-    def clear_history(self, agent: Optional[Agent] = None):
+    def clear_history(self, agent: Agent | None = None):
         """Clear the chat history of the agent.
 
         Args:
--- a/autogen/agentchat/contrib/graph_rag/document.py
+++ b/autogen/agentchat/contrib/graph_rag/document.py
@@ -6,7 +6,7 @@
 # SPDX-License-Identifier: MIT
 from dataclasses import dataclass, field
 from enum import Enum, auto
-from typing import Any, Optional
+from typing import Any
 
 __all__ = ["Document", "DocumentType"]
 
@@ -25,5 +25,5 @@ class Document:
     """A wrapper of graph store query results."""
 
     doctype: DocumentType
-    data: Optional[Any] = None
-    path_or_url: Optional[str] = field(default_factory=lambda: "")
+    data: Any | None = None
+    path_or_url: str | None = field(default_factory=lambda: "")
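
A side note on the Document dataclass above: field(default_factory=lambda: "") is only required when a default value is mutable; for an immutable str, a plain default is equivalent. A sketch under that assumption (this is not the package's code):

    from dataclasses import dataclass, field

    @dataclass
    class Doc:
        data: object | None = None
        path_or_url: str | None = ""                   # plain default is fine for str
        tags: list[str] = field(default_factory=list)  # factory needed: list is mutable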
--- a/autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py
+++ b/autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py
@@ -28,8 +28,8 @@ class FalkorGraphQueryEngine:
         name: str,
         host: str = "127.0.0.1",
         port: int = 6379,
-        username: Optional[str] = None,
-        password: Optional[str] = None,
+        username: str | None = None,
+        password: str | None = None,
         model: Optional["GenerativeModel"] = None,
         ontology: Optional["Ontology"] = None,
     ):
@@ -57,7 +57,7 @@ class FalkorGraphQueryEngine:
         self.model = model or OpenAiGenerativeModel("gpt-4o")
         self.model_config = KnowledgeGraphModelConfig.with_model(model)
         self.ontology = ontology
-        self.knowledge_graph: Optional["KnowledgeGraph"] = None  # type: ignore[no-any-unimported]
+        self.knowledge_graph: KnowledgeGraph | None = None  # type: ignore[no-any-unimported]
         self.falkordb = FalkorDB(host=self.host, port=self.port, username=self.username, password=self.password)
 
     def connect_db(self) -> None:
--- a/autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py
+++ b/autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-from typing import Any, Optional, Union
+from typing import Any
 
 from .... import Agent, ConversableAgent
 from .falkor_graph_query_engine import FalkorGraphQueryEngine
@@ -50,10 +50,10 @@ class FalkorGraphRagCapability(GraphRagCapability):
     def _reply_using_falkordb_query(
         self,
         recipient: ConversableAgent,
-        messages: Optional[list[dict[str, Any]]] = None,
-        sender: Optional[Agent] = None,
-        config: Optional[Any] = None,
-    ) -> tuple[bool, Optional[Union[str, dict[str, Any]]]]:
+        messages: list[dict[str, Any]] | None = None,
+        sender: Agent | None = None,
+        config: Any | None = None,
+    ) -> tuple[bool, str | dict[str, Any] | None]:
         """Query FalkorDB and return the message. Internally, it utilises OpenAI to generate a reply based on the given messages.
         The history with FalkorDB is also logged and updated.
 
@@ -76,7 +76,7 @@ class FalkorGraphRagCapability(GraphRagCapability):
 
         return True, result.answer if result.answer else "I'm sorry, I don't have an answer for that."
 
-    def _messages_summary(self, messages: Union[dict[str, Any], str], system_message: str) -> str:
+    def _messages_summary(self, messages: dict[str, Any] | str, system_message: str) -> str:
         """Summarize the messages in the conversation history. Excluding any message with 'tool_calls' and 'tool_responses'
         Includes the 'name' (if it exists) and the 'content', with a new line between each one, like:
         customer:
--- a/autogen/agentchat/contrib/graph_rag/graph_query_engine.py
+++ b/autogen/agentchat/contrib/graph_rag/graph_query_engine.py
@@ -5,7 +5,7 @@
 # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
 # SPDX-License-Identifier: MIT
 from dataclasses import dataclass, field
-from typing import Any, Optional, Protocol, runtime_checkable
+from typing import Any, Protocol, runtime_checkable
 
 from .document import Document
 
@@ -20,7 +20,7 @@ class GraphStoreQueryResult:
         results: intermediate results to question/query, e.g. node entities.
     """
 
-    answer: Optional[str] = None
+    answer: str | None = None
     results: list[Any] = field(default_factory=list)
 
 
@@ -31,7 +31,7 @@ class GraphQueryEngine(Protocol):
     This interface defines the basic methods for graph-based RAG.
     """
 
-    def init_db(self, input_doc: Optional[list[Document]] = None) -> None:
+    def init_db(self, input_doc: list[Document] | None = None) -> None:
         """This method initializes graph database with the input documents or records.
         Usually, it takes the following steps,
         1. connecting to a graph database.
--- a/autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py
+++ b/autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py
@@ -2,13 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 import os
-import sys
-from typing import Any, Optional, Union
-
-if sys.version_info >= (3, 10):
-    from typing import TypeAlias
-else:
-    from typing_extensions import TypeAlias
+from typing import Any, Optional, TypeAlias
 
 from ....import_utils import optional_import_block, require_optional_import
 from .document import Document, DocumentType
@@ -64,8 +58,8 @@ class Neo4jGraphQueryEngine:
         embedding: Optional["BaseEmbedding"] = None,
         entities: Optional["TypeAlias"] = None,
         relations: Optional["TypeAlias"] = None,
-        schema: Optional[Union[dict[str, str], list["Triple"]]] = None,
-        strict: Optional[bool] = False,
+        schema: dict[str, str] | list["Triple"] | None = None,
+        strict: bool | None = False,
     ):
         """Initialize a Neo4j Property graph.
         Please also refer to https://docs.llamaindex.ai/en/stable/examples/property_graph/graph_store/
@@ -96,7 +90,7 @@ class Neo4jGraphQueryEngine:
         self.schema = schema
         self.strict = strict
 
-    def init_db(self, input_doc: Optional[list[Document]] = None) -> None:
+    def init_db(self, input_doc: list[Document] | None = None) -> None:
         """Build the knowledge graph with input documents."""
         self.documents = self._load_doc(input_doc if input_doc is not None else [])
 
@@ -245,7 +239,7 @@ class Neo4jGraphQueryEngine:
         # To add more extractors, please refer to https://docs.llamaindex.ai/en/latest/module_guides/indexing/lpg_index_guide/#construction
         """
         #
-        kg_extractors: list["TransformComponent"] = [  # type: ignore[no-any-unimported]
+        kg_extractors: list[TransformComponent] = [  # type: ignore[no-any-unimported]
            SchemaLLMPathExtractor(
                llm=self.llm,
                possible_entities=self.entities,
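
The import simplification at the top of neo4j_graph_query_engine.py is a version-floor change: typing.TypeAlias was added in Python 3.10, so importing it unconditionally (with no typing_extensions fallback) only works once the package requires Python 3.10 or newer, consistent with the PEP 604 syntax used throughout this release. A minimal sketch of what TypeAlias marks; the alias name is illustrative:

    from typing import TypeAlias  # in the stdlib since Python 3.10

    # Explicitly marks an assignment as a type alias rather than a plain variable.
    EntityNames: TypeAlias = list[str] | None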
--- a/autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py
+++ b/autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py
@@ -2,7 +2,7 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 
-from typing import Any, Optional, Union
+from typing import Any
 
 from .... import Agent, ConversableAgent, UserProxyAgent
 from .graph_query_engine import GraphStoreQueryResult
@@ -46,10 +46,10 @@ class Neo4jGraphCapability(GraphRagCapability):
     def _reply_using_neo4j_query(
         self,
         recipient: ConversableAgent,
-        messages: Optional[list[dict[str, Any]]] = None,
-        sender: Optional[Agent] = None,
-        config: Optional[Any] = None,
-    ) -> tuple[bool, Optional[Union[str, dict[str, Any]]]]:
+        messages: list[dict[str, Any]] | None = None,
+        sender: Agent | None = None,
+        config: Any | None = None,
+    ) -> tuple[bool, str | dict[str, Any] | None]:
         """Query neo4j and return the message. Internally, it queries the Property graph
         and returns the answer from the graph query engine.
         TODO: reply with a dictionary including both the answer and semantic source triplets.
@@ -74,7 +74,7 @@ class Neo4jGraphCapability(GraphRagCapability):
 
         return True, result.answer
 
-    def _get_last_question(self, message: Union[dict[str, Any], str]) -> Optional[Union[str, dict[str, Any]]]:
+    def _get_last_question(self, message: dict[str, Any] | str) -> str | dict[str, Any] | None:
         """Retrieves the last message from the conversation history."""
         if isinstance(message, str):
             return message
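
Finally, several return annotations in these hunks change to tuple[bool, str | dict[str, Any] | None], the shape AG2 reply functions return: a flag saying whether the reply is final, plus the reply itself (or None to decline). A hedged sketch of a function with that signature; the echo behavior is invented and the registration convention is assumed from the conversable-agent pattern rather than confirmed by this diff:

    from typing import Any

    def echo_reply(
        recipient,                                      # the replying agent
        messages: list[dict[str, Any]] | None = None,   # conversation history
        sender=None,
        config: Any | None = None,
    ) -> tuple[bool, str | dict[str, Any] | None]:
        last = messages[-1].get("content", "") if messages else ""
        # (True, reply) means the reply is final; (False, None) defers to later handlers.
        return True, f"echo: {last}"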