ag2 0.9.7__py3-none-any.whl → 0.9.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (236)
  1. {ag2-0.9.7.dist-info → ag2-0.9.9.dist-info}/METADATA +102 -75
  2. ag2-0.9.9.dist-info/RECORD +387 -0
  3. autogen/__init__.py +1 -2
  4. autogen/_website/generate_api_references.py +4 -5
  5. autogen/_website/generate_mkdocs.py +9 -15
  6. autogen/_website/notebook_processor.py +13 -14
  7. autogen/_website/process_notebooks.py +10 -10
  8. autogen/_website/utils.py +5 -4
  9. autogen/agentchat/agent.py +13 -13
  10. autogen/agentchat/assistant_agent.py +7 -6
  11. autogen/agentchat/contrib/agent_eval/agent_eval.py +3 -3
  12. autogen/agentchat/contrib/agent_eval/critic_agent.py +3 -3
  13. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +3 -3
  14. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +3 -3
  15. autogen/agentchat/contrib/agent_optimizer.py +3 -3
  16. autogen/agentchat/contrib/capabilities/generate_images.py +11 -11
  17. autogen/agentchat/contrib/capabilities/teachability.py +15 -15
  18. autogen/agentchat/contrib/capabilities/transforms.py +17 -18
  19. autogen/agentchat/contrib/capabilities/transforms_util.py +5 -5
  20. autogen/agentchat/contrib/capabilities/vision_capability.py +4 -3
  21. autogen/agentchat/contrib/captainagent/agent_builder.py +30 -30
  22. autogen/agentchat/contrib/captainagent/captainagent.py +22 -21
  23. autogen/agentchat/contrib/captainagent/tool_retriever.py +2 -3
  24. autogen/agentchat/contrib/gpt_assistant_agent.py +9 -9
  25. autogen/agentchat/contrib/graph_rag/document.py +3 -3
  26. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +3 -3
  27. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +6 -6
  28. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +3 -3
  29. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +5 -11
  30. autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +6 -6
  31. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +7 -7
  32. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +6 -6
  33. autogen/agentchat/contrib/img_utils.py +1 -1
  34. autogen/agentchat/contrib/llamaindex_conversable_agent.py +11 -11
  35. autogen/agentchat/contrib/llava_agent.py +18 -4
  36. autogen/agentchat/contrib/math_user_proxy_agent.py +11 -11
  37. autogen/agentchat/contrib/multimodal_conversable_agent.py +8 -8
  38. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +6 -5
  39. autogen/agentchat/contrib/rag/chromadb_query_engine.py +22 -26
  40. autogen/agentchat/contrib/rag/llamaindex_query_engine.py +14 -17
  41. autogen/agentchat/contrib/rag/mongodb_query_engine.py +27 -37
  42. autogen/agentchat/contrib/rag/query_engine.py +7 -5
  43. autogen/agentchat/contrib/retrieve_assistant_agent.py +5 -5
  44. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +8 -7
  45. autogen/agentchat/contrib/society_of_mind_agent.py +15 -14
  46. autogen/agentchat/contrib/swarm_agent.py +76 -98
  47. autogen/agentchat/contrib/text_analyzer_agent.py +7 -7
  48. autogen/agentchat/contrib/vectordb/base.py +10 -18
  49. autogen/agentchat/contrib/vectordb/chromadb.py +2 -1
  50. autogen/agentchat/contrib/vectordb/couchbase.py +18 -20
  51. autogen/agentchat/contrib/vectordb/mongodb.py +6 -5
  52. autogen/agentchat/contrib/vectordb/pgvectordb.py +40 -41
  53. autogen/agentchat/contrib/vectordb/qdrant.py +5 -5
  54. autogen/agentchat/contrib/web_surfer.py +20 -19
  55. autogen/agentchat/conversable_agent.py +292 -290
  56. autogen/agentchat/group/context_str.py +1 -3
  57. autogen/agentchat/group/context_variables.py +15 -25
  58. autogen/agentchat/group/group_tool_executor.py +10 -10
  59. autogen/agentchat/group/group_utils.py +15 -15
  60. autogen/agentchat/group/guardrails.py +7 -7
  61. autogen/agentchat/group/handoffs.py +19 -36
  62. autogen/agentchat/group/multi_agent_chat.py +7 -7
  63. autogen/agentchat/group/on_condition.py +4 -7
  64. autogen/agentchat/group/on_context_condition.py +4 -7
  65. autogen/agentchat/group/patterns/auto.py +8 -7
  66. autogen/agentchat/group/patterns/manual.py +7 -6
  67. autogen/agentchat/group/patterns/pattern.py +13 -12
  68. autogen/agentchat/group/patterns/random.py +3 -3
  69. autogen/agentchat/group/patterns/round_robin.py +3 -3
  70. autogen/agentchat/group/reply_result.py +2 -4
  71. autogen/agentchat/group/speaker_selection_result.py +5 -5
  72. autogen/agentchat/group/targets/group_chat_target.py +7 -6
  73. autogen/agentchat/group/targets/group_manager_target.py +4 -4
  74. autogen/agentchat/group/targets/transition_target.py +2 -1
  75. autogen/agentchat/groupchat.py +60 -63
  76. autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +4 -4
  77. autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +4 -4
  78. autogen/agentchat/realtime/experimental/clients/gemini/client.py +7 -7
  79. autogen/agentchat/realtime/experimental/clients/oai/base_client.py +8 -8
  80. autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +6 -6
  81. autogen/agentchat/realtime/experimental/clients/realtime_client.py +10 -9
  82. autogen/agentchat/realtime/experimental/realtime_agent.py +10 -9
  83. autogen/agentchat/realtime/experimental/realtime_observer.py +3 -3
  84. autogen/agentchat/realtime/experimental/realtime_swarm.py +44 -44
  85. autogen/agentchat/user_proxy_agent.py +10 -9
  86. autogen/agentchat/utils.py +3 -3
  87. autogen/agents/contrib/time/time_reply_agent.py +6 -5
  88. autogen/agents/contrib/time/time_tool_agent.py +2 -1
  89. autogen/agents/experimental/deep_research/deep_research.py +3 -3
  90. autogen/agents/experimental/discord/discord.py +2 -2
  91. autogen/agents/experimental/document_agent/chroma_query_engine.py +29 -44
  92. autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +9 -14
  93. autogen/agents/experimental/document_agent/document_agent.py +15 -16
  94. autogen/agents/experimental/document_agent/document_conditions.py +3 -3
  95. autogen/agents/experimental/document_agent/document_utils.py +5 -9
  96. autogen/agents/experimental/document_agent/inmemory_query_engine.py +14 -20
  97. autogen/agents/experimental/document_agent/parser_utils.py +4 -4
  98. autogen/agents/experimental/document_agent/url_utils.py +14 -23
  99. autogen/agents/experimental/reasoning/reasoning_agent.py +33 -33
  100. autogen/agents/experimental/slack/slack.py +2 -2
  101. autogen/agents/experimental/telegram/telegram.py +2 -3
  102. autogen/agents/experimental/websurfer/websurfer.py +4 -4
  103. autogen/agents/experimental/wikipedia/wikipedia.py +5 -7
  104. autogen/browser_utils.py +8 -8
  105. autogen/cache/abstract_cache_base.py +5 -5
  106. autogen/cache/cache.py +12 -12
  107. autogen/cache/cache_factory.py +4 -4
  108. autogen/cache/cosmos_db_cache.py +9 -9
  109. autogen/cache/disk_cache.py +6 -6
  110. autogen/cache/in_memory_cache.py +4 -4
  111. autogen/cache/redis_cache.py +4 -4
  112. autogen/code_utils.py +18 -18
  113. autogen/coding/base.py +6 -6
  114. autogen/coding/docker_commandline_code_executor.py +9 -9
  115. autogen/coding/func_with_reqs.py +7 -6
  116. autogen/coding/jupyter/base.py +3 -3
  117. autogen/coding/jupyter/docker_jupyter_server.py +3 -4
  118. autogen/coding/jupyter/import_utils.py +3 -3
  119. autogen/coding/jupyter/jupyter_client.py +5 -5
  120. autogen/coding/jupyter/jupyter_code_executor.py +3 -4
  121. autogen/coding/jupyter/local_jupyter_server.py +2 -6
  122. autogen/coding/local_commandline_code_executor.py +8 -7
  123. autogen/coding/markdown_code_extractor.py +1 -2
  124. autogen/coding/utils.py +1 -2
  125. autogen/doc_utils.py +3 -2
  126. autogen/environments/docker_python_environment.py +19 -29
  127. autogen/environments/python_environment.py +8 -17
  128. autogen/environments/system_python_environment.py +3 -4
  129. autogen/environments/venv_python_environment.py +8 -12
  130. autogen/environments/working_directory.py +1 -2
  131. autogen/events/agent_events.py +106 -109
  132. autogen/events/base_event.py +6 -5
  133. autogen/events/client_events.py +15 -14
  134. autogen/events/helpers.py +1 -1
  135. autogen/events/print_event.py +4 -5
  136. autogen/fast_depends/_compat.py +10 -15
  137. autogen/fast_depends/core/build.py +17 -36
  138. autogen/fast_depends/core/model.py +64 -113
  139. autogen/fast_depends/dependencies/model.py +2 -1
  140. autogen/fast_depends/dependencies/provider.py +3 -2
  141. autogen/fast_depends/library/model.py +4 -4
  142. autogen/fast_depends/schema.py +7 -7
  143. autogen/fast_depends/use.py +17 -25
  144. autogen/fast_depends/utils.py +10 -30
  145. autogen/formatting_utils.py +6 -6
  146. autogen/graph_utils.py +1 -4
  147. autogen/import_utils.py +38 -27
  148. autogen/interop/crewai/crewai.py +2 -2
  149. autogen/interop/interoperable.py +2 -2
  150. autogen/interop/langchain/langchain_chat_model_factory.py +3 -2
  151. autogen/interop/langchain/langchain_tool.py +2 -6
  152. autogen/interop/litellm/litellm_config_factory.py +6 -7
  153. autogen/interop/pydantic_ai/pydantic_ai.py +4 -7
  154. autogen/interop/registry.py +2 -1
  155. autogen/io/base.py +5 -5
  156. autogen/io/run_response.py +33 -32
  157. autogen/io/websockets.py +6 -5
  158. autogen/json_utils.py +1 -2
  159. autogen/llm_config/__init__.py +11 -0
  160. autogen/llm_config/client.py +58 -0
  161. autogen/llm_config/config.py +384 -0
  162. autogen/llm_config/entry.py +154 -0
  163. autogen/logger/base_logger.py +4 -3
  164. autogen/logger/file_logger.py +2 -1
  165. autogen/logger/logger_factory.py +2 -2
  166. autogen/logger/logger_utils.py +2 -2
  167. autogen/logger/sqlite_logger.py +2 -1
  168. autogen/math_utils.py +4 -5
  169. autogen/mcp/__main__.py +6 -6
  170. autogen/mcp/helpers.py +4 -4
  171. autogen/mcp/mcp_client.py +170 -29
  172. autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +3 -4
  173. autogen/mcp/mcp_proxy/mcp_proxy.py +23 -26
  174. autogen/mcp/mcp_proxy/operation_grouping.py +4 -5
  175. autogen/mcp/mcp_proxy/operation_renaming.py +6 -10
  176. autogen/mcp/mcp_proxy/security.py +2 -3
  177. autogen/messages/agent_messages.py +96 -98
  178. autogen/messages/base_message.py +6 -5
  179. autogen/messages/client_messages.py +15 -14
  180. autogen/messages/print_message.py +4 -5
  181. autogen/oai/__init__.py +1 -2
  182. autogen/oai/anthropic.py +42 -41
  183. autogen/oai/bedrock.py +68 -57
  184. autogen/oai/cerebras.py +26 -25
  185. autogen/oai/client.py +113 -139
  186. autogen/oai/client_utils.py +3 -3
  187. autogen/oai/cohere.py +34 -11
  188. autogen/oai/gemini.py +39 -17
  189. autogen/oai/gemini_types.py +11 -12
  190. autogen/oai/groq.py +22 -10
  191. autogen/oai/mistral.py +17 -11
  192. autogen/oai/oai_models/__init__.py +14 -2
  193. autogen/oai/oai_models/_models.py +2 -2
  194. autogen/oai/oai_models/chat_completion.py +13 -14
  195. autogen/oai/oai_models/chat_completion_message.py +11 -9
  196. autogen/oai/oai_models/chat_completion_message_tool_call.py +26 -3
  197. autogen/oai/oai_models/chat_completion_token_logprob.py +3 -4
  198. autogen/oai/oai_models/completion_usage.py +8 -9
  199. autogen/oai/ollama.py +19 -9
  200. autogen/oai/openai_responses.py +40 -17
  201. autogen/oai/openai_utils.py +48 -38
  202. autogen/oai/together.py +29 -14
  203. autogen/retrieve_utils.py +6 -7
  204. autogen/runtime_logging.py +5 -4
  205. autogen/token_count_utils.py +7 -4
  206. autogen/tools/contrib/time/time.py +0 -1
  207. autogen/tools/dependency_injection.py +5 -6
  208. autogen/tools/experimental/browser_use/browser_use.py +10 -10
  209. autogen/tools/experimental/code_execution/python_code_execution.py +5 -7
  210. autogen/tools/experimental/crawl4ai/crawl4ai.py +12 -15
  211. autogen/tools/experimental/deep_research/deep_research.py +9 -8
  212. autogen/tools/experimental/duckduckgo/duckduckgo_search.py +5 -11
  213. autogen/tools/experimental/firecrawl/firecrawl_tool.py +98 -115
  214. autogen/tools/experimental/google/authentication/credentials_local_provider.py +1 -1
  215. autogen/tools/experimental/google/drive/drive_functions.py +4 -4
  216. autogen/tools/experimental/google/drive/toolkit.py +5 -5
  217. autogen/tools/experimental/google_search/google_search.py +5 -5
  218. autogen/tools/experimental/google_search/youtube_search.py +5 -5
  219. autogen/tools/experimental/messageplatform/discord/discord.py +8 -12
  220. autogen/tools/experimental/messageplatform/slack/slack.py +14 -20
  221. autogen/tools/experimental/messageplatform/telegram/telegram.py +8 -12
  222. autogen/tools/experimental/perplexity/perplexity_search.py +18 -29
  223. autogen/tools/experimental/reliable/reliable.py +68 -74
  224. autogen/tools/experimental/searxng/searxng_search.py +20 -19
  225. autogen/tools/experimental/tavily/tavily_search.py +12 -19
  226. autogen/tools/experimental/web_search_preview/web_search_preview.py +13 -7
  227. autogen/tools/experimental/wikipedia/wikipedia.py +7 -10
  228. autogen/tools/function_utils.py +7 -7
  229. autogen/tools/tool.py +8 -6
  230. autogen/types.py +2 -2
  231. autogen/version.py +1 -1
  232. ag2-0.9.7.dist-info/RECORD +0 -421
  233. autogen/llm_config.py +0 -385
  234. {ag2-0.9.7.dist-info → ag2-0.9.9.dist-info}/WHEEL +0 -0
  235. {ag2-0.9.7.dist-info → ag2-0.9.9.dist-info}/licenses/LICENSE +0 -0
  236. {ag2-0.9.7.dist-info → ag2-0.9.9.dist-info}/licenses/NOTICE.md +0 -0
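
The headline structural change sits in entries 159-162 and 233: the monolithic autogen/llm_config.py (385 lines, removed) is split into an autogen/llm_config/ package with client.py, config.py, and entry.py submodules. Every provider diff below picks up the new import path. A minimal sketch of that change as it appears in those diffs (the full re-export surface of the new package __init__.py is not shown in this diff and is an assumption):

    # 0.9.7 (flat module):
    #     from autogen.llm_config import LLMConfigEntry, register_llm_config
    # 0.9.9 (package submodule, as imported by the provider clients below):
    from autogen.llm_config.entry import LLMConfigEntry, LLMConfigEntryDict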
autogen/oai/gemini.py CHANGED
@@ -51,14 +51,15 @@ import re
  import time
  import warnings
  from io import BytesIO
- from typing import Any, Literal, Optional, Type, Union
+ from typing import Any, Literal

  import requests
  from pydantic import BaseModel, Field
+ from typing_extensions import Unpack

  from ..import_utils import optional_import_block, require_optional_import
  from ..json_utils import resolve_json_references
- from ..llm_config import LLMConfigEntry, register_llm_config
+ from ..llm_config.entry import LLMConfigEntry, LLMConfigEntryDict
  from .client_utils import FormatterProtocol
  from .gemini_types import ToolConfig
  from .oai_models import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall, Choice, CompletionUsage
@@ -68,6 +69,7 @@ with optional_import_block():
      import vertexai
      from PIL import Image
      from google.auth.credentials import Credentials
+     from google.genai import types
      from google.genai.types import (
          Content,
          FinishReason,
@@ -100,19 +102,34 @@ with optional_import_block():
  logger = logging.getLogger(__name__)


- @register_llm_config
+ class GeminiEntryDict(LLMConfigEntryDict, total=False):
+     api_type: Literal["google"]
+
+     project_id: str | None
+     location: str | None
+     google_application_credentials: str | None
+     credentials: Any | str | None
+     stream: bool
+     safety_settings: list[dict[str, Any]] | dict[str, Any] | None
+     price: list[float] | None
+     tool_config: ToolConfig | None
+     proxy: str | None
+
+
  class GeminiLLMConfigEntry(LLMConfigEntry):
      api_type: Literal["google"] = "google"
-     project_id: Optional[str] = None
-     location: Optional[str] = None
+     project_id: str | None = None
+     location: str | None = None
      # google_application_credentials points to the path of the JSON Keyfile
-     google_application_credentials: Optional[str] = None
+     google_application_credentials: str | None = None
      # credentials is a google.auth.credentials.Credentials object
-     credentials: Optional[Union[Any, str]] = None
+     credentials: Any | str | None = None
      stream: bool = False
-     safety_settings: Optional[Union[list[dict[str, Any]], dict[str, Any]]] = None
-     price: Optional[list[float]] = Field(default=None, min_length=2, max_length=2)
-     tool_config: Optional[ToolConfig] = None
+     safety_settings: list[dict[str, Any]] | dict[str, Any] | None = None
+     price: list[float] | None = Field(default=None, min_length=2, max_length=2)
+     tool_config: ToolConfig | None = None
+     proxy: str | None = None
+     """A valid HTTP(S) proxy URL"""

      def create_client(self):
          raise NotImplementedError("GeminiLLMConfigEntry.create_client() is not implemented.")
@@ -134,7 +151,7 @@ class GeminiClient:
          "max_output_tokens": "max_output_tokens",
      }

-     def _initialize_vertexai(self, **params):
+     def _initialize_vertexai(self, **params: Unpack[GeminiEntryDict]):
          if "google_application_credentials" in params:
              # Path to JSON Keyfile
              os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = params["google_application_credentials"]
@@ -179,9 +196,10 @@ class GeminiClient:
          )

          self.api_version = kwargs.get("api_version")
+         self.proxy = kwargs.get("proxy")

          # Store the response format, if provided (for structured outputs)
-         self._response_format: Optional[type[BaseModel]] = None
+         self._response_format: type[BaseModel] | None = None

      def message_retrieval(self, response) -> list:
          """Retrieve and return a list of strings or a list of Choice.Message from the response.
@@ -237,8 +255,14 @@ class GeminiClient:
              "See this [LLM configuration tutorial](https://docs.ag2.ai/latest/docs/user-guide/basic-concepts/llm-configuration/) for more details."
          )

-         params.get("api_type", "google")  # not used
-         http_options = {"api_version": self.api_version} if self.api_version else None
+         http_options = types.HttpOptions()
+         if proxy := params.get("proxy", self.proxy):
+             http_options.client_args = {"proxy": proxy}
+             http_options.async_client_args = {"proxy": proxy}
+
+         if self.api_version:
+             http_options.api_version = self.api_version
+
          messages = params.get("messages", [])
          stream = params.get("stream", False)
          n_response = params.get("n", 1)
@@ -670,9 +694,7 @@ class GeminiClient:

      @staticmethod
      def _convert_type_null_to_nullable(schema: Any) -> Any:
-         """
-         Recursively converts all occurrences of {"type": "null"} to {"nullable": True} in a schema.
-         """
+         """Recursively converts all occurrences of {"type": "null"} to {"nullable": True} in a schema."""
          if isinstance(schema, dict):
              # If schema matches {"type": "null"}, replace it
              if schema == {"type": "null"}:
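
The substantive change above is proxy support: the new proxy entry field is forwarded to the google-genai client through HttpOptions.client_args and async_client_args. A hedged usage sketch (the model name and proxy URL are placeholders, not recommendations; the config shape follows ag2's standard config_list format):

    from autogen import LLMConfig

    # Sketch: route Gemini traffic through an HTTP(S) proxy via the new field.
    llm_config = LLMConfig(
        config_list=[
            {
                "api_type": "google",
                "model": "gemini-2.0-flash",               # placeholder model name
                "api_key": "...",                          # or read from the environment
                "proxy": "http://proxy.example.com:8080",  # forwarded to HttpOptions.client_args
            }
        ]
    )

The rest of the file is the PEP 604 migration (Optional[X] to X | None) plus the new GeminiEntryDict consumed via Unpack, the same typed-kwargs pattern discussed after the groq.py diff below.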
autogen/oai/gemini_types.py CHANGED
@@ -4,7 +4,7 @@

  import enum
  import warnings
- from typing import Any, Optional, Type, TypeVar, Union, get_args, get_origin
+ from typing import Any, Optional, TypeVar, Union, get_args, get_origin

  from pydantic import BaseModel as BaseModel
  from pydantic import ConfigDict, Field, alias_generators
@@ -15,7 +15,6 @@ def _remove_extra_fields(model: Any, response: dict[str, object]) -> None:

      Mutates the response in place.
      """
-
      key_values = list(response.items())

      for key, value in key_values:
@@ -64,7 +63,7 @@ class CommonBaseModel(BaseModel):
      )

      @classmethod
-     def _from_response(cls: Type[T], *, response: dict[str, object], kwargs: dict[str, object]) -> T:
+     def _from_response(cls: type[T], *, response: dict[str, object], kwargs: dict[str, object]) -> T:
          # To maintain forward compatibility, we need to remove extra fields from
          # the response.
          # We will provide another mechanism to allow users to access these fields.
@@ -91,7 +90,7 @@ class CaseInSensitiveEnum(str, enum.Enum):
          try:
              # Creating a enum instance based on the value
              # We need to use super() to avoid infinite recursion.
-             unknown_enum_val = super(CaseInSensitiveEnum, cls).__new__(cls, value)
+             unknown_enum_val = super().__new__(cls, value)
              unknown_enum_val._name_ = str(value)  # pylint: disable=protected-access
              unknown_enum_val._value_ = value  # pylint: disable=protected-access
              return unknown_enum_val
@@ -117,11 +116,11 @@ class LatLng(CommonBaseModel):
      WGS84 standard</a>. Values must be within normalized ranges.
      """

-     latitude: Optional[float] = Field(
+     latitude: float | None = Field(
          default=None,
          description="""The latitude in degrees. It must be in the range [-90.0, +90.0].""",
      )
-     longitude: Optional[float] = Field(
+     longitude: float | None = Field(
          default=None,
          description="""The longitude in degrees. It must be in the range [-180.0, +180.0]""",
      )
@@ -130,8 +129,8 @@ class LatLng(CommonBaseModel):
  class FunctionCallingConfig(CommonBaseModel):
      """Function calling config."""

-     mode: Optional[FunctionCallingConfigMode] = Field(default=None, description="""Optional. Function calling mode.""")
-     allowed_function_names: Optional[list[str]] = Field(
+     mode: FunctionCallingConfigMode | None = Field(default=None, description="""Optional. Function calling mode.""")
+     allowed_function_names: list[str] | None = Field(
          default=None,
          description="""Optional. Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided.""",
      )
@@ -140,8 +139,8 @@ class FunctionCallingConfig(CommonBaseModel):
  class RetrievalConfig(CommonBaseModel):
      """Retrieval config."""

-     lat_lng: Optional[LatLng] = Field(default=None, description="""Optional. The location of the user.""")
-     language_code: Optional[str] = Field(default=None, description="""The language code of the user.""")
+     lat_lng: LatLng | None = Field(default=None, description="""Optional. The location of the user.""")
+     language_code: str | None = Field(default=None, description="""The language code of the user.""")


  class ToolConfig(CommonBaseModel):
@@ -150,7 +149,7 @@ class ToolConfig(CommonBaseModel):
      This config is shared for all tools provided in the request.
      """

-     function_calling_config: Optional[FunctionCallingConfig] = Field(
+     function_calling_config: FunctionCallingConfig | None = Field(
          default=None, description="""Optional. Function calling config."""
      )
-     retrieval_config: Optional[RetrievalConfig] = Field(default=None, description="""Optional. Retrieval config.""")
+     retrieval_config: RetrievalConfig | None = Field(default=None, description="""Optional. Retrieval config.""")
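
Aside from the type-annotation modernization, note the super() cleanup inside CaseInSensitiveEnum. That class implements a forward-compatibility trick worth spelling out: unknown values are wrapped into a synthetic enum member instead of raising ValueError. A simplified, self-contained sketch of the pattern (names here are illustrative, not the package's):

    import enum

    class ForwardCompatibleEnum(str, enum.Enum):
        @classmethod
        def _missing_(cls, value):
            # Case-insensitive match against known values first.
            for member in cls:
                if member.value.lower() == str(value).lower():
                    return member
            # Unknown value: synthesize a member so that new upstream enum
            # values do not break older client code.
            unknown = str.__new__(cls, value)
            unknown._name_ = str(value)
            unknown._value_ = value
            return unknown

    class FinishReason(ForwardCompatibleEnum):
        STOP = "STOP"
        MAX_TOKENS = "MAX_TOKENS"

    print(FinishReason("stop") is FinishReason.STOP)  # True
    print(FinishReason("NEW_REASON").value)           # "NEW_REASON", no ValueError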
autogen/oai/groq.py CHANGED
@@ -27,12 +27,13 @@ import copy
  import os
  import time
  import warnings
- from typing import Any, Literal, Optional
+ from typing import Any, Literal

  from pydantic import Field
+ from typing_extensions import Unpack

  from ..import_utils import optional_import_block, require_optional_import
- from ..llm_config import LLMConfigEntry, register_llm_config
+ from ..llm_config.entry import LLMConfigEntry, LLMConfigEntryDict
  from .client_utils import should_hide_tools, validate_parameter
  from .oai_models import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall, Choice, CompletionUsage
@@ -48,18 +49,26 @@ GROQ_PRICING_1K = {
  }


- @register_llm_config
+ class GroqEntryDict(LLMConfigEntryDict, total=False):
+     api_type: Literal["groq"]
+
+     frequency_penalty: float
+     presence_penalty: float
+     seed: int
+     stream: bool
+     hide_tools: Literal["if_all_run", "if_any_run", "never"]
+     tool_choice: Literal["none", "auto", "required"] | None
+
+
  class GroqLLMConfigEntry(LLMConfigEntry):
      api_type: Literal["groq"] = "groq"
+
      frequency_penalty: float = Field(default=None, ge=-2, le=2)
-     max_tokens: int = Field(default=None, ge=0)
      presence_penalty: float = Field(default=None, ge=-2, le=2)
      seed: int = Field(default=None)
      stream: bool = Field(default=False)
-     temperature: float = Field(default=1, ge=0, le=2)
-     top_p: float = Field(default=None)
      hide_tools: Literal["if_all_run", "if_any_run", "never"] = "never"
-     tool_choice: Optional[Literal["none", "auto", "required"]] = None
+     tool_choice: Literal["none", "auto", "required"] | None = None

      def create_client(self):
          raise NotImplementedError("GroqLLMConfigEntry.create_client is not implemented.")
@@ -68,7 +77,7 @@ class GroqLLMConfigEntry(LLMConfigEntry):
  class GroqClient:
      """Client for Groq's API."""

-     def __init__(self, **kwargs):
+     def __init__(self, **kwargs: Unpack[GroqEntryDict]):
          """Requires api_key or environment variable to be set

          Args:
@@ -126,14 +135,17 @@ class GroqClient:
          groq_params["frequency_penalty"] = validate_parameter(
              params, "frequency_penalty", (int, float), True, None, (-2, 2), None
          )
+
          groq_params["max_tokens"] = validate_parameter(params, "max_tokens", int, True, None, (0, None), None)
+         groq_params["temperature"] = validate_parameter(params, "temperature", (int, float), True, 1, (0, 2), None)
+         groq_params["top_p"] = validate_parameter(params, "top_p", (int, float), True, None, None, None)
+
          groq_params["presence_penalty"] = validate_parameter(
              params, "presence_penalty", (int, float), True, None, (-2, 2), None
          )
          groq_params["seed"] = validate_parameter(params, "seed", int, True, None, None, None)
          groq_params["stream"] = validate_parameter(params, "stream", bool, True, False, None, None)
-         groq_params["temperature"] = validate_parameter(params, "temperature", (int, float), True, 1, (0, 2), None)
-         groq_params["top_p"] = validate_parameter(params, "top_p", (int, float), True, None, None, None)
+
          if "tool_choice" in params:
              groq_params["tool_choice"] = validate_parameter(
                  params, "tool_choice", str, True, None, None, ["none", "auto", "required"]
autogen/oai/mistral.py CHANGED
@@ -29,12 +29,12 @@ import json
  import os
  import time
  import warnings
- from typing import Any, Literal, Optional, Union
+ from typing import Any, Literal

- from pydantic import Field
+ from typing_extensions import Unpack

  from ..import_utils import optional_import_block, require_optional_import
- from ..llm_config import LLMConfigEntry, register_llm_config
+ from ..llm_config.entry import LLMConfigEntry, LLMConfigEntryDict
  from .client_utils import should_hide_tools, validate_parameter
  from .oai_models import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall, Choice, CompletionUsage

@@ -53,17 +53,23 @@ with optional_import_block():
      )


- @register_llm_config
+ class MistralEntryDict(LLMConfigEntryDict, total=False):
+     api_type: Literal["mistral"]
+
+     safe_prompt: bool
+     random_seed: int | None
+     stream: bool
+     hide_tools: Literal["if_all_run", "if_any_run", "never"]
+     tool_choice: Literal["none", "auto", "any"] | None
+
+
  class MistralLLMConfigEntry(LLMConfigEntry):
      api_type: Literal["mistral"] = "mistral"
-     temperature: float = Field(default=0.7)
-     top_p: Optional[float] = None
-     max_tokens: Optional[int] = Field(default=None, ge=0)
      safe_prompt: bool = False
-     random_seed: Optional[int] = None
+     random_seed: int | None = None
      stream: bool = False
      hide_tools: Literal["if_all_run", "if_any_run", "never"] = "never"
-     tool_choice: Optional[Literal["none", "auto", "any"]] = None
+     tool_choice: Literal["none", "auto", "any"] | None = None

      def create_client(self):
          raise NotImplementedError("MistralLLMConfigEntry.create_client is not implemented.")
@@ -73,7 +79,7 @@ class MistralLLMConfigEntry(LLMConfigEntry):
  class MistralAIClient:
      """Client for Mistral.AI's API."""

-     def __init__(self, **kwargs):
+     def __init__(self, **kwargs: Unpack[MistralEntryDict]):
          """Requires api_key or environment variable to be set

          Args:
@@ -93,7 +99,7 @@ class MistralAIClient:

          self._client = Mistral(api_key=self.api_key)

-     def message_retrieval(self, response: ChatCompletion) -> Union[list[str], list[ChatCompletionMessage]]:
+     def message_retrieval(self, response: ChatCompletion) -> list[str] | list[ChatCompletionMessage]:
          """Retrieve the messages from the response."""
          return [choice.message for choice in response.choices]

autogen/oai/oai_models/__init__.py CHANGED
@@ -5,7 +5,19 @@
  from .chat_completion import ChatCompletionExtended as ChatCompletion
  from .chat_completion import Choice
  from .chat_completion_message import ChatCompletionMessage
- from .chat_completion_message_tool_call import ChatCompletionMessageToolCall
+ from .chat_completion_message_tool_call import (
+     ChatCompletionMessageCustomToolCall,
+     ChatCompletionMessageFunctionToolCall,
+     ChatCompletionMessageToolCall,
+ )
  from .completion_usage import CompletionUsage

- __all__ = ["ChatCompletion", "ChatCompletionMessage", "ChatCompletionMessageToolCall", "Choice", "CompletionUsage"]
+ __all__ = [
+     "ChatCompletion",
+     "ChatCompletionMessage",
+     "ChatCompletionMessageCustomToolCall",
+     "ChatCompletionMessageFunctionToolCall",
+     "ChatCompletionMessageToolCall",
+     "Choice",
+     "CompletionUsage",
+ ]
autogen/oai/oai_models/_models.py CHANGED
@@ -4,13 +4,13 @@

  # Taken over from https://github.com/openai/openai-python/blob/main/src/openai/_models.py

+
  import pydantic
  import pydantic.generics
  from pydantic import ConfigDict
- from typing_extensions import ClassVar

  __all__ = ["BaseModel"]


  class BaseModel(pydantic.BaseModel):
-     model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow")
+     model_config = ConfigDict(extra="allow")
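
Dropping the ClassVar annotation does not change behavior: extra="allow" is what keeps unrecognized response fields around instead of failing validation, which is the point of this vendored base model. A small sketch:

    from pydantic import BaseModel, ConfigDict

    class Base(BaseModel):
        model_config = ConfigDict(extra="allow")
        id: str

    # A field added by a newer API version is preserved, not a validation error.
    m = Base.model_validate({"id": "x", "brand_new_field": 1})
    print(m.brand_new_field)  # 1
    print(m.model_extra)      # {'brand_new_field': 1}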
autogen/oai/oai_models/chat_completion.py CHANGED
@@ -6,9 +6,8 @@

  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

- from typing import Any, Callable, List, Optional
-
- from typing_extensions import Literal
+ from collections.abc import Callable
+ from typing import Any, Literal

  from ._models import BaseModel
  from .chat_completion_message import ChatCompletionMessage
@@ -19,10 +18,10 @@ __all__ = ["ChatCompletion", "Choice", "ChoiceLogprobs"]


  class ChoiceLogprobs(BaseModel):
-     content: Optional[List[ChatCompletionTokenLogprob]] = None
+     content: list[ChatCompletionTokenLogprob] | None = None
      """A list of message content tokens with log probability information."""

-     refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+     refusal: list[ChatCompletionTokenLogprob] | None = None
      """A list of message refusal tokens with log probability information."""


@@ -40,7 +39,7 @@ class Choice(BaseModel):
      index: int
      """The index of the choice in the list of choices."""

-     logprobs: Optional[ChoiceLogprobs] = None
+     logprobs: ChoiceLogprobs | None = None
      """Log probability information for the choice."""

      message: ChatCompletionMessage
@@ -51,7 +50,7 @@ class ChatCompletion(BaseModel):
      id: str
      """A unique identifier for the chat completion."""

-     choices: List[Choice]
+     choices: list[Choice]
      """A list of chat completion choices.

      Can be more than one if `n` is greater than 1.
@@ -66,22 +65,22 @@ class ChatCompletion(BaseModel):
      object: Literal["chat.completion"]
      """The object type, which is always `chat.completion`."""

-     service_tier: Optional[Literal["auto", "default", "flex", "scale"]] = None
+     service_tier: Literal["auto", "default", "flex", "scale", "priority"] | None = None
      """The service tier used for processing the request."""

-     system_fingerprint: Optional[str] = None
+     system_fingerprint: str | None = None
      """This fingerprint represents the backend configuration that the model runs with.

      Can be used in conjunction with the `seed` request parameter to understand when
      backend changes have been made that might impact determinism.
      """

-     usage: Optional[CompletionUsage] = None
+     usage: CompletionUsage | None = None
      """Usage statistics for the completion request."""


  class ChatCompletionExtended(ChatCompletion):
-     message_retrieval_function: Optional[Callable[[Any, "ChatCompletion"], list[ChatCompletionMessage]]] = None
-     config_id: Optional[str] = None
-     pass_filter: Optional[Callable[..., bool]] = None
-     cost: Optional[float] = None
+     message_retrieval_function: Callable[[Any, "ChatCompletion"], list[ChatCompletionMessage]] | None = None
+     config_id: str | None = None
+     pass_filter: Callable[..., bool] | None = None
+     cost: float | None = None
autogen/oai/oai_models/chat_completion_message.py CHANGED
@@ -6,13 +6,15 @@

  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

- from typing import List, Optional

- from typing_extensions import Literal
+ from typing import Literal

  from ._models import BaseModel
  from .chat_completion_audio import ChatCompletionAudio
- from .chat_completion_message_tool_call import ChatCompletionMessageToolCall
+ from .chat_completion_message_tool_call import (
+     ChatCompletionMessageCustomToolCall,
+     ChatCompletionMessageFunctionToolCall,
+ )

  __all__ = ["Annotation", "AnnotationURLCitation", "ChatCompletionMessage", "FunctionCall"]

@@ -53,34 +55,34 @@ class FunctionCall(BaseModel):


  class ChatCompletionMessage(BaseModel):
-     content: Optional[str] = None
+     content: str | None = None
      """The contents of the message."""

-     refusal: Optional[str] = None
+     refusal: str | None = None
      """The refusal message generated by the model."""

      role: Literal["assistant"]
      """The role of the author of this message."""

-     annotations: Optional[List[Annotation]] = None
+     annotations: list[Annotation] | None = None
      """
      Annotations for the message, when applicable, as when using the
      [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
      """

-     audio: Optional[ChatCompletionAudio] = None
+     audio: ChatCompletionAudio | None = None
      """
      If the audio output modality is requested, this object contains data about the
      audio response from the model.
      [Learn more](https://platform.openai.com/docs/guides/audio).
      """

-     function_call: Optional[FunctionCall] = None
+     function_call: FunctionCall | None = None
      """Deprecated and replaced by `tool_calls`.

      The name and arguments of a function that should be called, as generated by the
      model.
      """

-     tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None
+     tool_calls: list[ChatCompletionMessageFunctionToolCall | ChatCompletionMessageCustomToolCall] | None = None
      """The tool calls generated by the model, such as function calls."""
autogen/oai/oai_models/chat_completion_message_tool_call.py CHANGED
@@ -6,11 +6,11 @@

  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

- from typing_extensions import Literal
+ from typing import Literal

  from ._models import BaseModel

- __all__ = ["ChatCompletionMessageToolCall", "Function"]
+ __all__ = ["ChatCompletionMessageCustomToolCall", "ChatCompletionMessageFunctionToolCall", "Custom", "Function"]


  class Function(BaseModel):
@@ -26,7 +26,15 @@ class Function(BaseModel):
      """The name of the function to call."""


- class ChatCompletionMessageToolCall(BaseModel):
+ class Custom(BaseModel):
+     input: str
+     """The input to the custom tool."""
+
+     name: str
+     """The name of the custom tool."""
+
+
+ class ChatCompletionMessageFunctionToolCall(BaseModel):
      id: str
      """The ID of the tool call."""

@@ -35,3 +43,18 @@ class ChatCompletionMessageToolCall(BaseModel):

      type: Literal["function"]
      """The type of the tool. Currently, only `function` is supported."""
+
+
+ class ChatCompletionMessageCustomToolCall(BaseModel):
+     id: str
+     """The ID of the tool call."""
+
+     custom: Custom
+     """The custom tool that the model called."""
+
+     type: Literal["custom"]
+     """The type of the tool. Currently, only `custom` is supported."""
+
+
+ # Backward compatibility alias
+ ChatCompletionMessageToolCall = ChatCompletionMessageFunctionToolCall
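
Existing code that referenced ChatCompletionMessageToolCall keeps working through the alias, but message.tool_calls can now carry either variant, so downstream handlers should dispatch on the type discriminator. A sketch (the describe helper is illustrative, not part of the package; the arguments attribute on Function follows the upstream OpenAI model and is assumed here):

    from autogen.oai.oai_models import (
        ChatCompletionMessageCustomToolCall,
        ChatCompletionMessageFunctionToolCall,
    )

    def describe(tool_call) -> str:
        # Dispatch on the concrete variant rather than assuming `function`.
        if isinstance(tool_call, ChatCompletionMessageFunctionToolCall):
            return f"function {tool_call.function.name}({tool_call.function.arguments})"
        if isinstance(tool_call, ChatCompletionMessageCustomToolCall):
            return f"custom {tool_call.custom.name} <- {tool_call.custom.input!r}"
        raise TypeError(f"unexpected tool call: {type(tool_call).__name__}")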
autogen/oai/oai_models/chat_completion_token_logprob.py CHANGED
@@ -6,7 +6,6 @@

  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

- from typing import List, Optional

  from ._models import BaseModel

@@ -17,7 +16,7 @@ class TopLogprob(BaseModel):
      token: str
      """The token."""

-     bytes: Optional[List[int]] = None
+     bytes: list[int] | None = None
      """A list of integers representing the UTF-8 bytes representation of the token.

      Useful in instances where characters are represented by multiple tokens and
@@ -38,7 +37,7 @@ class ChatCompletionTokenLogprob(BaseModel):
      token: str
      """The token."""

-     bytes: Optional[List[int]] = None
+     bytes: list[int] | None = None
      """A list of integers representing the UTF-8 bytes representation of the token.

      Useful in instances where characters are represented by multiple tokens and
@@ -54,7 +53,7 @@ class ChatCompletionTokenLogprob(BaseModel):
      unlikely.
      """

-     top_logprobs: List[TopLogprob]
+     top_logprobs: list[TopLogprob]
      """List of the most likely tokens and their log probability, at this token
      position.

autogen/oai/oai_models/completion_usage.py CHANGED
@@ -6,7 +6,6 @@

  # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

- from typing import Optional

  from ._models import BaseModel

@@ -14,19 +13,19 @@ __all__ = ["CompletionTokensDetails", "CompletionUsage", "PromptTokensDetails"]


  class CompletionTokensDetails(BaseModel):
-     accepted_prediction_tokens: Optional[int] = None
+     accepted_prediction_tokens: int | None = None
      """
      When using Predicted Outputs, the number of tokens in the prediction that
      appeared in the completion.
      """

-     audio_tokens: Optional[int] = None
+     audio_tokens: int | None = None
      """Audio input tokens generated by the model."""

-     reasoning_tokens: Optional[int] = None
+     reasoning_tokens: int | None = None
      """Tokens generated by the model for reasoning."""

-     rejected_prediction_tokens: Optional[int] = None
+     rejected_prediction_tokens: int | None = None
      """
      When using Predicted Outputs, the number of tokens in the prediction that did
      not appear in the completion. However, like reasoning tokens, these tokens are
@@ -36,10 +35,10 @@ class CompletionTokensDetails(BaseModel):


  class PromptTokensDetails(BaseModel):
-     audio_tokens: Optional[int] = None
+     audio_tokens: int | None = None
      """Audio input tokens present in the prompt."""

-     cached_tokens: Optional[int] = None
+     cached_tokens: int | None = None
      """Cached tokens present in the prompt."""


@@ -53,8 +52,8 @@ class CompletionUsage(BaseModel):
      total_tokens: int
      """Total number of tokens used in the request (prompt + completion)."""

-     completion_tokens_details: Optional[CompletionTokensDetails] = None
+     completion_tokens_details: CompletionTokensDetails | None = None
      """Breakdown of tokens used in a completion."""

-     prompt_tokens_details: Optional[PromptTokensDetails] = None
+     prompt_tokens_details: PromptTokensDetails | None = None
      """Breakdown of tokens used in the prompt."""