ag2 0.9.6__py3-none-any.whl → 0.9.8.post1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ag2 might be problematic.

Files changed (236)
  1. {ag2-0.9.6.dist-info → ag2-0.9.8.post1.dist-info}/METADATA +102 -75
  2. ag2-0.9.8.post1.dist-info/RECORD +387 -0
  3. autogen/__init__.py +1 -2
  4. autogen/_website/generate_api_references.py +4 -5
  5. autogen/_website/generate_mkdocs.py +9 -15
  6. autogen/_website/notebook_processor.py +13 -14
  7. autogen/_website/process_notebooks.py +10 -10
  8. autogen/_website/utils.py +5 -4
  9. autogen/agentchat/agent.py +13 -13
  10. autogen/agentchat/assistant_agent.py +7 -6
  11. autogen/agentchat/contrib/agent_eval/agent_eval.py +3 -3
  12. autogen/agentchat/contrib/agent_eval/critic_agent.py +3 -3
  13. autogen/agentchat/contrib/agent_eval/quantifier_agent.py +3 -3
  14. autogen/agentchat/contrib/agent_eval/subcritic_agent.py +3 -3
  15. autogen/agentchat/contrib/agent_optimizer.py +3 -3
  16. autogen/agentchat/contrib/capabilities/generate_images.py +11 -11
  17. autogen/agentchat/contrib/capabilities/teachability.py +15 -15
  18. autogen/agentchat/contrib/capabilities/transforms.py +17 -18
  19. autogen/agentchat/contrib/capabilities/transforms_util.py +5 -5
  20. autogen/agentchat/contrib/capabilities/vision_capability.py +4 -3
  21. autogen/agentchat/contrib/captainagent/agent_builder.py +30 -30
  22. autogen/agentchat/contrib/captainagent/captainagent.py +22 -21
  23. autogen/agentchat/contrib/captainagent/tool_retriever.py +2 -3
  24. autogen/agentchat/contrib/gpt_assistant_agent.py +9 -9
  25. autogen/agentchat/contrib/graph_rag/document.py +3 -3
  26. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +3 -3
  27. autogen/agentchat/contrib/graph_rag/falkor_graph_rag_capability.py +6 -6
  28. autogen/agentchat/contrib/graph_rag/graph_query_engine.py +3 -3
  29. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +5 -11
  30. autogen/agentchat/contrib/graph_rag/neo4j_graph_rag_capability.py +6 -6
  31. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_query_engine.py +7 -7
  32. autogen/agentchat/contrib/graph_rag/neo4j_native_graph_rag_capability.py +6 -6
  33. autogen/agentchat/contrib/img_utils.py +1 -1
  34. autogen/agentchat/contrib/llamaindex_conversable_agent.py +11 -11
  35. autogen/agentchat/contrib/llava_agent.py +18 -4
  36. autogen/agentchat/contrib/math_user_proxy_agent.py +11 -11
  37. autogen/agentchat/contrib/multimodal_conversable_agent.py +8 -8
  38. autogen/agentchat/contrib/qdrant_retrieve_user_proxy_agent.py +6 -5
  39. autogen/agentchat/contrib/rag/chromadb_query_engine.py +22 -26
  40. autogen/agentchat/contrib/rag/llamaindex_query_engine.py +14 -17
  41. autogen/agentchat/contrib/rag/mongodb_query_engine.py +27 -37
  42. autogen/agentchat/contrib/rag/query_engine.py +7 -5
  43. autogen/agentchat/contrib/retrieve_assistant_agent.py +5 -5
  44. autogen/agentchat/contrib/retrieve_user_proxy_agent.py +8 -7
  45. autogen/agentchat/contrib/society_of_mind_agent.py +15 -14
  46. autogen/agentchat/contrib/swarm_agent.py +76 -98
  47. autogen/agentchat/contrib/text_analyzer_agent.py +7 -7
  48. autogen/agentchat/contrib/vectordb/base.py +10 -18
  49. autogen/agentchat/contrib/vectordb/chromadb.py +2 -1
  50. autogen/agentchat/contrib/vectordb/couchbase.py +18 -20
  51. autogen/agentchat/contrib/vectordb/mongodb.py +6 -5
  52. autogen/agentchat/contrib/vectordb/pgvectordb.py +40 -41
  53. autogen/agentchat/contrib/vectordb/qdrant.py +5 -5
  54. autogen/agentchat/contrib/web_surfer.py +20 -19
  55. autogen/agentchat/conversable_agent.py +311 -295
  56. autogen/agentchat/group/context_str.py +1 -3
  57. autogen/agentchat/group/context_variables.py +15 -25
  58. autogen/agentchat/group/group_tool_executor.py +10 -10
  59. autogen/agentchat/group/group_utils.py +15 -15
  60. autogen/agentchat/group/guardrails.py +7 -7
  61. autogen/agentchat/group/handoffs.py +19 -36
  62. autogen/agentchat/group/multi_agent_chat.py +7 -7
  63. autogen/agentchat/group/on_condition.py +4 -7
  64. autogen/agentchat/group/on_context_condition.py +4 -7
  65. autogen/agentchat/group/patterns/auto.py +8 -7
  66. autogen/agentchat/group/patterns/manual.py +7 -6
  67. autogen/agentchat/group/patterns/pattern.py +13 -12
  68. autogen/agentchat/group/patterns/random.py +3 -3
  69. autogen/agentchat/group/patterns/round_robin.py +3 -3
  70. autogen/agentchat/group/reply_result.py +2 -4
  71. autogen/agentchat/group/speaker_selection_result.py +5 -5
  72. autogen/agentchat/group/targets/group_chat_target.py +7 -6
  73. autogen/agentchat/group/targets/group_manager_target.py +4 -4
  74. autogen/agentchat/group/targets/transition_target.py +2 -1
  75. autogen/agentchat/groupchat.py +58 -61
  76. autogen/agentchat/realtime/experimental/audio_adapters/twilio_audio_adapter.py +4 -4
  77. autogen/agentchat/realtime/experimental/audio_adapters/websocket_audio_adapter.py +4 -4
  78. autogen/agentchat/realtime/experimental/clients/gemini/client.py +7 -7
  79. autogen/agentchat/realtime/experimental/clients/oai/base_client.py +8 -8
  80. autogen/agentchat/realtime/experimental/clients/oai/rtc_client.py +6 -6
  81. autogen/agentchat/realtime/experimental/clients/realtime_client.py +10 -9
  82. autogen/agentchat/realtime/experimental/realtime_agent.py +10 -9
  83. autogen/agentchat/realtime/experimental/realtime_observer.py +3 -3
  84. autogen/agentchat/realtime/experimental/realtime_swarm.py +44 -44
  85. autogen/agentchat/user_proxy_agent.py +10 -9
  86. autogen/agentchat/utils.py +3 -3
  87. autogen/agents/contrib/time/time_reply_agent.py +6 -5
  88. autogen/agents/contrib/time/time_tool_agent.py +2 -1
  89. autogen/agents/experimental/deep_research/deep_research.py +3 -3
  90. autogen/agents/experimental/discord/discord.py +2 -2
  91. autogen/agents/experimental/document_agent/chroma_query_engine.py +29 -44
  92. autogen/agents/experimental/document_agent/docling_doc_ingest_agent.py +9 -14
  93. autogen/agents/experimental/document_agent/document_agent.py +15 -16
  94. autogen/agents/experimental/document_agent/document_conditions.py +3 -3
  95. autogen/agents/experimental/document_agent/document_utils.py +5 -9
  96. autogen/agents/experimental/document_agent/inmemory_query_engine.py +14 -20
  97. autogen/agents/experimental/document_agent/parser_utils.py +4 -4
  98. autogen/agents/experimental/document_agent/url_utils.py +14 -23
  99. autogen/agents/experimental/reasoning/reasoning_agent.py +33 -33
  100. autogen/agents/experimental/slack/slack.py +2 -2
  101. autogen/agents/experimental/telegram/telegram.py +2 -3
  102. autogen/agents/experimental/websurfer/websurfer.py +4 -4
  103. autogen/agents/experimental/wikipedia/wikipedia.py +5 -7
  104. autogen/browser_utils.py +8 -8
  105. autogen/cache/abstract_cache_base.py +5 -5
  106. autogen/cache/cache.py +12 -12
  107. autogen/cache/cache_factory.py +4 -4
  108. autogen/cache/cosmos_db_cache.py +9 -9
  109. autogen/cache/disk_cache.py +6 -6
  110. autogen/cache/in_memory_cache.py +4 -4
  111. autogen/cache/redis_cache.py +4 -4
  112. autogen/code_utils.py +18 -18
  113. autogen/coding/base.py +6 -6
  114. autogen/coding/docker_commandline_code_executor.py +9 -9
  115. autogen/coding/func_with_reqs.py +7 -6
  116. autogen/coding/jupyter/base.py +3 -3
  117. autogen/coding/jupyter/docker_jupyter_server.py +3 -4
  118. autogen/coding/jupyter/import_utils.py +3 -3
  119. autogen/coding/jupyter/jupyter_client.py +5 -5
  120. autogen/coding/jupyter/jupyter_code_executor.py +3 -4
  121. autogen/coding/jupyter/local_jupyter_server.py +2 -6
  122. autogen/coding/local_commandline_code_executor.py +8 -7
  123. autogen/coding/markdown_code_extractor.py +1 -2
  124. autogen/coding/utils.py +1 -2
  125. autogen/doc_utils.py +3 -2
  126. autogen/environments/docker_python_environment.py +19 -29
  127. autogen/environments/python_environment.py +8 -17
  128. autogen/environments/system_python_environment.py +3 -4
  129. autogen/environments/venv_python_environment.py +8 -12
  130. autogen/environments/working_directory.py +1 -2
  131. autogen/events/agent_events.py +106 -109
  132. autogen/events/base_event.py +6 -5
  133. autogen/events/client_events.py +15 -14
  134. autogen/events/helpers.py +1 -1
  135. autogen/events/print_event.py +4 -5
  136. autogen/fast_depends/_compat.py +10 -15
  137. autogen/fast_depends/core/build.py +17 -36
  138. autogen/fast_depends/core/model.py +64 -113
  139. autogen/fast_depends/dependencies/model.py +2 -1
  140. autogen/fast_depends/dependencies/provider.py +3 -2
  141. autogen/fast_depends/library/model.py +4 -4
  142. autogen/fast_depends/schema.py +7 -7
  143. autogen/fast_depends/use.py +17 -25
  144. autogen/fast_depends/utils.py +10 -30
  145. autogen/formatting_utils.py +6 -6
  146. autogen/graph_utils.py +1 -4
  147. autogen/import_utils.py +13 -13
  148. autogen/interop/crewai/crewai.py +2 -2
  149. autogen/interop/interoperable.py +2 -2
  150. autogen/interop/langchain/langchain_chat_model_factory.py +3 -2
  151. autogen/interop/langchain/langchain_tool.py +2 -6
  152. autogen/interop/litellm/litellm_config_factory.py +6 -7
  153. autogen/interop/pydantic_ai/pydantic_ai.py +4 -7
  154. autogen/interop/registry.py +2 -1
  155. autogen/io/base.py +5 -5
  156. autogen/io/run_response.py +33 -32
  157. autogen/io/websockets.py +6 -5
  158. autogen/json_utils.py +1 -2
  159. autogen/llm_config/__init__.py +11 -0
  160. autogen/llm_config/client.py +58 -0
  161. autogen/llm_config/config.py +384 -0
  162. autogen/llm_config/entry.py +154 -0
  163. autogen/logger/base_logger.py +4 -3
  164. autogen/logger/file_logger.py +2 -1
  165. autogen/logger/logger_factory.py +2 -2
  166. autogen/logger/logger_utils.py +2 -2
  167. autogen/logger/sqlite_logger.py +3 -2
  168. autogen/math_utils.py +4 -5
  169. autogen/mcp/__main__.py +6 -6
  170. autogen/mcp/helpers.py +4 -4
  171. autogen/mcp/mcp_client.py +170 -29
  172. autogen/mcp/mcp_proxy/fastapi_code_generator_helpers.py +3 -4
  173. autogen/mcp/mcp_proxy/mcp_proxy.py +23 -26
  174. autogen/mcp/mcp_proxy/operation_grouping.py +4 -5
  175. autogen/mcp/mcp_proxy/operation_renaming.py +6 -10
  176. autogen/mcp/mcp_proxy/security.py +2 -3
  177. autogen/messages/agent_messages.py +96 -98
  178. autogen/messages/base_message.py +6 -5
  179. autogen/messages/client_messages.py +15 -14
  180. autogen/messages/print_message.py +4 -5
  181. autogen/oai/__init__.py +1 -2
  182. autogen/oai/anthropic.py +42 -41
  183. autogen/oai/bedrock.py +68 -57
  184. autogen/oai/cerebras.py +26 -25
  185. autogen/oai/client.py +118 -138
  186. autogen/oai/client_utils.py +3 -3
  187. autogen/oai/cohere.py +34 -11
  188. autogen/oai/gemini.py +40 -17
  189. autogen/oai/gemini_types.py +11 -12
  190. autogen/oai/groq.py +22 -10
  191. autogen/oai/mistral.py +17 -11
  192. autogen/oai/oai_models/__init__.py +14 -2
  193. autogen/oai/oai_models/_models.py +2 -2
  194. autogen/oai/oai_models/chat_completion.py +13 -14
  195. autogen/oai/oai_models/chat_completion_message.py +11 -9
  196. autogen/oai/oai_models/chat_completion_message_tool_call.py +26 -3
  197. autogen/oai/oai_models/chat_completion_token_logprob.py +3 -4
  198. autogen/oai/oai_models/completion_usage.py +8 -9
  199. autogen/oai/ollama.py +22 -10
  200. autogen/oai/openai_responses.py +40 -17
  201. autogen/oai/openai_utils.py +159 -85
  202. autogen/oai/together.py +29 -14
  203. autogen/retrieve_utils.py +6 -7
  204. autogen/runtime_logging.py +5 -4
  205. autogen/token_count_utils.py +7 -4
  206. autogen/tools/contrib/time/time.py +0 -1
  207. autogen/tools/dependency_injection.py +5 -6
  208. autogen/tools/experimental/browser_use/browser_use.py +10 -10
  209. autogen/tools/experimental/code_execution/python_code_execution.py +5 -7
  210. autogen/tools/experimental/crawl4ai/crawl4ai.py +12 -15
  211. autogen/tools/experimental/deep_research/deep_research.py +9 -8
  212. autogen/tools/experimental/duckduckgo/duckduckgo_search.py +5 -11
  213. autogen/tools/experimental/firecrawl/firecrawl_tool.py +98 -115
  214. autogen/tools/experimental/google/authentication/credentials_local_provider.py +1 -1
  215. autogen/tools/experimental/google/drive/drive_functions.py +4 -4
  216. autogen/tools/experimental/google/drive/toolkit.py +5 -5
  217. autogen/tools/experimental/google_search/google_search.py +5 -5
  218. autogen/tools/experimental/google_search/youtube_search.py +5 -5
  219. autogen/tools/experimental/messageplatform/discord/discord.py +8 -12
  220. autogen/tools/experimental/messageplatform/slack/slack.py +14 -20
  221. autogen/tools/experimental/messageplatform/telegram/telegram.py +8 -12
  222. autogen/tools/experimental/perplexity/perplexity_search.py +18 -29
  223. autogen/tools/experimental/reliable/reliable.py +68 -74
  224. autogen/tools/experimental/searxng/searxng_search.py +20 -19
  225. autogen/tools/experimental/tavily/tavily_search.py +12 -19
  226. autogen/tools/experimental/web_search_preview/web_search_preview.py +13 -7
  227. autogen/tools/experimental/wikipedia/wikipedia.py +7 -10
  228. autogen/tools/function_utils.py +7 -7
  229. autogen/tools/tool.py +6 -5
  230. autogen/types.py +2 -2
  231. autogen/version.py +1 -1
  232. ag2-0.9.6.dist-info/RECORD +0 -421
  233. autogen/llm_config.py +0 -385
  234. {ag2-0.9.6.dist-info → ag2-0.9.8.post1.dist-info}/WHEEL +0 -0
  235. {ag2-0.9.6.dist-info → ag2-0.9.8.post1.dist-info}/licenses/LICENSE +0 -0
  236. {ag2-0.9.6.dist-info → ag2-0.9.8.post1.dist-info}/licenses/NOTICE.md +0 -0
autogen/oai/ollama.py CHANGED
@@ -22,18 +22,19 @@ Resources:
 
 from __future__ import annotations
 
+import ast
 import copy
 import json
 import random
 import re
 import time
 import warnings
-from typing import Any, Literal, Optional, Union
+from typing import Any, Literal
 
 from pydantic import BaseModel, Field, HttpUrl
 
 from ..import_utils import optional_import_block, require_optional_import
-from ..llm_config import LLMConfigEntry, register_llm_config
+from ..llm_config.entry import LLMConfigEntry, LLMConfigEntryDict
 from .client_utils import FormatterProtocol, should_hide_tools, validate_parameter
 from .oai_models import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall, Choice, CompletionUsage
 
@@ -43,10 +44,23 @@ with optional_import_block():
     from ollama import Client
 
 
-@register_llm_config
+class OllamaEntryDict(LLMConfigEntryDict, total=False):
+    api_type: Literal["ollama"]
+    client_host: HttpUrl | None
+    stream: bool
+    num_predict: int
+    num_ctx: int
+    repeat_penalty: float
+    seed: int
+    top_k: int
+    hide_tools: Literal["if_all_run", "if_any_run", "never"]
+    native_tool_calls: bool
+
+
 class OllamaLLMConfigEntry(LLMConfigEntry):
     api_type: Literal["ollama"] = "ollama"
-    client_host: Optional[HttpUrl] = None
+    # TODO: max_tokens
+    client_host: HttpUrl | None = None
     stream: bool = False
     num_predict: int = Field(
         default=-1,
@@ -55,10 +69,9 @@ class OllamaLLMConfigEntry(LLMConfigEntry):
     num_ctx: int = Field(default=2048)
     repeat_penalty: float = Field(default=1.1)
     seed: int = Field(default=0)
-    temperature: float = Field(default=0.8)
     top_k: int = Field(default=40)
-    top_p: float = Field(default=0.9)
     hide_tools: Literal["if_all_run", "if_any_run", "never"] = "never"
+    native_tool_calls: bool = False
 
     def create_client(self):
         raise NotImplementedError("OllamaLLMConfigEntry.create_client is not implemented.")
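The `OllamaEntryDict` introduced above mirrors the pydantic entry as a `TypedDict`, which lets type checkers validate plain config dicts. A minimal sketch of how such a dict might be annotated (assumes ag2 0.9.8.post1 is installed; fields inherited from `LLMConfigEntryDict`, such as the model name, are not shown here):

```python
from autogen.oai.ollama import OllamaEntryDict

# total=False makes every key optional; a type checker (mypy/pyright) flags
# misspelled keys or wrongly typed values in this literal.
ollama_entry: OllamaEntryDict = {
    "api_type": "ollama",
    "num_ctx": 8192,
    "hide_tools": "if_any_run",
    "native_tool_calls": False,
}
```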
@@ -99,11 +112,10 @@ class OllamaClient:
     # Override using "manual_tool_call_step2" config parameter
     TOOL_CALL_MANUAL_STEP2 = " (proceed with step 2)"
 
-    def __init__(self, response_format: Optional[Union[BaseModel, dict[str, Any]]] = None, **kwargs):
+    def __init__(self, response_format: BaseModel | dict[str, Any] | None = None, **kwargs):
         """Note that no api_key or environment variable is required for Ollama."""
-
         # Store the response format, if provided (for structured outputs)
-        self._response_format: Optional[Union[BaseModel, dict[str, Any]]] = response_format
+        self._response_format: BaseModel | dict[str, Any] | None = response_format
 
     def message_retrieval(self, response) -> list:
         """Retrieve and return a list of strings or a list of Choice.Message from the response.
@@ -612,7 +624,7 @@ def _object_to_tool_call(data_object: Any) -> list[dict[str, Any]]:
     is_invalid = False
     for i, item in enumerate(data_copy):
         try:
-            new_item = eval(item)
+            new_item = ast.literal_eval(item)
             if isinstance(new_item, dict):
                 if is_valid_tool_call_item(new_item):
                     data_object[i] = new_item
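The `eval` → `ast.literal_eval` swap above is the security-relevant part of this file: tool calls that the model returns as stringified Python dicts are now parsed as literals only. A small standalone sketch of the difference (the sample strings are illustrative, not taken from the package):

```python
import ast

tool_call_str = "{'name': 'get_weather', 'arguments': {'city': 'Paris'}}"
print(ast.literal_eval(tool_call_str))  # {'name': 'get_weather', 'arguments': {'city': 'Paris'}}

# eval() would execute this expression; literal_eval refuses anything that is not a literal.
malicious = "__import__('os').system('echo owned')"
try:
    ast.literal_eval(malicious)
except ValueError as exc:
    print(f"rejected non-literal input: {exc}")
```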
autogen/oai/openai_responses.py CHANGED
@@ -4,7 +4,7 @@
 
 import copy
 import warnings
-from typing import TYPE_CHECKING, Any, Tuple, Union
+from typing import TYPE_CHECKING, Any
 
 from pydantic import BaseModel
 
@@ -48,9 +48,8 @@ VALID_SIZES = {
 
 def calculate_openai_image_cost(
     model: str = "gpt-image-1", size: str = "1024x1024", quality: str = "high"
-) -> Tuple[float, str]:
-    """
-    Calculate the cost for a single image generation.
+) -> tuple[float, str]:
+    """Calculate the cost for a single image generation.
 
     Args:
         model: Model name ("gpt-image-1", "dall-e-3" or "dall-e-2")
@@ -114,7 +113,7 @@ class OpenAIResponsesClient:
     def __init__(
         self,
         client: "OpenAI",
-        response_format: Union[BaseModel, dict[str, Any], None] = None,
+        response_format: BaseModel | dict[str, Any] | None = None,
     ):
         self._oai_client = client  # plain openai.OpenAI instance
         self.response_format = response_format  # kept for parity but unused for now
@@ -195,6 +194,12 @@ class OpenAIResponsesClient:
            delta_messages.append(m)
        return delta_messages[::-1]
 
+    def _parse_params(self, params: dict[str, Any]) -> None:
+        if "verbosity" in params:
+            verbosity = params.pop("verbosity")
+            params["text"] = {"verbosity": verbosity}
+        return params
+
     def create(self, params: dict[str, Any]) -> "Response":
         """Invoke `client.responses.create() or .parse()`.
 
@@ -246,7 +251,14 @@
                    "output": content,
                })
                break
-        params["input"] = input_items[::-1]
+
+        # Ensure we have at least one valid input item
+        if input_items:
+            params["input"] = input_items[::-1]
+        else:
+            # If no valid input items were created, create a default one
+            # This prevents the API error about missing required parameters
+            params["input"] = [{"role": "user", "content": [{"type": "input_text", "text": "Hello"}]}]
 
        # Initialize tools list
        tools_list = []
@@ -274,6 +286,11 @@ class OpenAIResponsesClient:
                UserWarning,
            )
 
+        # Validate that we have at least one of the required parameters
+        if not any(key in params for key in ["input", "previous_response_id", "prompt"]):
+            # If we still don't have any required parameters, create a minimal input
+            params["input"] = [{"role": "user", "content": [{"type": "input_text", "text": "Hello"}]}]
+
        # ------------------------------------------------------------------
        # Structured output handling - mimic OpenAIClient behaviour
        # ------------------------------------------------------------------
@@ -306,7 +323,6 @@
                kwargs["text_format"] = type_to_response_format_param(rf)
                if "response_format" in kwargs:
                    kwargs["text_format"] = kwargs.pop("response_format")
-
                try:
                    return self._oai_client.responses.parse(**kwargs)
                except TypeError as e:
@@ -325,22 +341,19 @@
            response = _create_or_parse(**params)
            self.previous_response_id = response.id
            return response
-
        # No structured output
+        params = self._parse_params(params)
        response = self._oai_client.responses.create(**params)
        self.previous_response_id = response.id
-
        # Accumulate image costs
        self._add_image_cost(response)
-
        return response
 
-    def message_retrieval(
-        self, response
-    ) -> Union[list[str], list["ModelClient.ModelClientResponseProtocol.Choice.Message"]]:
+    def message_retrieval(self, response) -> list[str] | list["ModelClient.ModelClientResponseProtocol.Choice.Message"]:
        output = getattr(response, "output", [])
-        content = []  # list[dict[str, Union[str, dict[str, Any]]]]]
+        content = []
        tool_calls = []
+
        for item in output:
            # Convert pydantic objects to plain dicts for uniform handling
            if hasattr(item, "model_dump"):
@@ -348,16 +361,26 @@
 
            item_type = item.get("type")
 
-            # ------------------------------------------------------------------
-            # 1) Normal messages
-            # ------------------------------------------------------------------
+            # Skip reasoning items - they're not messages
+            if item_type == "reasoning":
+                continue
+
            if item_type == "message":
                new_item = copy.deepcopy(item)
                new_item["type"] = "text"
                new_item["role"] = "assistant"
+
                blocks = item.get("content", [])
                if len(blocks) == 1 and blocks[0].get("type") == "output_text":
                    new_item["text"] = blocks[0]["text"]
+                elif len(blocks) > 0:
+                    # Handle multiple content blocks
+                    text_parts = []
+                    for block in blocks:
+                        if block.get("type") == "output_text":
+                            text_parts.append(block.get("text", ""))
+                    new_item["text"] = " ".join(text_parts)
+
                if "content" in new_item:
                    del new_item["content"]
                content.append(new_item)
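Among the changes above, `_parse_params` is a small pre-flight rewrite for the non-structured-output path: a top-level `verbosity` kwarg is relocated under the Responses API's `text` parameter before `responses.create()` is called. A standalone sketch of that rewrite (parameter values are made up):

```python
params = {"model": "gpt-5", "input": "Summarise the release notes.", "verbosity": "low"}

# Same transformation _parse_params applies in the diff above.
if "verbosity" in params:
    params["text"] = {"verbosity": params.pop("verbosity")}

print(params)
# {'model': 'gpt-5', 'input': 'Summarise the release notes.', 'text': {'verbosity': 'low'}}
```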
autogen/oai/openai_utils.py CHANGED
@@ -17,7 +17,7 @@ import time
 import warnings
 from copy import deepcopy
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Optional, Union
+from typing import TYPE_CHECKING, Any, Union
 
 from dotenv import find_dotenv, load_dotenv
 from packaging.version import parse
@@ -27,8 +27,9 @@ if TYPE_CHECKING:
     from openai import OpenAI
     from openai.types.beta.assistant import Assistant
 
+    from ..llm_config import LLMConfig
+
 from ..doc_utils import export_module
-from ..llm_config import LLMConfig
 
 NON_CACHE_KEY = [
     "api_key",
@@ -58,6 +59,15 @@ OAI_PRICE1K = {
     # o3
     "o3": (0.0011, 0.0044),
     "o3-mini-2025-01-31": (0.0011, 0.0044),
+    # gpt-5
+    "gpt-5": (0.00125, 0.00125),
+    "gpt-5-2025-08-07": (0.00125, 0.00125),
+    # gpt-5-mini
+    "gpt-5-mini": (0.00025, 0.00025),
+    "gpt-5-mini-2025-08-07": (0.00025, 0.00025),
+    # gpt-5-nano
+    "gpt-5-nano": (0.00005, 0.00005),
+    "gpt-5-nano-2025-08-07": (0.00005, 0.00005),
     # gpt-4o
     "gpt-4o": (0.005, 0.015),
     "gpt-4o-2024-05-13": (0.005, 0.015),
@@ -157,9 +167,9 @@ def is_valid_api_key(api_key: str) -> bool:
 @export_module("autogen")
 def get_config_list(
     api_keys: list[str],
-    base_urls: Optional[list[str]] = None,
-    api_type: Optional[str] = None,
-    api_version: Optional[str] = None,
+    base_urls: list[str] | None = None,
+    api_type: str | None = None,
+    api_version: str | None = None,
 ) -> list[dict[str, Any]]:
     """Get a list of configs for OpenAI API client.
 
@@ -208,7 +218,7 @@
 
 @export_module("autogen")
 def get_first_llm_config(
-    llm_config: Union[LLMConfig, dict[str, Any]],
+    llm_config: Union["LLMConfig", dict[str, Any]],
 ) -> dict[str, Any]:
     """Get the first LLM config from the given LLM config.
 
@@ -236,12 +246,12 @@
 
 @export_module("autogen")
 def config_list_openai_aoai(
-    key_file_path: Optional[str] = ".",
-    openai_api_key_file: Optional[str] = "key_openai.txt",
-    aoai_api_key_file: Optional[str] = "key_aoai.txt",
-    openai_api_base_file: Optional[str] = "base_openai.txt",
-    aoai_api_base_file: Optional[str] = "base_aoai.txt",
-    exclude: Optional[str] = None,
+    key_file_path: str | None = ".",
+    openai_api_key_file: str | None = "key_openai.txt",
+    aoai_api_key_file: str | None = "key_aoai.txt",
+    openai_api_base_file: str | None = "base_openai.txt",
+    aoai_api_base_file: str | None = "base_aoai.txt",
+    exclude: str | None = None,
 ) -> list[dict[str, Any]]:
     """Get a list of configs for OpenAI API client (including Azure or local model deployments that support OpenAI's chat completion API).
 
@@ -366,12 +376,12 @@
 
 @export_module("autogen")
 def config_list_from_models(
-    key_file_path: Optional[str] = ".",
-    openai_api_key_file: Optional[str] = "key_openai.txt",
-    aoai_api_key_file: Optional[str] = "key_aoai.txt",
-    aoai_api_base_file: Optional[str] = "base_aoai.txt",
-    exclude: Optional[str] = None,
-    model_list: Optional[list[str]] = None,
+    key_file_path: str | None = ".",
+    openai_api_key_file: str | None = "key_openai.txt",
+    aoai_api_key_file: str | None = "key_aoai.txt",
+    aoai_api_base_file: str | None = "base_aoai.txt",
+    exclude: str | None = None,
+    model_list: list[str] | None = None,
 ) -> list[dict[str, Any]]:
     """Get a list of configs for API calls with models specified in the model list.
 
@@ -433,11 +443,11 @@
 
 @export_module("autogen")
 def config_list_gpt4_gpt35(
-    key_file_path: Optional[str] = ".",
-    openai_api_key_file: Optional[str] = "key_openai.txt",
-    aoai_api_key_file: Optional[str] = "key_aoai.txt",
-    aoai_api_base_file: Optional[str] = "base_aoai.txt",
-    exclude: Optional[str] = None,
+    key_file_path: str | None = ".",
+    openai_api_key_file: str | None = "key_openai.txt",
+    aoai_api_key_file: str | None = "key_aoai.txt",
+    aoai_api_base_file: str | None = "base_aoai.txt",
+    exclude: str | None = None,
 ) -> list[dict[str, Any]]:
     """Get a list of configs for 'gpt-4' followed by 'gpt-3.5-turbo' API calls.
 
@@ -464,62 +474,73 @@
 @export_module("autogen")
 def filter_config(
     config_list: list[dict[str, Any]],
-    filter_dict: Optional[dict[str, Union[list[Union[str, None]], set[Union[str, None]]]]],
+    filter_dict: dict[str, list[str | None] | set[str | None]] | None,
     exclude: bool = False,
 ) -> list[dict[str, Any]]:
-    """This function filters `config_list` by checking each configuration dictionary against the criteria specified in
-    `filter_dict`. A configuration dictionary is retained if for every key in `filter_dict`, see example below.
+    """Filter configuration dictionaries based on specified criteria.
+
+    This function filters a list of configuration dictionaries by applying ALL criteria specified in `filter_dict`.
+    A configuration is included in the result if it satisfies every key-value constraint in the filter dictionary.
+    For each filter key, the configuration's corresponding field value must match at least one of the acceptable
+    values (OR logic within each criteria, AND logic between different criteria).
 
     Args:
        config_list (list of dict): A list of configuration dictionaries to be filtered.
-        filter_dict (dict): A dictionary representing the filter criteria, where each key is a
-            field name to check within the configuration dictionaries, and the
-            corresponding value is a list of acceptable values for that field.
-            If the configuration's field's value is not a list, then a match occurs
-            when it is found in the list of acceptable values. If the configuration's
-            field's value is a list, then a match occurs if there is a non-empty
-            intersection with the acceptable values.
-        exclude (bool): If False (the default value), configs that match the filter will be included in the returned
-            list. If True, configs that match the filter will be excluded in the returned list.
+
+        filter_dict (dict, optional): A dictionary specifying filter criteria where:
+            - Keys are field names to check in each configuration dictionary
+            - Values are lists/sets of acceptable values for that field
+            - A configuration matches if ALL filter keys are satisfied AND for each key,
+              the config's field value matches at least one acceptable value
+            - If a filter value includes None, configurations missing that field will match
+            - If None, no filtering is applied
+
+        exclude (bool, optional): If False (default), return configurations that match the filter.
+            If True, return configurations that do NOT match the filter.
 
    Returns:
-        list of dict: A list of configuration dictionaries that meet all the criteria specified
-            in `filter_dict`.
+        list of dict: Filtered list of configuration dictionaries.
 
-    Example:
+    Matching Logic:
+        - **Between different filter keys**: AND logic (all criteria must be satisfied)
+        - **Within each filter key's values**: OR logic (any acceptable value can match)
+        - **For list-type config values**: Match if there's any intersection with acceptable values
+        - **For scalar config values**: Match if the value is in the list of acceptable values
+        - **Missing fields**: Only match if None is included in the acceptable values for that field
+
+    Examples:
        ```python
-        # Example configuration list with various models and API types
        configs = [
-            {"model": "gpt-3.5-turbo"},
-            {"model": "gpt-4"},
-            {"model": "gpt-3.5-turbo", "api_type": "azure"},
-            {"model": "gpt-3.5-turbo", "tags": ["gpt35_turbo", "gpt-35-turbo"]},
+            {"model": "gpt-3.5-turbo", "api_type": "openai"},
+            {"model": "gpt-4", "api_type": "openai"},
+            {"model": "gpt-3.5-turbo", "api_type": "azure", "api_version": "2024-02-01"},
+            {"model": "gpt-4", "tags": ["premium", "latest"]},
        ]
-        # Define filter criteria to select configurations for the 'gpt-3.5-turbo' model
-        # that are also using the 'azure' API type
-        filter_criteria = {
-            "model": ["gpt-3.5-turbo"],  # Only accept configurations for 'gpt-3.5-turbo'
-            "api_type": ["azure"],  # Only accept configurations for 'azure' API type
-        }
-        # Apply the filter to the configuration list
-        filtered_configs = filter_config(configs, filter_criteria)
-        # The resulting `filtered_configs` will be:
-        # [{'model': 'gpt-3.5-turbo', 'api_type': 'azure', ...}]
-        # Define a filter to select a given tag
-        filter_criteria = {
-            "tags": ["gpt35_turbo"],
-        }
-        # Apply the filter to the configuration list
-        filtered_configs = filter_config(configs, filter_criteria)
-        # The resulting `filtered_configs` will be:
-        # [{'model': 'gpt-3.5-turbo', 'tags': ['gpt35_turbo', 'gpt-35-turbo']}]
+
+        # Example 1: Single criterion - matches any model in the list
+        filter_dict = {"model": ["gpt-4", "gpt-4o"]}
+        result = filter_config(configs, filter_dict)
+        # Returns: [{"model": "gpt-4", "api_type": "openai"}, {"model": "gpt-4", "tags": ["premium", "latest"]}]
+
+        # Example 2: Multiple criteria - must satisfy ALL conditions
+        filter_dict = {"model": ["gpt-3.5-turbo"], "api_type": ["azure"]}
+        result = filter_config(configs, filter_dict)
+        # Returns: [{"model": "gpt-3.5-turbo", "api_type": "azure", "api_version": "2024-02-01"}]
+
+        # Example 3: Tag filtering with list intersection
+        filter_dict = {"tags": ["premium"]}
+        result = filter_config(configs, filter_dict)
+        # Returns: [{"model": "gpt-4", "tags": ["premium", "latest"]}]
+
+        # Example 4: Exclude matching configurations
+        filter_dict = {"api_type": ["openai"]}
+        result = filter_config(configs, filter_dict, exclude=True)
+        # Returns configs that do NOT have api_type="openai"
        ```
    Note:
        - If `filter_dict` is empty or None, no filtering is applied and `config_list` is returned as is.
        - If a configuration dictionary in `config_list` does not contain a key specified in `filter_dict`,
          it is considered a non-match and is excluded from the result.
-        - If the list of acceptable values for a key in `filter_dict` includes None, then configuration
-          dictionaries that do not have that key will also be considered a match.
 
    """
    if inspect.stack()[1].function != "where":
@@ -538,25 +559,80 @@
     return config_list
 
 
-def _satisfies_criteria(value: Any, criteria_values: Any) -> bool:
-    if value is None:
+def _satisfies_criteria(config_value: Any, criteria_values: Any) -> bool:
+    """Check if a configuration field value satisfies the filter criteria.
+
+    This helper function implements the matching logic between a single configuration
+    field value and the acceptable values specified in the filter criteria. It handles
+    both scalar and list-type configuration values with appropriate matching strategies.
+
+    Args:
+        config_value (Any): The value from a configuration dictionary field.
+            Can be None, a scalar value, or a list of values.
+        criteria_values (Any): The acceptable values from the filter dictionary.
+            Can be a single value or a list/set of acceptable values.
+
+    Returns:
+        bool: True if the config_value satisfies the criteria, False otherwise.
+
+    Matching Logic:
+        - **None config values**: Always return False (missing fields don't match)
+        - **List config values**:
+            - If criteria is a list: Match if there's any intersection (set overlap)
+            - If criteria is scalar: Match if the scalar is contained in the config list
+        - **Scalar config values**:
+            - If criteria is a list: Match if the config value is in the criteria list
+            - If criteria is scalar: Match if the values are exactly equal
+
+    Examples:
+        ```python
+        # List config value with list criteria (intersection matching)
+        _satisfies_criteria(["gpt-4", "gpt-3.5"], ["gpt-4", "claude"])  # True (gpt-4 intersects)
+        _satisfies_criteria(["tag1", "tag2"], ["tag3", "tag4"])  # False (no intersection)
+
+        # List config value with scalar criteria (containment matching)
+        _satisfies_criteria(["premium", "latest"], "premium")  # True (premium is in list)
+        _satisfies_criteria(["tag1", "tag2"], "tag3")  # False (tag3 not in list)
+
+        # Scalar config value with list criteria (membership matching)
+        _satisfies_criteria("gpt-4", ["gpt-4", "gpt-3.5"])  # True (gpt-4 in criteria)
+        _satisfies_criteria("claude", ["gpt-4", "gpt-3.5"])  # False (claude not in criteria)
+
+        # Scalar config value with scalar criteria (equality matching)
+        _satisfies_criteria("openai", "openai")  # True (exact match)
+        _satisfies_criteria("openai", "azure")  # False (different values)
+
+        # None config values (missing fields)
+        _satisfies_criteria(None, ["gpt-4"])  # False (missing field)
+        _satisfies_criteria(None, "gpt-4")  # False (missing field)
+        ```
+
+    Note:
+        This is an internal helper function used by `filter_config()`. The function
+        assumes that both parameters can be of various types and handles type
+        checking internally to determine the appropriate matching strategy.
+    """
+    if config_value is None:
        return False
 
-    if isinstance(value, list):
-        return bool(set(value) & set(criteria_values))  # Non-empty intersection
+    if isinstance(config_value, list):
+        if isinstance(criteria_values, list):
+            return bool(set(config_value) & set(criteria_values))  # Non-empty intersection
+        else:
+            return criteria_values in config_value
    else:
        # In filter_dict, filter could be either a list of values or a single value.
        # For example, filter_dict = {"model": ["gpt-3.5-turbo"]} or {"model": "gpt-3.5-turbo"}
        if isinstance(criteria_values, list):
-            return value in criteria_values
-        return bool(value == criteria_values)
+            return config_value in criteria_values
+        return bool(config_value == criteria_values)
 
 
 @export_module("autogen")
 def config_list_from_json(
     env_or_file: str,
-    file_location: Optional[str] = "",
-    filter_dict: Optional[dict[str, Union[list[Union[str, None]], set[Union[str, None]]]]] = None,
+    file_location: str | None = "",
+    filter_dict: dict[str, list[str | None] | set[str | None]] | None = None,
 ) -> list[dict[str, Any]]:
     """Retrieves a list of API configurations from a JSON stored in an environment variable or a file.
 
@@ -620,16 +696,14 @@ def config_list_from_json(
     with open(config_list_path) as json_file:
         config_list = json.load(json_file)
 
-    config_list = filter_config(config_list, filter_dict)
-
     return filter_config(config_list, filter_dict)
 
 
 def get_config(
-    api_key: Optional[str],
-    base_url: Optional[str] = None,
-    api_type: Optional[str] = None,
-    api_version: Optional[str] = None,
+    api_key: str | None,
+    base_url: str | None = None,
+    api_type: str | None = None,
+    api_version: str | None = None,
 ) -> dict[str, Any]:
     """Constructs a configuration dictionary for a single model with the provided API configurations.
 
@@ -665,10 +739,10 @@
 
 @export_module("autogen")
 def config_list_from_dotenv(
-    dotenv_file_path: Optional[str] = None,
-    model_api_key_map: Optional[dict[str, Any]] = None,
-    filter_dict: Optional[dict[str, Union[list[Union[str, None]], set[Union[str, None]]]]] = None,
-) -> list[dict[str, Union[str, set[str]]]]:
+    dotenv_file_path: str | None = None,
+    model_api_key_map: dict[str, Any] | None = None,
+    filter_dict: dict[str, list[str | None] | set[str | None]] | None = None,
+) -> list[dict[str, str | set[str]]]:
     """Load API configurations from a specified .env file or environment variables and construct a list of configurations.
 
     This function will:
@@ -733,12 +807,12 @@ def config_list_from_dotenv(
             config_without_key_var = {k: v for k, v in config.items() if k != "api_key_env_var"}
             config_dict = get_config(api_key=api_key, **config_without_key_var)
         else:
-            logging.warning(f"Unsupported type {type(config)} for model {model} configuration")
-
-        if not config_dict["api_key"] or config_dict["api_key"].strip() == "":
             logging.warning(
-                f"API key not found or empty for model {model}. Please ensure path to .env file is correct."
+                "Unsupported configuration type encountered for a model. Please check your model_api_key_map."
             )
+
+        if not config_dict["api_key"] or config_dict["api_key"].strip() == "":
+            logging.warning("API key not found or empty for a model. Please ensure path to .env file is correct.")
            continue  # Skip this configuration and continue with the next
 
        # Add model to the configuration and append to the list
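The `OAI_PRICE1K` rows added earlier in this file follow the table's existing convention of a `(prompt, completion)` pair of USD prices per 1,000 tokens. A rough sketch of how such a pair translates into a request cost (the helper function and token counts below are illustrative, not part of the package):

```python
# Subset of the table as shipped in 0.9.8.post1.
OAI_PRICE1K = {
    "gpt-5": (0.00125, 0.00125),
    "gpt-4o": (0.005, 0.015),
}

def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    # (prompt price, completion price) per 1,000 tokens, as the existing entries suggest.
    prompt_price, completion_price = OAI_PRICE1K[model]
    return (prompt_tokens / 1000) * prompt_price + (completion_tokens / 1000) * completion_price

print(round(estimate_cost("gpt-5", 10_000, 2_000), 4))   # 0.015
print(round(estimate_cost("gpt-4o", 10_000, 2_000), 4))  # 0.08
```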
autogen/oai/together.py CHANGED
@@ -33,12 +33,13 @@ import copy
 import os
 import time
 import warnings
-from typing import Any, Literal, Optional, Union
+from typing import Any, Literal
 
 from pydantic import Field
+from typing_extensions import Unpack
 
 from ..import_utils import optional_import_block, require_optional_import
-from ..llm_config import LLMConfigEntry, register_llm_config
+from ..llm_config.entry import LLMConfigEntry, LLMConfigEntryDict
 from .client_utils import should_hide_tools, validate_parameter
 from .oai_models import ChatCompletion, ChatCompletionMessage, ChatCompletionMessageToolCall, Choice, CompletionUsage
 
@@ -46,22 +47,36 @@ with optional_import_block():
     from together import Together
 
 
-@register_llm_config
+class TogetherEntryDict(LLMConfigEntryDict, total=False):
+    api_type: Literal["together"]
+
+    stream: bool
+    top_k: int | None
+    repetition_penalty: float | None
+    presence_penalty: float | None
+    frequency_penalty: float | None
+    min_p: float | None
+    safety_model: str | None
+    hide_tools: Literal["if_all_run", "if_any_run", "never"]
+    price: list[float] | None
+    tool_choice: str | dict[str, str | dict[str, str]] | None
+
+
 class TogetherLLMConfigEntry(LLMConfigEntry):
     api_type: Literal["together"] = "together"
+
     max_tokens: int = Field(default=512, ge=0)
+
     stream: bool = False
-    temperature: Optional[float] = Field(default=None)
-    top_p: Optional[float] = Field(default=None)
-    top_k: Optional[int] = Field(default=None)
-    repetition_penalty: Optional[float] = Field(default=None)
-    presence_penalty: Optional[float] = Field(default=None, ge=-2, le=2)
-    frequency_penalty: Optional[float] = Field(default=None, ge=-2, le=2)
-    min_p: Optional[float] = Field(default=None, ge=0, le=1)
-    safety_model: Optional[str] = None
+    top_k: int | None = Field(default=None)
+    repetition_penalty: float | None = Field(default=None)
+    presence_penalty: float | None = Field(default=None, ge=-2, le=2)
+    frequency_penalty: float | None = Field(default=None, ge=-2, le=2)
+    min_p: float | None = Field(default=None, ge=0, le=1)
+    safety_model: str | None = None
     hide_tools: Literal["if_all_run", "if_any_run", "never"] = "never"
-    price: Optional[list[float]] = Field(default=None, min_length=2, max_length=2)
-    tool_choice: Optional[Union[str, dict[str, Union[str, dict[str, str]]]]] = (
+    price: list[float] | None = Field(default=None, min_length=2, max_length=2)
+    tool_choice: str | dict[str, str | dict[str, str]] | None = (
        None  # dict is the tool to call: {"type": "function", "function": {"name": "my_function"}}
    )
 
@@ -72,7 +87,7 @@ class TogetherLLMConfigEntry(LLMConfigEntry):
 class TogetherClient:
     """Client for Together.AI's API."""
 
-    def __init__(self, **kwargs):
+    def __init__(self, **kwargs: Unpack[TogetherEntryDict]):
        """Requires api_key or environment variable to be set
 
        Args:
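The `**kwargs: Unpack[TogetherEntryDict]` signature above gives the constructor's keyword arguments a static shape without changing runtime behaviour. A self-contained sketch of the pattern (all names below are illustrative, not from the package):

```python
from __future__ import annotations

from typing import Any, Literal

from typing_extensions import TypedDict, Unpack


class ExampleEntryDict(TypedDict, total=False):
    api_type: Literal["together"]
    stream: bool
    top_k: int | None


def make_client(**kwargs: Unpack[ExampleEntryDict]) -> dict[str, Any]:
    # At runtime this is an ordinary **kwargs dict; the Unpack annotation lets
    # type checkers flag unknown keys or wrongly typed values at the call site.
    return dict(kwargs)


print(make_client(api_type="together", stream=True, top_k=50))
```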