ag2 0.9.9__py3-none-any.whl → 0.9.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ag2 might be problematic. Click here for more details.

Files changed (88) hide show
  1. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/METADATA +232 -210
  2. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/RECORD +88 -80
  3. autogen/_website/generate_mkdocs.py +3 -3
  4. autogen/_website/notebook_processor.py +1 -1
  5. autogen/_website/utils.py +1 -1
  6. autogen/agentchat/assistant_agent.py +15 -15
  7. autogen/agentchat/chat.py +52 -40
  8. autogen/agentchat/contrib/agent_eval/criterion.py +1 -1
  9. autogen/agentchat/contrib/capabilities/text_compressors.py +5 -5
  10. autogen/agentchat/contrib/capabilities/tools_capability.py +1 -1
  11. autogen/agentchat/contrib/capabilities/transforms.py +1 -1
  12. autogen/agentchat/contrib/captainagent/agent_builder.py +1 -1
  13. autogen/agentchat/contrib/captainagent/captainagent.py +20 -19
  14. autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +2 -5
  15. autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +5 -5
  16. autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +18 -17
  17. autogen/agentchat/contrib/rag/mongodb_query_engine.py +2 -2
  18. autogen/agentchat/contrib/rag/query_engine.py +11 -11
  19. autogen/agentchat/contrib/retrieve_assistant_agent.py +3 -0
  20. autogen/agentchat/contrib/swarm_agent.py +3 -2
  21. autogen/agentchat/contrib/vectordb/couchbase.py +1 -1
  22. autogen/agentchat/contrib/vectordb/mongodb.py +1 -1
  23. autogen/agentchat/contrib/web_surfer.py +1 -1
  24. autogen/agentchat/conversable_agent.py +184 -80
  25. autogen/agentchat/group/context_expression.py +21 -21
  26. autogen/agentchat/group/handoffs.py +11 -11
  27. autogen/agentchat/group/multi_agent_chat.py +3 -2
  28. autogen/agentchat/group/on_condition.py +11 -11
  29. autogen/agentchat/group/safeguards/__init__.py +21 -0
  30. autogen/agentchat/group/safeguards/api.py +224 -0
  31. autogen/agentchat/group/safeguards/enforcer.py +1064 -0
  32. autogen/agentchat/group/safeguards/events.py +119 -0
  33. autogen/agentchat/group/safeguards/validator.py +435 -0
  34. autogen/agentchat/groupchat.py +58 -17
  35. autogen/agentchat/realtime/experimental/clients/realtime_client.py +2 -2
  36. autogen/agentchat/realtime/experimental/function_observer.py +2 -3
  37. autogen/agentchat/realtime/experimental/realtime_agent.py +2 -3
  38. autogen/agentchat/realtime/experimental/realtime_swarm.py +21 -10
  39. autogen/agentchat/user_proxy_agent.py +55 -53
  40. autogen/agents/experimental/document_agent/document_agent.py +1 -10
  41. autogen/agents/experimental/document_agent/parser_utils.py +5 -1
  42. autogen/browser_utils.py +4 -4
  43. autogen/cache/abstract_cache_base.py +2 -6
  44. autogen/cache/disk_cache.py +1 -6
  45. autogen/cache/in_memory_cache.py +2 -6
  46. autogen/cache/redis_cache.py +1 -5
  47. autogen/coding/__init__.py +10 -2
  48. autogen/coding/base.py +2 -1
  49. autogen/coding/docker_commandline_code_executor.py +1 -6
  50. autogen/coding/factory.py +9 -0
  51. autogen/coding/jupyter/docker_jupyter_server.py +1 -7
  52. autogen/coding/jupyter/jupyter_client.py +2 -9
  53. autogen/coding/jupyter/jupyter_code_executor.py +2 -7
  54. autogen/coding/jupyter/local_jupyter_server.py +2 -6
  55. autogen/coding/local_commandline_code_executor.py +0 -65
  56. autogen/coding/yepcode_code_executor.py +197 -0
  57. autogen/environments/docker_python_environment.py +3 -3
  58. autogen/environments/system_python_environment.py +5 -5
  59. autogen/environments/venv_python_environment.py +5 -5
  60. autogen/events/agent_events.py +1 -1
  61. autogen/events/client_events.py +1 -1
  62. autogen/fast_depends/utils.py +10 -0
  63. autogen/graph_utils.py +5 -7
  64. autogen/import_utils.py +3 -1
  65. autogen/interop/pydantic_ai/pydantic_ai.py +8 -5
  66. autogen/io/processors/console_event_processor.py +8 -3
  67. autogen/llm_config/config.py +168 -91
  68. autogen/llm_config/entry.py +38 -26
  69. autogen/llm_config/types.py +35 -0
  70. autogen/llm_config/utils.py +223 -0
  71. autogen/mcp/mcp_proxy/operation_grouping.py +48 -39
  72. autogen/messages/agent_messages.py +1 -1
  73. autogen/messages/client_messages.py +1 -1
  74. autogen/oai/__init__.py +8 -1
  75. autogen/oai/client.py +10 -3
  76. autogen/oai/client_utils.py +1 -1
  77. autogen/oai/cohere.py +4 -4
  78. autogen/oai/gemini.py +4 -6
  79. autogen/oai/gemini_types.py +1 -0
  80. autogen/oai/openai_utils.py +44 -115
  81. autogen/tools/dependency_injection.py +4 -8
  82. autogen/tools/experimental/reliable/reliable.py +3 -2
  83. autogen/tools/experimental/web_search_preview/web_search_preview.py +1 -1
  84. autogen/tools/function_utils.py +2 -1
  85. autogen/version.py +1 -1
  86. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/WHEEL +0 -0
  87. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/licenses/LICENSE +0 -0
  88. {ag2-0.9.9.dist-info → ag2-0.9.10.dist-info}/licenses/NOTICE.md +0 -0
@@ -0,0 +1,223 @@
1
+ # Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
2
+ #
3
+ # SPDX-License-Identifier: Apache-2.0
4
+ #
5
+ # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
+ # SPDX-License-Identifier: MIT
7
+ import json
8
+ import os
9
+ from pathlib import Path
10
+ from typing import Any
11
+
12
+
13
def config_list_from_json(
    env_or_file: str | Path,
    file_location: str | Path | None = "",
    filter_dict: dict[str, list[str | None] | set[str | None]] | None = None,
) -> list[dict[str, Any]]:
    """Retrieve a list of API configurations from JSON stored in an environment variable or a file.

    Resolution order:
      1. If `env_or_file` names a set environment variable, its value is used. The
         value may itself be the path of a JSON file, or a raw JSON string.
      2. Otherwise `env_or_file` is treated as a filename, looked up under
         `file_location` when one is given.

    The `filter_dict` parameter filters the loaded configurations: each key names a
    configuration field and maps to the acceptable values for that field. See
    `filter_config` for the exact matching rules (including how `None` in the
    acceptable values matches configurations missing that field).

    Args:
        env_or_file: The name of an environment variable containing either JSON data
            or the path to a JSON file, or the filename of a JSON file.
        file_location: Directory where the file is located when `env_or_file` is a
            filename. Ignored when the environment variable exists.
        filter_dict: Filtering criteria; keys are field names and values are
            lists/sets of acceptable values for those fields. `None` applies no
            filtering.

    Example:
        ```python
        # Suppose the environment variable 'CONFIG_JSON' contains:
        # '[{"model": "gpt-3.5-turbo", "api_type": "azure"}, {"model": "gpt-4"}]'
        filter_criteria = {"model": ["gpt-3.5-turbo"]}
        configs = config_list_from_json("CONFIG_JSON", filter_dict=filter_criteria)
        # 'configs' now contains only the configurations matching the criteria.
        ```

    Returns:
        A list of configuration dictionaries that match the criteria in `filter_dict`.

    Raises:
        FileNotFoundError: If `env_or_file` is neither a set environment variable
            nor an existing file.
        json.JSONDecodeError: If the resolved content is not valid JSON.
    """
    env_str = os.environ.get(str(env_or_file))

    if env_str:
        # The environment variable exists; it holds either a file path or raw JSON.
        if os.path.exists(env_str):  # noqa: SIM108
            # It is a file location: load the JSON from that file.
            json_str = Path(env_str).read_text()
        else:
            # Otherwise the value itself is the JSON string.
            json_str = env_str
        config_list = json.loads(json_str)
    else:
        # No such environment variable, so `env_or_file` is a filename,
        # optionally anchored at `file_location`.
        config_list_path = Path(file_location) / env_or_file if file_location else Path(env_or_file)
        # read_text (rather than open()) keeps file access consistent with the branch above.
        config_list = json.loads(Path(config_list_path).read_text())

    return filter_config(config_list, filter_dict)
74
+
75
+
76
def filter_config(
    config_list: list[dict[str, Any]],
    filter_dict: dict[str, list[str | None] | set[str | None]] | None,
    exclude: bool = False,
) -> list[dict[str, Any]]:
    """Filter configuration dictionaries against a set of criteria.

    Every key in `filter_dict` names a configuration field and maps to the
    acceptable values for that field. A configuration is kept when it satisfies
    every criterion: AND logic between different filter keys, OR logic among the
    acceptable values of a single key.

    Args:
        config_list: Configuration dictionaries to filter.
        filter_dict: Criteria where keys are field names and values are lists/sets
            of acceptable values. If a filter value includes `None`, configurations
            missing that field can match. If `None` or empty, no filtering occurs.
        exclude: When False (default) keep the configurations that match; when
            True keep those that do NOT match.

    Returns:
        The filtered list of configuration dictionaries.

    Matching logic:
        - Between filter keys: AND (all criteria must hold).
        - Within one key's acceptable values: OR (any value may match).
        - List-valued config fields match on any intersection with the criteria.
        - Scalar config fields match on membership in the criteria.
        - Missing fields only match when `None` is among the acceptable values.

    Examples:
        ```python
        configs = [
            {"model": "gpt-3.5-turbo", "api_type": "openai"},
            {"model": "gpt-4", "api_type": "openai"},
            {"model": "gpt-3.5-turbo", "api_type": "azure", "api_version": "2024-02-01"},
            {"model": "gpt-4", "tags": ["premium", "latest"]},
        ]

        # Single criterion: any model in the list matches.
        filter_config(configs, {"model": ["gpt-4", "gpt-4o"]})

        # Multiple criteria: ALL conditions must be satisfied.
        filter_config(configs, {"model": ["gpt-3.5-turbo"], "api_type": ["azure"]})

        # Tag filtering via list intersection.
        filter_config(configs, {"tags": ["premium"]})

        # Invert the selection.
        filter_config(configs, {"api_type": ["openai"]}, exclude=True)
        ```

    Note:
        If a configuration lacks a key named in `filter_dict` (and `None` is not
        among that key's acceptable values), it is treated as a non-match.
    """
    if not filter_dict:
        # Nothing to filter on: hand the list back untouched.
        return config_list

    def _keep(entry: dict[str, Any]) -> bool:
        # Every criterion's match result must differ from the `exclude` flag;
        # with exclude=True this keeps entries failing *all* criteria, mirroring
        # the inverse of the inclusion test applied per key.
        for field, accepted in filter_dict.items():
            if _satisfies_criteria(entry.get(field), accepted) == exclude:
                return False
        return True

    return [entry for entry in config_list if _keep(entry)]
155
+
156
+
157
+ def _satisfies_criteria(config_value: Any, criteria_values: Any) -> bool:
158
+ """Check if a configuration field value satisfies the filter criteria.
159
+
160
+ This helper function implements the matching logic between a single configuration
161
+ field value and the acceptable values specified in the filter criteria. It handles
162
+ both scalar and list-type configuration values with appropriate matching strategies.
163
+
164
+ Args:
165
+ config_value (Any): The value from a configuration dictionary field.
166
+ Can be None, a scalar value, or a list of values.
167
+ criteria_values (Any): The acceptable values from the filter dictionary.
168
+ Can be a single value or a list/set of acceptable values.
169
+
170
+ Returns:
171
+ bool: True if the config_value satisfies the criteria, False otherwise.
172
+
173
+ Matching Logic:
174
+ - **None config values**: Always return False (missing fields don't match)
175
+ - **List config values**:
176
+ - If criteria is a list: Match if there's any intersection (set overlap)
177
+ - If criteria is scalar: Match if the scalar is contained in the config list
178
+ - **Scalar config values**:
179
+ - If criteria is a list: Match if the config value is in the criteria list
180
+ - If criteria is scalar: Match if the values are exactly equal
181
+
182
+ Examples:
183
+ ```python
184
+ # List config value with list criteria (intersection matching)
185
+ _satisfies_criteria(["gpt-4", "gpt-3.5"], ["gpt-4", "claude"]) # True (gpt-4 intersects)
186
+ _satisfies_criteria(["tag1", "tag2"], ["tag3", "tag4"]) # False (no intersection)
187
+
188
+ # List config value with scalar criteria (containment matching)
189
+ _satisfies_criteria(["premium", "latest"], "premium") # True (premium is in list)
190
+ _satisfies_criteria(["tag1", "tag2"], "tag3") # False (tag3 not in list)
191
+
192
+ # Scalar config value with list criteria (membership matching)
193
+ _satisfies_criteria("gpt-4", ["gpt-4", "gpt-3.5"]) # True (gpt-4 in criteria)
194
+ _satisfies_criteria("claude", ["gpt-4", "gpt-3.5"]) # False (claude not in criteria)
195
+
196
+ # Scalar config value with scalar criteria (equality matching)
197
+ _satisfies_criteria("openai", "openai") # True (exact match)
198
+ _satisfies_criteria("openai", "azure") # False (different values)
199
+
200
+ # None config values (missing fields)
201
+ _satisfies_criteria(None, ["gpt-4"]) # False (missing field)
202
+ _satisfies_criteria(None, "gpt-4") # False (missing field)
203
+ ```
204
+
205
+ Note:
206
+ This is an internal helper function used by `filter_config()`. The function
207
+ assumes that both parameters can be of various types and handles type
208
+ checking internally to determine the appropriate matching strategy.
209
+ """
210
+ if config_value is None:
211
+ return False
212
+
213
+ if isinstance(config_value, list):
214
+ if isinstance(criteria_values, list):
215
+ return bool(set(config_value) & set(criteria_values)) # Non-empty intersection
216
+ else:
217
+ return criteria_values in config_value
218
+ else:
219
+ # In filter_dict, filter could be either a list of values or a single value.
220
+ # For example, filter_dict = {"model": ["gpt-3.5-turbo"]} or {"model": "gpt-3.5-turbo"}
221
+ if isinstance(criteria_values, list):
222
+ return config_value in criteria_values
223
+ return bool(config_value == criteria_values)
@@ -67,37 +67,43 @@ def discover_groups(operations: list["Operation"], chunk_size: int = 30) -> dict
67
67
  for config in llm_config.config_list:
68
68
  config.response_format = GroupSuggestions
69
69
 
70
- with llm_config:
71
- agent = ConversableAgent(name="group_discovery_agent", system_message=GROUP_DISCOVERY_MESSAGE)
72
- groups = {}
70
+ agent = ConversableAgent(
71
+ name="group_discovery_agent",
72
+ system_message=GROUP_DISCOVERY_MESSAGE,
73
+ llm_config=llm_config,
74
+ )
75
+ groups = {}
73
76
 
74
- for i, chunk in enumerate(chunk_list(operations, chunk_size)):
75
- func_descriptions = [f"- {op.function_name}: {op.summary} (args: {op.arguments})" for op in chunk]
76
- message = "Here are some functions:\n" + "\n".join(func_descriptions)
77
+ for chunk in chunk_list(operations, chunk_size):
78
+ func_descriptions = [f"- {op.function_name}: {op.summary} (args: {op.arguments})" for op in chunk]
79
+ message = "Here are some functions:\n" + "\n".join(func_descriptions)
77
80
 
78
- response = agent.run(message=message, max_turns=1, user_input=False)
81
+ response = agent.run(message=message, max_turns=1, user_input=False)
79
82
 
80
- for event in response.events:
81
- if event.type == "text" and event.content.sender == "group_discovery_agent":
82
- # Naively parse "group_name: description" from text block
83
- new_groups = GroupSuggestions.model_validate_json(event.content.content).groups
84
- groups.update(new_groups)
83
+ for event in response.events:
84
+ if event.type == "text" and event.content.sender == "group_discovery_agent":
85
+ # Naively parse "group_name: description" from text block
86
+ new_groups = GroupSuggestions.model_validate_json(event.content.content).groups
87
+ groups.update(new_groups)
85
88
 
86
89
  logger.warning("Discovered groups: %s", groups)
87
90
 
88
91
  # Remove duplicates
89
- with llm_config:
90
- agent = ConversableAgent(name="group_refining_agent", system_message=GROUP_DISCOVERY_MESSAGE)
91
-
92
- message = (
93
- "You need to refine the group names and descriptions to ensure they are unique.\n"
94
- "Here are the groups:\n" + "\n".join([f"- {name}: {desc}" for name, desc in groups.items()])
95
- )
96
- response = agent.run(message=message, max_turns=1, user_input=False)
97
- for event in response.events:
98
- if event.type == "text" and event.content.sender == "group_refining_agent":
99
- # Naively parse "group_name: description" from text block
100
- refined_groups = json.loads(event.content.content)
92
+ agent = ConversableAgent(
93
+ name="group_refining_agent",
94
+ system_message=GROUP_DISCOVERY_MESSAGE,
95
+ llm_config=llm_config,
96
+ )
97
+
98
+ message = (
99
+ "You need to refine the group names and descriptions to ensure they are unique.\n"
100
+ "Here are the groups:\n" + "\n".join([f"- {name}: {desc}" for name, desc in groups.items()])
101
+ )
102
+ response = agent.run(message=message, max_turns=1, user_input=False)
103
+ for event in response.events:
104
+ if event.type == "text" and event.content.sender == "group_refining_agent":
105
+ # Naively parse "group_name: description" from text block
106
+ refined_groups = json.loads(event.content.content)
101
107
 
102
108
  return refined_groups
103
109
 
@@ -108,25 +114,28 @@ def assign_operation_to_group(operation: "Operation", groups: dict[str, str]) ->
108
114
  for config in llm_config.config_list:
109
115
  config.response_format = GroupNames
110
116
 
111
- with llm_config:
112
- agent = ConversableAgent(name="group_assignment_agent", system_message=GROUP_ASSIGNMENT_MESSAGE)
117
+ agent = ConversableAgent(
118
+ name="group_assignment_agent",
119
+ system_message=GROUP_ASSIGNMENT_MESSAGE,
120
+ llm_config=llm_config,
121
+ )
113
122
 
114
- message = (
115
- "Function summary:\n"
116
- f"{operation.summary}\n\n"
117
- f"Arguments: {operation.arguments}\n\n"
118
- f"Available groups: {json.dumps(groups)}\n\n"
119
- "What group should this function go in?"
120
- )
123
+ message = (
124
+ "Function summary:\n"
125
+ f"{operation.summary}\n\n"
126
+ f"Arguments: {operation.arguments}\n\n"
127
+ f"Available groups: {json.dumps(groups)}\n\n"
128
+ "What group should this function go in?"
129
+ )
121
130
 
122
- response = agent.run(message=message, max_turns=1, user_input=True)
131
+ response = agent.run(message=message, max_turns=1, user_input=True)
123
132
 
124
- groups = []
125
- for event in response.events:
126
- if event.type == "text" and event.content.sender == "group_assignment_agent":
127
- groups = GroupNames.model_validate_json(event.content.content).groups
133
+ groups = []
134
+ for event in response.events:
135
+ if event.type == "text" and event.content.sender == "group_assignment_agent":
136
+ groups = GroupNames.model_validate_json(event.content.content).groups
128
137
 
129
- return groups
138
+ return groups
130
139
 
131
140
 
132
141
  def refine_group_names(groups: dict[str, str]) -> dict[str, str]:
@@ -911,7 +911,7 @@ class GenerateCodeExecutionReplyMessage(BaseMessage):
911
911
  else:
912
912
  f(
913
913
  colored(
914
- f"\n>>>>>>>> EXECUTING {num_code_blocks} CODE BLOCKS (inferred languages are [{', '.join([x for x in self.code_block_languages])}])...",
914
+ f"\n>>>>>>>> EXECUTING {num_code_blocks} CODE BLOCKS (inferred languages are [{', '.join(list(self.code_block_languages))}])...",
915
915
  "red",
916
916
  ),
917
917
  flush=True,
@@ -64,7 +64,7 @@ def _change_usage_summary_format(
64
64
  usage_summary_altered_format: dict[str, list[dict[str, Any]]] = {"usages": []}
65
65
  for k, v in usage_summary.items():
66
66
  if isinstance(k, str) and isinstance(v, dict):
67
- current_usage = {key: value for key, value in v.items()}
67
+ current_usage = dict(v.items())
68
68
  current_usage["model"] = k
69
69
  usage_summary_altered_format["usages"].append(current_usage)
70
70
  else:
autogen/oai/__init__.py CHANGED
@@ -8,7 +8,13 @@ from ..cache.cache import Cache
8
8
  from .anthropic import AnthropicLLMConfigEntry
9
9
  from .bedrock import BedrockLLMConfigEntry
10
10
  from .cerebras import CerebrasLLMConfigEntry
11
- from .client import AzureOpenAILLMConfigEntry, DeepSeekLLMConfigEntry, OpenAILLMConfigEntry, OpenAIWrapper
11
+ from .client import (
12
+ AzureOpenAILLMConfigEntry,
13
+ DeepSeekLLMConfigEntry,
14
+ OpenAILLMConfigEntry,
15
+ OpenAIResponsesLLMConfigEntry,
16
+ OpenAIWrapper,
17
+ )
12
18
  from .cohere import CohereLLMConfigEntry
13
19
  from .gemini import GeminiLLMConfigEntry
14
20
  from .groq import GroqLLMConfigEntry
@@ -39,6 +45,7 @@ __all__ = [
39
45
  "MistralLLMConfigEntry",
40
46
  "OllamaLLMConfigEntry",
41
47
  "OpenAILLMConfigEntry",
48
+ "OpenAIResponsesLLMConfigEntry",
42
49
  "OpenAIWrapper",
43
50
  "TogetherLLMConfigEntry",
44
51
  "config_list_from_dotenv",
autogen/oai/client.py CHANGED
@@ -2,7 +2,6 @@
2
2
  #
3
3
  # SPDX-License-Identifier: Apache-2.0
4
4
  #
5
- # Portions derived from https://github.com/microsoft/autogen are under the MIT License.
6
5
  # SPDX-License-Identifier: MIT
7
6
  from __future__ import annotations
8
7
 
@@ -287,7 +286,7 @@ class AzureOpenAIEntryDict(LLMConfigEntryDict, total=False):
287
286
  stream: bool
288
287
  tool_choice: Literal["none", "auto", "required"] | None
289
288
  user: str | None
290
- reasoning_effort: Literal["low", "medium", "high"] | None
289
+ reasoning_effort: Literal["low", "minimal", "medium", "high"] | None
291
290
  max_completion_tokens: int | None
292
291
 
293
292
 
@@ -301,7 +300,7 @@ class AzureOpenAILLMConfigEntry(LLMConfigEntry):
301
300
  # reasoning models - see:
302
301
  # - https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning
303
302
  # - https://learn.microsoft.com/en-us/azure/ai-services/openai/reference-preview
304
- reasoning_effort: Literal["low", "medium", "high"] | None = None
303
+ reasoning_effort: Literal["low", "minimal", "medium", "high"] | None = None
305
304
  max_completion_tokens: int | None = None
306
305
 
307
306
  def create_client(self) -> ModelClient:
@@ -884,6 +883,7 @@ class OpenAIWrapper:
884
883
  # a config for a custom client is set
885
884
  # adding placeholder until the register_model_client is called with the appropriate class
886
885
  self._clients.append(PlaceHolderClient(config))
886
+ # codeql[py/clear-text-logging-sensitive-data]
887
887
  logger.info(
888
888
  f"Detected custom model client in config: {model_client_cls_name}, model client can not be used until register_model_client is called."
889
889
  )
@@ -1462,6 +1462,13 @@ class OpenAIWrapper:
1462
1462
  # -----------------------------------------------------------------------------
1463
1463
 
1464
1464
 
1465
+ class OpenAIResponsesEntryDict(LLMConfigEntryDict, total=False):
1466
+ api_type: Literal["responses"]
1467
+
1468
+ tool_choice: Literal["none", "auto", "required"] | None
1469
+ built_in_tools: list[str] | None
1470
+
1471
+
1465
1472
  class OpenAIResponsesLLMConfigEntry(OpenAILLMConfigEntry):
1466
1473
  """LLMConfig entry for the OpenAI Responses API (stateful, tool-enabled).
1467
1474
 
@@ -131,7 +131,7 @@ def should_hide_tools(messages: list[dict[str, Any]], tools: list[dict[str, Any]
131
131
  return False
132
132
  elif hide_tools_param == "if_any_run":
133
133
  # Return True if any tool_call_id exists, indicating a tool call has been executed. False otherwise.
134
- return any(["tool_call_id" in dictionary for dictionary in messages])
134
+ return any("tool_call_id" in dictionary for dictionary in messages)
135
135
  elif hide_tools_param == "if_all_run":
136
136
  # Return True if all tools have been executed at least once. False otherwise.
137
137
 
autogen/oai/cohere.py CHANGED
@@ -260,7 +260,7 @@ class CohereClient:
260
260
  cohere_params["messages"] = messages
261
261
 
262
262
  if "tools" in params:
263
- cohere_tool_names = set([tool["function"]["name"] for tool in params["tools"]])
263
+ cohere_tool_names = {tool["function"]["name"] for tool in params["tools"]}
264
264
  cohere_params["tools"] = params["tools"]
265
265
 
266
266
  # Strip out name
@@ -285,9 +285,9 @@ class CohereClient:
285
285
  ) not in cohere_tool_names:
286
286
  message["role"] = "assistant"
287
287
  message["content"] = f"{message.pop('tool_plan', '')}{str(message['tool_calls'])}"
288
- tool_calls_modified_ids = tool_calls_modified_ids.union(
289
- set([tool_call.get("id") for tool_call in message["tool_calls"]])
290
- )
288
+ tool_calls_modified_ids = tool_calls_modified_ids.union({
289
+ tool_call.get("id") for tool_call in message["tool_calls"]
290
+ })
291
291
  del message["tool_calls"]
292
292
  break
293
293
 
autogen/oai/gemini.py CHANGED
@@ -246,7 +246,7 @@ class GeminiClient:
246
246
 
247
247
  if model_name == "gemini-pro-vision":
248
248
  raise ValueError(
249
- "Gemini 1.0 Pro vision ('gemini-pro-vision') has been deprecated, please consider switching to a different model, for example 'gemini-1.5-flash'."
249
+ "Gemini 1.0 Pro vision ('gemini-pro-vision') has been deprecated, please consider switching to a different model, for example 'gemini-2.5-flash'."
250
250
  )
251
251
  elif not model_name:
252
252
  raise ValueError(
@@ -385,9 +385,7 @@ class GeminiClient:
385
385
  function={
386
386
  "name": fn_call.name,
387
387
  "arguments": (
388
- json.dumps({key: val for key, val in fn_call.args.items()})
389
- if fn_call.args is not None
390
- else ""
388
+ json.dumps(dict(fn_call.args.items())) if fn_call.args is not None else ""
391
389
  ),
392
390
  },
393
391
  type="function",
@@ -857,10 +855,10 @@ class GeminiClient:
857
855
  """Convert safety settings to VertexAI format if needed,
858
856
  like when specifying them in the OAI_CONFIG_LIST
859
857
  """
860
- if isinstance(safety_settings, list) and all([
858
+ if isinstance(safety_settings, list) and all(
861
859
  isinstance(safety_setting, dict) and not isinstance(safety_setting, VertexAISafetySetting)
862
860
  for safety_setting in safety_settings
863
- ]):
861
+ ):
864
862
  vertexai_safety_settings = []
865
863
  for safety_setting in safety_settings:
866
864
  if safety_setting["category"] not in VertexAIHarmCategory.__members__:
@@ -105,6 +105,7 @@ class FunctionCallingConfigMode(CaseInSensitiveEnum):
105
105
  AUTO = "AUTO"
106
106
  ANY = "ANY"
107
107
  NONE = "NONE"
108
+ VALIDATED = "VALIDATED"
108
109
 
109
110
 
110
111
  class LatLng(CommonBaseModel):