ag2 0.9.9__py3-none-any.whl → 0.10.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This version of ag2 has been flagged as potentially problematic.
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/METADATA +243 -214
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/RECORD +113 -87
- autogen/_website/generate_mkdocs.py +3 -3
- autogen/_website/notebook_processor.py +1 -1
- autogen/_website/utils.py +1 -1
- autogen/a2a/__init__.py +36 -0
- autogen/a2a/agent_executor.py +105 -0
- autogen/a2a/client.py +280 -0
- autogen/a2a/errors.py +18 -0
- autogen/a2a/httpx_client_factory.py +79 -0
- autogen/a2a/server.py +221 -0
- autogen/a2a/utils.py +165 -0
- autogen/agentchat/__init__.py +3 -0
- autogen/agentchat/agent.py +0 -2
- autogen/agentchat/assistant_agent.py +15 -15
- autogen/agentchat/chat.py +57 -41
- autogen/agentchat/contrib/agent_eval/criterion.py +1 -1
- autogen/agentchat/contrib/capabilities/text_compressors.py +5 -5
- autogen/agentchat/contrib/capabilities/tools_capability.py +1 -1
- autogen/agentchat/contrib/capabilities/transforms.py +1 -1
- autogen/agentchat/contrib/captainagent/agent_builder.py +1 -1
- autogen/agentchat/contrib/captainagent/captainagent.py +20 -19
- autogen/agentchat/contrib/graph_rag/falkor_graph_query_engine.py +2 -5
- autogen/agentchat/contrib/graph_rag/graph_rag_capability.py +5 -5
- autogen/agentchat/contrib/graph_rag/neo4j_graph_query_engine.py +18 -17
- autogen/agentchat/contrib/llava_agent.py +1 -13
- autogen/agentchat/contrib/rag/mongodb_query_engine.py +2 -2
- autogen/agentchat/contrib/rag/query_engine.py +11 -11
- autogen/agentchat/contrib/retrieve_assistant_agent.py +3 -0
- autogen/agentchat/contrib/swarm_agent.py +3 -2
- autogen/agentchat/contrib/vectordb/couchbase.py +1 -1
- autogen/agentchat/contrib/vectordb/mongodb.py +1 -1
- autogen/agentchat/contrib/web_surfer.py +1 -1
- autogen/agentchat/conversable_agent.py +359 -150
- autogen/agentchat/group/context_expression.py +21 -21
- autogen/agentchat/group/group_tool_executor.py +46 -15
- autogen/agentchat/group/guardrails.py +41 -33
- autogen/agentchat/group/handoffs.py +11 -11
- autogen/agentchat/group/multi_agent_chat.py +56 -2
- autogen/agentchat/group/on_condition.py +11 -11
- autogen/agentchat/group/safeguards/__init__.py +21 -0
- autogen/agentchat/group/safeguards/api.py +241 -0
- autogen/agentchat/group/safeguards/enforcer.py +1158 -0
- autogen/agentchat/group/safeguards/events.py +119 -0
- autogen/agentchat/group/safeguards/validator.py +435 -0
- autogen/agentchat/groupchat.py +102 -49
- autogen/agentchat/realtime/experimental/clients/realtime_client.py +2 -2
- autogen/agentchat/realtime/experimental/function_observer.py +2 -3
- autogen/agentchat/realtime/experimental/realtime_agent.py +2 -3
- autogen/agentchat/realtime/experimental/realtime_swarm.py +22 -13
- autogen/agentchat/user_proxy_agent.py +55 -53
- autogen/agents/experimental/document_agent/document_agent.py +1 -10
- autogen/agents/experimental/document_agent/parser_utils.py +5 -1
- autogen/browser_utils.py +4 -4
- autogen/cache/abstract_cache_base.py +2 -6
- autogen/cache/disk_cache.py +1 -6
- autogen/cache/in_memory_cache.py +2 -6
- autogen/cache/redis_cache.py +1 -5
- autogen/coding/__init__.py +10 -2
- autogen/coding/base.py +2 -1
- autogen/coding/docker_commandline_code_executor.py +1 -6
- autogen/coding/factory.py +9 -0
- autogen/coding/jupyter/docker_jupyter_server.py +1 -7
- autogen/coding/jupyter/jupyter_client.py +2 -9
- autogen/coding/jupyter/jupyter_code_executor.py +2 -7
- autogen/coding/jupyter/local_jupyter_server.py +2 -6
- autogen/coding/local_commandline_code_executor.py +0 -65
- autogen/coding/yepcode_code_executor.py +197 -0
- autogen/environments/docker_python_environment.py +3 -3
- autogen/environments/system_python_environment.py +5 -5
- autogen/environments/venv_python_environment.py +5 -5
- autogen/events/agent_events.py +1 -1
- autogen/events/client_events.py +1 -1
- autogen/fast_depends/utils.py +10 -0
- autogen/graph_utils.py +5 -7
- autogen/import_utils.py +3 -1
- autogen/interop/pydantic_ai/pydantic_ai.py +8 -5
- autogen/io/processors/console_event_processor.py +8 -3
- autogen/llm_config/client.py +3 -2
- autogen/llm_config/config.py +168 -91
- autogen/llm_config/entry.py +38 -26
- autogen/llm_config/types.py +35 -0
- autogen/llm_config/utils.py +223 -0
- autogen/mcp/mcp_proxy/operation_grouping.py +48 -39
- autogen/messages/agent_messages.py +1 -1
- autogen/messages/client_messages.py +1 -1
- autogen/oai/__init__.py +8 -1
- autogen/oai/bedrock.py +0 -13
- autogen/oai/client.py +25 -11
- autogen/oai/client_utils.py +31 -1
- autogen/oai/cohere.py +4 -14
- autogen/oai/gemini.py +4 -6
- autogen/oai/gemini_types.py +1 -0
- autogen/oai/openai_utils.py +44 -115
- autogen/remote/__init__.py +18 -0
- autogen/remote/agent.py +199 -0
- autogen/remote/agent_service.py +142 -0
- autogen/remote/errors.py +17 -0
- autogen/remote/httpx_client_factory.py +131 -0
- autogen/remote/protocol.py +37 -0
- autogen/remote/retry.py +102 -0
- autogen/remote/runtime.py +96 -0
- autogen/testing/__init__.py +12 -0
- autogen/testing/messages.py +45 -0
- autogen/testing/test_agent.py +111 -0
- autogen/tools/dependency_injection.py +4 -8
- autogen/tools/experimental/reliable/reliable.py +3 -2
- autogen/tools/experimental/web_search_preview/web_search_preview.py +1 -1
- autogen/tools/function_utils.py +2 -1
- autogen/version.py +1 -1
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/WHEEL +0 -0
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/licenses/LICENSE +0 -0
- {ag2-0.9.9.dist-info → ag2-0.10.0.dist-info}/licenses/NOTICE.md +0 -0
autogen/llm_config/utils.py
ADDED
@@ -0,0 +1,223 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
+# SPDX-License-Identifier: MIT
+import json
+import os
+from pathlib import Path
+from typing import Any
+
+
+def config_list_from_json(
+    env_or_file: str | Path,
+    file_location: str | Path | None = "",
+    filter_dict: dict[str, list[str | None] | set[str | None]] | None = None,
+) -> list[dict[str, Any]]:
+    """Retrieves a list of API configurations from a JSON stored in an environment variable or a file.
+
+    This function attempts to parse JSON data from the given `env_or_file` parameter. If `env_or_file` is an
+    environment variable containing JSON data, it will be used directly. Otherwise, it is assumed to be a filename,
+    and the function will attempt to read the file from the specified `file_location`.
+
+    The `filter_dict` parameter allows for filtering the configurations based on specified criteria. Each key in the
+    `filter_dict` corresponds to a field in the configuration dictionaries, and the associated value is a list or set
+    of acceptable values for that field. If a field is missing in a configuration and `None` is included in the list
+    of acceptable values for that field, the configuration will still be considered a match.
+
+    Args:
+        env_or_file (str): The name of the environment variable, the filename, or the environment variable of the filename
+            that contains the JSON data.
+        file_location (str, optional): The directory path where the file is located, if `env_or_file` is a filename.
+        filter_dict (dict, optional): A dictionary specifying the filtering criteria for the configurations, with
+            keys representing field names and values being lists or sets of acceptable values for those fields.
+
+    Example:
+        ```python
+        # Suppose we have an environment variable 'CONFIG_JSON' with the following content:
+        # '[{"model": "gpt-3.5-turbo", "api_type": "azure"}, {"model": "gpt-4"}]'
+
+        # We can retrieve a filtered list of configurations like this:
+        filter_criteria = {"model": ["gpt-3.5-turbo"]}
+        configs = config_list_from_json("CONFIG_JSON", filter_dict=filter_criteria)
+        # The 'configs' variable will now contain only the configurations that match the filter criteria.
+        ```
+
+    Returns:
+        List[Dict]: A list of configuration dictionaries that match the filtering criteria specified in `filter_dict`.
+
+    Raises:
+        FileNotFoundError: if env_or_file is neither found as an environment variable nor a file
+    """
+    env_str = os.environ.get(str(env_or_file))
+
+    if env_str:
+        # The environment variable exists. We should use information from it.
+        if os.path.exists(env_str):  # noqa: SIM108
+            # It is a file location, and we need to load the json from the file.
+            json_str = Path(env_str).read_text()
+        else:
+            # Else, it should be a JSON string by itself.
+            json_str = env_str
+        config_list = json.loads(json_str)
+
+    else:
+        # The environment variable does not exist.
+        # So, `env_or_file` is a filename. We should use the file location.
+        config_list_path = Path(file_location) / env_or_file if file_location else Path(env_or_file)
+
+        with open(config_list_path) as json_file:
+            config_list = json.load(json_file)
+
+    return filter_config(config_list, filter_dict)
+
+
+def filter_config(
+    config_list: list[dict[str, Any]],
+    filter_dict: dict[str, list[str | None] | set[str | None]] | None,
+    exclude: bool = False,
+) -> list[dict[str, Any]]:
+    """Filter configuration dictionaries based on specified criteria.
+
+    This function filters a list of configuration dictionaries by applying ALL criteria specified in `filter_dict`.
+    A configuration is included in the result if it satisfies every key-value constraint in the filter dictionary.
+    For each filter key, the configuration's corresponding field value must match at least one of the acceptable
+    values (OR logic within each criteria, AND logic between different criteria).
+
+    Args:
+        config_list (list of dict): A list of configuration dictionaries to be filtered.
+
+        filter_dict (dict, optional): A dictionary specifying filter criteria where:
+            - Keys are field names to check in each configuration dictionary
+            - Values are lists/sets of acceptable values for that field
+            - A configuration matches if ALL filter keys are satisfied AND for each key,
+              the config's field value matches at least one acceptable value
+            - If a filter value includes None, configurations missing that field will match
+            - If None, no filtering is applied
+
+        exclude (bool, optional): If False (default), return configurations that match the filter.
+            If True, return configurations that do NOT match the filter.
+
+    Returns:
+        list of dict: Filtered list of configuration dictionaries.
+
+    Matching Logic:
+        - **Between different filter keys**: AND logic (all criteria must be satisfied)
+        - **Within each filter key's values**: OR logic (any acceptable value can match)
+        - **For list-type config values**: Match if there's any intersection with acceptable values
+        - **For scalar config values**: Match if the value is in the list of acceptable values
+        - **Missing fields**: Only match if None is included in the acceptable values for that field
+
+    Examples:
+        ```python
+        configs = [
+            {"model": "gpt-3.5-turbo", "api_type": "openai"},
+            {"model": "gpt-4", "api_type": "openai"},
+            {"model": "gpt-3.5-turbo", "api_type": "azure", "api_version": "2024-02-01"},
+            {"model": "gpt-4", "tags": ["premium", "latest"]},
+        ]
+
+        # Example 1: Single criterion - matches any model in the list
+        filter_dict = {"model": ["gpt-4", "gpt-4o"]}
+        result = filter_config(configs, filter_dict)
+        # Returns: [{"model": "gpt-4", "api_type": "openai"}, {"model": "gpt-4", "tags": ["premium", "latest"]}]
+
+        # Example 2: Multiple criteria - must satisfy ALL conditions
+        filter_dict = {"model": ["gpt-3.5-turbo"], "api_type": ["azure"]}
+        result = filter_config(configs, filter_dict)
+        # Returns: [{"model": "gpt-3.5-turbo", "api_type": "azure", "api_version": "2024-02-01"}]
+
+        # Example 3: Tag filtering with list intersection
+        filter_dict = {"tags": ["premium"]}
+        result = filter_config(configs, filter_dict)
+        # Returns: [{"model": "gpt-4", "tags": ["premium", "latest"]}]
+
+        # Example 4: Exclude matching configurations
+        filter_dict = {"api_type": ["openai"]}
+        result = filter_config(configs, filter_dict, exclude=True)
+        # Returns configs that do NOT have api_type="openai"
+        ```
+    Note:
+        - If `filter_dict` is empty or None, no filtering is applied and `config_list` is returned as is.
+        - If a configuration dictionary in `config_list` does not contain a key specified in `filter_dict`,
+          it is considered a non-match and is excluded from the result.
+
+    """
+    if filter_dict:
+        return [
+            item
+            for item in config_list
+            if all(_satisfies_criteria(item.get(key), values) != exclude for key, values in filter_dict.items())
+        ]
+
+    return config_list
+
+
+def _satisfies_criteria(config_value: Any, criteria_values: Any) -> bool:
+    """Check if a configuration field value satisfies the filter criteria.
+
+    This helper function implements the matching logic between a single configuration
+    field value and the acceptable values specified in the filter criteria. It handles
+    both scalar and list-type configuration values with appropriate matching strategies.
+
+    Args:
+        config_value (Any): The value from a configuration dictionary field.
+            Can be None, a scalar value, or a list of values.
+        criteria_values (Any): The acceptable values from the filter dictionary.
+            Can be a single value or a list/set of acceptable values.
+
+    Returns:
+        bool: True if the config_value satisfies the criteria, False otherwise.
+
+    Matching Logic:
+        - **None config values**: Always return False (missing fields don't match)
+        - **List config values**:
+            - If criteria is a list: Match if there's any intersection (set overlap)
+            - If criteria is scalar: Match if the scalar is contained in the config list
+        - **Scalar config values**:
+            - If criteria is a list: Match if the config value is in the criteria list
+            - If criteria is scalar: Match if the values are exactly equal
+
+    Examples:
+        ```python
+        # List config value with list criteria (intersection matching)
+        _satisfies_criteria(["gpt-4", "gpt-3.5"], ["gpt-4", "claude"])  # True (gpt-4 intersects)
+        _satisfies_criteria(["tag1", "tag2"], ["tag3", "tag4"])  # False (no intersection)
+
+        # List config value with scalar criteria (containment matching)
+        _satisfies_criteria(["premium", "latest"], "premium")  # True (premium is in list)
+        _satisfies_criteria(["tag1", "tag2"], "tag3")  # False (tag3 not in list)
+
+        # Scalar config value with list criteria (membership matching)
+        _satisfies_criteria("gpt-4", ["gpt-4", "gpt-3.5"])  # True (gpt-4 in criteria)
+        _satisfies_criteria("claude", ["gpt-4", "gpt-3.5"])  # False (claude not in criteria)
+
+        # Scalar config value with scalar criteria (equality matching)
+        _satisfies_criteria("openai", "openai")  # True (exact match)
+        _satisfies_criteria("openai", "azure")  # False (different values)
+
+        # None config values (missing fields)
+        _satisfies_criteria(None, ["gpt-4"])  # False (missing field)
+        _satisfies_criteria(None, "gpt-4")  # False (missing field)
+        ```
+
+    Note:
+        This is an internal helper function used by `filter_config()`. The function
+        assumes that both parameters can be of various types and handles type
+        checking internally to determine the appropriate matching strategy.
+    """
+    if config_value is None:
+        return False
+
+    if isinstance(config_value, list):
+        if isinstance(criteria_values, list):
+            return bool(set(config_value) & set(criteria_values))  # Non-empty intersection
+        else:
+            return criteria_values in config_value
+    else:
+        # In filter_dict, filter could be either a list of values or a single value.
+        # For example, filter_dict = {"model": ["gpt-3.5-turbo"]} or {"model": "gpt-3.5-turbo"}
+        if isinstance(criteria_values, list):
+            return config_value in criteria_values
+        return bool(config_value == criteria_values)
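Since `config_list_from_json`, `filter_config`, and `_satisfies_criteria` make up a brand-new module, a short usage sketch may help. This block is illustrative only (not part of the diff) and assumes the import path `autogen.llm_config.utils` shown in the file list above.

```python
from autogen.llm_config.utils import filter_config

configs = [
    {"model": "gpt-4", "api_type": "openai"},
    {"model": "gpt-4", "api_type": "azure", "tags": ["premium"]},
    {"model": "gpt-3.5-turbo", "api_type": "openai"},
]

# AND across filter keys, OR within each key's acceptable values:
assert filter_config(configs, {"model": ["gpt-4"], "api_type": ["azure"]}) == [
    {"model": "gpt-4", "api_type": "azure", "tags": ["premium"]}
]

# List-valued config fields match on set intersection:
assert filter_config(configs, {"tags": ["premium", "free"]}) == [
    {"model": "gpt-4", "api_type": "azure", "tags": ["premium"]}
]

# exclude=True inverts the match:
assert filter_config(configs, {"api_type": ["azure"]}, exclude=True) == [
    {"model": "gpt-4", "api_type": "openai"},
    {"model": "gpt-3.5-turbo", "api_type": "openai"},
]
```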
autogen/mcp/mcp_proxy/operation_grouping.py
CHANGED
@@ -67,37 +67,43 @@ def discover_groups(operations: list["Operation"], chunk_size: int = 30) -> dict
     for config in llm_config.config_list:
         config.response_format = GroupSuggestions

-
-
-
+    agent = ConversableAgent(
+        name="group_discovery_agent",
+        system_message=GROUP_DISCOVERY_MESSAGE,
+        llm_config=llm_config,
+    )
+    groups = {}

-
-
-
+    for chunk in chunk_list(operations, chunk_size):
+        func_descriptions = [f"- {op.function_name}: {op.summary} (args: {op.arguments})" for op in chunk]
+        message = "Here are some functions:\n" + "\n".join(func_descriptions)

-
+        response = agent.run(message=message, max_turns=1, user_input=False)

-
-
-
-
-
+        for event in response.events:
+            if event.type == "text" and event.content.sender == "group_discovery_agent":
+                # Naively parse "group_name: description" from text block
+                new_groups = GroupSuggestions.model_validate_json(event.content.content).groups
+                groups.update(new_groups)

     logger.warning("Discovered groups: %s", groups)

     # Remove duplicates
-
-
-
-
-
-
-
-    for
-
-
+    agent = ConversableAgent(
+        name="group_refining_agent",
+        system_message=GROUP_DISCOVERY_MESSAGE,
+        llm_config=llm_config,
+    )
+
+    message = (
+        "You need to refine the group names and descriptions to ensure they are unique.\n"
+        "Here are the groups:\n" + "\n".join([f"- {name}: {desc}" for name, desc in groups.items()])
+    )
+    response = agent.run(message=message, max_turns=1, user_input=False)
+    for event in response.events:
+        if event.type == "text" and event.content.sender == "group_refining_agent":
+            # Naively parse "group_name: description" from text block
+            refined_groups = json.loads(event.content.content)

     return refined_groups

@@ -108,25 +114,28 @@ def assign_operation_to_group(operation: "Operation", groups: dict[str, str]) ->
     for config in llm_config.config_list:
         config.response_format = GroupNames

-
-
+    agent = ConversableAgent(
+        name="group_assignment_agent",
+        system_message=GROUP_ASSIGNMENT_MESSAGE,
+        llm_config=llm_config,
+    )

-
-
-
-
-
-
-
+    message = (
+        "Function summary:\n"
+        f"{operation.summary}\n\n"
+        f"Arguments: {operation.arguments}\n\n"
+        f"Available groups: {json.dumps(groups)}\n\n"
+        "What group should this function go in?"
+    )

-
+    response = agent.run(message=message, max_turns=1, user_input=True)

-
-
-
-
+    groups = []
+    for event in response.events:
+        if event.type == "text" and event.content.sender == "group_assignment_agent":
+            groups = GroupNames.model_validate_json(event.content.content).groups

-
+    return groups


 def refine_group_names(groups: dict[str, str]) -> dict[str, str]:
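The reworked flow parses structured model output from the run's event stream. Below is a minimal sketch of that parsing pattern; the `GroupSuggestions` shape is inferred from the diff (hypothetical, not public API), and `response` stands for the object returned by `agent.run(...)`.

```python
from pydantic import BaseModel


class GroupSuggestions(BaseModel):  # assumed shape: group name -> description
    groups: dict[str, str]


def collect_groups(response, sender: str) -> dict[str, str]:
    """Gather group suggestions from every text event emitted by `sender`."""
    groups: dict[str, str] = {}
    for event in response.events:
        if event.type == "text" and event.content.sender == sender:
            suggestions = GroupSuggestions.model_validate_json(event.content.content)
            groups.update(suggestions.groups)
    return groups
```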
autogen/messages/agent_messages.py
CHANGED
@@ -911,7 +911,7 @@ class GenerateCodeExecutionReplyMessage(BaseMessage):
         else:
             f(
                 colored(
-                    f"\n>>>>>>>> EXECUTING {num_code_blocks} CODE BLOCKS (inferred languages are [{', '.join(
+                    f"\n>>>>>>>> EXECUTING {num_code_blocks} CODE BLOCKS (inferred languages are [{', '.join(list(self.code_block_languages))}])...",
                     "red",
                 ),
                 flush=True,
autogen/messages/client_messages.py
CHANGED
@@ -64,7 +64,7 @@ def _change_usage_summary_format(
     usage_summary_altered_format: dict[str, list[dict[str, Any]]] = {"usages": []}
     for k, v in usage_summary.items():
         if isinstance(k, str) and isinstance(v, dict):
-            current_usage =
+            current_usage = dict(v.items())
             current_usage["model"] = k
             usage_summary_altered_format["usages"].append(current_usage)
         else:
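The restored right-hand side matters because `dict(v.items())` copies the per-model usage dict before the `model` key is added. A standalone sketch of the difference (not library code):

```python
usage_summary = {"gpt-4": {"prompt_tokens": 10, "completion_tokens": 2}}

# Copy first, then annotate: the caller's dict stays untouched.
current_usage = dict(usage_summary["gpt-4"].items())
current_usage["model"] = "gpt-4"
assert "model" not in usage_summary["gpt-4"]
```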
|
autogen/oai/__init__.py
CHANGED
|
@@ -8,7 +8,13 @@ from ..cache.cache import Cache
|
|
|
8
8
|
from .anthropic import AnthropicLLMConfigEntry
|
|
9
9
|
from .bedrock import BedrockLLMConfigEntry
|
|
10
10
|
from .cerebras import CerebrasLLMConfigEntry
|
|
11
|
-
from .client import
|
|
11
|
+
from .client import (
|
|
12
|
+
AzureOpenAILLMConfigEntry,
|
|
13
|
+
DeepSeekLLMConfigEntry,
|
|
14
|
+
OpenAILLMConfigEntry,
|
|
15
|
+
OpenAIResponsesLLMConfigEntry,
|
|
16
|
+
OpenAIWrapper,
|
|
17
|
+
)
|
|
12
18
|
from .cohere import CohereLLMConfigEntry
|
|
13
19
|
from .gemini import GeminiLLMConfigEntry
|
|
14
20
|
from .groq import GroqLLMConfigEntry
|
|
@@ -39,6 +45,7 @@ __all__ = [
|
|
|
39
45
|
"MistralLLMConfigEntry",
|
|
40
46
|
"OllamaLLMConfigEntry",
|
|
41
47
|
"OpenAILLMConfigEntry",
|
|
48
|
+
"OpenAIResponsesLLMConfigEntry",
|
|
42
49
|
"OpenAIWrapper",
|
|
43
50
|
"TogetherLLMConfigEntry",
|
|
44
51
|
"config_list_from_dotenv",
|
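With the re-export in place, the new Responses entry is importable alongside the existing names. An illustrative one-liner (not part of the diff):

```python
# Both names are now re-exported from autogen.oai (see __all__ above).
from autogen.oai import OpenAIResponsesLLMConfigEntry, OpenAIWrapper
```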
autogen/oai/bedrock.py
CHANGED
@@ -198,22 +198,9 @@ class BedrockClient:
         if "top_p" in params:
             base_params["topP"] = validate_parameter(params, "top_p", (float, int), False, None, None, None)

-        if "topP" in params:
-            warnings.warn(
-                ("topP is deprecated, use top_p instead. Scheduled for removal in 0.10.0 version."), DeprecationWarning
-            )
-            base_params["topP"] = validate_parameter(params, "topP", (float, int), False, None, None, None)
-
         if "max_tokens" in params:
             base_params["maxTokens"] = validate_parameter(params, "max_tokens", (int,), False, None, None, None)

-        if "maxTokens" in params:
-            warnings.warn(
-                ("maxTokens is deprecated, use max_tokens instead. Scheduled for removal in 0.10.0 version."),
-                DeprecationWarning,
-            )
-            base_params["maxTokens"] = validate_parameter(params, "maxTokens", (int,), False, None, None, None)
-
         # Here are the possible "model-specific" parameters and their suitable types, known as additional parameters
         additional_params = {}
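The camelCase fallbacks are gone, so Bedrock callers must use the OpenAI-style names. A before/after sketch with placeholder values:

```python
# Handled until 0.9.x (with a DeprecationWarning); no longer special-cased in 0.10.0:
old_style = {"topP": 0.9, "maxTokens": 512}

# The supported spellings; BedrockClient maps them to the Converse API's
# topP/maxTokens internally via validate_parameter():
new_style = {"top_p": 0.9, "max_tokens": 512}
```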
autogen/oai/client.py
CHANGED
@@ -2,7 +2,6 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 #
-# Portions derived from https://github.com/microsoft/autogen are under the MIT License.
 # SPDX-License-Identifier: MIT
 from __future__ import annotations

@@ -21,6 +20,7 @@ from pydantic import BaseModel, Field, HttpUrl
 from pydantic.type_adapter import TypeAdapter

 from ..cache import Cache
+from ..code_utils import content_str
 from ..doc_utils import export_module
 from ..events.client_events import StreamEvent, UsageSummaryEvent
 from ..exception_utils import ModelToolNotSupportedError
@@ -31,7 +31,7 @@ from ..llm_config.entry import LLMConfigEntry, LLMConfigEntryDict
 from ..logger.logger_utils import get_current_ts
 from ..runtime_logging import log_chat_completion, log_new_client, log_new_wrapper, logging_enabled
 from ..token_count_utils import count_token
-from .client_utils import FormatterProtocol, logging_formatter
+from .client_utils import FormatterProtocol, logging_formatter, merge_config_with_tools
 from .openai_utils import OAI_PRICE1K, get_key, is_valid_api_key

 TOOL_ENABLED = False
@@ -287,7 +287,7 @@ class AzureOpenAIEntryDict(LLMConfigEntryDict, total=False):
     stream: bool
     tool_choice: Literal["none", "auto", "required"] | None
     user: str | None
-    reasoning_effort: Literal["low", "medium", "high"] | None
+    reasoning_effort: Literal["low", "minimal", "medium", "high"] | None
     max_completion_tokens: int | None


@@ -301,7 +301,7 @@ class AzureOpenAILLMConfigEntry(LLMConfigEntry):
     # reasoning models - see:
     # - https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning
     # - https://learn.microsoft.com/en-us/azure/ai-services/openai/reference-preview
-    reasoning_effort: Literal["low", "medium", "high"] | None = None
+    reasoning_effort: Literal["low", "minimal", "medium", "high"] | None = None
     max_completion_tokens: int | None = None

     def create_client(self) -> ModelClient:
@@ -366,11 +366,12 @@ class OpenAIClient:
         if isinstance(response, Completion):
             return [choice.text for choice in choices]  # type: ignore [union-attr]

-        def _format_content(content: str) -> str:
+        def _format_content(content: str | list[dict[str, Any]] | None) -> str:
+            normalized_content = content_str(content)
             return (
-                self.response_format.model_validate_json(
+                self.response_format.model_validate_json(normalized_content).format()
                 if isinstance(self.response_format, FormatterProtocol)
-                else
+                else normalized_content
             )

         if TOOL_ENABLED:
@@ -638,8 +639,11 @@ class OpenAIClient:
                 warnings.warn(
                     f"The {params.get('model')} model does not support streaming. The stream will be set to False."
                 )
-            if
-
+            if "tools" in params:
+                if params["tools"]:  # If tools exist, raise as unsupported
+                    raise ModelToolNotSupportedError(params.get("model"))
+                else:
+                    params.pop("tools")  # Remove empty tools list
             self._process_reasoning_model_params(params)
             params["stream"] = False
             response = create_or_parse(**params)
@@ -884,6 +888,7 @@ class OpenAIWrapper:
                 # a config for a custom client is set
                 # adding placeholder until the register_model_client is called with the appropriate class
                 self._clients.append(PlaceHolderClient(config))
+                # codeql[py/clear-text-logging-sensitive-data]
                 logger.info(
                     f"Detected custom model client in config: {model_client_cls_name}, model client can not be used until register_model_client is called."
                 )
@@ -1079,9 +1084,10 @@ class OpenAIWrapper:
         self._round_robin_index = (self._round_robin_index + 1) % len(self._clients)

         for i in ordered_clients_indices:
-            client = self._clients[i]
             # merge the input config with the i-th config in the config list
-
+            client_config = self._config_list[i]
+            full_config = merge_config_with_tools(config, client_config)
+
             # separate the config into create_config and extra_kwargs
             create_config, extra_kwargs = self._separate_create_config(full_config)
             # construct the create params
@@ -1112,6 +1118,7 @@ class OpenAIWrapper:
                 # Legacy cache behavior, if cache_seed is given, use DiskCache.
                 cache_client = Cache.disk(cache_seed, LEGACY_CACHE_DIR)

+            client = self._clients[i]
             log_cache_seed_value(cache if cache is not None else cache_seed, client=client)

             if cache_client is not None:
@@ -1462,6 +1469,13 @@ class OpenAIWrapper:
 # -----------------------------------------------------------------------------


+class OpenAIResponsesEntryDict(LLMConfigEntryDict, total=False):
+    api_type: Literal["responses"]
+
+    tool_choice: Literal["none", "auto", "required"] | None
+    built_in_tools: list[str] | None
+
+
 class OpenAIResponsesLLMConfigEntry(OpenAILLMConfigEntry):
     """LLMConfig entry for the OpenAI Responses API (stateful, tool-enabled).
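Two of these changes are directly user-visible in config dictionaries: `reasoning_effort` now admits `"minimal"`, and the new typed dict describes Responses API entries. A hedged sketch; the endpoint, model, version, and tool values below are placeholders, not values from the diff:

```python
azure_entry = {
    "api_type": "azure",
    "model": "o4-mini",                                # placeholder reasoning model
    "base_url": "https://example.azure.openai.com/",   # placeholder endpoint
    "api_version": "2024-12-01-preview",               # placeholder version
    "reasoning_effort": "minimal",                     # newly allowed by the widened Literal
    "max_completion_tokens": 1024,
}

responses_entry = {
    "api_type": "responses",          # per OpenAIResponsesEntryDict above
    "model": "gpt-4o",                # placeholder
    "built_in_tools": ["web_search"], # placeholder tool name
}
```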
autogen/oai/client_utils.py
CHANGED
@@ -110,6 +110,36 @@ def validate_parameter(
     return param_value


+def merge_config_with_tools(config: dict[str, Any], client_config: dict[str, Any]) -> dict[str, Any]:
+    """Merge configuration dictionaries with proper tools and functions handling.
+
+    This function merges two configuration dictionaries while ensuring that:
+    1. Empty 'tools' arrays are not added unnecessarily
+    2. 'tools' and deprecated 'functions' parameters are not both present
+    3. Actual tool configurations are properly merged
+
+    Args:
+        config: The base configuration dictionary (e.g., from create() call)
+        client_config: The client-specific configuration dictionary (e.g., from config_list)
+
+    Returns:
+        dict[str, Any]: The merged configuration with proper tools/functions handling
+    """
+    # Start with a clean merge of both configs
+    full_config = {**config, **client_config}
+
+    # Add tools if tools contains something AND are not using deprecated functions
+    config_tools = config.get("tools", [])
+    client_tools = client_config.get("tools", [])
+
+    if config_tools or client_tools:
+        # Don't add tools if functions parameter is present (deprecated API)
+        if "functions" not in full_config:
+            full_config["tools"] = config_tools + client_tools
+
+    return full_config
+
+
 def should_hide_tools(messages: list[dict[str, Any]], tools: list[dict[str, Any]], hide_tools_param: str) -> bool:
     """Determines if tools should be hidden. This function is used to hide tools when they have been run, minimising the chance of the LLM choosing them when they shouldn't.
     Parameters:
@@ -131,7 +161,7 @@ def should_hide_tools(messages: list[dict[str, Any]], tools: list[dict[str, Any]
         return False
     elif hide_tools_param == "if_any_run":
         # Return True if any tool_call_id exists, indicating a tool call has been executed. False otherwise.
-        return any(
+        return any("tool_call_id" in dictionary for dictionary in messages)
     elif hide_tools_param == "if_all_run":
         # Return True if all tools have been executed at least once. False otherwise.
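Since `merge_config_with_tools` is fully shown above, its merge semantics can be checked directly. An illustrative sketch (not part of the diff):

```python
from autogen.oai.client_utils import merge_config_with_tools

base = {"temperature": 0.2, "tools": [{"type": "function", "function": {"name": "a"}}]}
client = {"model": "gpt-4o", "tools": [{"type": "function", "function": {"name": "b"}}]}

merged = merge_config_with_tools(base, client)

# Overlapping scalar keys: the client config wins (plain dict-spread semantics).
assert merged["model"] == "gpt-4o" and merged["temperature"] == 0.2

# Tool lists from both sides are concatenated rather than overwritten.
assert [t["function"]["name"] for t in merged["tools"]] == ["a", "b"]
```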
autogen/oai/cohere.py
CHANGED
@@ -217,16 +217,6 @@ class CohereClient:
         if "top_p" in params:
             cohere_params["p"] = validate_parameter(params, "top_p", (int, float), False, 0.75, (0.01, 0.99), None)

-        if "p" in params:
-            warnings.warn(
-                (
-                    "parameter 'p' is deprecated, use 'top_p' instead for consistency with OpenAI API spec. "
-                    "Scheduled for removal in 0.10.0 version."
-                ),
-                DeprecationWarning,
-            )
-            cohere_params["p"] = validate_parameter(params, "p", (int, float), False, 0.75, (0.01, 0.99), None)
-
         if "seed" in params:
             cohere_params["seed"] = validate_parameter(params, "seed", int, True, None, None, None)

@@ -260,7 +250,7 @@ class CohereClient:
         cohere_params["messages"] = messages

         if "tools" in params:
-            cohere_tool_names =
+            cohere_tool_names = {tool["function"]["name"] for tool in params["tools"]}
             cohere_params["tools"] = params["tools"]

             # Strip out name
@@ -285,9 +275,9 @@ class CohereClient:
                 ) not in cohere_tool_names:
                     message["role"] = "assistant"
                     message["content"] = f"{message.pop('tool_plan', '')}{str(message['tool_calls'])}"
-                    tool_calls_modified_ids = tool_calls_modified_ids.union(
-
-                    )
+                    tool_calls_modified_ids = tool_calls_modified_ids.union({
+                        tool_call.get("id") for tool_call in message["tool_calls"]
+                    })
                     del message["tool_calls"]
                     break
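As with Bedrock, the Cohere client now accepts only the OpenAI-style parameter name. A sketch with a placeholder value:

```python
# Special-cased until 0.9.x (with a DeprecationWarning); dropped in 0.10.0:
old_style = {"p": 0.75}

# The supported spelling; CohereClient maps it to Cohere's "p" internally:
new_style = {"top_p": 0.75}
```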
autogen/oai/gemini.py
CHANGED
@@ -246,7 +246,7 @@ class GeminiClient:

         if model_name == "gemini-pro-vision":
             raise ValueError(
-                "Gemini 1.0 Pro vision ('gemini-pro-vision') has been deprecated, please consider switching to a different model, for example 'gemini-
+                "Gemini 1.0 Pro vision ('gemini-pro-vision') has been deprecated, please consider switching to a different model, for example 'gemini-2.5-flash'."
             )
         elif not model_name:
             raise ValueError(
@@ -385,9 +385,7 @@ class GeminiClient:
                         function={
                             "name": fn_call.name,
                             "arguments": (
-                                json.dumps(
-                                if fn_call.args is not None
-                                else ""
+                                json.dumps(dict(fn_call.args.items())) if fn_call.args is not None else ""
                             ),
                         },
                         type="function",
@@ -857,10 +855,10 @@ class GeminiClient:
         """Convert safety settings to VertexAI format if needed,
         like when specifying them in the OAI_CONFIG_LIST
         """
-        if isinstance(safety_settings, list) and all(
+        if isinstance(safety_settings, list) and all(
             isinstance(safety_setting, dict) and not isinstance(safety_setting, VertexAISafetySetting)
             for safety_setting in safety_settings
-
+        ):
             vertexai_safety_settings = []
             for safety_setting in safety_settings:
                 if safety_setting["category"] not in VertexAIHarmCategory.__members__: