ag2 0.9.4__py3-none-any.whl → 0.9.6__py3-none-any.whl
- {ag2-0.9.4.dist-info → ag2-0.9.6.dist-info}/METADATA +1 -1
- {ag2-0.9.4.dist-info → ag2-0.9.6.dist-info}/RECORD +22 -13
- autogen/agentchat/realtime/experimental/realtime_swarm.py +2 -0
- autogen/code_utils.py +8 -6
- autogen/coding/docker_commandline_code_executor.py +29 -9
- autogen/environments/__init__.py +10 -0
- autogen/environments/docker_python_environment.py +375 -0
- autogen/environments/python_environment.py +134 -0
- autogen/environments/system_python_environment.py +86 -0
- autogen/environments/venv_python_environment.py +224 -0
- autogen/environments/working_directory.py +75 -0
- autogen/llm_config.py +6 -3
- autogen/mcp/mcp_proxy/mcp_proxy.py +4 -4
- autogen/oai/client.py +72 -8
- autogen/oai/openai_responses.py +426 -0
- autogen/tools/experimental/__init__.py +2 -0
- autogen/tools/experimental/code_execution/__init__.py +7 -0
- autogen/tools/experimental/code_execution/python_code_execution.py +88 -0
- autogen/version.py +1 -1
- {ag2-0.9.4.dist-info → ag2-0.9.6.dist-info}/WHEEL +0 -0
- {ag2-0.9.4.dist-info → ag2-0.9.6.dist-info}/licenses/LICENSE +0 -0
- {ag2-0.9.4.dist-info → ag2-0.9.6.dist-info}/licenses/NOTICE.md +0 -0
autogen/oai/client.py
CHANGED
@@ -51,6 +51,8 @@ if openai_result.is_successful:
     from openai.types.completion import Completion
     from openai.types.completion_usage import CompletionUsage

+    from autogen.oai.openai_responses import OpenAIResponsesClient
+
     if openai.__version__ >= "1.1.0":
         TOOL_ENABLED = True
     ERROR = None
@@ -314,7 +316,7 @@ class ModelClient(Protocol):
     class ModelClientResponseProtocol(Protocol):
         class Choice(Protocol):
             class Message(Protocol):
-                content: Optional[str]
+                content: Optional[str] | Optional[dict[str, Any]]

            message: Message

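The widened `content` annotation is what lets the new Responses client return structured blocks (text, built-in tool calls, images) instead of a plain string. For example, a message produced by `OpenAIResponsesClient.message_retrieval` (reconstructed later in this diff) can take the following shape; the id and text values are illustrative:

```python
message = {
    "role": "assistant",
    "id": "resp_abc123",  # illustrative Responses API response id
    "content": [
        {"type": "text", "text": "Here is the chart you asked for.", "role": "assistant"},
        {
            "type": "tool_call",  # built-in tool call surfaced as a content block
            "name": "image_generation",
            "image_url": "data:image/png;base64,...",  # elided base64 payload
        },
    ],
    "tool_calls": [],
}
```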
@@ -804,17 +806,29 @@ class OpenAIWrapper:
         self._clients: list[ModelClient] = []
         self._config_list: list[dict[str, Any]] = []

+        # Determine routing_method from base_config only.
+        self.routing_method = base_config.get("routing_method") or "fixed_order"
+        self._round_robin_index = 0
+
+        # Remove routing_method from extra_kwargs after it has been used to set self.routing_method
+        # This ensures it's not part of the individual client configurations that are based on extra_kwargs.
+        extra_kwargs.pop("routing_method", None)
+
         if config_list:
             config_list = [config.copy() for config in config_list]  # make a copy before modifying
-            for
-            self._register_default_client(
-
-
-
-            }
+            for config_item in config_list:
+                self._register_default_client(config_item, openai_config)
+                # Construct current_config_extra_kwargs using the cleaned extra_kwargs
+                # (which doesn't have routing_method from base_config)
+                # and specific non-openai kwargs from config_item.
+                config_item_specific_extras = {k: v for k, v in config_item.items() if k not in self.openai_kwargs}
+                self._config_list.append({**extra_kwargs, **config_item_specific_extras})
         else:
+            # For a single config passed via base_config (already in extra_kwargs)
             self._register_default_client(extra_kwargs, openai_config)
+            # extra_kwargs has already had routing_method popped.
             self._config_list = [extra_kwargs]
+
         self.wrapper_id = id(self)

     def _separate_openai_config(self, config: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]:
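With the constructor changes above, `routing_method` is read from the base config only and stripped before the per-client configs are built. A minimal sketch of opting into round-robin routing (model names are illustrative; `fixed_order` remains the default):

```python
from autogen import OpenAIWrapper

config_list = [
    {"api_type": "openai", "model": "gpt-4o"},
    {"api_type": "openai", "model": "gpt-4o-mini"},
]

# routing_method is passed as part of the base config, not inside config_list
# entries, so the constructor pops it before building per-client configurations.
wrapper = OpenAIWrapper(config_list=config_list, routing_method="round_robin")
```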
@@ -952,6 +966,15 @@ class OpenAIWrapper:
                 raise ImportError("Please install `boto3` to use the Amazon Bedrock API.")
             client = BedrockClient(response_format=response_format, **openai_config)
             self._clients.append(client)
+        elif api_type is not None and api_type.startswith("responses"):
+            # OpenAI Responses API (stateful). Reuse the same OpenAI SDK but call the `/responses` endpoint via the new client.
+            @require_optional_import("openai>=1.66.2", "openai")
+            def create_responses_client() -> "OpenAI":
+                client = OpenAI(**openai_config)
+                self._clients.append(OpenAIResponsesClient(client, response_format=response_format))
+                return client
+
+            client = create_responses_client()
         else:

             @require_optional_import("openai>=1.66.2", "openai")
@@ -1063,7 +1086,16 @@ class OpenAIWrapper:
             raise RuntimeError(
                 f"Model client(s) {non_activated} are not activated. Please register the custom model clients using `register_model_client` or filter them out form the config list."
             )
-
+
+        ordered_clients_indices = list(range(len(self._clients)))
+        if self.routing_method == "round_robin" and len(self._clients) > 0:
+            ordered_clients_indices = (
+                ordered_clients_indices[self._round_robin_index :] + ordered_clients_indices[: self._round_robin_index]
+            )
+            self._round_robin_index = (self._round_robin_index + 1) % len(self._clients)
+
+        for i in ordered_clients_indices:
+            client = self._clients[i]
             # merge the input config with the i-th config in the config list
             full_config = {**config, **self._config_list[i]}
             # separate the config into create_config and extra_kwargs
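The rotation itself is plain list slicing: the index list is split at `_round_robin_index`, the halves are swapped, and the index advances by one per `create` call. A standalone illustration:

```python
clients = ["A", "B", "C"]
round_robin_index = 1  # state after one prior call into the wrapper

order = list(range(len(clients)))
order = order[round_robin_index:] + order[:round_robin_index]

print([clients[i] for i in order])  # ['B', 'C', 'A'] -> client B is tried first
```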
@@ -1442,3 +1474,35 @@ class OpenAIWrapper:
             A list of text, or a list of ChatCompletion objects if function_call/tool_calls are present.
         """
         return response.message_retrieval_function(response)
+
+
+# -----------------------------------------------------------------------------
+# New: Responses API config entry (OpenAI-hosted preview endpoint)
+# -----------------------------------------------------------------------------
+
+
+@register_llm_config
+class OpenAIResponsesLLMConfigEntry(OpenAILLMConfigEntry):
+    """LLMConfig entry for the OpenAI Responses API (stateful, tool-enabled).
+
+    This reuses all the OpenAI fields but changes *api_type* so the wrapper can
+    route traffic to the `client.responses` endpoint instead of
+    `chat.completions`. It inherits everything else – including reasoning
+    fields – from *OpenAILLMConfigEntry* so users can simply set
+
+    ```python
+    {
+        "api_type": "responses",  # <-- key differentiator
+        "model": "o3",  # reasoning model
+        "reasoning_effort": "medium",  # low / medium / high
+        "stream": True,
+    }
+    ```
+    """
+
+    api_type: Literal["responses"] = "responses"
+    tool_choice: Optional[Literal["none", "auto", "required"]] = "auto"
+    built_in_tools: Optional[list[str]] = None
+
+    def create_client(self) -> "ModelClient":  # pragma: no cover
+        raise NotImplementedError("Handled via OpenAIWrapper._register_default_client")
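Because the entry inherits from `OpenAILLMConfigEntry`, pointing an existing agent at the Responses endpoint is essentially a one-field change. A sketch, assuming `LLMConfig` accepts these fields as keyword arguments (the agent name and model choice are illustrative):

```python
from autogen import ConversableAgent, LLMConfig

# "responses" routes through OpenAIResponsesClient instead of chat.completions.
llm_config = LLMConfig(
    api_type="responses",
    model="o3",
    reasoning_effort="medium",  # inherited reasoning field, per the docstring above
)

assistant = ConversableAgent(name="assistant", llm_config=llm_config)
```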
autogen/oai/openai_responses.py
ADDED

@@ -0,0 +1,426 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import copy
+import warnings
+from typing import TYPE_CHECKING, Any, Tuple, Union
+
+from pydantic import BaseModel
+
+from autogen.code_utils import content_str
+from autogen.import_utils import optional_import_block, require_optional_import
+
+if TYPE_CHECKING:
+    from autogen.oai.client import ModelClient, OpenAI, OpenAILLMConfigEntry
+else:
+    # Import at runtime to avoid circular import
+    OpenAILLMConfigEntry = None
+    ModelClient = None
+    OpenAI = None
+
+with optional_import_block() as openai_result:
+    from openai.types.responses.response import Response
+    from openai.types.responses.response_output_item import ImageGenerationCall
+
+# Image Costs
+# Pricing per image (in USD)
+PRICING = {
+    "gpt-image-1": {
+        "low": {"1024x1024": 0.011, "1024x1536": 0.016, "1536x1024": 0.016},
+        "medium": {"1024x1024": 0.042, "1024x1536": 0.063, "1536x1024": 0.063},
+        "high": {"1024x1024": 0.167, "1024x1536": 0.25, "1536x1024": 0.25},
+    },
+    "dall-e-3": {
+        "standard": {"1024x1024": 0.040, "1024x1792": 0.080, "1792x1024": 0.080},
+        "hd": {"1024x1024": 0.080, "1024x1792": 0.120, "1792x1024": 0.120},
+    },
+    "dall-e-2": {"standard": {"1024x1024": 0.020, "512x512": 0.018, "256x256": 0.016}},
+}
+
+# Valid sizes for each model
+VALID_SIZES = {
+    "gpt-image-1": ["1024x1024", "1024x1536", "1536x1024"],
+    "dall-e-3": ["1024x1024", "1024x1792", "1792x1024"],
+    "dall-e-2": ["1024x1024", "512x512", "256x256"],
+}
+
+
+def calculate_openai_image_cost(
+    model: str = "gpt-image-1", size: str = "1024x1024", quality: str = "high"
+) -> Tuple[float, str]:
+    """
+    Calculate the cost for a single image generation.
+
+    Args:
+        model: Model name ("gpt-image-1", "dall-e-3" or "dall-e-2")
+        size: Image size (e.g., "1024x1024", "1024x1536")
+        quality: Quality setting:
+            - For gpt-image-1: "low", "medium", or "high"
+            - For dall-e-3: "standard" or "hd"
+            - For dall-e-2: "standard" only
+
+    Returns:
+        Tuple of (cost, error_message)
+    """
+    # Normalize inputs
+    model = model.lower()
+    quality = quality.lower()
+
+    # Validate model
+    if model not in PRICING:
+        return 0.0, f"Invalid model: {model}. Valid models: {list(PRICING.keys())}"
+
+    # Validate size
+    if size not in VALID_SIZES[model]:
+        return 0.0, f"Invalid size {size} for {model}. Valid sizes: {VALID_SIZES[model]}"
+
+    # Get the cost based on model type
+    try:
+        if model == "gpt-image-1" or model == "dall-e-3":
+            cost = PRICING[model][quality][size]
+        elif model == "dall-e-2":
+            cost = PRICING[model]["standard"][size]
+        else:
+            return 0.0, f"Model {model} not properly configured"
+
+        return cost, None
+
+    except KeyError:
+        return 0.0, f"Invalid quality '{quality}' for {model}"
+
+
+def _get_base_class():
+    """Lazy import OpenAILLMConfigEntry to avoid circular imports."""
+    from autogen.oai.client import OpenAILLMConfigEntry
+
+    return OpenAILLMConfigEntry
+
+
+# -----------------------------------------------------------------------------
+# OpenAI Client that calls the /responses endpoint
+# -----------------------------------------------------------------------------
+@require_optional_import("openai", "openai")
+class OpenAIResponsesClient:
+    """Minimal implementation targeting the experimental /responses endpoint.
+
+    We purposefully keep the surface small - *create*, *message_retrieval*,
+    *cost* and *get_usage* - enough for ConversableAgent to operate. Anything
+    that the new endpoint does natively (web_search, file_search, image
+    generation, function calling, etc.) is transparently passed through by the
+    OpenAI SDK so we don't replicate logic here.
+    """
+
+    def __init__(
+        self,
+        client: "OpenAI",
+        response_format: Union[BaseModel, dict[str, Any], None] = None,
+    ):
+        self._oai_client = client  # plain openai.OpenAI instance
+        self.response_format = response_format  # kept for parity but unused for now
+
+        # Initialize the image generation parameters
+        self.image_output_params = {
+            "quality": None,  # "high" or "low"
+            "background": None,  # "white" or "black" or "transparent"
+            "size": None,  # "1024x1024" or "1024x1792" or "1792x1024"
+            "output_format": "png",  # "png", "jpg" or "jpeg" or "webp"
+            "output_compression": None,  # 0-100 if output_format is "jpg" or "jpeg" or "webp"
+        }
+        self.previous_response_id = None
+
+        # Image costs are calculated manually (rather than off returned information)
+        self.image_costs = 0
+
+    # ------------------------------------------------------------------ helpers
+    # responses objects embed usage similarly to chat completions
+    @staticmethod
+    def _usage_dict(resp) -> dict:
+        usage_obj = getattr(resp, "usage", None) or {}
+
+        # Convert pydantic/BaseModel usage objects to dict for uniform access
+        if hasattr(usage_obj, "model_dump"):
+            usage = usage_obj.model_dump()
+        elif isinstance(usage_obj, dict):
+            usage = usage_obj
+        else:  # fallback - unknown structure
+            usage = {}
+
+        output_tokens_details = usage.get("output_tokens_details", {})
+
+        return {
+            "prompt_tokens": usage.get("input_tokens", 0),
+            "completion_tokens": usage.get("output_tokens", 0),
+            "total_tokens": usage.get("total_tokens", 0),
+            "cost": getattr(resp, "cost", 0),
+            "model": getattr(resp, "model", ""),
+            "reasoning_tokens": output_tokens_details.get("reasoning_tokens", 0),
+        }
+
+    def _add_image_cost(self, response: "Response") -> None:
+        """Add image cost to self._image_costs when an image is generated"""
+        for output in response.output:
+            if (
+                isinstance(output, ImageGenerationCall)
+                and hasattr(response.output[0], "model_extra")
+                and response.output[0].model_extra
+            ):
+                extra_fields = output.model_extra
+
+                image_cost, image_error = calculate_openai_image_cost(
+                    model="gpt-image-1",
+                    size=extra_fields.get("size", "1024x1536"),
+                    quality=extra_fields.get("quality", "high"),
+                )
+
+                if not image_error and image_cost:
+                    self.image_costs += image_cost
+
+    def _get_delta_messages(self, messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
+        """Get the delta messages from the messages."""
+        delta_messages = []
+        for m in messages[::-1]:
+            contents = m.get("content")
+            is_last_completed_response = False
+            if isinstance(contents, list):
+                for c in contents:
+                    if "status" in c and c.get("status") == "completed":
+                        is_last_completed_response = True
+                        break
+            elif isinstance(contents, str):
+                is_last_completed_response = "status" in m and m.get("status") == "completed"
+
+            if is_last_completed_response:
+                break
+            delta_messages.append(m)
+        return delta_messages[::-1]
+
+    def create(self, params: dict[str, Any]) -> "Response":
+        """Invoke `client.responses.create() or .parse()`.
+
+        If the caller provided a classic *messages* array we convert it to the
+        *input* format expected by the Responses API.
+        """
+        params = params.copy()
+
+        image_generation_tool_params = {"type": "image_generation"}
+        web_search_tool_params = {"type": "web_search_preview"}
+
+        if self.previous_response_id is not None and "previous_response_id" not in params:
+            params["previous_response_id"] = self.previous_response_id
+
+        # Back-compat: transform messages → input if needed ------------------
+        if "messages" in params and "input" not in params:
+            msgs = self._get_delta_messages(params.pop("messages"))
+            input_items = []
+            for m in msgs[::-1]:  # reverse the list to get the last item first
+                role = m.get("role", "user")
+                # First, we need to convert the content to the Responses API format
+                content = m.get("content")
+                blocks = []
+                if role != "tool":
+                    if isinstance(content, list):
+                        for c in content:
+                            if c.get("type") in ["input_text", "text"]:
+                                blocks.append({"type": "input_text", "text": c.get("text")})
+                            elif c.get("type") == "input_image":
+                                blocks.append({"type": "input_image", "image_url": c.get("image_url")})
+                            elif c.get("type") == "image_params":
+                                for k, v in c.get("image_params", {}).items():
+                                    if k in self.image_output_params:
+                                        image_generation_tool_params[k] = v
+                            else:
+                                raise ValueError(f"Invalid content type: {c.get('type')}")
+                    else:
+                        blocks.append({"type": "input_text", "text": content})
+                    input_items.append({"role": role, "content": blocks})
+
+                else:
+                    if input_items:
+                        break
+                    # tool call response is the last item in the list
+                    content = content_str(m.get("content"))
+                    input_items.append({
+                        "type": "function_call_output",
+                        "call_id": m.get("tool_call_id", None),
+                        "output": content,
+                    })
+                    break
+            params["input"] = input_items[::-1]
+
+        # Initialize tools list
+        tools_list = []
+        # Back-compat: add default tools
+        built_in_tools = params.pop("built_in_tools", [])
+        if built_in_tools:
+            if "image_generation" in built_in_tools:
+                tools_list.append(image_generation_tool_params)
+            if "web_search" in built_in_tools:
+                tools_list.append(web_search_tool_params)
+
+        if "tools" in params:
+            for tool in params["tools"]:
+                tool_item = {"type": "function"}
+                if "function" in tool:
+                    tool_item |= tool["function"]
+                tools_list.append(tool_item)
+        params["tools"] = tools_list
+        params["tool_choice"] = "auto"
+
+        # Ensure we don't mix legacy params that Responses doesn't accept
+        if params.get("stream") and params.get("background"):
+            warnings.warn(
+                "Streaming a background response may introduce latency.",
+                UserWarning,
+            )
+
+        # ------------------------------------------------------------------
+        # Structured output handling - mimic OpenAIClient behaviour
+        # ------------------------------------------------------------------
+        if self.response_format is not None or "response_format" in params:
+
+            def _create_or_parse(**kwargs):
+                # For structured output we must convert dict / pydantic model
+                # into the JSON-schema body expected by the API.
+                if "stream" in kwargs:
+                    kwargs.pop("stream")  # Responses API rejects stream with RF for now
+
+                rf = kwargs.get("response_format", self.response_format)
+
+                if isinstance(rf, dict):
+                    from autogen.oai.client import _ensure_strict_json_schema
+
+                    kwargs["text_format"] = {
+                        "type": "json_schema",
+                        "json_schema": {
+                            "schema": _ensure_strict_json_schema(rf, path=(), root=rf),
+                            "name": "response_format",
+                            "strict": True,
+                        },
+                    }
+                else:
+                    # pydantic.BaseModel subclass
+                    from autogen.oai.client import type_to_response_format_param

+                    kwargs["text_format"] = type_to_response_format_param(rf)
+                if "response_format" in kwargs:
+                    kwargs["text_format"] = kwargs.pop("response_format")
+
+                try:
+                    return self._oai_client.responses.parse(**kwargs)
+                except TypeError as e:
+                    # Older openai-python versions may not yet expose the
+                    # text_format parameter on the Responses endpoint.
+                    if "text_format" in str(e) and "unexpected" in str(e):
+                        warnings.warn(
+                            "Installed openai-python version doesn't support "
+                            "`response_format` for the Responses API. "
+                            "Falling back to raw text output.",
+                            UserWarning,
+                        )
+                        kwargs.pop("text_format", None)
+                        return self._oai_client.responses.create(**kwargs)
+
+            response = _create_or_parse(**params)
+            self.previous_response_id = response.id
+            return response
+
+        # No structured output
+        response = self._oai_client.responses.create(**params)
+        self.previous_response_id = response.id
+
+        # Accumulate image costs
+        self._add_image_cost(response)
+
+        return response
+
+    def message_retrieval(
+        self, response
+    ) -> Union[list[str], list["ModelClient.ModelClientResponseProtocol.Choice.Message"]]:
+        output = getattr(response, "output", [])
+        content = []  # list[dict[str, Union[str, dict[str, Any]]]]
+        tool_calls = []
+        for item in output:
+            # Convert pydantic objects to plain dicts for uniform handling
+            if hasattr(item, "model_dump"):
+                item = item.model_dump()
+
+            item_type = item.get("type")
+
+            # ------------------------------------------------------------------
+            # 1) Normal messages
+            # ------------------------------------------------------------------
+            if item_type == "message":
+                new_item = copy.deepcopy(item)
+                new_item["type"] = "text"
+                new_item["role"] = "assistant"
+                blocks = item.get("content", [])
+                if len(blocks) == 1 and blocks[0].get("type") == "output_text":
+                    new_item["text"] = blocks[0]["text"]
+                    if "content" in new_item:
+                        del new_item["content"]
+                content.append(new_item)
+                continue
+
+            # ------------------------------------------------------------------
+            # 2) Custom function calls
+            # ------------------------------------------------------------------
+            if item_type == "function_call":
+                tool_calls.append({
+                    "id": item.get("call_id", None),
+                    "function": {
+                        "name": item.get("name", None),
+                        "arguments": item.get("arguments"),
+                    },
+                    "type": "function_call",
+                })
+                continue
+
+            # ------------------------------------------------------------------
+            # 3) Built-in tool calls
+            # ------------------------------------------------------------------
+            if item_type and item_type.endswith("_call"):
+                tool_name = item_type.replace("_call", "")
+                tool_call_args = {
+                    "id": item.get("id"),
+                    "role": "tool_calls",
+                    "type": "tool_call",  # Responses API currently routes via function-like tools
+                    "name": tool_name,
+                }
+                if tool_name == "image_generation":
+                    for k in self.image_output_params:
+                        if k in item:
+                            tool_call_args[k] = item[k]
+                    encoded_base64_result = item.get("result", "")
+                    tool_call_args["content"] = encoded_base64_result
+                    # add image_url for image input back to oai response api.
+                    output_format = self.image_output_params["output_format"]
+                    tool_call_args["image_url"] = f"data:image/{output_format};base64,{encoded_base64_result}"
+                elif tool_name == "web_search":
+                    pass
+                else:
+                    raise ValueError(f"Invalid tool name: {tool_name}")
+                content.append(tool_call_args)
+                continue
+
+            # ------------------------------------------------------------------
+            # 4) Fallback - store raw dict so information isn't lost
+            # ------------------------------------------------------------------
+            content.append(item)
+
+        return [
+            {
+                "role": "assistant",
+                "id": response.id,
+                "content": content if content else None,
+                "tool_calls": tool_calls,
+            }
+        ]
+
+    def cost(self, response):
+        return self._usage_dict(response).get("cost", 0) + self.image_costs
+
+    @staticmethod
+    def get_usage(response):
+        return OpenAIResponsesClient._usage_dict(response)
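A quick check of the pricing helper above; it validates its inputs and returns a `(cost, error)` pair instead of raising:

```python
from autogen.oai.openai_responses import calculate_openai_image_cost

cost, err = calculate_openai_image_cost(model="gpt-image-1", size="1024x1536", quality="medium")
print(cost, err)  # 0.063 None, per the PRICING table above

# Invalid combinations come back as (0.0, <message>) rather than an exception.
cost, err = calculate_openai_image_cost(model="dall-e-3", size="512x512", quality="hd")
print(cost, err)  # 0.0 Invalid size 512x512 for dall-e-3. Valid sizes: [...]
```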
autogen/tools/experimental/__init__.py
CHANGED

@@ -3,6 +3,7 @@
 # SPDX-License-Identifier: Apache-2.0

 from .browser_use import BrowserUseTool
+from .code_execution import PythonCodeExecutionTool
 from .crawl4ai import Crawl4AITool
 from .deep_research import DeepResearchTool
 from .duckduckgo import DuckDuckGoSearchTool
@@ -34,6 +35,7 @@ __all__ = [
     "FirecrawlTool",
     "GoogleSearchTool",
     "PerplexitySearchTool",
+    "PythonCodeExecutionTool",
     "ReliableTool",
     "ReliableToolError",
     "SearxngSearchTool",
autogen/tools/experimental/code_execution/python_code_execution.py
ADDED

@@ -0,0 +1,88 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+import tempfile
+from typing import Annotated, Any, Optional
+
+from pydantic import BaseModel, Field
+
+from ....doc_utils import export_module
+from ....environments import WorkingDirectory
+from ....environments.python_environment import PythonEnvironment
+from ... import Tool
+
+__all__ = ["PythonCodeExecutionTool"]
+
+
+@export_module("autogen.tools.experimental")
+class PythonCodeExecutionTool(Tool):
+    """Executes Python code in a given environment and returns the result."""
+
+    def __init__(
+        self,
+        *,
+        timeout: int = 30,
+        working_directory: Optional[WorkingDirectory] = None,
+        python_environment: Optional[PythonEnvironment] = None,
+    ) -> None:
+        """
+        Initialize the PythonCodeExecutionTool.
+
+        **CAUTION**: If provided a local environment, this tool will execute code in your local environment, which can be dangerous if the code is untrusted.
+
+        Args:
+            timeout: Maximum execution time allowed in seconds, will raise a TimeoutError exception if exceeded.
+            working_directory: Optional WorkingDirectory context manager to use.
+            python_environment: Optional PythonEnvironment to use. If None, will auto-detect or create based on other parameters.
+        """
+        # Store configuration parameters
+        self.timeout = timeout
+        self.working_directory = WorkingDirectory.get_current_working_directory(working_directory)
+        tool_python_environment = PythonEnvironment.get_current_python_environment(python_environment)
+
+        assert self.working_directory, "No Working directory found"
+        assert tool_python_environment, "No Python environment found"
+
+        self.python_environment = tool_python_environment
+
+        # Pydantic model to contain the code and list of libraries to execute
+        class CodeExecutionRequest(BaseModel):
+            code: Annotated[str, Field(description="Python code to execute")]
+            libraries: Annotated[list[str], Field(description="List of libraries to install before execution")]
+
+        # The tool function, this is what goes to the LLM
+        async def execute_python_code(
+            code_execution_request: Annotated[CodeExecutionRequest, "Python code and the libraries required"],
+        ) -> dict[str, Any]:
+            """
+            Executes Python code in the attached environment and returns the result.
+
+            Args:
+                code_execution_request (CodeExecutionRequest): The Python code and libraries to execute
+            """
+            code = code_execution_request.code
+
+            # NOTE: Libraries are not installed (something to consider for future versions)
+
+            # Prepare a script file path
+            script_dir = self._get_script_directory()
+            script_path = os.path.join(script_dir, "script.py")
+
+            # Execute the code
+            return await self.python_environment.execute_code(code=code, script_path=script_path, timeout=self.timeout)
+
+        super().__init__(
+            name="python_execute_code",
+            description="Executes Python code and returns the result.",
+            func_or_tool=execute_python_code,
+        )
+
+    def _get_script_directory(self) -> str:
+        """Get the directory to use for scripts."""
+        if self.working_directory and hasattr(self.working_directory, "path") and self.working_directory.path:
+            path = self.working_directory.path
+            os.makedirs(path, exist_ok=True)
+            return path
+        return tempfile.mkdtemp(prefix="ag2_script_dir_")
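A sketch of wiring the new tool into an agent. `SystemPythonEnvironment` comes from the new `autogen.environments` package in this release's file list; assuming it constructs with defaults, and keeping in mind the CAUTION above about running untrusted code locally:

```python
from autogen.environments import SystemPythonEnvironment
from autogen.tools.experimental import PythonCodeExecutionTool

# CAUTION: a system environment executes generated code on the local machine.
tool = PythonCodeExecutionTool(
    timeout=60,
    python_environment=SystemPythonEnvironment(),  # assumed default construction
)

# Registration follows the usual Tool API, e.g.:
# tool.register_for_llm(assistant)
# tool.register_for_execution(executor)
```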
autogen/version.py
CHANGED

{ag2-0.9.4.dist-info → ag2-0.9.6.dist-info}/WHEEL
File without changes

{ag2-0.9.4.dist-info → ag2-0.9.6.dist-info}/licenses/LICENSE
File without changes

{ag2-0.9.4.dist-info → ag2-0.9.6.dist-info}/licenses/NOTICE.md
File without changes