ag2 0.9.2__py3-none-any.whl → 0.9.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of ag2 might be problematic.
- {ag2-0.9.2.dist-info → ag2-0.9.4.dist-info}/METADATA +14 -10
- {ag2-0.9.2.dist-info → ag2-0.9.4.dist-info}/RECORD +35 -29
- autogen/agentchat/contrib/agent_optimizer.py +6 -3
- autogen/agentchat/contrib/capabilities/transforms.py +22 -9
- autogen/agentchat/conversable_agent.py +51 -5
- autogen/agentchat/group/group_utils.py +81 -27
- autogen/agentchat/group/guardrails.py +171 -0
- autogen/agentchat/group/handoffs.py +81 -5
- autogen/agentchat/group/on_context_condition.py +2 -2
- autogen/agentchat/group/patterns/pattern.py +7 -1
- autogen/agentchat/group/targets/transition_target.py +10 -0
- autogen/agentchat/groupchat.py +95 -8
- autogen/agentchat/realtime/experimental/realtime_swarm.py +12 -4
- autogen/agents/experimental/document_agent/document_agent.py +232 -40
- autogen/agents/experimental/websurfer/websurfer.py +9 -1
- autogen/events/agent_events.py +6 -0
- autogen/events/helpers.py +8 -0
- autogen/mcp/helpers.py +45 -0
- autogen/mcp/mcp_proxy/mcp_proxy.py +2 -3
- autogen/messages/agent_messages.py +1 -1
- autogen/oai/gemini.py +41 -17
- autogen/oai/gemini_types.py +2 -1
- autogen/oai/oai_models/chat_completion.py +1 -1
- autogen/tools/experimental/__init__.py +4 -0
- autogen/tools/experimental/browser_use/browser_use.py +4 -11
- autogen/tools/experimental/firecrawl/__init__.py +7 -0
- autogen/tools/experimental/firecrawl/firecrawl_tool.py +853 -0
- autogen/tools/experimental/searxng/__init__.py +7 -0
- autogen/tools/experimental/searxng/searxng_search.py +141 -0
- autogen/version.py +1 -1
- templates/client_template/main.jinja2 +5 -2
- templates/main.jinja2 +1 -1
- {ag2-0.9.2.dist-info → ag2-0.9.4.dist-info}/WHEEL +0 -0
- {ag2-0.9.2.dist-info → ag2-0.9.4.dist-info}/licenses/LICENSE +0 -0
- {ag2-0.9.2.dist-info → ag2-0.9.4.dist-info}/licenses/NOTICE.md +0 -0
autogen/oai/gemini.py
CHANGED
@@ -54,7 +54,6 @@ from io import BytesIO
 from typing import Any, Literal, Optional, Type, Union
 
 import requests
-from packaging import version
 from pydantic import BaseModel, Field
 
 from ..import_utils import optional_import_block, require_optional_import
@@ -332,6 +331,12 @@ class GeminiClient:
                 recitation_part = Part(text="Unsuccessful Finish Reason: RECITATION")
                 parts = [recitation_part]
                 error_finish_reason = "content_filter"  # As per available finish_reason in Choice
+            elif not response.candidates[0].content or not response.candidates[0].content.parts:
+                error_part = Part(
+                    text=f"Unsuccessful Finish Reason: ({str(response.candidates[0].finish_reason)}) NO CONTENT RETURNED"
+                )
+                parts = [error_part]
+                error_finish_reason = "content_filter"  # No other option in Choice in chat_completion.py
             else:
                 parts = response.candidates[0].content.parts
         elif isinstance(response, VertexAIGenerationResponse):  # or hasattr(response, "candidates"):
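Note on the new branch: a Gemini candidate can come back with no content parts at all (for example, when the output was fully filtered), which previously fell through to the else branch. The sketch below shows the shape of the mapping with illustrative names, not the ag2 implementation; "content_filter" is used because it is the only unsuccessful finish_reason that ag2's Choice model accepts.

    # Illustrative sketch: surface an empty Gemini candidate as an
    # OpenAI-style choice with a "content_filter" finish reason.
    def choice_from_empty_candidate(candidate) -> dict:
        reason = str(getattr(candidate, "finish_reason", "UNKNOWN"))
        return {
            "message": {
                "role": "assistant",
                "content": f"Unsuccessful Finish Reason: ({reason}) NO CONTENT RETURNED",
            },
            "finish_reason": "content_filter",
        }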
@@ -570,16 +575,19 @@ class GeminiClient:
 
             if part_type == "text":
                 rst.append(
-                    VertexAIContent(parts=parts, role=role)
-
-
+                    VertexAIContent(parts=parts, role=role) if self.use_vertexai else Content(parts=parts, role=role)
+                )
+            elif part_type == "tool_call":
+                # Function calls should be from the model/assistant
+                role = "model"
+                rst.append(
+                    VertexAIContent(parts=parts, role=role) if self.use_vertexai else Content(parts=parts, role=role)
                 )
-            elif part_type == "tool"
-
+            elif part_type == "tool":
+                # Function responses should be from the user
+                role = "user"
                 rst.append(
-                    VertexAIContent(parts=parts, role=role)
-                    if self.use_vertexai
-                    else rst.append(Content(parts=parts, role=role))
+                    VertexAIContent(parts=parts, role=role) if self.use_vertexai else Content(parts=parts, role=role)
                 )
             elif part_type == "image":
                 # Image has multiple parts, some can be text and some can be image based
@@ -599,14 +607,14 @@ class GeminiClient:
             rst.append(
                 VertexAIContent(parts=text_parts, role=role)
                 if self.use_vertexai
-                else
+                else Content(parts=text_parts, role=role)
             )
 
         if len(image_parts) > 0:
             rst.append(
                 VertexAIContent(parts=image_parts, role=role)
                 if self.use_vertexai
-                else
+                else Content(parts=image_parts, role=role)
             )
 
         if len(rst) != 0 and rst[-1] is None:
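The substance of the two hunks above is a role rule: Gemini's API expects function-call parts on a "model" turn and function-response parts on a "user" turn, whatever OpenAI-style role the message arrived with. A minimal sketch of that rule (names illustrative, not the ag2 code):

    def gemini_role_for(part_type: str, default_role: str) -> str:
        # Function calls are emitted by the model; function responses are
        # supplied back by the caller, so Gemini wants them on a "user" turn.
        if part_type == "tool_call":
            return "model"
        if part_type == "tool":
            return "user"
        return default_role

    assert gemini_role_for("tool_call", "user") == "model"
    assert gemini_role_for("tool", "model") == "user"
    assert gemini_role_for("text", "model") == "model"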
@@ -899,18 +907,25 @@ def calculate_gemini_cost(use_vertexai: bool, input_tokens: int, output_tokens:
     # https://cloud.google.com/vertex-ai/generative-ai/pricing#vertex-ai-pricing
 
     if (
-        "gemini-2.5-pro
-        or "gemini-2.5-pro-
+        model_name == "gemini-2.5-pro"
+        or "gemini-2.5-pro-preview-06-05" in model_name
         or "gemini-2.5-pro-preview-05-06" in model_name
+        or "gemini-2.5-pro-preview-03-25" in model_name
     ):
         if up_to_200k:
             return total_cost_mil(1.25, 10)
         else:
             return total_cost_mil(2.5, 15)
 
-    elif "gemini-2.5-flash
+    elif "gemini-2.5-flash" in model_name:
+        return total_cost_mil(0.3, 2.5)
+
+    elif "gemini-2.5-flash-preview-04-17" in model_name or "gemini-2.5-flash-preview-05-20" in model_name:
         return total_cost_mil(0.15, 0.6)  # NON-THINKING OUTPUT PRICE, $3 FOR THINKING!
 
+    elif "gemini-2.5-flash-lite-preview-06-17" in model_name:
+        return total_cost_mil(0.1, 0.4)
+
     elif "gemini-2.0-flash-lite" in model_name:
         return total_cost_mil(0.075, 0.3)
 
@@ -943,9 +958,10 @@ def calculate_gemini_cost(use_vertexai: bool, input_tokens: int, output_tokens:
     # Non-Vertex AI pricing
 
     if (
-        "gemini-2.5-pro
-        or "gemini-2.5-pro-
+        model_name == "gemini-2.5-pro"
+        or "gemini-2.5-pro-preview-06-05" in model_name
         or "gemini-2.5-pro-preview-05-06" in model_name
+        or "gemini-2.5-pro-preview-03-25" in model_name
     ):
         # https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview
         if up_to_200k:
@@ -953,10 +969,18 @@ def calculate_gemini_cost(use_vertexai: bool, input_tokens: int, output_tokens:
         else:
             return total_cost_mil(2.5, 15)
 
-    elif "gemini-2.5-flash
+    elif "gemini-2.5-flash" in model_name:
+        # https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-flash
+        return total_cost_mil(0.3, 2.5)
+
+    elif "gemini-2.5-flash-preview-04-17" in model_name or "gemini-2.5-flash-preview-05-20" in model_name:
         # https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-flash
         return total_cost_mil(0.15, 0.6)
 
+    elif "gemini-2.5-flash-lite-preview-06-17" in model_name:
+        # https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-flash-lite
+        return total_cost_mil(0.1, 0.4)
+
     elif "gemini-2.0-flash-lite" in model_name:
         # https://ai.google.dev/gemini-api/docs/pricing#gemini-2.0-flash-lite
         return total_cost_mil(0.075, 0.3)
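For reference, total_cost_mil appears to be a per-million-token helper closing over the request's token counts; a sketch under that assumption, consistent with the calls above. Note also that the substring test for "gemini-2.5-flash" matches the preview model names as well, so it takes precedence over the later preview branches.

    input_tokens, output_tokens = 1_000_000, 100_000  # example counts

    def total_cost_mil(input_price: float, output_price: float) -> float:
        # Prices are USD per million tokens.
        return (input_tokens / 1e6) * input_price + (output_tokens / 1e6) * output_price

    # New gemini-2.5-flash branch: $0.30/M input, $2.50/M output
    # -> 1.0 * 0.30 + 0.1 * 2.50 = $0.55
    assert abs(total_cost_mil(0.3, 2.5) - 0.55) < 1e-9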
autogen/oai/gemini_types.py
CHANGED
@@ -91,7 +91,7 @@ class CaseInSensitiveEnum(str, enum.Enum):
         try:
             # Creating a enum instance based on the value
             # We need to use super() to avoid infinite recursion.
-            unknown_enum_val = super().__new__(cls, value)
+            unknown_enum_val = super(CaseInSensitiveEnum, cls).__new__(cls, value)
             unknown_enum_val._name_ = str(value)  # pylint: disable=protected-access
             unknown_enum_val._value_ = value  # pylint: disable=protected-access
             return unknown_enum_val
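The one-line change swaps the zero-argument super() for the explicit two-argument form. Both normally resolve identically, but zero-argument super() depends on the compiler-provided __class__ closure cell, which can be lost when the function object is reused outside its defining class body; the explicit form removes that dependency. The class's trick of synthesizing pseudo-members for unknown values can be illustrated self-containedly (done via _missing_ and a direct base-class call for brevity; this is not the ag2 code):

    import enum

    class Color(str, enum.Enum):
        RED = "RED"

        @classmethod
        def _missing_(cls, value):
            # Case-insensitive match first, then synthesize a pseudo-member.
            for member in cls:
                if member.value.lower() == str(value).lower():
                    return member
            unknown = str.__new__(cls, value)  # explicit base call, no super() cell needed
            unknown._name_ = str(value)
            unknown._value_ = value
            return unknown

    assert Color("red") is Color.RED
    assert Color("purple").value == "purple"  # pseudo-member for an unknown value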
@@ -141,6 +141,7 @@ class RetrievalConfig(CommonBaseModel):
     """Retrieval config."""
 
     lat_lng: Optional[LatLng] = Field(default=None, description="""Optional. The location of the user.""")
+    language_code: Optional[str] = Field(default=None, description="""The language code of the user.""")
 
 
 class ToolConfig(CommonBaseModel):
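A hypothetical use of the extended model (the import path and the LatLng field names are assumptions, inferred from the surrounding types):

    from autogen.oai.gemini_types import LatLng, RetrievalConfig

    cfg = RetrievalConfig(
        lat_lng=LatLng(latitude=52.52, longitude=13.405),  # assumed field names
        language_code="de",                                # new in 0.9.4
    )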
autogen/oai/oai_models/chat_completion.py
CHANGED
@@ -66,7 +66,7 @@ class ChatCompletion(BaseModel):
     object: Literal["chat.completion"]
     """The object type, which is always `chat.completion`."""
 
-    service_tier: Optional[Literal["auto", "default", "flex"]] = None
+    service_tier: Optional[Literal["auto", "default", "flex", "scale"]] = None
     """The service tier used for processing the request."""
 
     system_fingerprint: Optional[str] = None
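The widened Literal matters for validation: OpenAI responses reporting the newer "scale" service tier would previously fail pydantic validation. A minimal stub demonstrating the behavior (not the full ChatCompletion model):

    from typing import Literal, Optional
    from pydantic import BaseModel

    class ChatCompletionStub(BaseModel):
        service_tier: Optional[Literal["auto", "default", "flex", "scale"]] = None

    ChatCompletionStub(service_tier="scale")  # ok now; the 0.9.2 Literal rejected "scale"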
autogen/tools/experimental/__init__.py
CHANGED
@@ -6,6 +6,7 @@ from .browser_use import BrowserUseTool
 from .crawl4ai import Crawl4AITool
 from .deep_research import DeepResearchTool
 from .duckduckgo import DuckDuckGoSearchTool
+from .firecrawl import FirecrawlTool
 from .google_search import GoogleSearchTool, YoutubeSearchTool
 from .messageplatform import (
     DiscordRetrieveTool,
@@ -18,6 +19,7 @@ from .messageplatform import (
 )
 from .perplexity import PerplexitySearchTool
 from .reliable import ReliableTool, ReliableToolError, SuccessfulExecutionParameters, ToolExecutionDetails
+from .searxng import SearxngSearchTool
 from .tavily import TavilySearchTool
 from .web_search_preview import WebSearchPreviewTool
 from .wikipedia import WikipediaPageLoadTool, WikipediaQueryRunTool
@@ -29,10 +31,12 @@ __all__ = [
     "DiscordRetrieveTool",
     "DiscordSendTool",
     "DuckDuckGoSearchTool",
+    "FirecrawlTool",
     "GoogleSearchTool",
     "PerplexitySearchTool",
     "ReliableTool",
     "ReliableToolError",
+    "SearxngSearchTool",
     "SlackRetrieveRepliesTool",
     "SlackRetrieveTool",
     "SlackSendTool",
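Both new tools are re-exported from the experimental namespace. The constructor arguments below are assumptions for illustration only; check firecrawl_tool.py and searxng_search.py for the actual signatures.

    from autogen.tools.experimental import FirecrawlTool, SearxngSearchTool

    firecrawl = FirecrawlTool(firecrawl_api_key="fc-...")  # hypothetical parameter name
    searxng = SearxngSearchTool()                          # SearXNG instances are typically keyless
    # Like other ag2 tools, instances can then be registered on agents
    # (register_for_llm / register_for_execution).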
autogen/tools/experimental/browser_use/browser_use.py
CHANGED
@@ -78,7 +78,7 @@ class BrowserUseTool(Tool):
     def __init__(  # type: ignore[no-any-unimported]
         self,
         *,
-        llm_config: Union[LLMConfig, dict[str, Any]],
+        llm_config: Optional[Union[LLMConfig, dict[str, Any]]] = None,
         browser: Optional["Browser"] = None,
         agent_kwargs: Optional[dict[str, Any]] = None,
         browser_config: Optional[dict[str, Any]] = None,
@@ -86,17 +86,17 @@ class BrowserUseTool(Tool):
         """Use the browser to perform a task.
 
         Args:
-            llm_config: The LLM configuration.
+            llm_config: The LLM configuration. If None, the current LLMConfig from context is used.
             browser: The browser to use. If defined, browser_config must be None
             agent_kwargs: Additional keyword arguments to pass to the Agent
             browser_config: The browser configuration to use. If defined, browser must be None
         """
+        if llm_config is None:
+            llm_config = LLMConfig.current
         if agent_kwargs is None:
             agent_kwargs = {}
-
         if browser_config is None:
             browser_config = {}
-
         if browser is not None and browser_config:
             raise ValueError(
                 f"Cannot provide both browser and additional keyword parameters: {browser=}, {browser_config=}"
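With llm_config now optional, the tool can pick up the ambient LLMConfig context (LLMConfig.current) instead of requiring an explicit argument; a sketch, assuming a standard OpenAI-style config:

    from autogen import LLMConfig
    from autogen.tools.experimental import BrowserUseTool

    with LLMConfig(api_type="openai", model="gpt-4o"):
        tool = BrowserUseTool()  # llm_config defaults to LLMConfig.current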
@@ -114,18 +114,13 @@ class BrowserUseTool(Tool):
             if browser is None:
                 # set default value for headless
                 headless = browser_config.pop("headless", True)
-
                 browser_config = BrowserConfig(headless=headless, **browser_config)
                 browser = Browser(config=browser_config)
-
             # set default value for generate_gif
             if "generate_gif" not in agent_kwargs:
                 agent_kwargs["generate_gif"] = False
-
             llm = LangChainChatModelFactory.create_base_chat_model(llm_config)
-
             max_steps = agent_kwargs.pop("max_steps", 100)
-
             agent = Agent(
                 task=task,
                 llm=llm,
@@ -133,9 +128,7 @@ class BrowserUseTool(Tool):
                 controller=BrowserUseTool._get_controller(llm_config),
                 **agent_kwargs,
             )
-
             result = await agent.run(max_steps=max_steps)
-
             extracted_content = [
                 ExtractedContent(content=content, url=url)
                 for content, url in zip(result.extracted_content(), result.urls())