ag2 0.9.3__py3-none-any.whl → 0.9.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -1023,6 +1023,38 @@ class GroupChat:
                 mentions[agent.name] = count
         return mentions
 
+    def _run_input_guardrails(
+        self,
+        agent: "ConversableAgent",
+        messages: Optional[list[dict[str, Any]]] = None,
+    ) -> Optional[str]:
+        """Run input guardrails for an agent before the reply is generated.
+        Args:
+            agent (ConversableAgent): The agent whose input guardrails to run.
+            messages (Optional[list[dict[str, Any]]]): The messages to check against the guardrails.
+        """
+        for guardrail in agent.input_guardrails:
+            guardrail_result = guardrail.check(context=messages)
+
+            if guardrail_result.activated:
+                guardrail.target.activate_target(self)
+                return f"{guardrail.activation_message}\nJustification: {guardrail_result.justification}"
+        return None
+
+    def _run_output_guardrails(self, agent: "ConversableAgent", reply: str) -> None:
+        """Run output guardrails for an agent after the reply is generated.
+        Args:
+            agent (ConversableAgent): The agent whose output guardrails to run.
+            reply (str): The reply generated by the agent.
+        """
+        for guardrail in agent.output_guardrails:
+            guardrail_result = guardrail.check(context=reply)
+
+            if guardrail_result.activated:
+                guardrail.target.activate_target(self)
+                return f"{guardrail.activation_message}\nJustification: {guardrail_result.justification}"
+        return None
+
 
 @export_module("autogen")
 class GroupChatManager(ConversableAgent):
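
For orientation, a minimal sketch of the duck-typed interface these two helpers rely on: each guardrail exposes check(context=...) returning a result with .activated and .justification, plus an .activation_message string and a .target with activate_target(groupchat). The class names below are hypothetical; only those attribute and method names appear in the diff.

```python
# Hypothetical guardrail satisfying the interface used by
# _run_input_guardrails/_run_output_guardrails above. Class names are
# illustrative; only the attribute/method names come from the diff.
from dataclasses import dataclass
from typing import Any


@dataclass
class GuardrailResult:
    activated: bool
    justification: str = ""


class BlockSecretsGuardrail:
    activation_message = "Reply blocked: possible credential leak."

    def __init__(self, target: Any) -> None:
        self.target = target  # must expose activate_target(groupchat)

    def check(self, context: Any) -> GuardrailResult:
        text = context if isinstance(context, str) else str(context)
        if "API_KEY" in text:
            return GuardrailResult(True, "Found 'API_KEY' in the content.")
        return GuardrailResult(False)
```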
@@ -1194,8 +1226,17 @@ class GroupChatManager(ConversableAgent):
                 if not silent:
                     iostream = IOStream.get_default()
                     iostream.send(GroupChatRunChatEvent(speaker=speaker, silent=silent))
-                # let the speaker speak
-                reply = speaker.generate_reply(sender=self)
+
+                guardrails_activated = False
+                guardrails_reply = groupchat._run_input_guardrails(speaker, speaker._oai_messages[self])
+
+                if guardrails_reply is not None:
+                    # if a guardrail has been activated, then the next target has been set and the guardrail reply will be sent
+                    guardrails_activated = True
+                    reply = guardrails_reply
+                else:
+                    # let the speaker speak
+                    reply = speaker.generate_reply(sender=self)
             except KeyboardInterrupt:
                 # let the admin agent speak if interrupted
                 if groupchat.admin_name in groupchat.agent_names:
@@ -1215,6 +1256,15 @@ class GroupChatManager(ConversableAgent):
                 termination_reason = "No reply generated"
                 break
 
+            if not guardrails_activated:
+                # if the input guardrails were not activated, and the agent returned a reply
+                guardrails_reply = groupchat._run_output_guardrails(speaker, reply)
+
+                if guardrails_reply is not None:
+                    # if a guardrail has been activated, then the next target has been set and the guardrail reply will be sent
+                    guardrails_activated = True
+                    reply = guardrails_reply
+
             # check for "clear history" phrase in reply and activate clear history function if found
             if (
                 groupchat.enable_clear_history
@@ -1233,7 +1283,11 @@ class GroupChatManager(ConversableAgent):
                 a.previous_cache = None
 
         if termination_reason:
-            iostream.send(TerminationEvent(termination_reason=termination_reason))
+            iostream.send(
+                TerminationEvent(
+                    termination_reason=termination_reason, sender=self, recipient=speaker if speaker else None
+                )
+            )
 
         return True, None
 
@@ -1287,8 +1341,19 @@ class GroupChatManager(ConversableAgent):
             try:
                 # select the next speaker
                 speaker = await groupchat.a_select_speaker(speaker, self)
-                # let the speaker speak
-                reply = await speaker.a_generate_reply(sender=self)
+                if not silent:
+                    iostream.send(GroupChatRunChatEvent(speaker=speaker, silent=silent))
+
+                guardrails_activated = False
+                guardrails_reply = groupchat._run_input_guardrails(speaker, speaker._oai_messages[self])
+
+                if guardrails_reply is not None:
+                    # if a guardrail has been activated, then the next target has been set and the guardrail reply will be sent
+                    guardrails_activated = True
+                    reply = guardrails_reply
+                else:
+                    # let the speaker speak
+                    reply = await speaker.a_generate_reply(sender=self)
             except KeyboardInterrupt:
                 # let the admin agent speak if interrupted
                 if groupchat.admin_name in groupchat.agent_names:
@@ -1308,6 +1373,24 @@ class GroupChatManager(ConversableAgent):
                 termination_reason = "No reply generated"
                 break
 
+            if not guardrails_activated:
+                # if the input guardrails were not activated, and the agent returned a reply
+                guardrails_reply = groupchat._run_output_guardrails(speaker, reply)
+
+                if guardrails_reply is not None:
+                    # if a guardrail has been activated, then the next target has been set and the guardrail reply will be sent
+                    guardrails_activated = True
+                    reply = guardrails_reply
+
+            # check for "clear history" phrase in reply and activate clear history function if found
+            if (
+                groupchat.enable_clear_history
+                and isinstance(reply, dict)
+                and reply["content"]
+                and "CLEAR HISTORY" in reply["content"].upper()
+            ):
+                reply["content"] = self.clear_agents_history(reply, groupchat)
+
             # The speaker sends the message without requesting a reply
             await speaker.a_send(reply, self, request_reply=False, silent=silent)
             message = self.last_message(speaker)
@@ -1317,7 +1400,11 @@ class GroupChatManager(ConversableAgent):
                 a.previous_cache = None
 
         if termination_reason:
-            iostream.send(TerminationEvent(termination_reason=termination_reason))
+            iostream.send(
+                TerminationEvent(
+                    termination_reason=termination_reason, sender=self, recipient=speaker if speaker else None
+                )
+            )
 
         return True, None
 
@@ -12,7 +12,9 @@ from ....tools.experimental import (
     BrowserUseTool,
     Crawl4AITool,
     DuckDuckGoSearchTool,
+    FirecrawlTool,
     PerplexitySearchTool,
+    SearxngSearchTool,
     TavilySearchTool,
 )
 
@@ -28,7 +30,9 @@ class WebSurferAgent(ConversableAgent):
         *,
         llm_config: Optional[Union[LLMConfig, dict[str, Any]]] = None,
         web_tool_llm_config: Optional[Union[LLMConfig, dict[str, Any]]] = None,
-        web_tool: Literal["browser_use", "crawl4ai", "duckduckgo", "perplexity", "tavily"] = "browser_use",
+        web_tool: Literal[
+            "browser_use", "crawl4ai", "duckduckgo", "firecrawl", "perplexity", "tavily", "searxng"
+        ] = "browser_use",
         web_tool_kwargs: Optional[dict[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
@@ -48,12 +52,16 @@ class WebSurferAgent(ConversableAgent):
             self.tool: Tool = BrowserUseTool(llm_config=web_tool_llm_config, **web_tool_kwargs)  # type: ignore[arg-type]
         elif web_tool == "crawl4ai":
             self.tool = Crawl4AITool(llm_config=web_tool_llm_config, **web_tool_kwargs)
+        elif web_tool == "firecrawl":
+            self.tool = FirecrawlTool(llm_config=web_tool_llm_config, **web_tool_kwargs)
         elif web_tool == "perplexity":
             self.tool = PerplexitySearchTool(**web_tool_kwargs)
         elif web_tool == "tavily":
             self.tool = TavilySearchTool(llm_config=web_tool_llm_config, **web_tool_kwargs)
         elif web_tool == "duckduckgo":
             self.tool = DuckDuckGoSearchTool(**web_tool_kwargs)
+        elif web_tool == "searxng":
+            self.tool = SearxngSearchTool(**web_tool_kwargs)
         else:
             raise ValueError(f"Unsupported {web_tool=}.")
 
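A hedged usage sketch for the two new options; the WebSurferAgent constructor arguments match the diff, but the import path and the keys inside web_tool_kwargs are assumptions.

```python
from autogen.agents.experimental import WebSurferAgent  # import path assumed

surfer = WebSurferAgent(
    name="surfer",
    llm_config=llm_config,  # assumed to be defined elsewhere
    web_tool="firecrawl",   # or "searxng"
    web_tool_kwargs={"firecrawl_api_key": "fc-..."},  # hypothetical kwarg
)
```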
@@ -669,16 +669,22 @@ class TerminationEvent(BaseEvent):
     """When a workflow termination condition is met"""
 
     termination_reason: str
+    sender: str
+    recipient: Optional[str] = None
 
     def __init__(
         self,
         *,
         uuid: Optional[UUID] = None,
+        sender: Union["Agent", str],
+        recipient: Optional[Union["Agent", str]] = None,
         termination_reason: str,
     ):
         super().__init__(
             uuid=uuid,
             termination_reason=termination_reason,
+            sender=sender.name if hasattr(sender, "name") else sender,
+            recipient=recipient.name if hasattr(recipient, "name") else recipient if recipient else None,
         )
 
     def print(self, f: Optional[Callable[..., Any]] = None) -> None:
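
Per the new __init__, sender and recipient accept either an Agent (its .name is used) or a plain string; a quick sketch, with the import path assumed:

```python
from autogen.events.agent_events import TerminationEvent  # module path assumed

event = TerminationEvent(
    termination_reason="Maximum rounds reached",
    sender="chat_manager",  # an Agent instance would also work
    recipient="assistant",  # optional
)
```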
autogen/events/helpers.py CHANGED
@@ -13,12 +13,15 @@ logger = logging.getLogger(__name__)
 def deprecated_by(
     new_class: type[BaseModel],
     param_mapping: dict[str, str] = None,
+    default_params: dict[str, any] = None,
 ) -> Callable[[type[BaseModel]], Callable[..., BaseModel]]:
     param_mapping = param_mapping or {}
+    default_params = default_params or {}
 
     def decorator(
         old_class: type[BaseModel],
         param_mapping: dict[str, str] = param_mapping,
+        default_params: dict[str, any] = default_params,
     ) -> Callable[..., BaseModel]:
         @wraps(old_class)
         def wrapper(*args, **kwargs) -> BaseModel:
@@ -28,6 +31,11 @@ def deprecated_by(
             # Translate old parameters to new parameters
             new_kwargs = {param_mapping.get(k, k): v for k, v in kwargs.items()}
 
+            # Add default parameters if not already present
+            for key, value in default_params.items():
+                if key not in new_kwargs:
+                    new_kwargs[key] = value
+
             # Pass the translated parameters to the new class
             return new_class(*args, **new_kwargs)
 
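The effect of default_params, shown against the TerminationMessage deprecation further below: a default is injected only when the caller did not supply (or map to) that key.

```python
# With @deprecated_by(TerminationEvent, default_params={"sender": "system"}),
# an old-style call that omits sender...
msg = TerminationMessage(termination_reason="No reply generated")
# ...constructs the same event as:
evt = TerminationEvent(termination_reason="No reply generated", sender="system")
```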
autogen/mcp/helpers.py ADDED
@@ -0,0 +1,45 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+import asyncio
+import os
+import signal
+from asyncio.subprocess import PIPE, Process, create_subprocess_exec
+from contextlib import asynccontextmanager
+from typing import AsyncGenerator, Dict, Optional
+
+
+@asynccontextmanager
+async def run_streamable_http_client(
+    *, mcp_server_path: str, env_vars: Optional[Dict[str, str]] = None, startup_wait_secs: float = 5.0
+) -> AsyncGenerator[Process, None]:
+    """
+    Async context manager to run a Python subprocess for streamable-http with custom env vars.
+
+    Args:
+        mcp_server_path: Path to the Python script to run.
+        env_vars: Environment variables to export to the subprocess.
+        startup_wait_secs: Time to wait for the server to start (in seconds).
+    Yields:
+        An asyncio.subprocess.Process object.
+    """
+    env = os.environ.copy()
+    if env_vars:
+        env.update(env_vars)
+
+    process = await create_subprocess_exec(
+        "python", mcp_server_path, "streamable-http", env=env, stdout=PIPE, stderr=PIPE
+    )
+
+    # Optional startup delay to let the server initialize
+    await asyncio.sleep(startup_wait_secs)
+
+    try:
+        yield process
+    finally:
+        if process.returncode is None:
+            process.send_signal(signal.SIGINT)
+            try:
+                await asyncio.wait_for(process.wait(), timeout=5.0)
+            except asyncio.TimeoutError:
+                process.kill()
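
A usage sketch for the new helper; the server script path and environment variable name are placeholders.

```python
import asyncio

from autogen.mcp.helpers import run_streamable_http_client


async def main() -> None:
    async with run_streamable_http_client(
        mcp_server_path="mcp_server.py",  # placeholder script path
        env_vars={"MY_API_KEY": "..."},   # hypothetical variable
    ) as process:
        print(f"server started, pid={process.pid}")
        # connect an MCP client to the server here; on exit the context
        # manager sends SIGINT, then kills the process after 5 seconds


asyncio.run(main())
```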
@@ -117,9 +117,8 @@ class MCPProxy:
 
         return q_params, path_params, body, security
 
-    @property
-    def mcp(self) -> "FastMCP":
-        mcp = FastMCP(title=self._title)
+    def get_mcp(self, **settings: Any) -> "FastMCP":
+        mcp = FastMCP(title=self._title, **settings)
 
         for func in self._registered_funcs:
             try:
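
The mcp property becomes an explicit factory so FastMCP settings can be forwarded. Which settings FastMCP accepts depends on the installed version, so the kwargs here are illustrative only:

```python
mcp_server = proxy.get_mcp(port=8000)       # 'proxy': an MCPProxy instance; kwarg assumed
mcp_server.run(transport="streamable-http")  # transport name assumed
```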
@@ -647,7 +647,7 @@ class UsingAutoReplyMessage(BaseMessage):
         f(colored("\n>>>>>>>> USING AUTO REPLY...", "red"), flush=True)
 
 
-@deprecated_by(TerminationEvent)
+@deprecated_by(TerminationEvent, default_params={"sender": "system"})
 @wrap_message
 class TerminationMessage(BaseMessage):
     """When a workflow termination condition is met"""
autogen/oai/gemini.py CHANGED
@@ -54,7 +54,6 @@ from io import BytesIO
 from typing import Any, Literal, Optional, Type, Union
 
 import requests
-from packaging import version
 from pydantic import BaseModel, Field
 
 from ..import_utils import optional_import_block, require_optional_import
@@ -332,6 +331,12 @@ class GeminiClient:
                 recitation_part = Part(text="Unsuccessful Finish Reason: RECITATION")
                 parts = [recitation_part]
                 error_finish_reason = "content_filter"  # As per available finish_reason in Choice
+            elif not response.candidates[0].content or not response.candidates[0].content.parts:
+                error_part = Part(
+                    text=f"Unsuccessful Finish Reason: ({str(response.candidates[0].finish_reason)}) NO CONTENT RETURNED"
+                )
+                parts = [error_part]
+                error_finish_reason = "content_filter"  # No other option in Choice in chat_completion.py
             else:
                 parts = response.candidates[0].content.parts
         elif isinstance(response, VertexAIGenerationResponse):  # or hasattr(response, "candidates"):
@@ -570,25 +575,19 @@
 
 
             if part_type == "text":
                 rst.append(
-                    VertexAIContent(parts=parts, role=role)
-                    if self.use_vertexai
-                    else rst.append(Content(parts=parts, role=role))
+                    VertexAIContent(parts=parts, role=role) if self.use_vertexai else Content(parts=parts, role=role)
                 )
-            elif part_type == "tool":
-                # Function responses should be assigned "model" role to keep them separate from function calls
-                role = "function" if version.parse(genai.__version__) < version.parse("1.4.0") else "model"
+            elif part_type == "tool_call":
+                # Function calls should be from the model/assistant
+                role = "model"
                 rst.append(
-                    VertexAIContent(parts=parts, role=role)
-                    if self.use_vertexai
-                    else rst.append(Content(parts=parts, role=role))
+                    VertexAIContent(parts=parts, role=role) if self.use_vertexai else Content(parts=parts, role=role)
                 )
-            elif part_type == "tool_call":
-                # Function calls should be assigned "user" role
-                role = "function" if version.parse(genai.__version__) < version.parse("1.4.0") else "user"
+            elif part_type == "tool":
+                # Function responses should be from the user
+                role = "user"
                 rst.append(
-                    VertexAIContent(parts=parts, role=role)
-                    if self.use_vertexai
-                    else rst.append(Content(parts=parts, role=role))
+                    VertexAIContent(parts=parts, role=role) if self.use_vertexai else Content(parts=parts, role=role)
                 )
             elif part_type == "image":
                 # Image has multiple parts, some can be text and some can be image based
@@ -608,14 +607,14 @@
                 rst.append(
                     VertexAIContent(parts=text_parts, role=role)
                     if self.use_vertexai
-                    else rst.append(Content(parts=text_parts, role=role))
+                    else Content(parts=text_parts, role=role)
                 )
 
                 if len(image_parts) > 0:
                     rst.append(
                         VertexAIContent(parts=image_parts, role=role)
                         if self.use_vertexai
-                        else rst.append(Content(parts=image_parts, role=role))
+                        else Content(parts=image_parts, role=role)
                     )
 
         if len(rst) != 0 and rst[-1] is None:
@@ -908,18 +907,25 @@ def calculate_gemini_cost(use_vertexai: bool, input_tokens: int, output_tokens:
     # https://cloud.google.com/vertex-ai/generative-ai/pricing#vertex-ai-pricing
 
     if (
-        "gemini-2.5-pro-preview-03-25" in model_name
-        or "gemini-2.5-pro-exp-03-25" in model_name
+        model_name == "gemini-2.5-pro"
+        or "gemini-2.5-pro-preview-06-05" in model_name
         or "gemini-2.5-pro-preview-05-06" in model_name
+        or "gemini-2.5-pro-preview-03-25" in model_name
     ):
         if up_to_200k:
             return total_cost_mil(1.25, 10)
         else:
             return total_cost_mil(2.5, 15)
 
-    elif "gemini-2.5-flash-preview-04-17" in model_name:
+    elif "gemini-2.5-flash" in model_name:
+        return total_cost_mil(0.3, 2.5)
+
+    elif "gemini-2.5-flash-preview-04-17" in model_name or "gemini-2.5-flash-preview-05-20" in model_name:
         return total_cost_mil(0.15, 0.6)  # NON-THINKING OUTPUT PRICE, $3 FOR THINKING!
 
+    elif "gemini-2.5-flash-lite-preview-06-17" in model_name:
+        return total_cost_mil(0.1, 0.4)
+
     elif "gemini-2.0-flash-lite" in model_name:
         return total_cost_mil(0.075, 0.3)
 
@@ -952,9 +958,10 @@ def calculate_gemini_cost(use_vertexai: bool, input_tokens: int, output_tokens:
     # Non-Vertex AI pricing
 
     if (
-        "gemini-2.5-pro-preview-03-25" in model_name
-        or "gemini-2.5-pro-exp-03-25" in model_name
+        model_name == "gemini-2.5-pro"
+        or "gemini-2.5-pro-preview-06-05" in model_name
         or "gemini-2.5-pro-preview-05-06" in model_name
+        or "gemini-2.5-pro-preview-03-25" in model_name
     ):
         # https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview
         if up_to_200k:
@@ -962,10 +969,18 @@ def calculate_gemini_cost(use_vertexai: bool, input_tokens: int, output_tokens:
         else:
             return total_cost_mil(2.5, 15)
 
-    elif "gemini-2.5-flash-preview-04-17" in model_name:
+    elif "gemini-2.5-flash" in model_name:
+        # https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-flash
+        return total_cost_mil(0.3, 2.5)
+
+    elif "gemini-2.5-flash-preview-04-17" in model_name or "gemini-2.5-flash-preview-05-20" in model_name:
         # https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-flash
         return total_cost_mil(0.15, 0.6)
 
+    elif "gemini-2.5-flash-lite-preview-06-17" in model_name:
+        # https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-flash-lite
+        return total_cost_mil(0.1, 0.4)
+
     elif "gemini-2.0-flash-lite" in model_name:
         # https://ai.google.dev/gemini-api/docs/pricing#gemini-2.0-flash-lite
         return total_cost_mil(0.075, 0.3)
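
Assuming total_cost_mil(input_rate, output_rate) prices both directions per million tokens, the new gemini-2.5-flash branch works out as below. Note that the broad "gemini-2.5-flash" in model_name check also matches the preview and flash-lite-preview names, so the later, more specific branches are unreachable as ordered.

```python
# Worked example for the gemini-2.5-flash branch, assuming per-million rates:
input_tokens, output_tokens = 50_000, 10_000
cost = (0.3 * input_tokens + 2.5 * output_tokens) / 1_000_000
print(cost)  # 0.04 -> $0.015 for input + $0.025 for output
```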
@@ -91,7 +91,7 @@ class CaseInSensitiveEnum(str, enum.Enum):
         try:
             # Creating a enum instance based on the value
             # We need to use super() to avoid infinite recursion.
-            unknown_enum_val = super().__new__(cls, value)
+            unknown_enum_val = super(CaseInSensitiveEnum, cls).__new__(cls, value)
             unknown_enum_val._name_ = str(value)  # pylint: disable=protected-access
             unknown_enum_val._value_ = value  # pylint: disable=protected-access
             return unknown_enum_val
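
What the patched branch does, sketched with a hypothetical enum: an unrecognized value is materialized as a pseudo-member instead of raising ValueError.

```python
class Fruit(CaseInSensitiveEnum):  # hypothetical subclass
    APPLE = "apple"

unknown = Fruit("dragonfruit")  # no ValueError raised
assert unknown.value == "dragonfruit"
assert unknown.name == "dragonfruit"
```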
@@ -6,6 +6,7 @@ from .browser_use import BrowserUseTool
 from .crawl4ai import Crawl4AITool
 from .deep_research import DeepResearchTool
 from .duckduckgo import DuckDuckGoSearchTool
+from .firecrawl import FirecrawlTool
 from .google_search import GoogleSearchTool, YoutubeSearchTool
 from .messageplatform import (
     DiscordRetrieveTool,
@@ -18,6 +19,7 @@ from .messageplatform import (
 )
 from .perplexity import PerplexitySearchTool
 from .reliable import ReliableTool, ReliableToolError, SuccessfulExecutionParameters, ToolExecutionDetails
+from .searxng import SearxngSearchTool
 from .tavily import TavilySearchTool
 from .web_search_preview import WebSearchPreviewTool
 from .wikipedia import WikipediaPageLoadTool, WikipediaQueryRunTool
@@ -29,10 +31,12 @@ __all__ = [
     "DiscordRetrieveTool",
     "DiscordSendTool",
     "DuckDuckGoSearchTool",
+    "FirecrawlTool",
     "GoogleSearchTool",
     "PerplexitySearchTool",
     "ReliableTool",
     "ReliableToolError",
+    "SearxngSearchTool",
     "SlackRetrieveRepliesTool",
     "SlackRetrieveTool",
     "SlackSendTool",
@@ -78,7 +78,7 @@ class BrowserUseTool(Tool):
     def __init__(  # type: ignore[no-any-unimported]
         self,
         *,
-        llm_config: Union[LLMConfig, dict[str, Any]],
+        llm_config: Optional[Union[LLMConfig, dict[str, Any]]] = None,
         browser: Optional["Browser"] = None,
         agent_kwargs: Optional[dict[str, Any]] = None,
         browser_config: Optional[dict[str, Any]] = None,
@@ -86,17 +86,17 @@ class BrowserUseTool(Tool):
         """Use the browser to perform a task.
 
         Args:
-            llm_config: The LLM configuration.
+            llm_config: The LLM configuration. If None, the current LLMConfig from context is used.
             browser: The browser to use. If defined, browser_config must be None
             agent_kwargs: Additional keyword arguments to pass to the Agent
             browser_config: The browser configuration to use. If defined, browser must be None
         """
+        if llm_config is None:
+            llm_config = LLMConfig.current
         if agent_kwargs is None:
             agent_kwargs = {}
-
         if browser_config is None:
             browser_config = {}
-
         if browser is not None and browser_config:
             raise ValueError(
                 f"Cannot provide both browser and additional keyword parameters: {browser=}, {browser_config=}"
@@ -114,18 +114,13 @@ class BrowserUseTool(Tool):
             if browser is None:
                 # set default value for headless
                 headless = browser_config.pop("headless", True)
-
                 browser_config = BrowserConfig(headless=headless, **browser_config)
                 browser = Browser(config=browser_config)
-
             # set default value for generate_gif
             if "generate_gif" not in agent_kwargs:
                 agent_kwargs["generate_gif"] = False
-
             llm = LangChainChatModelFactory.create_base_chat_model(llm_config)
-
             max_steps = agent_kwargs.pop("max_steps", 100)
-
             agent = Agent(
                 task=task,
                 llm=llm,
@@ -133,9 +128,7 @@ class BrowserUseTool(Tool):
                 controller=BrowserUseTool._get_controller(llm_config),
                 **agent_kwargs,
             )
-
             result = await agent.run(max_steps=max_steps)
-
             extracted_content = [
                 ExtractedContent(content=content, url=url)
                 for content, url in zip(result.extracted_content(), result.urls())
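
With the relaxed signature, llm_config can come from the ambient LLMConfig context rather than being passed explicitly; a sketch, with placeholder config values:

```python
from autogen import LLMConfig
from autogen.tools.experimental import BrowserUseTool

with LLMConfig(api_type="openai", model="gpt-4o-mini"):  # placeholder config
    tool = BrowserUseTool()  # falls back to LLMConfig.current
```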
@@ -0,0 +1,7 @@
+# Copyright (c) 2023 - 2025, AG2ai, Inc., AG2ai open-source projects maintainers and core contributors
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from .firecrawl_tool import FirecrawlTool
+
+__all__ = ["FirecrawlTool"]
+ __all__ = ["FirecrawlTool"]