agno 2.0.8__py3-none-any.whl → 2.0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. agno/agent/agent.py +2 -2
  2. agno/db/base.py +14 -0
  3. agno/db/dynamo/dynamo.py +107 -27
  4. agno/db/firestore/firestore.py +109 -33
  5. agno/db/gcs_json/gcs_json_db.py +100 -20
  6. agno/db/in_memory/in_memory_db.py +95 -20
  7. agno/db/json/json_db.py +101 -21
  8. agno/db/migrations/v1_to_v2.py +181 -35
  9. agno/db/mongo/mongo.py +251 -26
  10. agno/db/mysql/mysql.py +307 -6
  11. agno/db/postgres/postgres.py +279 -33
  12. agno/db/redis/redis.py +99 -22
  13. agno/db/singlestore/singlestore.py +319 -38
  14. agno/db/sqlite/sqlite.py +339 -23
  15. agno/models/anthropic/claude.py +0 -20
  16. agno/models/huggingface/huggingface.py +2 -1
  17. agno/models/ollama/chat.py +28 -2
  18. agno/models/openai/chat.py +7 -0
  19. agno/models/openai/responses.py +8 -8
  20. agno/os/interfaces/base.py +2 -0
  21. agno/os/interfaces/slack/router.py +50 -10
  22. agno/os/interfaces/slack/slack.py +6 -4
  23. agno/os/interfaces/whatsapp/router.py +7 -4
  24. agno/os/router.py +18 -0
  25. agno/os/utils.py +2 -2
  26. agno/reasoning/azure_ai_foundry.py +2 -2
  27. agno/reasoning/deepseek.py +2 -2
  28. agno/reasoning/groq.py +2 -2
  29. agno/reasoning/ollama.py +2 -2
  30. agno/reasoning/openai.py +2 -2
  31. agno/run/base.py +15 -2
  32. agno/team/team.py +0 -7
  33. agno/tools/mcp_toolbox.py +284 -0
  34. agno/tools/scrapegraph.py +58 -31
  35. agno/tools/whatsapp.py +1 -1
  36. agno/utils/print_response/agent.py +2 -2
  37. agno/utils/print_response/team.py +6 -6
  38. agno/utils/reasoning.py +22 -1
  39. agno/utils/string.py +9 -0
  40. agno/workflow/workflow.py +0 -1
  41. {agno-2.0.8.dist-info → agno-2.0.9.dist-info}/METADATA +4 -1
  42. {agno-2.0.8.dist-info → agno-2.0.9.dist-info}/RECORD +45 -44
  43. {agno-2.0.8.dist-info → agno-2.0.9.dist-info}/WHEEL +0 -0
  44. {agno-2.0.8.dist-info → agno-2.0.9.dist-info}/licenses/LICENSE +0 -0
  45. {agno-2.0.8.dist-info → agno-2.0.9.dist-info}/top_level.txt +0 -0
agno/tools/mcp_toolbox.py ADDED
@@ -0,0 +1,284 @@
+ from typing import Any, Callable, Dict, List, Literal, Optional, Union
+ from warnings import warn
+
+ from agno.tools.function import Function
+ from agno.tools.mcp import MCPTools
+ from agno.utils.log import logger
+
+ try:
+     from toolbox_core import ToolboxClient  # type: ignore
+ except ImportError:
+     raise ImportError("`toolbox_core` not installed. Please install using `pip install -U toolbox-core`.")
+
+
+ class MCPToolsMeta(type):
+     """Metaclass for MCPTools to ensure proper initialization with AgentOS"""
+
+     @property
+     def __name__(cls):
+         return "MCPTools"
+
+
+ class MCPToolbox(MCPTools, metaclass=MCPToolsMeta):
+     """
+     A toolkit that combines MCPTools server connectivity with MCP Toolbox for Databases client (toolbox-core).
+
+     MCPToolbox connects to an MCP Toolbox server and registers all available tools, then uses
+     toolbox-core to filter those tools by toolset or tool name. This enables agents to
+     receive only the specific tools they need while maintaining full MCP execution capabilities.
+     """
+
+     def __init__(
+         self,
+         url: str,
+         toolsets: Optional[List[str]] = None,
+         tool_name: Optional[str] = None,
+         headers: Optional[Dict[str, Any]] = None,
+         transport: Literal["stdio", "sse", "streamable-http"] = "streamable-http",
+         **kwargs,
+     ):
+         """Initialize MCPToolbox with filtering capabilities.
+
+         Args:
+             url (str): Base URL for the toolbox service.
+             toolsets (Optional[List[str]], optional): List of toolset names to filter tools by. Defaults to None.
+             tool_name (Optional[str], optional): Single tool name to load. Defaults to None.
+             headers (Optional[Dict[str, Any]], optional): Headers for toolbox-core client requests. Defaults to None.
+             transport (Literal["stdio", "sse", "streamable-http"], optional): MCP transport protocol. Defaults to "streamable-http".
+
+         """
+
+         # Ensure the URL ends in "/mcp" as expected
+         if not url.endswith("/mcp"):
+             url = url + "/mcp"
+
+         super().__init__(url=url, transport=transport, **kwargs)
+
+         self.name = "toolbox_client"
+         self.toolbox_url = url
+         self.toolsets = toolsets
+         self.tool_name = tool_name
+         self.headers = headers
+         self._core_client_initialized = False
+
+         # Validate that only one of toolsets or tool_name is provided
+         filter_params = [toolsets, tool_name]
+         non_none_params = [p for p in filter_params if p is not None]
+         if len(non_none_params) > 1:
+             raise ValueError("Only one of toolsets or tool_name can be specified")
+
+     async def connect(self):
+         """Initialize MCPToolbox instance and connect to the MCP server."""
+         # First, connect to MCP server and load all available tools
+         await super().connect()
+
+         if self._core_client_initialized:
+             return
+
+         # Then, connect to the ToolboxClient and filter tools based on toolsets or tool_name
+         await self._connect_toolbox_client()
+
+     async def _connect_toolbox_client(self):
+         try:
+             if self.toolsets is not None or self.tool_name is not None:
+                 self.__core_client = ToolboxClient(
+                     url=self.toolbox_url,
+                     client_headers=self.headers,
+                 )
+                 self._core_client_initialized = True
+
+                 if self.toolsets is not None:
+                     # Load multiple toolsets
+                     all_functions = await self.load_multiple_toolsets(toolset_names=self.toolsets)
+                     # Replace functions dict with filtered subset
+                     filtered_functions = {func.name: func for func in all_functions}
+                     self.functions = filtered_functions
+                 elif self.tool_name is not None:
+                     tool = await self.load_tool(tool_name=self.tool_name)
+                     # Replace functions dict with just this single tool
+                     self.functions = {tool.name: tool}
+         except Exception as e:
+             raise RuntimeError(f"Failed to connect to ToolboxClient: {e}") from e
+
+     def _handle_auth_params(
+         self,
+         auth_token_getters: dict[str, Callable[[], str]] = {},
+         auth_tokens: Optional[dict[str, Callable[[], str]]] = None,
+         auth_headers: Optional[dict[str, Callable[[], str]]] = None,
+     ):
+         """handle authentication parameters for toolbox-core client"""
+         if auth_tokens:
+             if auth_token_getters:
+                 warn(
+                     "Both `auth_token_getters` and `auth_tokens` are provided. `auth_tokens` is deprecated, and `auth_token_getters` will be used.",
+                     DeprecationWarning,
+                 )
+             else:
+                 warn(
+                     "Argument `auth_tokens` is deprecated. Use `auth_token_getters` instead.",
+                     DeprecationWarning,
+                 )
+                 auth_token_getters = auth_tokens
+
+         if auth_headers:
+             if auth_token_getters:
+                 warn(
+                     "Both `auth_token_getters` and `auth_headers` are provided. `auth_headers` is deprecated, and `auth_token_getters` will be used.",
+                     DeprecationWarning,
+                 )
+             else:
+                 warn(
+                     "Argument `auth_headers` is deprecated. Use `auth_token_getters` instead.",
+                     DeprecationWarning,
+                 )
+                 auth_token_getters = auth_headers
+         return auth_token_getters
+
+     async def load_tool(
+         self,
+         tool_name: str,
+         auth_token_getters: dict[str, Callable[[], str]] = {},
+         auth_tokens: Optional[dict[str, Callable[[], str]]] = None,
+         auth_headers: Optional[dict[str, Callable[[], str]]] = None,
+         bound_params: dict[str, Union[Any, Callable[[], Any]]] = {},
+     ) -> Function:
+         """Loads the tool with the given tool name from the Toolbox service.
+
+         Args:
+             tool_name (str): The name of the tool to load.
+             auth_token_getters (dict[str, Callable[[], str]], optional): A mapping of authentication source names to functions that retrieve ID tokens. Defaults to {}.
+             auth_tokens (Optional[dict[str, Callable[[], str]]], optional): Deprecated. Use `auth_token_getters` instead.
+             auth_headers (Optional[dict[str, Callable[[], str]]], optional): Deprecated. Use `auth_token_getters` instead.
+             bound_params (dict[str, Union[Any, Callable[[], Any]]], optional): A mapping of parameter names to their bound values. Defaults to {}.
+
+         Raises:
+             RuntimeError: If the tool is not found in the MCP functions registry.
+
+         Returns:
+             Function: The loaded tool function.
+         """
+         auth_token_getters = self._handle_auth_params(
+             auth_token_getters=auth_token_getters,
+             auth_tokens=auth_tokens,
+             auth_headers=auth_headers,
+         )
+
+         core_sync_tool = await self.__core_client.load_tool(
+             name=tool_name,
+             auth_token_getters=auth_token_getters,
+             bound_params=bound_params,
+         )
+         # Return the Function object from our MCP functions registry
+         if core_sync_tool._name in self.functions:
+             return self.functions[core_sync_tool._name]
+         else:
+             raise RuntimeError(f"Tool '{tool_name}' was not found in MCP functions registry")
+
+     async def load_toolset(
+         self,
+         toolset_name: Optional[str] = None,
+         auth_token_getters: dict[str, Callable[[], str]] = {},
+         auth_tokens: Optional[dict[str, Callable[[], str]]] = None,
+         auth_headers: Optional[dict[str, Callable[[], str]]] = None,
+         bound_params: dict[str, Union[Any, Callable[[], Any]]] = {},
+         strict: bool = False,
+     ) -> List[Function]:
+         """Loads tools from the configured toolset.
+
+         Args:
+             toolset_name (Optional[str], optional): The name of the toolset to load. Defaults to None.
+             auth_token_getters (dict[str, Callable[[], str]], optional): A mapping of authentication source names to functions that retrieve ID tokens. Defaults to {}.
+             auth_tokens (Optional[dict[str, Callable[[], str]]], optional): Deprecated. Use `auth_token_getters` instead.
+             auth_headers (Optional[dict[str, Callable[[], str]]], optional): Deprecated. Use `auth_token_getters` instead.
+             bound_params (dict[str, Union[Any, Callable[[], Any]]], optional): A mapping of parameter names to their bound values. Defaults to {}.
+             strict (bool, optional): If True, raises an error if *any* loaded tool instance fails
+                 to utilize all of the given parameters or auth tokens. (if any
+                 provided). If False (default), raises an error only if a
+                 user-provided parameter or auth token cannot be applied to *any*
+                 loaded tool across the set.
+
+         Returns:
+             List[Function]: A list of all tools loaded from the Toolbox.
+         """
+         auth_token_getters = self._handle_auth_params(
+             auth_token_getters=auth_token_getters,
+             auth_tokens=auth_tokens,
+             auth_headers=auth_headers,
+         )
+
+         core_sync_tools = await self.__core_client.load_toolset(
+             name=toolset_name,
+             auth_token_getters=auth_token_getters,
+             bound_params=bound_params,
+             strict=strict,
+         )
+
+         tools = []
+         for core_sync_tool in core_sync_tools:
+             if core_sync_tool._name in self.functions:
+                 tools.append(self.functions[core_sync_tool._name])
+             else:
+                 logger.debug(f"Tool '{core_sync_tool._name}' from toolset '{toolset_name}' not available in MCP server")
+         return tools
+
+     async def load_multiple_toolsets(
+         self,
+         toolset_names: List[str],
+         auth_token_getters: dict[str, Callable[[], str]] = {},
+         bound_params: dict[str, Union[Any, Callable[[], Any]]] = {},
+         strict: bool = False,
+     ) -> List[Function]:
+         """Load tools from multiple toolsets.
+
+         Args:
+             toolset_names (List[str]): A list of toolset names to load.
+             auth_token_getters (dict[str, Callable[[], str]], optional): A mapping of authentication source names to functions that retrieve ID tokens. Defaults to {}.
+             bound_params (dict[str, Union[Any, Callable[[], Any]]], optional): A mapping of parameter names to their bound values. Defaults to {}.
+             strict (bool, optional): If True, raises an error if *any* loaded tool instance fails to utilize all of the given parameters or auth tokens. Defaults to False.
+
+         Returns:
+             List[Function]: A list of all tools loaded from the specified toolsets.
+         """
+         all_tools = []
+         for toolset_name in toolset_names:
+             tools = await self.load_toolset(
+                 toolset_name=toolset_name,
+                 auth_token_getters=auth_token_getters,
+                 bound_params=bound_params,
+                 strict=strict,
+             )
+             all_tools.extend(tools)
+         return all_tools
+
+     async def close(self):
+         """Close the underlying asynchronous client."""
+         if self._core_client_initialized and hasattr(self, "_MCPToolbox__core_client"):
+             await self.__core_client.close()
+         await super().close()
+
+     async def load_toolset_safe(self, toolset_name: str) -> List[str]:
+         """Safely load a toolset and return tool names."""
+         try:
+             tools = await self.load_toolset(toolset_name)
+             return [tool.name for tool in tools]
+         except Exception as e:
+             raise RuntimeError(f"Failed to load toolset '{toolset_name}': {e}") from e
+
+     def get_client(self) -> ToolboxClient:
+         """Get the underlying ToolboxClient."""
+         if not self._core_client_initialized:
+             raise RuntimeError("ToolboxClient not initialized. Call connect() first.")
+         return self.__core_client
+
+     async def __aenter__(self):
+         """Initialize the direct toolbox client."""
+         await super().__aenter__()
+         await self.connect()
+         return self
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb):
+         """Clean up the toolbox client."""
+         # Close ToolboxClient first, then MCP client
+         if self._core_client_initialized and hasattr(self, "_MCPToolbox__core_client"):
+             await self.__core_client.close()
+         await super().__aexit__(exc_type, exc_val, exc_tb)
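
For context, a minimal usage sketch of the new MCPToolbox toolkit (not part of the diff; the server URL and the "hotel-tools" toolset name are placeholders):

import asyncio

from agno.tools.mcp_toolbox import MCPToolbox


async def main():
    # "/mcp" is appended to the URL automatically when missing
    toolbox = MCPToolbox(url="http://127.0.0.1:5000", toolsets=["hotel-tools"])
    await toolbox.connect()  # connects to the MCP server, then filters self.functions via toolbox-core
    try:
        print(list(toolbox.functions))  # only the tools from the requested toolset remain registered
    finally:
        await toolbox.close()


asyncio.run(main())

Passing both toolsets and tool_name raises a ValueError; passing neither skips the toolbox-core client and keeps every tool registered by the MCP server. The filtered toolkit can then be handed to an agent like any other Agno toolkit.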
agno/tools/scrapegraph.py CHANGED
@@ -3,6 +3,7 @@ from os import getenv
  from typing import Any, List, Optional
  
  from agno.tools import Toolkit
+ from agno.utils.log import log_debug, log_error
  
  try:
      from scrapegraph_py import Client
@@ -23,11 +24,14 @@ class ScrapeGraphTools(Toolkit):
          enable_crawl: bool = False,
          enable_searchscraper: bool = False,
          enable_agentic_crawler: bool = False,
+         enable_scrape: bool = False,
+         render_heavy_js: bool = False,
          all: bool = False,
          **kwargs,
      ):
          self.api_key: Optional[str] = api_key or getenv("SGAI_API_KEY")
          self.client = Client(api_key=self.api_key)
+         self.render_heavy_js = render_heavy_js
  
          # Start with smartscraper by default
          # Only enable markdownify if smartscraper is False
@@ -45,6 +49,8 @@ class ScrapeGraphTools(Toolkit):
              tools.append(self.searchscraper)
          if enable_agentic_crawler or all:
              tools.append(self.agentic_crawler)
+         if enable_scrape or all:
+             tools.append(self.scrape)
  
          super().__init__(name="scrapegraph_tools", tools=tools, **kwargs)
  
@@ -57,10 +63,13 @@ class ScrapeGraphTools(Toolkit):
              The structured data extracted from the webpage
          """
          try:
+             log_debug(f"ScrapeGraph smartscraper request for URL: {url}")
              response = self.client.smartscraper(website_url=url, user_prompt=prompt)
              return json.dumps(response["result"])
          except Exception as e:
-             return json.dumps({"error": str(e)})
+             error_msg = f"Smartscraper failed: {str(e)}"
+             log_error(error_msg)
+             return f"Error: {error_msg}"
  
      def markdownify(self, url: str) -> str:
          """Convert a webpage to markdown format.
@@ -70,10 +79,13 @@ class ScrapeGraphTools(Toolkit):
              The markdown version of the webpage
          """
          try:
+             log_debug(f"ScrapeGraph markdownify request for URL: {url}")
              response = self.client.markdownify(website_url=url)
              return response["result"]
          except Exception as e:
-             return f"Error converting to markdown: {str(e)}"
+             error_msg = f"Markdownify failed: {str(e)}"
+             log_error(error_msg)
+             return f"Error: {error_msg}"
  
      def crawl(
          self,
@@ -100,10 +112,11 @@ class ScrapeGraphTools(Toolkit):
              The structured data extracted from the website
          """
          try:
+             log_debug(f"ScrapeGraph crawl request for URL: {url}")
              response = self.client.crawl(
                  url=url,
                  prompt=prompt,
-                 schema=schema,
+                 data_schema=schema,
                  cache_website=cache_website,
                  depth=depth,
                  max_pages=max_pages,
@@ -112,7 +125,9 @@ class ScrapeGraphTools(Toolkit):
              )
              return json.dumps(response, indent=2)
          except Exception as e:
-             return json.dumps({"error": str(e)})
+             error_msg = f"Crawl failed: {str(e)}"
+             log_error(error_msg)
+             return f"Error: {error_msg}"
  
      def agentic_crawler(
          self,
@@ -143,21 +158,7 @@ class ScrapeGraphTools(Toolkit):
              JSON string containing the scraping results, including request_id, status, and extracted data
          """
          try:
-             # Validate required parameters for AI extraction
-             if ai_extraction and not user_prompt:
-                 return json.dumps({"error": "user_prompt is required when ai_extraction=True"})
-
-             # Validate URL format
-             if not url.strip():
-                 return json.dumps({"error": "URL cannot be empty"})
-             if not (url.startswith("http://") or url.startswith("https://")):
-                 return json.dumps({"error": "Invalid URL - must start with http:// or https://"})
-
-             # Validate steps
-             if not steps:
-                 return json.dumps({"error": "Steps cannot be empty"})
-             if any(not step.strip() for step in steps):
-                 return json.dumps({"error": "All steps must contain valid instructions"})
+             log_debug(f"ScrapeGraph agentic_crawler request for URL: {url}")
  
              # Prepare parameters for the API call
              params = {"url": url, "steps": steps, "use_session": use_session, "ai_extraction": ai_extraction}
@@ -170,26 +171,52 @@ class ScrapeGraphTools(Toolkit):
  
              # Call the agentic scraper API
              response = self.client.agenticscraper(**params)
-
              return json.dumps(response, indent=2)
  
          except Exception as e:
-             return json.dumps({"error": str(e)})
+             error_msg = f"Agentic crawler failed: {str(e)}"
+             log_error(error_msg)
+             return f"Error: {error_msg}"
  
-     def searchscraper(self, prompt: str) -> str:
+     def searchscraper(self, user_prompt: str) -> str:
          """Search the web and extract information from the web.
          Args:
-             prompt (str): Search query
+             user_prompt (str): Search query
          Returns:
              JSON of the search results
          """
          try:
-             response = self.client.searchscraper(user_prompt=prompt)
-             if hasattr(response, "result"):
-                 return json.dumps(response.result)
-             elif isinstance(response, dict) and "result" in response:
-                 return json.dumps(response["result"])
-             else:
-                 return json.dumps(response)
+             log_debug(f"ScrapeGraph searchscraper request with prompt: {user_prompt}")
+             response = self.client.searchscraper(user_prompt=user_prompt, render_heavy_js=self.render_heavy_js)
+             return json.dumps(response["result"])
+         except Exception as e:
+             error_msg = f"Searchscraper failed: {str(e)}"
+             log_error(error_msg)
+             return f"Error: {error_msg}"
+
+     def scrape(
+         self,
+         website_url: str,
+         headers: Optional[dict] = None,
+     ) -> str:
+         """Get raw HTML content from a website using the ScrapeGraphAI scrape API.
+
+         Args:
+             website_url (str): The URL of the website to scrape
+             headers (Optional[dict]): Optional headers to send with the request
+
+         Returns:
+             JSON string containing the HTML content and metadata
+         """
+         try:
+             log_debug(f"ScrapeGraph scrape request for URL: {website_url}")
+             response = self.client.scrape(
+                 website_url=website_url,
+                 headers=headers,
+                 render_heavy_js=self.render_heavy_js,
+             )
+             return json.dumps(response, indent=2)
          except Exception as e:
-             return json.dumps({"error": str(e)})
+             error_msg = f"Scrape failed: {str(e)}"
+             log_error(error_msg)
+             return f"Error: {error_msg}"
agno/tools/whatsapp.py CHANGED
@@ -16,7 +16,7 @@ class WhatsAppTools(Toolkit):
          self,
          access_token: Optional[str] = None,
          phone_number_id: Optional[str] = None,
-         version: str = "v22.0",
+         version: Optional[str] = None,
          recipient_waid: Optional[str] = None,
          async_mode: bool = False,
      ):
agno/utils/print_response/agent.py CHANGED
@@ -423,7 +423,7 @@ def build_panels_stream(
  reasoning_panel = create_panel(content=step_content, title=f"Reasoning step {i}", border_style="green")
  panels.append(reasoning_panel)
  
- if len(response_reasoning_content_buffer) > 0:
+ if len(response_reasoning_content_buffer) > 0 and show_reasoning:
  # Create panel for thinking
  thinking_panel = create_panel(
  content=Text(response_reasoning_content_buffer),
@@ -727,7 +727,7 @@ def build_panels(
  reasoning_panel = create_panel(content=step_content, title=f"Reasoning step {i}", border_style="green")
  panels.append(reasoning_panel)
  
- if isinstance(run_response, RunOutput) and run_response.reasoning_content is not None:
+ if isinstance(run_response, RunOutput) and run_response.reasoning_content is not None and show_reasoning:
  # Create panel for thinking
  thinking_panel = create_panel(
  content=Text(run_response.reasoning_content),
agno/utils/print_response/team.py CHANGED
@@ -125,7 +125,7 @@ def print_response(
  panels.append(reasoning_panel)
  live_console.update(Group(*panels))
  
- if isinstance(run_response, TeamRunOutput) and run_response.reasoning_content is not None:
+ if isinstance(run_response, TeamRunOutput) and run_response.reasoning_content is not None and show_reasoning:
  # Create panel for thinking
  thinking_panel = create_panel(
  content=Text(run_response.reasoning_content),
@@ -497,7 +497,7 @@ def print_response_stream(
  reasoning_panel = build_reasoning_step_panel(i, step, show_full_reasoning)
  panels.append(reasoning_panel)
  
- if len(_response_reasoning_content) > 0:
+ if len(_response_reasoning_content) > 0 and show_reasoning:
  render = True
  # Create panel for thinking
  thinking_panel = create_panel(
@@ -679,7 +679,7 @@ def print_response_stream(
  final_panels.append(reasoning_panel)
  
  # Add thinking panel if available
- if _response_reasoning_content:
+ if _response_reasoning_content and show_reasoning:
  thinking_panel = create_panel(
  content=Text(_response_reasoning_content),
  title=f"Thinking ({response_timer.elapsed:.1f}s)",
@@ -938,7 +938,7 @@ async def aprint_response(
  panels.append(reasoning_panel)
  live_console.update(Group(*panels))
  
- if isinstance(run_response, TeamRunOutput) and run_response.reasoning_content is not None:
+ if isinstance(run_response, TeamRunOutput) and run_response.reasoning_content is not None and show_reasoning:
  # Create panel for thinking
  thinking_panel = create_panel(
  content=Text(run_response.reasoning_content),
@@ -1306,7 +1306,7 @@ async def aprint_response_stream(
  reasoning_panel = build_reasoning_step_panel(i, step, show_full_reasoning)
  panels.append(reasoning_panel)
  
- if len(_response_reasoning_content) > 0:
+ if len(_response_reasoning_content) > 0 and show_reasoning:
  render = True
  # Create panel for thinking
  thinking_panel = create_panel(
@@ -1489,7 +1489,7 @@ async def aprint_response_stream(
  final_panels.append(reasoning_panel)
  
  # Add thinking panel if available
- if _response_reasoning_content:
+ if _response_reasoning_content and show_reasoning:
  thinking_panel = create_panel(
  content=Text(_response_reasoning_content),
  title=f"Thinking ({response_timer.elapsed:.1f}s)",
agno/utils/reasoning.py CHANGED
@@ -1,4 +1,4 @@
- from typing import TYPE_CHECKING, List, Union
+ from typing import TYPE_CHECKING, List, Optional, Tuple, Union
  
  from agno.models.message import Message
  from agno.models.metrics import Metrics
@@ -9,6 +9,27 @@ if TYPE_CHECKING:
      from agno.team.team import TeamRunOutput
  
  
+ def extract_thinking_content(content: str) -> Tuple[Optional[str], str]:
+     """Extract thinking content from response text between <think> tags."""
+     if not content or "</think>" not in content:
+         return None, content
+
+     # Find the end of thinking content
+     end_idx = content.find("</think>")
+
+     # Look for opening <think> tag, if not found, assume thinking starts at beginning
+     start_idx = content.find("<think>")
+     if start_idx == -1:
+         reasoning_content = content[:end_idx].strip()
+     else:
+         start_idx = start_idx + len("<think>")
+         reasoning_content = content[start_idx:end_idx].strip()
+
+     output_content = content[end_idx + len("</think>") :].strip()
+
+     return reasoning_content, output_content
+
+
  def append_to_reasoning_content(run_response: Union["RunOutput", "TeamRunOutput"], content: str) -> None:
      """Helper to append content to the reasoning_content field."""
      if not hasattr(run_response, "reasoning_content") or not run_response.reasoning_content:  # type: ignore
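
A brief illustration of the new extract_thinking_content helper, based on the code shown above (inputs are made up):

from agno.utils.reasoning import extract_thinking_content

reasoning, output = extract_thinking_content("<think>check the schema first</think>The answer is 42.")
# reasoning == "check the schema first", output == "The answer is 42."

# A missing opening <think> tag is tolerated: everything before </think> counts as reasoning
reasoning, output = extract_thinking_content("weigh the options </think>Done.")

# Content without a closing </think> tag is returned unchanged, with no reasoning
reasoning, output = extract_thinking_content("plain text")  # (None, "plain text")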
agno/utils/string.py CHANGED
@@ -158,6 +158,15 @@ def _parse_individual_json(content: str, output_schema: Type[BaseModel]) -> Opti
  def parse_response_model_str(content: str, output_schema: Type[BaseModel]) -> Optional[BaseModel]:
      structured_output = None
  
+     # Extract thinking content first to prevent <think> tags from corrupting JSON
+     from agno.utils.reasoning import extract_thinking_content
+
+     # handle thinking content b/w <think> tags
+     if "</think>" in content:
+         reasoning_content, output_content = extract_thinking_content(content)
+         if reasoning_content:
+             content = output_content
+
      # Clean content first to simplify all parsing attempts
      cleaned_content = _clean_json_content(content)
  
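
Combined with the helper above, a hedged sketch of how parse_response_model_str now handles a response that opens with a <think> block (the Answer model is illustrative):

from pydantic import BaseModel

from agno.utils.string import parse_response_model_str


class Answer(BaseModel):
    value: int


raw = '<think>reasoning goes here</think>{"value": 42}'
parsed = parse_response_model_str(raw, Answer)  # thinking is stripped before the JSON is cleaned and parsed
# parsed.value == 42 if parsing succeeds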
agno/workflow/workflow.py CHANGED
@@ -1406,7 +1406,6 @@ class Workflow:
  logger.info(f"Early termination requested by step {step_name}")
  break
  
-
  # Update the workflow_run_response with completion data
  if collected_step_outputs:
  workflow_run_response.metrics = self._aggregate_workflow_metrics(collected_step_outputs)
{agno-2.0.8.dist-info → agno-2.0.9.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agno
- Version: 2.0.8
+ Version: 2.0.9
  Summary: Agno: a lightweight library for building Multi-Agent Systems
  Author-email: Ashpreet Bedi <ashpreet@agno.com>
  Project-URL: homepage, https://agno.com
@@ -163,6 +163,8 @@ Requires-Dist: psycopg-binary; extra == "psycopg"
  Requires-Dist: psycopg; extra == "psycopg"
  Provides-Extra: reportlab
  Requires-Dist: reportlab; extra == "reportlab"
+ Provides-Extra: scrapegraph
+ Requires-Dist: scrapegraph-py; extra == "scrapegraph"
  Provides-Extra: todoist
  Requires-Dist: todoist-api-python; extra == "todoist"
  Provides-Extra: valyu
@@ -300,6 +302,7 @@ Requires-Dist: agno[mcp]; extra == "tools"
  Requires-Dist: agno[browserbase]; extra == "tools"
  Requires-Dist: agno[agentql]; extra == "tools"
  Requires-Dist: agno[opencv]; extra == "tools"
+ Requires-Dist: agno[scrapegraph]; extra == "tools"
  Requires-Dist: agno[valyu]; extra == "tools"
  Requires-Dist: agno[confluence]; extra == "tools"
  Requires-Dist: agno[oxylabs]; extra == "tools"