agno 1.7.11__py3-none-any.whl → 1.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agno/agent/agent.py CHANGED
@@ -5885,6 +5885,9 @@ class Agent:
             telemetry=self.telemetry,
             debug_mode=self.debug_mode,
             debug_level=self.debug_level,
+            session_state=self.session_state,
+            context=self.context,
+            extra_data=self.extra_data,
         )
         is_deepseek = is_deepseek_reasoning_model(reasoning_model)
         is_groq = is_groq_reasoning_model(reasoning_model)
@@ -5974,6 +5977,9 @@ class Agent:
             telemetry=self.telemetry,
             debug_mode=self.debug_mode,
             debug_level=self.debug_level,
+            session_state=self.session_state,
+            context=self.context,
+            extra_data=self.extra_data,
         )

         # Validate reasoning agent
@@ -6108,6 +6114,9 @@ class Agent:
             telemetry=self.telemetry,
             debug_mode=self.debug_mode,
             debug_level=self.debug_level,
+            session_state=self.session_state,
+            context=self.context,
+            extra_data=self.extra_data,
         )
         is_deepseek = is_deepseek_reasoning_model(reasoning_model)
         is_groq = is_groq_reasoning_model(reasoning_model)
@@ -6197,6 +6206,9 @@ class Agent:
             telemetry=self.telemetry,
             debug_mode=self.debug_mode,
             debug_level=self.debug_level,
+            session_state=self.session_state,
+            context=self.context,
+            extra_data=self.extra_data,
         )

         # Validate reasoning agent
@@ -7443,6 +7455,7 @@ class Agent:
                 if citation.url  # Only include citations with valid URLs
             )
             if md_content:  # Only create panel if there are citations
+                md_content = md_content.strip()
                 citations_panel = create_panel(
                     content=Markdown(md_content),
                     title="Citations",
agno/app/agui/utils.py CHANGED
@@ -129,7 +129,7 @@ def _create_events_from_chunk(
     Process a single chunk and return events to emit + updated message_started state.
     Returns: (events_to_emit, new_message_started_state)
     """
-    events_to_emit = []
+    events_to_emit: List[BaseEvent] = []

     # Extract content if the contextual event is a content event
     if chunk.event == RunEvent.run_response_content:
@@ -13,7 +13,7 @@ from agno.media import Audio, Image, Video
 from agno.media import File as FileMedia
 from agno.run.response import RunResponseErrorEvent
 from agno.run.team import RunResponseErrorEvent as TeamRunResponseErrorEvent
-from agno.run.team import TeamRunResponseEvent
+from agno.run.team import TeamRunResponse, TeamRunResponseEvent
 from agno.run.v2.workflow import WorkflowErrorEvent
 from agno.team.team import Team
 from agno.utils.log import logger
@@ -425,15 +425,18 @@ def get_async_router(
         )
         return run_response.to_dict()
     elif team:
-        team_run_response = await team.arun(
-            message=message,
-            session_id=session_id,
-            user_id=user_id,
-            images=base64_images if base64_images else None,
-            audio=base64_audios if base64_audios else None,
-            videos=base64_videos if base64_videos else None,
-            files=document_files if document_files else None,
-            stream=False,
+        team_run_response = cast(
+            TeamRunResponse,
+            await team.arun(
+                message=message,
+                session_id=session_id,
+                user_id=user_id,
+                images=base64_images if base64_images else None,
+                audio=base64_audios if base64_audios else None,
+                videos=base64_videos if base64_videos else None,
+                files=document_files if document_files else None,
+                stream=False,
+            ),
         )
         return team_run_response.to_dict()
     elif workflow:
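
The `cast` wrapper is needed because `team.arun` is typed as returning either a full response or an event stream depending on `stream`; narrowing to `TeamRunResponse` lets the following `.to_dict()` call type-check. A standalone sketch of the pattern with hypothetical `Response`/`run` stand-ins (not agno APIs):

```python
from typing import Iterator, Union, cast


class Response:
    def to_dict(self) -> dict:
        return {"ok": True}


def run(stream: bool) -> Union[Response, Iterator[str]]:
    # Returns chunks when stream=True, a complete Response otherwise.
    if stream:
        return iter(["chunk"])
    return Response()


# stream=False always yields a Response at runtime; cast() narrows the
# static type the same way the team.arun change above does.
response = cast(Response, run(stream=False))
print(response.to_dict())
```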
agno/embedder/google.py CHANGED
@@ -23,6 +23,10 @@ class GeminiEmbedder(Embedder):
     request_params: Optional[Dict[str, Any]] = None
     client_params: Optional[Dict[str, Any]] = None
     gemini_client: Optional[GeminiClient] = None
+    # Vertex AI parameters
+    vertexai: bool = False
+    project_id: Optional[str] = None
+    location: Optional[str] = None

     @property
     def client(self):
@@ -30,13 +34,21 @@ class GeminiEmbedder(Embedder):
             return self.gemini_client

         _client_params: Dict[str, Any] = {}
+        vertexai = self.vertexai or getenv("GOOGLE_GENAI_USE_VERTEXAI", "false").lower() == "true"

-        self.api_key = self.api_key or getenv("GOOGLE_API_KEY")
-        if not self.api_key:
-            log_error("GOOGLE_API_KEY not set. Please set the GOOGLE_API_KEY environment variable.")
-
-        if self.api_key:
+        if not vertexai:
+            self.api_key = self.api_key or getenv("GOOGLE_API_KEY")
+            if not self.api_key:
+                log_error("GOOGLE_API_KEY not set. Please set the GOOGLE_API_KEY environment variable.")
             _client_params["api_key"] = self.api_key
+        else:
+            log_info("Using Vertex AI API for embeddings")
+            _client_params["vertexai"] = True
+            _client_params["project"] = self.project_id or getenv("GOOGLE_CLOUD_PROJECT")
+            _client_params["location"] = self.location or getenv("GOOGLE_CLOUD_LOCATION")
+
+        _client_params = {k: v for k, v in _client_params.items() if v is not None}
+
         if self.client_params:
             _client_params.update(self.client_params)
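With the new fields, `GeminiEmbedder` can authenticate through Vertex AI instead of requiring `GOOGLE_API_KEY`. A sketch with placeholder project values; per the hunk above, setting `GOOGLE_GENAI_USE_VERTEXAI=true` plus `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION` is the environment-variable equivalent:

```python
from agno.embedder.google import GeminiEmbedder

embedder = GeminiEmbedder(
    vertexai=True,
    project_id="my-gcp-project",  # placeholder GCP project
    location="us-central1",
)
# get_embedding is the standard agno Embedder entry point
vector = embedder.get_embedding("The quick brown fox")
```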
agno/knowledge/gcs/pdf.py CHANGED
@@ -1,8 +1,9 @@
-from typing import AsyncIterator, Iterator, List
+from typing import Any, AsyncIterator, Dict, Iterator, List, Optional

 from agno.document import Document
 from agno.document.reader.gcs.pdf_reader import GCSPDFReader
 from agno.knowledge.gcs.base import GCSKnowledgeBase
+from agno.utils.log import log_debug, log_info


 class GCSPDFKnowledgeBase(GCSKnowledgeBase):
@@ -19,3 +20,106 @@ class GCSPDFKnowledgeBase(GCSKnowledgeBase):
         for blob in self.gcs_blobs:
             if blob.name.endswith(".pdf"):
                 yield await self.reader.async_read(blob=blob)
+
+    def load(
+        self,
+        recreate: bool = False,
+        upsert: bool = False,
+        skip_existing: bool = True,
+    ) -> None:
+        """Load the knowledge base to the vector db
+        Args:
+            recreate (bool): If True, recreates the collection in the vector db. Defaults to False.
+            upsert (bool): If True, upserts documents to the vector db. Defaults to False.
+            skip_existing (bool): If True, skips documents which already exist in the vector db when inserting. Defaults to True.
+        """
+        self._load_init(recreate, upsert)
+        if self.vector_db is None:
+            return
+
+        log_info("Loading knowledge base")
+        num_documents = 0
+        for document_list in self.document_lists:
+            documents_to_load = document_list
+
+            # Track metadata for filtering capabilities and collect metadata for filters
+            filters_metadata: Optional[Dict[str, Any]] = None
+            for doc in document_list:
+                if doc.meta_data:
+                    self._track_metadata_structure(doc.meta_data)
+                    # Use the first non-None metadata for filters
+                    if filters_metadata is None:
+                        filters_metadata = doc.meta_data
+
+            # Skip processing if no documents in this batch
+            if not documents_to_load:
+                log_debug("Skipping empty document batch")
+                continue
+
+            # Upsert documents if upsert is True and vector db supports upsert
+            if upsert and self.vector_db.upsert_available():
+                self.vector_db.upsert(documents=documents_to_load, filters=filters_metadata)
+            # Insert documents
+            else:
+                # Filter out documents which already exist in the vector db
+                if skip_existing:
+                    log_debug("Filtering out existing documents before insertion.")
+                    documents_to_load = self.filter_existing_documents(document_list)
+
+                if documents_to_load:
+                    self.vector_db.insert(documents=documents_to_load, filters=filters_metadata)
+
+            num_documents += len(documents_to_load)
+            log_info(f"Added {num_documents} documents to knowledge base")
+
+    async def aload(
+        self,
+        recreate: bool = False,
+        upsert: bool = False,
+        skip_existing: bool = True,
+    ) -> None:
+        """Load the knowledge base to the vector db asynchronously
+        Args:
+            recreate (bool): If True, recreates the collection in the vector db. Defaults to False.
+            upsert (bool): If True, upserts documents to the vector db. Defaults to False.
+            skip_existing (bool): If True, skips documents which already exist in the vector db when inserting. Defaults to True.
+        """
+        await self._aload_init(recreate, upsert)
+        if self.vector_db is None:
+            return
+
+        log_info("Loading knowledge base")
+        num_documents = 0
+        document_iterator = self.async_document_lists
+        async for document_list in document_iterator:  # type: ignore
+            documents_to_load = document_list
+
+            # Track metadata for filtering capabilities and collect metadata for filters
+            filters_metadata: Optional[Dict[str, Any]] = None
+            for doc in document_list:
+                if doc.meta_data:
+                    self._track_metadata_structure(doc.meta_data)
+                    # Use the first non-None metadata for filters
+                    if filters_metadata is None:
+                        filters_metadata = doc.meta_data
+
+            # Skip processing if no documents in this batch
+            if not documents_to_load:
+                log_debug("Skipping empty document batch")
+                continue
+
+            # Upsert documents if upsert is True and vector db supports upsert
+            if upsert and self.vector_db.upsert_available():
+                await self.vector_db.async_upsert(documents=documents_to_load, filters=filters_metadata)
+            # Insert documents
+            else:
+                # Filter out documents which already exist in the vector db
+                if skip_existing:
+                    log_debug("Filtering out existing documents before insertion.")
+                    documents_to_load = await self.async_filter_existing_documents(document_list)
+
+                if documents_to_load:
+                    await self.vector_db.async_insert(documents=documents_to_load, filters=filters_metadata)
+
+            num_documents += len(documents_to_load)
+            log_info(f"Added {num_documents} documents to knowledge base")
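
The new `load`/`aload` overrides mirror the base knowledge-base loaders but pass each batch's first non-None document metadata to the vector db as `filters`. A sketch of the synchronous path; the `bucket_name`/`prefix` fields and the LanceDb target are assumptions for illustration:

```python
from agno.knowledge.gcs.pdf import GCSPDFKnowledgeBase
from agno.vectordb.lancedb import LanceDb

kb = GCSPDFKnowledgeBase(
    bucket_name="my-bucket",  # assumed GCSKnowledgeBase field
    prefix="pdfs/",           # assumed GCSKnowledgeBase field
    vector_db=LanceDb(table_name="gcs_pdfs", uri="/tmp/lancedb"),
)
# With upsert=True, each batch goes through vector_db.upsert(..., filters=...)
kb.load(recreate=False, upsert=True)
```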
agno/media.py CHANGED
@@ -38,13 +38,34 @@ class ImageArtifact(Media):
     mime_type: Optional[str] = None
     alt_text: Optional[str] = None

+    def _normalise_content(self) -> Optional[Union[str, bytes]]:
+        if self.content is None:
+            return None
+        content_normalised: Union[str, bytes] = self.content
+        if content_normalised and isinstance(content_normalised, bytes):
+            from base64 import b64encode
+
+            try:
+                # First try to decode as UTF-8
+                content_normalised = content_normalised.decode("utf-8")  # type: ignore
+            except UnicodeDecodeError:
+                # Fallback to base64 encoding for binary content
+                content_normalised = b64encode(bytes(content_normalised)).decode("utf-8")  # type: ignore
+            except Exception:
+                # Last resort: try to convert to base64
+                try:
+                    content_normalised = b64encode(bytes(content_normalised)).decode("utf-8")  # type: ignore
+                except Exception:
+                    pass
+        return content_normalised
+
     def to_dict(self) -> Dict[str, Any]:
+        content_normalised = self._normalise_content()
+
         response_dict = {
             "id": self.id,
             "url": self.url,
-            "content": self.content.decode("utf-8")
-            if self.content and isinstance(self.content, bytes)
-            else self.content,
+            "content": content_normalised,
             "mime_type": self.mime_type,
             "alt_text": self.alt_text,
         }
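
Previously `to_dict()` called `.decode("utf-8")` unconditionally on bytes content, which raises `UnicodeDecodeError` for real binary image data; the helper now falls back to base64. A sketch, with illustrative field values:

```python
from agno.media import ImageArtifact

# The PNG signature bytes are not valid UTF-8, so the base64 fallback applies.
artifact = ImageArtifact(id="img-1", url=None, content=b"\x89PNG\r\n\x1a\n")
print(artifact.to_dict()["content"])  # "iVBORw0KGgo=" (base64 of the bytes)
```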
@@ -30,8 +30,11 @@ try:
         GoogleSearch,
         GoogleSearchRetrieval,
         Part,
+        Retrieval,
         ThinkingConfig,
         Tool,
+        UrlContext,
+        VertexAISearch,
     )
     from google.genai.types import (
         File as GeminiFile,
@@ -68,6 +71,9 @@ class Gemini(Model):
     search: bool = False
     grounding: bool = False
     grounding_dynamic_threshold: Optional[float] = None
+    url_context: bool = False
+    vertexai_search: bool = False
+    vertexai_search_datastore: Optional[str] = None

     temperature: Optional[float] = None
     top_p: Optional[float] = None
@@ -115,7 +121,6 @@ class Gemini(Model):
         """
         if self.client:
             return self.client
-
         client_params: Dict[str, Any] = {}
         vertexai = self.vertexai or getenv("GOOGLE_GENAI_USE_VERTEXAI", "false").lower() == "true"
@@ -199,26 +204,45 @@ class Gemini(Model):
         if thinking_config_params:
             config["thinking_config"] = ThinkingConfig(**thinking_config_params)

-        if self.grounding and self.search:
-            log_info("Both grounding and search are enabled. Grounding will take precedence.")
-            self.search = False
+        # Build tools array based on enabled built-in tools
+        builtin_tools = []

         if self.grounding:
-            log_info("Grounding enabled. External tools will be disabled.")
-            config["tools"] = [
+            log_info(
+                "Grounding enabled. This is a legacy tool. For Gemini 2.0+, please use the `search` flag instead."
+            )
+            builtin_tools.append(
                 Tool(
                     google_search=GoogleSearchRetrieval(
                         dynamic_retrieval_config=DynamicRetrievalConfig(
                             dynamic_threshold=self.grounding_dynamic_threshold
                         )
                     )
-                ),
-            ]
+                )
+            )

-        elif self.search:
-            log_info("Search enabled. External tools will be disabled.")
-            config["tools"] = [Tool(google_search=GoogleSearch())]
+        if self.search:
+            log_info("Google Search enabled.")
+            builtin_tools.append(Tool(google_search=GoogleSearch()))
+
+        if self.url_context:
+            log_info("URL context enabled.")
+            builtin_tools.append(Tool(url_context=UrlContext()))
+
+        if self.vertexai_search:
+            log_info("Vertex AI Search enabled.")
+            if not self.vertexai_search_datastore:
+                log_error("vertexai_search_datastore must be provided when vertexai_search is enabled.")
+                raise ValueError("vertexai_search_datastore must be provided when vertexai_search is enabled.")
+            builtin_tools.append(
+                Tool(retrieval=Retrieval(vertex_ai_search=VertexAISearch(datastore=self.vertexai_search_datastore)))
+            )

+        # Set tools in config
+        if builtin_tools:
+            if tools:
+                log_info("Built-in tools enabled. External tools will be disabled.")
+            config["tools"] = builtin_tools
         elif tools:
             config["tools"] = [format_function_definitions(tools)]
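Built-in tools are now additive rather than mutually exclusive: `grounding`, `search`, `url_context`, and `vertexai_search` each append to `builtin_tools`, and external tools are only dropped when at least one built-in tool is active. A sketch combining two of them; the model id is illustrative:

```python
from agno.agent import Agent
from agno.models.google import Gemini

agent = Agent(
    model=Gemini(
        id="gemini-2.0-flash",
        search=True,       # appends Tool(google_search=GoogleSearch())
        url_context=True,  # appends Tool(url_context=UrlContext())
    ),
)
agent.print_response("Summarize https://example.com and find related coverage.")
```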
@@ -388,7 +412,10 @@ class Gemini(Model):
         message_parts: List[Any] = []

         # Function calls
-        if (not content or role == "model") and message.tool_calls is not None and len(message.tool_calls) > 0:
+        if role == "model" and message.tool_calls is not None and len(message.tool_calls) > 0:
+            if content is not None:
+                content_str = content if isinstance(content, str) else str(content)
+                message_parts.append(Part.from_text(text=content_str))
             for tool_call in message.tool_calls:
                 message_parts.append(
                     Part.from_function_call(
@@ -396,7 +423,7 @@ class Gemini(Model):
                         args=json.loads(tool_call["function"]["arguments"]),
                     )
                 )
-        # Function results
+        # Function call results
         elif message.tool_calls is not None and len(message.tool_calls) > 0:
             for tool_call in message.tool_calls:
                 message_parts.append(
@@ -758,13 +785,15 @@ class Gemini(Model):

             model_response.tool_calls.append(tool_call)

+        citations = Citations()
+        citations_raw = {}
+        citations_urls = []
+
         if response.candidates and response.candidates[0].grounding_metadata is not None:
-            citations = Citations()
             grounding_metadata = response.candidates[0].grounding_metadata.model_dump()
-            citations.raw = grounding_metadata
+            citations_raw["grounding_metadata"] = grounding_metadata

-            # Extract url and title
-            chunks = grounding_metadata.pop("grounding_chunks", None) or []
+            chunks = grounding_metadata.get("grounding_chunks", [])
             citation_pairs = [
                 (chunk.get("web", {}).get("uri"), chunk.get("web", {}).get("title"))
                 for chunk in chunks
@@ -772,8 +801,31 @@ class Gemini(Model):
             ]

             # Create citation objects from filtered pairs
-            citations.urls = [UrlCitation(url=url, title=title) for url, title in citation_pairs]
-
+            grounding_urls = [UrlCitation(url=url, title=title) for url, title in citation_pairs]
+            citations_urls.extend(grounding_urls)
+
+        # Handle URLs from URL context tool
+        if (
+            response.candidates
+            and hasattr(response.candidates[0], "url_context_metadata")
+            and response.candidates[0].url_context_metadata is not None
+        ):
+            url_context_metadata = response.candidates[0].url_context_metadata.model_dump()
+            citations_raw["url_context_metadata"] = url_context_metadata
+
+            url_metadata_list = url_context_metadata.get("url_metadata", [])
+            for url_meta in url_metadata_list:
+                retrieved_url = url_meta.get("retrieved_url")
+                status = url_meta.get("url_retrieval_status", "UNKNOWN")
+                if retrieved_url and status == "URL_RETRIEVAL_STATUS_SUCCESS":
+                    # Avoid duplicate URLs
+                    existing_urls = [citation.url for citation in citations_urls]
+                    if retrieved_url not in existing_urls:
+                        citations_urls.append(UrlCitation(url=retrieved_url, title=retrieved_url))
+
+        if citations_raw or citations_urls:
+            citations.raw = citations_raw if citations_raw else None
+            citations.urls = citations_urls if citations_urls else None
             model_response.citations = citations

         # Extract usage metadata if present
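
Citations from Search grounding and the URL-context tool are now merged into a single `Citations` object, with `raw` keyed by source. A sketch of reading them back from a run; the model id is illustrative:

```python
from agno.agent import Agent
from agno.models.google import Gemini

agent = Agent(model=Gemini(id="gemini-2.0-flash", search=True, url_context=True))
run_response = agent.run("What changed at https://example.com/changelog this week?")
if run_response.citations and run_response.citations.urls:
    for citation in run_response.citations.urls:
        print(citation.title, citation.url)
# run_response.citations.raw may hold "grounding_metadata" and/or
# "url_context_metadata", matching the keys set above.
```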
@@ -1,7 +1,7 @@
 from collections.abc import AsyncIterator
 from dataclasses import dataclass
 from os import getenv
-from typing import Any, Dict, Iterator, List, Optional, Type, Union
+from typing import Any, Dict, Iterator, List, Literal, Optional, Type, Union

 import httpx
 from pydantic import BaseModel
@@ -45,6 +45,7 @@ class OpenAIChat(Model):
     # Request parameters
     store: Optional[bool] = None
     reasoning_effort: Optional[str] = None
+    verbosity: Optional[Literal["low", "medium", "high"]] = None
     metadata: Optional[Dict[str, Any]] = None
     frequency_penalty: Optional[float] = None
     logit_bias: Optional[Any] = None
@@ -159,6 +160,7 @@ class OpenAIChat(Model):
         base_params = {
             "store": self.store,
             "reasoning_effort": self.reasoning_effort,
+            "verbosity": self.verbosity,
             "frequency_penalty": self.frequency_penalty,
             "logit_bias": self.logit_bias,
             "logprobs": self.logprobs,
@@ -227,6 +229,8 @@ class OpenAIChat(Model):
         model_dict.update(
             {
                 "store": self.store,
+                "reasoning_effort": self.reasoning_effort,
+                "verbosity": self.verbosity,
                 "frequency_penalty": self.frequency_penalty,
                 "logit_bias": self.logit_bias,
                 "logprobs": self.logprobs,
@@ -42,6 +42,8 @@ class OpenAIResponses(Model):
     metadata: Optional[Dict[str, Any]] = None
     parallel_tool_calls: Optional[bool] = None
     reasoning: Optional[Dict[str, Any]] = None
+    verbosity: Optional[Literal["low", "medium", "high"]] = None
+    reasoning_effort: Optional[Literal["minimal", "medium", "high"]] = None
     store: Optional[bool] = None
     temperature: Optional[float] = None
     top_p: Optional[float] = None
@@ -176,7 +178,6 @@ class OpenAIResponses(Model):
             "max_tool_calls": self.max_tool_calls,
             "metadata": self.metadata,
             "parallel_tool_calls": self.parallel_tool_calls,
-            "reasoning": self.reasoning,
             "store": self.store,
             "temperature": self.temperature,
             "top_p": self.top_p,
@@ -184,21 +185,37 @@ class OpenAIResponses(Model):
             "user": self.user,
             "service_tier": self.service_tier,
         }
+
+        # Handle reasoning parameter - convert reasoning_effort to reasoning format
+        if self.reasoning is not None:
+            base_params["reasoning"] = self.reasoning
+        elif self.reasoning_effort is not None:
+            base_params["reasoning"] = {"effort": self.reasoning_effort}
+
+        # Build text parameter
+        text_params: Dict[str, Any] = {}
+
+        # Add verbosity if specified
+        if self.verbosity is not None:
+            text_params["verbosity"] = self.verbosity
+
         # Set the response format
         if response_format is not None:
             if isinstance(response_format, type) and issubclass(response_format, BaseModel):
                 schema = get_response_schema_for_provider(response_format, "openai")
-                base_params["text"] = {
-                    "format": {
-                        "type": "json_schema",
-                        "name": response_format.__name__,
-                        "schema": schema,
-                        "strict": True,
-                    }
+                text_params["format"] = {
+                    "type": "json_schema",
+                    "name": response_format.__name__,
+                    "schema": schema,
+                    "strict": True,
                 }
             else:
                 # JSON mode
-                base_params["text"] = {"format": {"type": "json_object"}}
+                text_params["format"] = {"type": "json_object"}
+
+        # Add text parameter if there are any text-level params
+        if text_params:
+            base_params["text"] = text_params

         # Filter out None values
         request_params: Dict[str, Any] = {k: v for k, v in base_params.items() if v is not None}
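
For the Responses API, `reasoning_effort` expands to `{"effort": ...}` under the `reasoning` request parameter unless an explicit `reasoning` dict is set, and `verbosity` travels inside the `text` parameter alongside any response format. A sketch with an illustrative model id:

```python
from agno.agent import Agent
from agno.models.openai import OpenAIResponses

agent = Agent(
    model=OpenAIResponses(id="gpt-5", reasoning_effort="minimal", verbosity="low")
)
agent.print_response("Give me a one-line summary of RFC 9293.")
```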
agno/reasoning/default.py CHANGED
@@ -1,7 +1,7 @@
 from __future__ import annotations

 from textwrap import dedent
-from typing import Callable, Dict, List, Literal, Optional, Union
+from typing import Any, Callable, Dict, List, Literal, Optional, Union

 from agno.models.base import Model
 from agno.reasoning.step import ReasoningSteps
@@ -19,6 +19,9 @@ def get_default_reasoning_agent(
     telemetry: bool = True,
     debug_mode: bool = False,
     debug_level: Literal[1, 2] = 1,
+    session_state: Optional[Dict[str, Any]] = None,
+    context: Optional[Dict[str, Any]] = None,
+    extra_data: Optional[Dict[str, Any]] = None,
 ) -> Optional["Agent"]:  # type: ignore # noqa: F821
     from agno.agent import Agent
@@ -87,6 +90,9 @@ def get_default_reasoning_agent(
         telemetry=telemetry,
         debug_mode=debug_mode,
         debug_level=debug_level,
+        session_state=session_state,
+        context=context,
+        extra_data=extra_data,
     )

     agent.model.show_tool_calls = False  # type: ignore
agno/reasoning/helpers.py CHANGED
@@ -1,4 +1,4 @@
-from typing import List, Literal
+from typing import Any, Dict, List, Literal, Optional

 from agno.models.base import Model
 from agno.models.message import Message
@@ -13,6 +13,9 @@ def get_reasoning_agent(
     telemetry: bool = False,
     debug_mode: bool = False,
     debug_level: Literal[1, 2] = 1,
+    session_state: Optional[Dict[str, Any]] = None,
+    context: Optional[Dict[str, Any]] = None,
+    extra_data: Optional[Dict[str, Any]] = None,
 ) -> "Agent":  # type: ignore # noqa: F821
     from agno.agent import Agent
@@ -22,6 +25,9 @@ def get_reasoning_agent(
         telemetry=telemetry,
         debug_mode=debug_mode,
         debug_level=debug_level,
+        session_state=session_state,
+        context=context,
+        extra_data=extra_data,
     )
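
Both reasoning helpers gain the same three pass-through parameters that `Agent` now forwards. A minimal sketch of calling the helper directly; the leading `reasoning_model` keyword sits above the hunk shown and is assumed here, and all values are illustrative:

```python
from agno.models.openai import OpenAIChat
from agno.reasoning.helpers import get_reasoning_agent

reasoning_agent = get_reasoning_agent(
    reasoning_model=OpenAIChat(id="gpt-4o-mini"),  # assumed leading parameter
    session_state={"user_name": "Ada"},
    context={"locale": "en-GB"},
    extra_data={"run_source": "docs-example"},
)
```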