agno 2.0.6__py3-none-any.whl → 2.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agno/agent/agent.py CHANGED
@@ -762,6 +762,7 @@ class Agent:
  tool_call_limit=self.tool_call_limit,
  response_format=response_format,
  run_response=run_response,
+ send_media_to_model=self.send_media_to_model,
  )

  # Check for cancellation after model call
@@ -1359,6 +1360,7 @@ class Agent:
  tool_choice=self.tool_choice,
  tool_call_limit=self.tool_call_limit,
  response_format=response_format,
+ send_media_to_model=self.send_media_to_model,
  )

  # Check for cancellation after model call
@@ -3100,6 +3102,8 @@ class Agent:
  # Update the run_response citations with the model response citations
  if model_response.citations is not None:
  run_response.citations = model_response.citations
+ if model_response.provider_data is not None:
+ run_response.model_provider_data = model_response.provider_data

  # Update the run_response tools with the model response tool_executions
  if model_response.tool_executions is not None:
@@ -3174,6 +3178,7 @@ class Agent:
  tool_call_limit=self.tool_call_limit,
  stream_model_response=stream_model_response,
  run_response=run_response,
+ send_media_to_model=self.send_media_to_model,
  ):
  yield from self._handle_model_response_chunk(
  session=session,
@@ -3250,6 +3255,7 @@ class Agent:
  tool_call_limit=self.tool_call_limit,
  stream_model_response=stream_model_response,
  run_response=run_response,
+ send_media_to_model=self.send_media_to_model,
  ) # type: ignore

  async for model_response_event in model_response_stream: # type: ignore
@@ -3352,6 +3358,10 @@ class Agent:
  model_response.reasoning_content += model_response_event.redacted_reasoning_content
  run_response.reasoning_content = model_response.reasoning_content

+ if model_response_event.provider_data is not None:
+ # We get citations in one chunk
+ run_response.model_provider_data = model_response.provider_data
+
  if model_response_event.citations is not None:
  # We get citations in one chunk
  run_response.citations = model_response_event.citations
@@ -3372,6 +3382,7 @@ class Agent:
  or model_response_event.reasoning_content is not None
  or model_response_event.redacted_reasoning_content is not None
  or model_response_event.citations is not None
+ or model_response_event.provider_data is not None
  ):
  yield self._handle_event(
  create_run_output_content_event(
@@ -3380,6 +3391,7 @@ class Agent:
  reasoning_content=model_response_event.reasoning_content,
  redacted_reasoning_content=model_response_event.redacted_reasoning_content,
  citations=model_response_event.citations,
+ model_provider_data=model_response_event.provider_data,
  ),
  run_response,
  workflow_context=workflow_context,
@@ -3813,7 +3825,9 @@ class Agent:
  self._rebuild_tools = True
  if self.search_session_history:
  agent_tools.append(
- self._get_previous_sessions_messages_function(num_history_sessions=self.num_history_sessions, user_id=user_id)
+ self._get_previous_sessions_messages_function(
+ num_history_sessions=self.num_history_sessions, user_id=user_id
+ )
  )
  self._rebuild_tools = True

agno/knowledge/chunking/semantic.py CHANGED
@@ -1,4 +1,5 @@
- from typing import List, Optional
+ import inspect
+ from typing import Any, Dict, List, Optional

  from agno.knowledge.chunking.strategy import ChunkingStrategy
  from agno.knowledge.document.base import Document
@@ -26,11 +27,37 @@ class SemanticChunking(ChunkingStrategy):
  "Please install it using `pip install chonkie` to use SemanticChunking."
  )

- self.chunker = SemanticChunker(
- embedding_model=self.embedder.id, # type: ignore
- chunk_size=self.chunk_size,
- threshold=self.similarity_threshold,
- )
+ # Build arguments dynamically based on chonkie's supported signature
+ params: Dict[str, Any] = {
+ "chunk_size": self.chunk_size,
+ "threshold": self.similarity_threshold,
+ }
+
+ try:
+ sig = inspect.signature(SemanticChunker)
+ param_names = set(sig.parameters.keys())
+
+ # Prefer passing a callable to avoid Chonkie initializing its own client
+ if "embedding_fn" in param_names:
+ params["embedding_fn"] = self.embedder.get_embedding # type: ignore[attr-defined]
+ # If chonkie allows specifying dimensions, provide them
+ if "embedding_dimensions" in param_names and getattr(self.embedder, "dimensions", None):
+ params["embedding_dimensions"] = self.embedder.dimensions # type: ignore[attr-defined]
+ elif "embedder" in param_names:
+ # Some versions may accept an embedder object directly
+ params["embedder"] = self.embedder
+ else:
+ # Fallback to model id
+ params["embedding_model"] = getattr(self.embedder, "id", None) or "text-embedding-3-small"
+
+ self.chunker = SemanticChunker(**params)
+ except Exception:
+ # As a final fallback, use the original behavior
+ self.chunker = SemanticChunker(
+ embedding_model=getattr(self.embedder, "id", None) or "text-embedding-3-small",
+ chunk_size=self.chunk_size,
+ threshold=self.similarity_threshold,
+ )

  def chunk(self, document: Document) -> List[Document]:
  """Split document into semantic chunks using chonkie"""
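Note: the new constructor above introspects chonkie's SemanticChunker signature and only passes keyword arguments the installed version supports, preferring a callable embedder over a bare model id. A minimal standalone sketch of that selection logic (chunker_cls and embedder are stand-ins for illustration, not a verified chonkie API):

    import inspect
    from typing import Any, Dict

    def build_chunker_kwargs(chunker_cls, embedder, chunk_size: int, threshold: float) -> Dict[str, Any]:
        # Always-supported arguments, mirroring the hunk above
        params: Dict[str, Any] = {"chunk_size": chunk_size, "threshold": threshold}
        param_names = set(inspect.signature(chunker_cls).parameters.keys())
        if "embedding_fn" in param_names:
            # Newer signatures: pass a callable so chonkie does not build its own client
            params["embedding_fn"] = embedder.get_embedding
        elif "embedder" in param_names:
            params["embedder"] = embedder
        else:
            # Oldest fallback: pass the embedder's model id
            params["embedding_model"] = getattr(embedder, "id", None) or "text-embedding-3-small"
        return params
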
agno/media.py CHANGED
@@ -371,6 +371,8 @@ class File(BaseModel):
  "application/pdf",
  "application/json",
  "application/x-javascript",
+ "application/json",
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
  "text/javascript",
  "application/x-python",
  "text/x-python",
agno/models/base.py CHANGED
@@ -196,6 +196,7 @@ class Model(ABC):
  tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
  tool_call_limit: Optional[int] = None,
  run_response: Optional[RunOutput] = None,
+ send_media_to_model: bool = True,
  ) -> ModelResponse:
  """
  Generate a response from the model.
@@ -301,7 +302,11 @@ class Model(ABC):

  if any(msg.images or msg.videos or msg.audio or msg.files for msg in function_call_results):
  # Handle function call media
- self._handle_function_call_media(messages=messages, function_call_results=function_call_results)
+ self._handle_function_call_media(
+ messages=messages,
+ function_call_results=function_call_results,
+ send_media_to_model=send_media_to_model,
+ )

  for function_call_result in function_call_results:
  function_call_result.log(metrics=True)
@@ -339,6 +344,7 @@ class Model(ABC):
  functions: Optional[Dict[str, Function]] = None,
  tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
  tool_call_limit: Optional[int] = None,
+ send_media_to_model: bool = True,
  ) -> ModelResponse:
  """
  Generate an asynchronous response from the model.
@@ -441,7 +447,11 @@ class Model(ABC):

  if any(msg.images or msg.videos or msg.audio or msg.files for msg in function_call_results):
  # Handle function call media
- self._handle_function_call_media(messages=messages, function_call_results=function_call_results)
+ self._handle_function_call_media(
+ messages=messages,
+ function_call_results=function_call_results,
+ send_media_to_model=send_media_to_model,
+ )

  for function_call_result in function_call_results:
  function_call_result.log(metrics=True)
@@ -689,6 +699,7 @@ class Model(ABC):
  tool_call_limit: Optional[int] = None,
  stream_model_response: bool = True,
  run_response: Optional[RunOutput] = None,
+ send_media_to_model: bool = True,
  ) -> Iterator[Union[ModelResponse, RunOutputEvent, TeamRunOutputEvent]]:
  """
  Generate a streaming response from the model.
@@ -778,7 +789,11 @@ class Model(ABC):

  # Handle function call media
  if any(msg.images or msg.videos or msg.audio for msg in function_call_results):
- self._handle_function_call_media(messages=messages, function_call_results=function_call_results)
+ self._handle_function_call_media(
+ messages=messages,
+ function_call_results=function_call_results,
+ send_media_to_model=send_media_to_model,
+ )

  for function_call_result in function_call_results:
  function_call_result.log(metrics=True)
@@ -848,6 +863,7 @@ class Model(ABC):
  tool_call_limit: Optional[int] = None,
  stream_model_response: bool = True,
  run_response: Optional[RunOutput] = None,
+ send_media_to_model: bool = True,
  ) -> AsyncIterator[Union[ModelResponse, RunOutputEvent, TeamRunOutputEvent]]:
  """
  Generate an asynchronous streaming response from the model.
@@ -937,7 +953,11 @@ class Model(ABC):

  # Handle function call media
  if any(msg.images or msg.videos or msg.audio for msg in function_call_results):
- self._handle_function_call_media(messages=messages, function_call_results=function_call_results)
+ self._handle_function_call_media(
+ messages=messages,
+ function_call_results=function_call_results,
+ send_media_to_model=send_media_to_model,
+ )

  for function_call_result in function_call_results:
  function_call_result.log(metrics=True)
@@ -1041,7 +1061,13 @@ class Model(ABC):
  if model_response_delta.extra is not None:
  if stream_data.extra is None:
  stream_data.extra = {}
- stream_data.extra.update(model_response_delta.extra)
+ for key in model_response_delta.extra:
+ if isinstance(model_response_delta.extra[key], list):
+ if not stream_data.extra.get(key):
+ stream_data.extra[key] = []
+ stream_data.extra[key].extend(model_response_delta.extra[key])
+ else:
+ stream_data.extra[key] = model_response_delta.extra[key]

  if should_yield:
  yield model_response_delta
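Note: the streaming accumulator now merges model_response_delta.extra per key, extending list values across chunks instead of overwriting the whole dict. A self-contained sketch of that merge behaviour using plain dicts (names are illustrative):

    def merge_extra(accumulated: dict, delta: dict) -> dict:
        # List values accumulate across chunks; other values keep last-write-wins
        for key, value in delta.items():
            if isinstance(value, list):
                accumulated.setdefault(key, [])
                accumulated[key].extend(value)
            else:
                accumulated[key] = value
        return accumulated

    extra: dict = {}
    merge_extra(extra, {"search_results": ["a"], "status": "partial"})
    merge_extra(extra, {"search_results": ["b"], "status": "done"})
    assert extra == {"search_results": ["a", "b"], "status": "done"}
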
@@ -1708,7 +1734,9 @@ class Model(ABC):
  if len(function_call_results) > 0:
  messages.extend(function_call_results)

- def _handle_function_call_media(self, messages: List[Message], function_call_results: List[Message]) -> None:
+ def _handle_function_call_media(
+ self, messages: List[Message], function_call_results: List[Message], send_media_to_model: bool = True
+ ) -> None:
  """
  Handle media artifacts from function calls by adding follow-up user messages for generated media if needed.
  """
@@ -1739,9 +1767,10 @@ class Model(ABC):
  all_files.extend(result_message.files)
  result_message.files = None

- # If we have media artifacts, add a follow-up "user" message instead of a "tool"
- # message with the media artifacts which throws error for some models
- if all_images or all_videos or all_audio or all_files:
+ # Only add media message if we should send media to model
+ if send_media_to_model and (all_images or all_videos or all_audio or all_files):
+ # If we have media artifacts, add a follow-up "user" message instead of a "tool"
+ # message with the media artifacts which throws error for some models
  media_message = Message(
  role="user",
  content="Take note of the following content",
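Note: together with the agent.py and team.py hunks, this threads a send_media_to_model flag from the agent level down to the model's function-call handling, so media produced by tools can stay on the run output without being forwarded back to the model. A hedged usage sketch, assuming the flag is exposed as an Agent constructor argument (the hunks only show self.send_media_to_model):

    from agno.agent import Agent
    from agno.models.openai import OpenAIChat

    # Tool-generated images/files stay attached to the RunOutput but are not
    # re-sent to the model in a follow-up "user" message.
    agent = Agent(
        model=OpenAIChat(id="gpt-4o"),
        send_media_to_model=False,  # assumed keyword backing self.send_media_to_model
    )
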
agno/models/llama_cpp/__init__.py ADDED
@@ -0,0 +1,5 @@
+ from agno.models.llama_cpp.llama_cpp import LlamaCpp
+
+ __all__ = [
+ "LlamaCpp",
+ ]
agno/models/llama_cpp/llama_cpp.py ADDED
@@ -0,0 +1,22 @@
+ from dataclasses import dataclass
+
+ from agno.models.openai.like import OpenAILike
+
+
+ @dataclass
+ class LlamaCpp(OpenAILike):
+ """
+ A class for interacting with LLMs using Llama CPP.
+
+ Attributes:
+ id (str): The id of the Llama CPP model. Default is "ggml-org/gpt-oss-20b-GGUF".
+ name (str): The name of this chat model instance. Default is "LlamaCpp".
+ provider (str): The provider of the model. Default is "LlamaCpp".
+ base_url (str): The base url to which the requests are sent.
+ """
+
+ id: str = "ggml-org/gpt-oss-20b-GGUF"
+ name: str = "LlamaCpp"
+ provider: str = "LlamaCpp"
+
+ base_url: str = "http://127.0.0.1:8080/v1"
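Note: LlamaCpp is a thin OpenAILike wrapper pointed at a local llama.cpp server's OpenAI-compatible endpoint. A minimal sketch, assuming a llama-server instance is already listening on the default base_url:

    from agno.agent import Agent
    from agno.models.llama_cpp import LlamaCpp

    # Assumes a local llama.cpp server (llama-server) is serving the model at base_url
    agent = Agent(model=LlamaCpp(id="ggml-org/gpt-oss-20b-GGUF", base_url="http://127.0.0.1:8080/v1"))
    agent.print_response("Say hello from llama.cpp")
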
agno/models/nexus/__init__.py CHANGED
@@ -1,3 +1,3 @@
  from agno.models.nexus.nexus import Nexus

- __all__ = ["Nexus"]
+ __all__ = ["Nexus"]
agno/models/nexus/nexus.py CHANGED
@@ -6,13 +6,12 @@ from agno.models.openai.like import OpenAILike
  @dataclass
  class Nexus(OpenAILike):
  """
- A class for interacting with Nvidia models.
+ A class for interacting with LLMs using Nexus.

  Attributes:
- id (str): The id of the Nexus model to use. Default is "nvidia/llama-3.1-nemotron-70b-instruct".
+ id (str): The id of the Nexus model to use. Default is "openai/gpt-4".
  name (str): The name of this chat model instance. Default is "Nexus"
  provider (str): The provider of the model. Default is "Nexus".
- api_key (str): The api key to authorize request to Nexus.
  base_url (str): The base url to which the requests are sent.
  """

@@ -21,5 +20,3 @@ class Nexus(OpenAILike):
  provider: str = "Nexus"

  base_url: str = "http://localhost:8000/llm/v1/"
-
- supports_native_structured_outputs: bool = False
agno/os/app.py CHANGED
@@ -9,7 +9,6 @@ from fastapi.responses import JSONResponse
  from fastapi.routing import APIRoute
  from rich import box
  from rich.panel import Panel
- from starlette.middleware.cors import CORSMiddleware
  from starlette.requests import Request

  from agno.agent.agent import Agent
@@ -37,6 +36,7 @@ from agno.os.routers.memory import get_memory_router
  from agno.os.routers.metrics import get_metrics_router
  from agno.os.routers.session import get_session_router
  from agno.os.settings import AgnoAPISettings
+ from agno.os.utils import update_cors_middleware
  from agno.team.team import Team
  from agno.utils.log import logger
  from agno.utils.string import generate_id, generate_id_from_name
@@ -286,14 +286,8 @@ class AgentOS:

  self.fastapi_app.middleware("http")(general_exception_handler)

- self.fastapi_app.add_middleware(
- CORSMiddleware,
- allow_origins=self.settings.cors_origin_list, # type: ignore
- allow_credentials=True,
- allow_methods=["*"],
- allow_headers=["*"],
- expose_headers=["*"],
- )
+ # Update CORS middleware
+ update_cors_middleware(self.fastapi_app, self.settings.cors_origin_list) # type: ignore

  return self.fastapi_app

@@ -368,7 +362,7 @@ class AgentOS:
  for route in self.fastapi_app.routes:
  for conflict in conflicts:
  if isinstance(route, APIRoute):
- if route.path == conflict["path"] and list(route.methods) == list(conflict["methods"]):
+ if route.path == conflict["path"] and list(route.methods) == list(conflict["methods"]): # type: ignore
  self.fastapi_app.routes.pop(self.fastapi_app.routes.index(route))

  self.fastapi_app.include_router(router)
agno/os/router.py CHANGED
@@ -731,10 +731,9 @@ def get_base_router(
  ]:
  # Process document files
  try:
- file_content = await file.read()
- input_files.append(
- FileMedia(content=file_content, filename=file.filename, mime_type=file.content_type)
- )
+ input_file = process_document(file)
+ if input_file is not None:
+ input_files.append(input_file)
  except Exception as e:
  log_error(f"Error processing file {file.filename}: {e}")
  continue
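Note: uploads now go through process_document (see agno/os/utils.py below), and the agno/media.py hunk above adds the Word MIME type to File's accepted list. A hedged sketch constructing a File directly from bytes, assuming the content/mime_type keywords that the upload helpers use:

    from agno.media import File

    with open("notes.docx", "rb") as fh:
        docx = File(
            content=fh.read(),
            mime_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        )
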
agno/os/routers/evals/evals.py CHANGED
@@ -380,7 +380,7 @@ def parse_eval_types_filter(
  eval_types: Optional[str] = Query(
  default=None,
  description="Comma-separated eval types (accuracy,performance,reliability)",
- example="accuracy,performance",
+ examples=["accuracy,performance"],
  ),
  ) -> Optional[List[EvalType]]:
  """Parse comma-separated eval types into EvalType enums for filtering evaluation runs."""
agno/os/routers/memory/memory.py CHANGED
@@ -396,7 +396,7 @@ def parse_topics(
  topics: Optional[List[str]] = Query(
  default=None,
  description="Comma-separated list of topics to filter by",
- example=["preferences,technical,communication_style"],
+ examples=["preferences,technical,communication_style"],
  ),
  ) -> Optional[List[str]]:
  """Parse comma-separated topics into a list for filtering memories by topic."""
agno/os/schema.py CHANGED
@@ -461,11 +461,8 @@ class TeamResponse(BaseModel):
  "stream_member_events": False,
  }

- if team.model is None:
- raise ValueError("Team model is required")
-
  team.determine_tools_for_model(
- model=team.model,
+ model=team.model, # type: ignore
  session=TeamSession(session_id=str(uuid4()), session_data={}),
  run_response=TeamRunOutput(run_id=str(uuid4())),
  async_mode=True,
@@ -763,6 +760,7 @@ class TeamSessionDetailSchema(BaseModel):
  session_state: Optional[dict]
  metrics: Optional[dict]
  team_data: Optional[dict]
+ chat_history: Optional[List[dict]]
  created_at: Optional[datetime]
  updated_at: Optional[datetime]
  total_tokens: Optional[int]
@@ -784,6 +782,7 @@ class TeamSessionDetailSchema(BaseModel):
  if session.session_data
  else None,
  metrics=session.session_data.get("session_metrics", {}) if session.session_data else None,
+ chat_history=[message.to_dict() for message in session.get_chat_history()],
  created_at=datetime.fromtimestamp(session.created_at, tz=timezone.utc) if session.created_at else None,
  updated_at=datetime.fromtimestamp(session.updated_at, tz=timezone.utc) if session.updated_at else None,
  )
agno/os/utils.py CHANGED
@@ -1,6 +1,7 @@
  from typing import Any, Callable, Dict, List, Optional, Union

- from fastapi import HTTPException, UploadFile
+ from fastapi import FastAPI, HTTPException, UploadFile
+ from starlette.middleware.cors import CORSMiddleware

  from agno.agent.agent import Agent
  from agno.db.base import BaseDb
@@ -109,27 +110,21 @@ def process_image(file: UploadFile) -> Image:
  content = file.file.read()
  if not content:
  raise HTTPException(status_code=400, detail="Empty file")
- return Image(content=content)
+ return Image(content=content, format=extract_format(file), mime_type=file.content_type)


  def process_audio(file: UploadFile) -> Audio:
  content = file.file.read()
  if not content:
  raise HTTPException(status_code=400, detail="Empty file")
- format = None
- if file.filename and "." in file.filename:
- format = file.filename.split(".")[-1].lower()
- elif file.content_type:
- format = file.content_type.split("/")[-1]
-
- return Audio(content=content, format=format)
+ return Audio(content=content, format=extract_format(file), mime_type=file.content_type)


  def process_video(file: UploadFile) -> Video:
  content = file.file.read()
  if not content:
  raise HTTPException(status_code=400, detail="Empty file")
- return Video(content=content, format=file.content_type)
+ return Video(content=content, format=extract_format(file), mime_type=file.content_type)


  def process_document(file: UploadFile) -> Optional[FileMedia]:
@@ -137,13 +132,23 @@ def process_document(file: UploadFile) -> Optional[FileMedia]:
  content = file.file.read()
  if not content:
  raise HTTPException(status_code=400, detail="Empty file")
-
- return FileMedia(content=content, filename=file.filename, mime_type=file.content_type)
+ return FileMedia(
+ content=content, filename=file.filename, format=extract_format(file), mime_type=file.content_type
+ )
  except Exception as e:
  logger.error(f"Error processing document {file.filename}: {e}")
  return None


+ def extract_format(file: UploadFile):
+ format = None
+ if file.filename and "." in file.filename:
+ format = file.filename.split(".")[-1].lower()
+ elif file.content_type:
+ format = file.content_type.split("/")[-1]
+ return format
+
+
  def format_tools(agent_tools: List[Union[Dict[str, Any], Toolkit, Function, Callable]]):
  formatted_tools = []
  if agent_tools is not None:
@@ -260,3 +265,33 @@ def _generate_schema_from_params(params: Dict[str, Any]) -> Dict[str, Any]:
  schema["required"] = required

  return schema
+
+
+ def update_cors_middleware(app: FastAPI, new_origins: list):
+ existing_origins: List[str] = []
+
+ # TODO: Allow more options where CORS is properly merged and user can disable this behaviour
+
+ # Extract existing origins from current CORS middleware
+ for middleware in app.user_middleware:
+ if middleware.cls == CORSMiddleware:
+ if hasattr(middleware, "kwargs"):
+ existing_origins = middleware.kwargs.get("allow_origins", [])
+ break
+ # Merge origins
+ merged_origins = list(set(new_origins + existing_origins))
+ final_origins = [origin for origin in merged_origins if origin != "*"]
+
+ # Remove existing CORS
+ app.user_middleware = [m for m in app.user_middleware if m.cls != CORSMiddleware]
+ app.middleware_stack = None
+
+ # Add updated CORS
+ app.add_middleware(
+ CORSMiddleware,
+ allow_origins=final_origins,
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+ expose_headers=["*"],
+ )
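Note: update_cors_middleware replaces whatever CORSMiddleware is already registered on the app, merging its allow_origins with the new list and dropping any "*" entries. A hedged sketch of calling it on a FastAPI app that already carries a custom CORS setup (the origins are placeholders):

    from fastapi import FastAPI
    from starlette.middleware.cors import CORSMiddleware

    from agno.os.utils import update_cors_middleware

    app = FastAPI()
    app.add_middleware(CORSMiddleware, allow_origins=["https://my-frontend.example"], allow_credentials=True)

    # Existing origins are merged with the new ones; "*" entries are filtered out
    update_cors_middleware(app, ["http://localhost:3000"])
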
agno/run/agent.py CHANGED
@@ -96,6 +96,7 @@ class RunContentEvent(BaseAgentRunEvent):
  content: Optional[Any] = None
  content_type: str = "str"
  reasoning_content: Optional[str] = None
+ model_provider_data: Optional[Dict[str, Any]] = None
  citations: Optional[Citations] = None
  response_audio: Optional[Audio] = None # Model audio response
  image: Optional[Image] = None # Image attached to the response
@@ -119,6 +120,7 @@ class RunCompletedEvent(BaseAgentRunEvent):
  content_type: str = "str"
  reasoning_content: Optional[str] = None
  citations: Optional[Citations] = None
+ model_provider_data: Optional[Dict[str, Any]] = None
  images: Optional[List[Image]] = None # Images attached to the response
  videos: Optional[List[Video]] = None # Videos attached to the response
  audio: Optional[List[Audio]] = None # Audio attached to the response
@@ -383,6 +385,8 @@ class RunOutput:
  reasoning_steps: Optional[List[ReasoningStep]] = None
  reasoning_messages: Optional[List[Message]] = None

+ model_provider_data: Optional[Dict[str, Any]] = None
+
  model: Optional[str] = None
  model_provider: Optional[str] = None
  messages: Optional[List[Message]] = None
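Note: RunOutput and the content/completed events now carry model_provider_data, which the agent and team hunks above populate from ModelResponse.provider_data. A hedged read-side sketch, assuming Agent.run returns a RunOutput:

    from agno.agent import Agent
    from agno.models.openai import OpenAIChat

    agent = Agent(model=OpenAIChat(id="gpt-4o"))
    run_output = agent.run("Summarize the 2.0.7 changes")

    # Provider-specific payload propagated from ModelResponse.provider_data, or None
    print(run_output.model_provider_data)
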
agno/run/team.py CHANGED
@@ -98,6 +98,7 @@ class RunContentEvent(BaseTeamRunEvent):
  content: Optional[Any] = None
  content_type: str = "str"
  reasoning_content: Optional[str] = None
+ model_provider_data: Optional[Dict[str, Any]] = None
  citations: Optional[Citations] = None
  response_audio: Optional[Audio] = None # Model audio response
  image: Optional[Image] = None # Image attached to the response
@@ -121,6 +122,7 @@ class RunCompletedEvent(BaseTeamRunEvent):
  content_type: str = "str"
  reasoning_content: Optional[str] = None
  citations: Optional[Citations] = None
+ model_provider_data: Optional[Dict[str, Any]] = None
  images: Optional[List[Image]] = None # Images attached to the response
  videos: Optional[List[Video]] = None # Videos attached to the response
  audio: Optional[List[Audio]] = None # Audio attached to the response
@@ -382,7 +384,7 @@ class TeamRunOutput:
  reasoning_content: Optional[str] = None

  citations: Optional[Citations] = None
-
+ model_provider_data: Optional[Dict[str, Any]] = None
  metadata: Optional[Dict[str, Any]] = None

  references: Optional[List[MessageReferences]] = None
agno/team/team.py CHANGED
@@ -829,6 +829,7 @@ class Team:
  functions=self._functions_for_model,
  tool_choice=self.tool_choice,
  tool_call_limit=self.tool_call_limit,
+ send_media_to_model=self.send_media_to_model,
  )

  # Check for cancellation after model call
@@ -1415,6 +1416,7 @@ class Team:
  tool_choice=self.tool_choice,
  tool_call_limit=self.tool_call_limit,
  response_format=response_format,
+ send_media_to_model=self.send_media_to_model,
  ) # type: ignore

  # Check for cancellation after model call
@@ -1997,7 +1999,9 @@ class Team:
  run_response.reasoning_content = model_response.reasoning_content
  else:
  run_response.reasoning_content += model_response.reasoning_content
-
+ # Update provider data
+ if model_response.provider_data is not None:
+ run_response.model_provider_data = model_response.provider_data
  # Update citations
  if model_response.citations is not None:
  run_response.citations = model_response.citations
@@ -2061,6 +2065,7 @@ class Team:
  tool_choice=self.tool_choice,
  tool_call_limit=self.tool_call_limit,
  stream_model_response=stream_model_response,
+ send_media_to_model=self.send_media_to_model,
  ):
  yield from self._handle_model_response_chunk(
  session=session,
@@ -2083,6 +2088,8 @@ class Team:
  run_response.response_audio = full_model_response.audio
  if full_model_response.citations is not None:
  run_response.citations = full_model_response.citations
+ if full_model_response.provider_data is not None:
+ run_response.model_provider_data = full_model_response.provider_data

  if stream_intermediate_steps and reasoning_state["reasoning_started"]:
  all_reasoning_steps: List[ReasoningStep] = []
@@ -2141,6 +2148,7 @@ class Team:
  tool_choice=self.tool_choice,
  tool_call_limit=self.tool_call_limit,
  stream_model_response=stream_model_response,
+ send_media_to_model=self.send_media_to_model,
  ) # type: ignore
  async for model_response_event in model_stream:
  for event in self._handle_model_response_chunk(
@@ -2170,6 +2178,8 @@ class Team:
  run_response.response_audio = full_model_response.audio
  if full_model_response.citations is not None:
  run_response.citations = full_model_response.citations
+ if full_model_response.provider_data is not None:
+ run_response.model_provider_data = full_model_response.provider_data

  # Build a list of messages that should be added to the RunOutput
  messages_for_run_response = [m for m in run_messages.messages if m.add_to_agent_memory]
@@ -2309,6 +2319,7 @@ class Team:
  redacted_reasoning_content=model_response_event.redacted_reasoning_content,
  response_audio=full_model_response.audio,
  citations=model_response_event.citations,
+ model_provider_data=model_response_event.provider_data,
  image=model_response_event.images[-1] if model_response_event.images else None,
  ),
  run_response,
@@ -5079,20 +5090,19 @@ class Team:
  import json

  history: List[Dict[str, Any]] = []
- if session is not None:
- all_chats = self.get_messages_for_session(session_id=session.session_id)

- if len(all_chats) == 0:
- return ""
+ all_chats = session.get_messages_from_last_n_runs(
+ team_id=self.id,
+ )

- for chat in all_chats[::-1]: # type: ignore
- history.insert(0, chat.to_dict()) # type: ignore
+ if len(all_chats) == 0:
+ return ""

- if num_chats is not None:
- history = history[:num_chats]
+ for chat in all_chats[::-1]: # type: ignore
+ history.insert(0, chat.to_dict()) # type: ignore

- else:
- return ""
+ if num_chats is not None:
+ history = history[:num_chats]

  return json.dumps(history)

agno/tools/decorator.py CHANGED
@@ -250,8 +250,10 @@ def tool(*args, **kwargs) -> Union[Function, Callable[[F], Function]]:
  if kwargs.get("stop_after_tool_call") is True:
  if "show_result" not in kwargs or kwargs.get("show_result") is None:
  tool_config["show_result"] = True
-
- return Function(**tool_config)
+ function = Function(**tool_config)
+ # Determine parameters for the function
+ function.process_entrypoint()
+ return function

  # Handle both @tool and @tool() cases
  if len(args) == 1 and callable(args[0]) and not kwargs:
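Note: the decorator now calls Function.process_entrypoint() eagerly, so the wrapped function's parameter schema is resolved at decoration time rather than lazily. A hedged sketch of a decorated tool; the final inspection assumes Function exposes the schema that process_entrypoint builds:

    from agno.tools import tool

    @tool(stop_after_tool_call=True)
    def get_weather(city: str) -> str:
        """Return a canned weather report for the given city."""
        return f"It is always sunny in {city}."

    # The parameter schema is available as soon as the decorator has run
    print(get_weather.parameters)  # assumed attribute populated by process_entrypoint()
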
agno/tools/mcp.py CHANGED
@@ -102,7 +102,7 @@ class MCPTools(Toolkit):
  transport: Literal["stdio", "sse", "streamable-http"] = "stdio",
  server_params: Optional[Union[StdioServerParameters, SSEClientParams, StreamableHTTPClientParams]] = None,
  session: Optional[ClientSession] = None,
- timeout_seconds: int = 5,
+ timeout_seconds: int = 10,
  client=None,
  include_tools: Optional[list[str]] = None,
  exclude_tools: Optional[list[str]] = None,
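Note: the default MCP client timeout doubles from 5 to 10 seconds and can still be overridden. A hedged sketch; the command keyword for a stdio server is an assumption from typical MCPTools usage, not shown in this hunk:

    from agno.tools.mcp import MCPTools

    # Give a slow-to-start stdio MCP server more time than the new 10-second default
    mcp_tools = MCPTools(command="uvx mcp-server-git", timeout_seconds=30)
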
agno/tools/memori.py CHANGED
@@ -1,7 +1,6 @@
  import json
  from typing import Any, Dict, List, Optional

- from agno.agent import Agent
  from agno.tools.toolkit import Toolkit
  from agno.utils.log import log_debug, log_error, log_info, log_warning

@@ -122,7 +121,6 @@ class MemoriTools(Toolkit):

  def search_memory(
  self,
- agent: Agent,
  query: str,
  limit: Optional[int] = None,
  ) -> str:
@@ -180,7 +178,7 @@ class MemoriTools(Toolkit):
  log_error(f"Error searching memory: {e}")
  return json.dumps({"success": False, "error": f"Memory search error: {str(e)}"})

- def record_conversation(self, agent: Agent, content: str) -> str:
+ def record_conversation(self, content: str) -> str:
  """
  Add important information or facts to memory.

@@ -222,7 +220,6 @@ class MemoriTools(Toolkit):

  def get_memory_stats(
  self,
- agent: Agent,
  ) -> str:
  """
  Get statistics about the memory system.
@@ -340,52 +337,3 @@ class MemoriTools(Toolkit):
  except Exception as e:
  log_error(f"Failed to disable memory system: {e}")
  return False
-
-
- def create_memori_search_tool(memori_toolkit: MemoriTools):
- """
- Create a standalone memory search function for use with Agno agents.
-
- This is a convenience function that creates a memory search tool similar
- to the pattern shown in the Memori example code.
-
- Args:
- memori_toolkit: An initialized MemoriTools instance
-
- Returns:
- Callable: A memory search function that can be used as an agent tool
-
- Example:
- ```python
- memori_tools = MemoriTools(database_connect="sqlite:///memory.db")
- search_tool = create_memori_search_tool(memori_tools)
-
- agent = Agent(
- model=OpenAIChat(),
- tools=[search_tool],
- description="Agent with memory search capability"
- )
- ```
- """
-
- def search_memory(query: str) -> str:
- """
- Search the agent's memory for past conversations and information.
-
- Args:
- query: What to search for in memory
-
- Returns:
- str: Search results or error message
- """
- try:
- if not query.strip():
- return "Please provide a search query"
-
- result = memori_toolkit._memory_tool.execute(query=query.strip())
- return str(result) if result else "No relevant memories found"
-
- except Exception as e:
- return f"Memory search error: {str(e)}"
-
- return search_memory
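Note: with the Agent parameter dropped from the toolkit methods and the standalone create_memori_search_tool helper removed, the toolkit itself is what gets handed to an agent. A hedged sketch using the constructor shown in the removed docstring:

    from agno.agent import Agent
    from agno.models.openai import OpenAIChat
    from agno.tools.memori import MemoriTools

    memori_tools = MemoriTools(database_connect="sqlite:///memory.db")
    agent = Agent(model=OpenAIChat(id="gpt-4o"), tools=[memori_tools])

    # Direct calls no longer take an Agent argument
    print(memori_tools.search_memory(query="user preferences", limit=3))
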
agno/utils/events.py CHANGED
@@ -1,4 +1,4 @@
- from typing import Any, List, Optional
+ from typing import Any, Dict, List, Optional

  from agno.media import Audio, Image
  from agno.models.message import Citations
@@ -76,6 +76,7 @@ def create_team_run_completed_event(from_run_response: TeamRunOutput) -> TeamRun
  content_type=from_run_response.content_type, # type: ignore
  reasoning_content=from_run_response.reasoning_content, # type: ignore
  citations=from_run_response.citations, # type: ignore
+ model_provider_data=from_run_response.model_provider_data, # type: ignore
  images=from_run_response.images, # type: ignore
  videos=from_run_response.videos, # type: ignore
  audio=from_run_response.audio, # type: ignore
@@ -100,6 +101,7 @@ def create_run_completed_event(from_run_response: RunOutput) -> RunCompletedEven
  content_type=from_run_response.content_type, # type: ignore
  reasoning_content=from_run_response.reasoning_content, # type: ignore
  citations=from_run_response.citations, # type: ignore
+ model_provider_data=from_run_response.model_provider_data, # type: ignore
  images=from_run_response.images, # type: ignore
  videos=from_run_response.videos, # type: ignore
  audio=from_run_response.audio, # type: ignore
@@ -343,6 +345,7 @@ def create_run_output_content_event(
  content_type: Optional[str] = None,
  reasoning_content: Optional[str] = None,
  redacted_reasoning_content: Optional[str] = None,
+ model_provider_data: Optional[Dict[str, Any]] = None,
  citations: Optional[Citations] = None,
  response_audio: Optional[Audio] = None,
  image: Optional[Image] = None,
@@ -364,6 +367,7 @@ def create_run_output_content_event(
  additional_input=from_run_response.additional_input,
  reasoning_steps=from_run_response.reasoning_steps,
  reasoning_messages=from_run_response.reasoning_messages,
+ model_provider_data=model_provider_data,
  )


@@ -374,6 +378,7 @@ def create_team_run_output_content_event(
  reasoning_content: Optional[str] = None,
  redacted_reasoning_content: Optional[str] = None,
  citations: Optional[Citations] = None,
+ model_provider_data: Optional[Dict[str, Any]] = None,
  response_audio: Optional[Audio] = None,
  image: Optional[Image] = None,
  ) -> TeamRunContentEvent:
@@ -388,6 +393,7 @@ def create_team_run_output_content_event(
  content_type=content_type or "str",
  reasoning_content=thinking_combined,
  citations=citations,
+ model_provider_data=model_provider_data,
  response_audio=response_audio,
  image=image,
  references=from_run_response.references, # type: ignore
agno/workflow/workflow.py CHANGED
@@ -2371,6 +2371,34 @@ class Workflow:
  """Convert workflow to dictionary representation"""

  def serialize_step(step):
+ # Handle callable functions (not wrapped in Step objects)
+ if callable(step) and hasattr(step, "__name__"):
+ step_dict = {
+ "name": step.__name__,
+ "description": "User-defined callable step",
+ "type": StepType.STEP.value,
+ }
+ return step_dict
+
+ # Handle Agent and Team objects directly
+ if isinstance(step, Agent):
+ step_dict = {
+ "name": step.name or "unnamed_agent",
+ "description": step.description or "Agent step",
+ "type": StepType.STEP.value,
+ "agent": step,
+ }
+ return step_dict
+
+ if isinstance(step, Team):
+ step_dict = {
+ "name": step.name or "unnamed_team",
+ "description": step.description or "Team step",
+ "type": StepType.STEP.value,
+ "team": step,
+ }
+ return step_dict
+
  step_dict = {
  "name": step.name if hasattr(step, "name") else f"unnamed_{type(step).__name__.lower()}",
  "description": step.description if hasattr(step, "description") else "User-defined callable step",
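Note: serialize_step now handles bare callables, Agent instances, and Team instances passed directly as workflow steps, not just Step objects. A hedged sketch of serializing such a workflow; the Workflow constructor keywords and import path are assumptions from the v2 workflow API, not shown in this hunk:

    from agno.agent import Agent
    from agno.models.openai import OpenAIChat
    from agno.workflow import Workflow

    def normalize_input(message: str) -> str:
        # A user-defined callable step; to_dict() now records it by __name__
        return message.strip().lower()

    researcher = Agent(name="Researcher", model=OpenAIChat(id="gpt-4o"))

    workflow = Workflow(name="research", steps=[normalize_input, researcher])
    print(workflow.to_dict())
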
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agno
- Version: 2.0.6
+ Version: 2.0.7
  Summary: Agno: a lightweight library for building Multi-Agent Systems
  Author-email: Ashpreet Bedi <ashpreet@agno.com>
  Project-URL: homepage, https://agno.com
@@ -1,10 +1,10 @@
  agno/__init__.py,sha256=fTmeyAdl4Mc1Y7_y_sACZTzXrc2Ymn8nMaFlgaawFvo,183
  agno/debug.py,sha256=zzYxYwfF5AfHgQ6JU7oCmPK4yc97Y5xxOb5fiezq8nA,449
  agno/exceptions.py,sha256=HWuuNFS5J0l1RYJsdUrSx51M22aFEoh9ltoeonXBoBw,2891
- agno/media.py,sha256=ykxk2Z8rD3QXZWzkKT97t4WqzfSUfIiIkWuif_dFlXs,16381
+ agno/media.py,sha256=eTfYb_pwhX_PCIVPSrW4VYRqmoxKABEF1aZClrVvQ30,16500
  agno/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agno/agent/__init__.py,sha256=s7S3FgsjZxuaabzi8L5n4aSH8IZAiZ7XaNNcySGR-EQ,1051
- agno/agent/agent.py,sha256=FuAhWG3ryCUzqGq-3jHIOLs0s8Kmfa4ZLW68Aa21ST0,327133
+ agno/agent/agent.py,sha256=1-GmMYKCDv4lxUzyr2IKS1nvVzQQix30M03jzE5AM5w,327890
  agno/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agno/api/agent.py,sha256=fKlQ62E_C9Rjd7Zus3Gs3R1RG-IhzFV-ICpkb6SLqYc,932
  agno/api/api.py,sha256=Z7iWbrjheJcGLeeDYrtTCWiKTVqjH0uJI35UNWOtAXw,973
@@ -98,7 +98,7 @@ agno/knowledge/chunking/fixed.py,sha256=yZLlZHisCWQKszExPuyzTT2zyaZo-udxAAjJhOI4
  agno/knowledge/chunking/markdown.py,sha256=M95AddeedFbG71DVxC4pbCullfX4StwmdmA9FalVPwQ,6162
  agno/knowledge/chunking/recursive.py,sha256=0zlmgFz2VfWFMCnVdJqRVrB2CLlu8Gc6BA6oy4xh4d8,2357
  agno/knowledge/chunking/row.py,sha256=yFGKMsHd2Ml0fkJLksw8ULUpWXmbSXIQwnwlKHVPP40,1426
- agno/knowledge/chunking/semantic.py,sha256=WkczmUYbCmt_6gTrKdFTEo4j3oJw77t-nIQA47EoA4Q,2462
+ agno/knowledge/chunking/semantic.py,sha256=r0N4SyqFaKYTQrLnrVA202Nfadz8uST-byvVaMoLRsc,3954
  agno/knowledge/chunking/strategy.py,sha256=_rjZd5VQVgCUyOSR_D7jGQpsM1yAXQMbG-6q1BzOhAw,4702
  agno/knowledge/document/__init__.py,sha256=vxMAu103larPlcpJFG3sBg-sCATf-LZZO_SlOwlEY5E,81
  agno/knowledge/document/base.py,sha256=kvoLSAxc8snrayo_-C6L3HxJVXwZiXd7Maq6VToLgfg,2087
@@ -146,7 +146,7 @@ agno/knowledge/reranker/sentence_transformer.py,sha256=ZN4SqnMZsUhg5G7AzlONM1_Uj
  agno/memory/__init__.py,sha256=XWKJU5SJObYZqEKMZ2XYwgH8-YeuWUoSRfT4dEI5HnY,101
  agno/memory/manager.py,sha256=eCgy4KLVM97x8ypqx1-0hKubbTcwEYrOlzI-rNOinec,42219
  agno/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- agno/models/base.py,sha256=4Zl63GFdVDQDxHxJhKuQTBbo_qySzqrnCxmoPoIyNyA,79350
+ agno/models/base.py,sha256=Tmr4aoLOPph76825S8gdHTO2clUCLI5-nnnqyoP3mVc,80555
  agno/models/defaults.py,sha256=1_fe4-ZbNriE8BgqxVRVi4KGzEYxYKYsz4hn6CZNEEM,40
  agno/models/message.py,sha256=yHiNMsgadNEjAYAvtSJgktwsXqlRYC8N_DRShCzjYtc,18914
  agno/models/metrics.py,sha256=81IILXZwGmOTiWK003bi5mg4bM1f4LCWbwyamjFzp18,4500
@@ -190,6 +190,8 @@ agno/models/langdb/langdb.py,sha256=tTorf22oSTA632LzFor6UtJLqAOP6gsEKtNimNeehZI,
  agno/models/litellm/__init__.py,sha256=5e4yHqepF9-fOE0DMDIKnH6psFV1OcRgfAD5BaoVRgI,353
  agno/models/litellm/chat.py,sha256=6JHfOIluv3X73lmF24V2Ej56wjtEMqII6PqiadXKius,18448
  agno/models/litellm/litellm_openai.py,sha256=6TNf6nRbCibPWEyubDmSLnoKpAKcM31afiklAYbujyY,759
+ agno/models/llama_cpp/__init__.py,sha256=oxOZfqEcOdcB74VLwOTO6bPmXHHA88uaeJO-IkXgr8A,84
+ agno/models/llama_cpp/llama_cpp.py,sha256=tWIF0TQH-sK29xCrTe7oy0SjSC-FlOmwmPL1d5IVMPM,673
  agno/models/lmstudio/__init__.py,sha256=3GPW_YrtFalcpsyoHSFKCre9fYcMHf3gvNcMLerVOZg,82
  agno/models/lmstudio/lmstudio.py,sha256=E7pmyOcrYUzYr3IhgptL9_CnmI_clftnP4Erw6ADdoQ,756
  agno/models/meta/__init__.py,sha256=Of02Sw_EzexIdap-GHuDEcvGTSUbho4Eh66jG7xzha8,347
@@ -199,8 +201,8 @@ agno/models/mistral/__init__.py,sha256=6CP9TDn8oRUjtGBk1McvSQHrjY935vB6msGPlXBhk
  agno/models/mistral/mistral.py,sha256=jY1ifrWMagRxCxuYTwCykBgWF_MIGPQ4lKG1ueQVJeI,16573
  agno/models/nebius/__init__.py,sha256=gW2yvxIfV2gxxOnBtTP8MCpI9AvMbIE6VTw-gY01Uvg,67
  agno/models/nebius/nebius.py,sha256=RxluPJYM4m_alKH9_PGs_oXbZcBpVwdwC4vksJPZ2Ys,1899
- agno/models/nexus/__init__.py,sha256=xrpZuPJfsoMn0fDgIAo635OO6wT4xxSOry_E41PPFEs,62
- agno/models/nexus/nexus.py,sha256=x0gln6_Yw9mnEClp4kpBuf4jLQMOrSh93hXXCFkOMyY,778
+ agno/models/nexus/__init__.py,sha256=q9pwjZ2KXpG1B3Cy8ujrj3_s0a_LI5SaekXJL6mh4gE,63
+ agno/models/nexus/nexus.py,sha256=rJcBQXR1aqUiLWMPBRuHIEh87wVrsqXup1hr_smanBQ,635
  agno/models/nvidia/__init__.py,sha256=O0g3_0_ciOz0AH4Y4CAL7YRfhdDPAvhDzNjJmgWKT78,74
  agno/models/nvidia/nvidia.py,sha256=0vpEq4cBY_5JdBzGoD95rBdro6dpVNAV5ioKz3lpnfs,948
  agno/models/ollama/__init__.py,sha256=TIhwxG7ek3eyfoKTLoZQXwdgzcIngYKjbjSlkf2gkWE,72
@@ -228,14 +230,14 @@ agno/models/vllm/vllm.py,sha256=anBt3gshctbmda-OZJ51tOgbHHemsrSbQLMDqB39HIA,2707
  agno/models/xai/__init__.py,sha256=ukcCxnCHxTtkJNA2bAMTX4MhCv1wJcbiq8ZIfYczIxs,55
  agno/models/xai/xai.py,sha256=OqqhPySt64hQ4MM_qYhdMPPjoLRNhExyC-01sN86hJw,4158
  agno/os/__init__.py,sha256=h8oQu7vhD5RZf09jkyM_Kt1Kdq_d5kFB9gJju8QPwcY,55
- agno/os/app.py,sha256=FuhU3tz2CoWER4cJ4NvFT0UgUE23LHolPUlE1EBSuZI,24636
+ agno/os/app.py,sha256=acPtlVPAlrdsg6L2QC73SpaaX68-hsWn9X0RH9WNx7Q,24457
  agno/os/auth.py,sha256=FyBtAKWtg-qSunCas5m5pK1dVEmikOSZvcCp5r25tTA,1844
  agno/os/config.py,sha256=u4R9yazQXIcKjR3QzEIZw_XAe_OHp3xn0ff7SVkj2jA,2893
  agno/os/mcp.py,sha256=5L0TZwInuCWM7TQ7MI_DjMWfVIIB0jxVCqaVEdR62UU,8114
- agno/os/router.py,sha256=XB5m_Cl1J_dkb7ToIOwGvOJ78g8emP5MhjiwqRDVMfQ,61765
- agno/os/schema.py,sha256=n1DseZbkqR_Fy1bFSoog17rljYj3RFLMxUM2r_6gwF4,38222
+ agno/os/router.py,sha256=LSWu6jCnDxo8JV02fFi9bhXEWh47PYJmWWYLAlOlfQM,61695
+ agno/os/schema.py,sha256=bdmGLQoBJyetiAZeajJG_gEKLaQSSEwiDwKy0q4Pa-M,38278
  agno/os/settings.py,sha256=Cn5_8lZI8Vx1UaUYqs9h6Qp4IMDFn4f3c35uppiaMy4,1343
- agno/os/utils.py,sha256=afswMrRwnaVVuwbWJ6kWoycrJIDqTl3pwbUBVRz3TX4,9168
+ agno/os/utils.py,sha256=mWTHc-czb95hsEoMZxo0CAUE29HDg6vx6GAWRzY1uWo,10498
  agno/os/interfaces/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agno/os/interfaces/base.py,sha256=NIqG5vCQDaKczcQZWRc9tK1L1v5_wD6haXTHFViMCpo,443
  agno/os/interfaces/agui/__init__.py,sha256=1zrGICk4roXUINwSFZfqH6sBsbHmD5KjGYVJMGg4fKQ,66
@@ -254,14 +256,14 @@ agno/os/routers/__init__.py,sha256=du4LO9aZwiY1t59VcV9M6wiAfftFFlUZc-YXsTGy9LI,9
  agno/os/routers/health.py,sha256=MpJrg488T0m8jU0mtoBNPiLhlR2r2gFihvyZwdQh_oc,725
  agno/os/routers/home.py,sha256=gbqBP2-G5_aFSdsXMdc77jtanI8XSRZ8QcxKjX8_qkw,1717
  agno/os/routers/evals/__init__.py,sha256=3s0M-Ftg5A3rFyRfTATs-0aNA6wcbj_5tCvtwH9gORQ,87
- agno/os/routers/evals/evals.py,sha256=zjsgoTCyKbj9clPtTyg3veFFkHNxMBPtcLGuy5Zlnck,16965
+ agno/os/routers/evals/evals.py,sha256=u2Nfa12qH-pYVxfT8EDtNchQkMmuRkqpHPP_l-RR2as,16968
  agno/os/routers/evals/schemas.py,sha256=3Ebm3IrpX22Hg3ZatMRkozgS4TfnMki4_UbqCNtQvJ4,4800
  agno/os/routers/evals/utils.py,sha256=ZdQd5Rz7rBFwMT6Z380cP1MvJG24I_lgTfaQItvva3g,5519
  agno/os/routers/knowledge/__init__.py,sha256=ZSqMQ8X7C_oYn8xt7NaYlriarWUpHgaWDyHXOWooMaU,105
  agno/os/routers/knowledge/knowledge.py,sha256=nCbVU2aCenIvpbmIIWjupgICO3GHbiD76q81P-lD0r8,38604
  agno/os/routers/knowledge/schemas.py,sha256=w8XZZsWCVNmd2s_rihq2PDcgXhF7H_yO7WHU_OgY6OU,4397
  agno/os/routers/memory/__init__.py,sha256=9hrYFc1dkbsLBqKfqyfioQeLX9TTbLrJx6lWDKNNWbc,93
- agno/os/routers/memory/memory.py,sha256=pi_3omyZ54usHNTVBvkoPwIXBgu1p-Q7s4jYD2wUYYM,16798
+ agno/os/routers/memory/memory.py,sha256=2LFrjedQVL0VeWZ6T3DV7mZMK3qmrm3jRUA6A_xM7ok,16799
  agno/os/routers/memory/schemas.py,sha256=y9w1LYjKd2vQJdQ3qEXyRUWz5IQCFZaVwWD_CE9Utpk,1650
  agno/os/routers/metrics/__init__.py,sha256=Uw6wWEikLpF5hHxBkHtFyaTuz7OUerGYWk0JW7teUGQ,97
  agno/os/routers/metrics/metrics.py,sha256=0cUsYWlknzTxeAUklswNlWOWl1m7F4NH-mfdv9ex_QY,8021
@@ -278,11 +280,11 @@ agno/reasoning/ollama.py,sha256=v-XbtBjjRUZHX9I1ZNZAAb-sv8W_a0hCgkhczz_W4HU,2564
  agno/reasoning/openai.py,sha256=RkXxZZpr1KykMiDLVdz36d3WKjwlGPucR5RmhU5asHM,3089
  agno/reasoning/step.py,sha256=6DaOb_0DJRz9Yh1w_mxcRaOSVzIQDrj3lQ6rzHLdIwA,1220
  agno/run/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- agno/run/agent.py,sha256=zeL8ie8fie6w0mrLI-b3w6_LKVQXxggBs9ApO9PofzA,22244
+ agno/run/agent.py,sha256=OmcTPaq466DURhZ2sBd6c4MtwR-7ZyOzwWdSBoVMrzM,22416
  agno/run/base.py,sha256=TvIGCos_uNrFpYe8F9j3qWE1YC4-_SW9rxnsFilJvCc,6911
  agno/run/cancel.py,sha256=yoSj3fnx8D7Gf-fSngVIgd3GOp3tRaDhHH_4QeHDoAk,2667
  agno/run/messages.py,sha256=rAC4CLW-xBA6qFS1BOvcjJ9j_qYf0a7sX1mcdY04zMU,1126
- agno/run/team.py,sha256=aMjtsQRXdOz74Q3j-TXNeJn3Fys3dAwoEU94z3QHWGE,22765
+ agno/run/team.py,sha256=J-ZsMIMe-xLMgjAWgMRiSREqn8gJ6uKPQsKLo3RUdNU,22935
  agno/run/workflow.py,sha256=C4RHtkT-WjEzx44qiRbzzL2po7-0FV37sA4A-7XheMs,22999
  agno/session/__init__.py,sha256=p6eqzWcLSHiMex2yZvkwv2yrFUNdGs21TGMS49xrEC4,376
  agno/session/agent.py,sha256=0o8QJRc6HclC7X5MjPTSqtRIwuyjJ9un4ck9FK8lBlA,9686
@@ -290,7 +292,7 @@ agno/session/summary.py,sha256=2a74rDzrOGkWjrFkHQ6gf1dsZBU0_G1lgDE7b3fPaQw,8405
  agno/session/team.py,sha256=ttb5MquVYEAtcXZNPttcBYgELu2cEwQRknveVyddr-Y,10250
  agno/session/workflow.py,sha256=8dWTona5jen1iPYwjcvxq1XG5EQDFnd28BEjcbqzl4s,5004
  agno/team/__init__.py,sha256=toHidBOo5M3n_TIVtIKHgcDbLL9HR-_U-YQYuIt_XtE,847
- agno/team/team.py,sha256=7DvXUxlY2Km_ePMX0fQE5MuJ4IukFlzeHn5LgqXhbfU,307154
+ agno/team/team.py,sha256=lUwsSb1B8lEsjgpkmcvk3ov9qkPagYfE9cQZatoBz-U,307828
  agno/tools/__init__.py,sha256=jNll2sELhPPbqm5nPeT4_uyzRO2_KRTW-8Or60kioS0,210
  agno/tools/agentql.py,sha256=S82Z9aTNr-E5wnA4fbFs76COljJtiQIjf2grjz3CkHU,4104
  agno/tools/airflow.py,sha256=uf2rOzZpSU64l_qRJ5Raku-R3Gky-uewmYkh6W0-oxg,2610
@@ -314,7 +316,7 @@ agno/tools/crawl4ai.py,sha256=xGt_Jo1vvtXlC28Kg5m-UM_YctY79lOSa1QDqzdC4j8,6509
  agno/tools/csv_toolkit.py,sha256=1_G0cZJw1LBdXV0DZdHz-gzlQJCRocv9urU4g2WxBhw,7573
  agno/tools/dalle.py,sha256=GlaeF_Rgq9FbXEjb1TwUF-gnw60Uv9vF9OGozexg2n8,4231
  agno/tools/daytona.py,sha256=I03Ddgvh6SJ5h5shpxw0lnDKq7ltMsUJK0kmnT2b0Mo,18446
- agno/tools/decorator.py,sha256=iIw08YEoRCiijixMLeFf646QpsE2sA4IZn1qljkvcKU,9615
+ agno/tools/decorator.py,sha256=m49Gt-J4tpc-gjhwnMOdw4qvgYu2j4qGCrrZ91QP7ug,9728
  agno/tools/desi_vocal.py,sha256=dTeIaD7pabaZxPG9IxtQhaVpC6-A3-4hX4xIt5C7kA4,3661
  agno/tools/discord.py,sha256=KkqArWVMtwpqOC-fQ7MxHAOAL9-G0CqlXUWgjQNgvOA,5819
  agno/tools/docker.py,sha256=pk1OXW3Pk318gMoFjV2vXGA0HBc93DLs8eftOM7eRIQ,25837
@@ -347,9 +349,9 @@ agno/tools/linear.py,sha256=yA3Yci-Dnid0rZPeXds4aZY8hL7KjloZkES9thKEPe8,13775
  agno/tools/linkup.py,sha256=EzX4_ARW96DkFe1IXAFlPQI5rdhhdhmNTX1tB5IVFWs,2123
  agno/tools/local_file_system.py,sha256=wWyhM5-lqDgDO_YNzRA8ekG-m2n89k8fWr8M1BWiQWo,3157
  agno/tools/lumalab.py,sha256=6WnZXbThKY2jL9zLswq1PVsbFm2jz81qshWqBZi59oo,6808
- agno/tools/mcp.py,sha256=Cdeo9xI1nBnEWOytvWCzka9oiMi7_lPaVcNBWS1uQdw,25396
+ agno/tools/mcp.py,sha256=4effn9XvS878W810k1mOE411-_XQqp3dpHc1Y9IcN4k,25397
  agno/tools/mem0.py,sha256=5W5pZwJmBTt-_l4nvBdNQHavXFSKV9mVdJg5aso4JBI,7680
- agno/tools/memori.py,sha256=VCZmDFCyp_7c1jPA3dm3OBQHYNZNZ-AhEHBmNyKmIAU,14691
+ agno/tools/memori.py,sha256=tubBYj0rQFbBXadhWxwTjjmb3Rnims536AVPkGdCMcw,13181
  agno/tools/memory.py,sha256=vpMoKtCqs3m6vkuqmZ4fW9IRf1OhXHQGGaq3exJK0Xo,18449
  agno/tools/mlx_transcribe.py,sha256=kuiYZAM5ZAdkiOfFdbGJsCb0gacnJRtSTFzuX8eWGLw,6379
  agno/tools/models_labs.py,sha256=UrjuEOfsXv92ZYsnQJGLQdiVQiRLK4q0CjITbtQ_u7k,7488
@@ -419,7 +421,7 @@ agno/utils/common.py,sha256=EJaERgzrJnin1i0Aorv0Sf5y8AfMQWM-8yYhuYtD_4Q,4445
  agno/utils/dttm.py,sha256=sk7olzbUlMl8ibAGx24sxoP0DGBCnH81VQRYjqIcpDg,289
  agno/utils/enum.py,sha256=wDHnruIf8cQU-_QdryY9LBugPCrlj-nOabQuEFnmeYM,753
  agno/utils/env.py,sha256=o8OwKhx78vi8MaXPes10mXejmJ13CqAh7ODKMS1pmcM,438
- agno/utils/events.py,sha256=PHwN-DTmPWsJGykR2SFQTBXY5IdnaCbJskRMjmp8Cpw,19706
+ agno/utils/events.py,sha256=UDlVk51vvQIgnJGkTA-mxl1_fy2qze6wi8griiPEwVQ,20092
  agno/utils/format_str.py,sha256=Zp9dDGMABUJzulp2bs41JiNv0MqmMX0qPToL7l_Ab1c,376
  agno/utils/functions.py,sha256=eHvGqO2uO63TR-QmmhZy2DEnC0xkAfhBG26z77T7jCo,6306
  agno/utils/gemini.py,sha256=OPaXmqS1-fuZ9kETlpx_pMp6cEufSZzHmJerR5nAR7g,11206
@@ -515,9 +517,9 @@ agno/workflow/router.py,sha256=ZAiVsh2F_9ssKj0_RzHWzfgimaZ5hfb3Ni1Xx_SkVR0,26625
  agno/workflow/step.py,sha256=15vZFN5HlO3IkHqsxkM-2InUzCWC0Ee8BtoJ-OAhS5w,52485
  agno/workflow/steps.py,sha256=uRE4oGWs2cA-TrX881AEa69zu6rheXH81mNOZiRrNvg,23719
  agno/workflow/types.py,sha256=ZEFyaKH42261_3yx810ABfi70RUfjPGSwiIep7n53yg,17485
- agno/workflow/workflow.py,sha256=_35gqbn_gBrH1QEwXRN4gDS-bDWzZIwcChUTlNi_BQc,110639
- agno-2.0.6.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
- agno-2.0.6.dist-info/METADATA,sha256=eCS5_mpMqALlpH8BdLYf4lRkKJphKf-2X7z4CHWqIRM,21884
- agno-2.0.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- agno-2.0.6.dist-info/top_level.txt,sha256=MKyeuVesTyOKIXUhc-d_tPa2Hrh0oTA4LM0izowpx70,5
- agno-2.0.6.dist-info/RECORD,,
+ agno/workflow/workflow.py,sha256=1SWIoQdXt8FN8qJDxpfJ7Ol9Ea4c_Ir2xPV7m7deqnQ,111724
+ agno-2.0.7.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+ agno-2.0.7.dist-info/METADATA,sha256=1Jt-ipXGuFjrLybXQZaLJGxte4rJg0noeXjnGLl0p-M,21884
+ agno-2.0.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ agno-2.0.7.dist-info/top_level.txt,sha256=MKyeuVesTyOKIXUhc-d_tPa2Hrh0oTA4LM0izowpx70,5
+ agno-2.0.7.dist-info/RECORD,,