agno 1.7.5-py3-none-any.whl → 1.7.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +0 -1
- agno/app/agui/async_router.py +5 -5
- agno/app/agui/sync_router.py +5 -5
- agno/app/agui/utils.py +84 -14
- agno/document/chunking/row.py +39 -0
- agno/document/reader/base.py +0 -7
- agno/embedder/jina.py +73 -0
- agno/memory/agent.py +2 -2
- agno/memory/team.py +2 -2
- agno/models/aws/bedrock.py +311 -15
- agno/models/litellm/chat.py +12 -3
- agno/models/openai/chat.py +1 -22
- agno/models/openai/responses.py +5 -5
- agno/models/portkey/__init__.py +3 -0
- agno/models/portkey/portkey.py +88 -0
- agno/models/xai/xai.py +54 -0
- agno/run/v2/workflow.py +4 -0
- agno/storage/mysql.py +1 -0
- agno/storage/postgres.py +1 -0
- agno/storage/session/v2/workflow.py +29 -5
- agno/storage/singlestore.py +4 -1
- agno/storage/sqlite.py +0 -1
- agno/team/team.py +32 -17
- agno/tools/bitbucket.py +292 -0
- agno/tools/daytona.py +411 -63
- agno/tools/evm.py +123 -0
- agno/tools/linkup.py +54 -0
- agno/tools/mcp.py +10 -3
- agno/tools/mem0.py +15 -2
- agno/tools/postgres.py +175 -162
- agno/utils/log.py +16 -0
- agno/utils/string.py +14 -0
- agno/vectordb/pgvector/pgvector.py +4 -5
- agno/workflow/v2/workflow.py +146 -19
- agno/workflow/workflow.py +90 -63
- {agno-1.7.5.dist-info → agno-1.7.6.dist-info}/METADATA +16 -1
- {agno-1.7.5.dist-info → agno-1.7.6.dist-info}/RECORD +41 -34
- {agno-1.7.5.dist-info → agno-1.7.6.dist-info}/WHEEL +0 -0
- {agno-1.7.5.dist-info → agno-1.7.6.dist-info}/entry_points.txt +0 -0
- {agno-1.7.5.dist-info → agno-1.7.6.dist-info}/licenses/LICENSE +0 -0
- {agno-1.7.5.dist-info → agno-1.7.6.dist-info}/top_level.txt +0 -0
agno/agent/agent.py
CHANGED
@@ -4369,7 +4369,6 @@ class Agent:
         # Format the system message with the session state variables
         if self.add_state_in_messages:
             sys_message_content = self.format_message_with_state_variables(sys_message_content)
-        print("HELLO", sys_message_content)

         # Add the JSON output prompt if response_model is provided and the model does not support native structured outputs or JSON schema outputs
         # or if use_json_mode is True
agno/app/agui/async_router.py
CHANGED
@@ -16,7 +16,7 @@ from fastapi import APIRouter
 from fastapi.responses import StreamingResponse

 from agno.agent.agent import Agent
-from agno.app.agui.utils import async_stream_agno_response_as_agui_events,
+from agno.app.agui.utils import async_stream_agno_response_as_agui_events, convert_agui_messages_to_agno_messages
 from agno.team.team import Team

 logger = logging.getLogger(__name__)
@@ -28,12 +28,12 @@ async def run_agent(agent: Agent, run_input: RunAgentInput) -> AsyncIterator[BaseEvent]:

     try:
         # Preparing the input for the Agent and emitting the run started event
-
+        messages = convert_agui_messages_to_agno_messages(run_input.messages or [])
         yield RunStartedEvent(type=EventType.RUN_STARTED, thread_id=run_input.thread_id, run_id=run_id)

         # Request streaming response from agent
         response_stream = await agent.arun(
-
+            messages=messages,
             session_id=run_input.thread_id,
             stream=True,
             stream_intermediate_steps=True,
@@ -56,12 +56,12 @@ async def run_team(team: Team, input: RunAgentInput) -> AsyncIterator[BaseEvent]:
     run_id = input.run_id or str(uuid.uuid4())
     try:
         # Extract the last user message for team execution
-
+        messages = convert_agui_messages_to_agno_messages(input.messages or [])
         yield RunStartedEvent(type=EventType.RUN_STARTED, thread_id=input.thread_id, run_id=run_id)

         # Request streaming response from team
         response_stream = await team.arun(
-            message=
+            message=messages,
             session_id=input.thread_id,
             stream=True,
             stream_intermediate_steps=True,
agno/app/agui/sync_router.py
CHANGED
@@ -16,7 +16,7 @@ from fastapi import APIRouter
 from fastapi.responses import StreamingResponse

 from agno.agent.agent import Agent
-from agno.app.agui.utils import
+from agno.app.agui.utils import convert_agui_messages_to_agno_messages, stream_agno_response_as_agui_events
 from agno.team.team import Team

 logger = logging.getLogger(__name__)
@@ -28,12 +28,12 @@ def run_agent(agent: Agent, run_input: RunAgentInput) -> Iterator[BaseEvent]:

     try:
         # Preparing the input for the Agent and emitting the run started event
-
+        messages = convert_agui_messages_to_agno_messages(run_input.messages or [])
         yield RunStartedEvent(type=EventType.RUN_STARTED, thread_id=run_input.thread_id, run_id=run_id)

         # Request streaming response from agent
         response_stream = agent.run(
-
+            messages=messages,
             session_id=run_input.thread_id,
             stream=True,
             stream_intermediate_steps=True,
@@ -56,12 +56,12 @@ def run_team(team: Team, input: RunAgentInput) -> Iterator[BaseEvent]:
     run_id = input.run_id or str(uuid.uuid4())
     try:
         # Extract the last user message for team execution
-
+        messages = convert_agui_messages_to_agno_messages(input.messages or [])
         yield RunStartedEvent(type=EventType.RUN_STARTED, thread_id=input.thread_id, run_id=run_id)

         # Request streaming response from team
         response_stream = team.run(
-            message=
+            message=messages,
             session_id=input.thread_id,
             stream=True,
             stream_intermediate_steps=True,
agno/app/agui/utils.py
CHANGED
@@ -1,5 +1,6 @@
 """Logic used by the AG-UI router."""

+import json
 import uuid
 from collections import deque
 from collections.abc import Iterator
@@ -17,11 +18,13 @@ from ag_ui.core import (
     TextMessageStartEvent,
     ToolCallArgsEvent,
     ToolCallEndEvent,
+    ToolCallResultEvent,
     ToolCallStartEvent,
 )
 from ag_ui.core.types import Message as AGUIMessage

-from agno.
+from agno.models.message import Message
+from agno.run.response import RunEvent, RunResponseContentEvent, RunResponseEvent, RunResponsePausedEvent
 from agno.run.team import RunResponseContentEvent as TeamRunResponseContentEvent
 from agno.run.team import TeamRunEvent, TeamRunResponseEvent

@@ -64,13 +67,26 @@ class EventBuffer:
         return False


-def
-
-
-    for msg in
-        if msg.role == "
-
-
+def convert_agui_messages_to_agno_messages(messages: List[AGUIMessage]) -> List[Message]:
+    """Convert AG-UI messages to Agno messages."""
+    result = []
+    for msg in messages:
+        if msg.role == "tool":
+            result.append(Message(role="tool", tool_call_id=msg.tool_call_id, content=msg.content))
+        elif msg.role == "assistant":
+            tool_calls = None
+            if msg.tool_calls:
+                tool_calls = [call.model_dump() for call in msg.tool_calls]
+            result.append(
+                Message(
+                    role="assistant",
+                    content=msg.content,
+                    tool_calls=tool_calls,
+                )
+            )
+        elif msg.role == "user":
+            result.append(Message(role="user", content=msg.content))
+    return result


 def extract_team_response_chunk_content(response: TeamRunResponseContentEvent) -> str:
@@ -159,7 +175,7 @@ def _create_events_from_chunk(
             args_event = ToolCallArgsEvent(
                 type=EventType.TOOL_CALL_ARGS,
                 tool_call_id=tool_call.tool_call_id,  # type: ignore
-                delta=
+                delta=json.dumps(tool_call.tool_args),
             )
             events_to_emit.append(args_event)

@@ -174,6 +190,16 @@ def _create_events_from_chunk(
             )
             events_to_emit.append(end_event)

+            if tool_call.result is not None:
+                result_event = ToolCallResultEvent(
+                    type=EventType.TOOL_CALL_RESULT,
+                    tool_call_id=tool_call.tool_call_id,  # type: ignore
+                    content=str(tool_call.result),
+                    role="tool",
+                    message_id=str(uuid.uuid4()),
+                )
+                events_to_emit.append(result_event)
+
         # Handle reasoning
         elif chunk.event == RunEvent.reasoning_started:
             step_event = StepStartedEvent(type=EventType.STEP_STARTED, step_name="reasoning")
@@ -186,7 +212,12 @@ def _create_events_from_chunk(


 def _create_completion_events(
-
+    chunk: Union[RunResponseEvent, TeamRunResponseEvent],
+    event_buffer: EventBuffer,
+    message_started: bool,
+    message_id: str,
+    thread_id: str,
+    run_id: str,
 ) -> List[BaseEvent]:
     """Create events for run completion."""
     events_to_emit = []
@@ -205,6 +236,33 @@ def _create_completion_events(
     end_message_event = TextMessageEndEvent(type=EventType.TEXT_MESSAGE_END, message_id=message_id)
     events_to_emit.append(end_message_event)

+    # emit frontend tool calls, i.e. external_execution=True
+    if isinstance(chunk, RunResponsePausedEvent) and chunk.tools is not None:
+        for tool in chunk.tools:
+            if tool.tool_call_id is None or tool.tool_name is None:
+                continue
+
+            start_event = ToolCallStartEvent(
+                type=EventType.TOOL_CALL_START,
+                tool_call_id=tool.tool_call_id,
+                tool_call_name=tool.tool_name,
+                parent_message_id=message_id,
+            )
+            events_to_emit.append(start_event)
+
+            args_event = ToolCallArgsEvent(
+                type=EventType.TOOL_CALL_ARGS,
+                tool_call_id=tool.tool_call_id,
+                delta=json.dumps(tool.tool_args),
+            )
+            events_to_emit.append(args_event)
+
+            end_event = ToolCallEndEvent(
+                type=EventType.TOOL_CALL_END,
+                tool_call_id=tool.tool_call_id,
+            )
+            events_to_emit.append(end_event)
+
     run_finished_event = RunFinishedEvent(type=EventType.RUN_FINISHED, thread_id=thread_id, run_id=run_id)
     events_to_emit.append(run_finished_event)

@@ -271,8 +329,14 @@ def stream_agno_response_as_agui_events(

     for chunk in response_stream:
         # Handle the lifecycle end event
-        if
-
+        if (
+            chunk.event == RunEvent.run_completed
+            or chunk.event == TeamRunEvent.run_completed
+            or chunk.event == RunEvent.run_paused
+        ):
+            completion_events = _create_completion_events(
+                chunk, event_buffer, message_started, message_id, thread_id, run_id
+            )
             for event in completion_events:
                 events_to_emit = _emit_event_logic(event_buffer=event_buffer, event=event)
                 for emit_event in events_to_emit:
@@ -302,8 +366,14 @@ async def async_stream_agno_response_as_agui_events(

     async for chunk in response_stream:
         # Handle the lifecycle end event
-        if
-
+        if (
+            chunk.event == RunEvent.run_completed
+            or chunk.event == TeamRunEvent.run_completed
+            or chunk.event == RunEvent.run_paused
+        ):
+            completion_events = _create_completion_events(
+                chunk, event_buffer, message_started, message_id, thread_id, run_id
+            )
             for event in completion_events:
                 events_to_emit = _emit_event_logic(event_buffer=event_buffer, event=event)
                 for emit_event in events_to_emit:
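Taken together with the router changes above, convert_agui_messages_to_agno_messages is what maps the AG-UI chat history onto Agno Message objects before each run. A minimal sketch of its behaviour follows; the SimpleNamespace stand-ins are illustrative only (they just expose the role, content, tool_call_id and tool_calls attributes the helper reads), not the real ag_ui.core.types message classes the routers receive.

# Illustrative sketch: stand-in objects expose the same attributes the converter reads.
from types import SimpleNamespace

from agno.app.agui.utils import convert_agui_messages_to_agno_messages

agui_history = [
    SimpleNamespace(role="user", content="What is the weather in Paris?", tool_calls=None, tool_call_id=None),
    SimpleNamespace(role="assistant", content="Let me check.", tool_calls=None, tool_call_id=None),
    SimpleNamespace(role="tool", content='{"temp_c": 21}', tool_call_id="call_1", tool_calls=None),
]

agno_messages = convert_agui_messages_to_agno_messages(agui_history)  # type: ignore[arg-type]
for message in agno_messages:
    print(message.role, message.content)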
agno/document/chunking/row.py
ADDED
@@ -0,0 +1,39 @@
+from typing import List
+
+from agno.document.base import Document
+from agno.document.chunking.strategy import ChunkingStrategy
+
+
+class RowChunking(ChunkingStrategy):
+    def __init__(self, skip_header: bool = False, clean_rows: bool = True):
+        self.skip_header = skip_header
+        self.clean_rows = clean_rows
+
+    def chunk(self, document: Document) -> List[Document]:
+        if not document or not document.content:
+            return []
+
+        if not isinstance(document.content, str):
+            raise ValueError("Document content must be a string")
+
+        rows = document.content.splitlines()
+
+        if self.skip_header and rows:
+            rows = rows[1:]
+            start_index = 2
+        else:
+            start_index = 1
+
+        chunks = []
+        for i, row in enumerate(rows):
+            if self.clean_rows:
+                chunk_content = " ".join(row.split())  # Normalize internal whitespace
+            else:
+                chunk_content = row.strip()
+
+            if chunk_content:  # Skip empty rows
+                meta_data = document.meta_data.copy()
+                meta_data["row_number"] = start_index + i  # Preserve logical row numbering
+                chunk_id = f"{document.id}_row_{start_index + i}" if document.id else None
+                chunks.append(Document(id=chunk_id, name=document.name, meta_data=meta_data, content=chunk_content))
+        return chunks
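A short usage sketch for the new RowChunking strategy follows. It assumes Document accepts the id/name/content keyword arguments that RowChunking itself passes when building chunks, and that meta_data defaults to an empty dict; the CSV content is made up for illustration.

from agno.document.base import Document
from agno.document.chunking.row import RowChunking

csv_doc = Document(
    id="prices",
    name="prices.csv",
    content="symbol,price\nAAPL,228.1\nMSFT,414.5",
)

# skip_header=True drops the first line but starts row numbering at 2,
# so each chunk's metadata still points back to the original file layout.
chunker = RowChunking(skip_header=True)
for chunk in chunker.chunk(csv_doc):
    print(chunk.meta_data["row_number"], chunk.content)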
agno/document/reader/base.py
CHANGED
@@ -16,13 +16,6 @@ class Reader:
     separators: List[str] = field(default_factory=lambda: ["\n", "\n\n", "\r", "\r\n", "\n\r", "\t", " ", " "])
     chunking_strategy: Optional[ChunkingStrategy] = None

-    def __init__(
-        self, chunk: bool = True, chunk_size: int = 5000, chunking_strategy: Optional[ChunkingStrategy] = None
-    ) -> None:
-        self.chunk = chunk
-        self.chunk_size = chunk_size
-        self.chunking_strategy = chunking_strategy
-
     def read(self, obj: Any) -> List[Document]:
         raise NotImplementedError
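With the duplicated __init__ gone, Reader is configured purely through its dataclass fields. Below is a minimal sketch of what a subclass looks like under that assumption; InMemoryReader is a hypothetical example, not part of agno, and RowChunking is used only because it was added above.

from dataclasses import dataclass
from typing import Any, List

from agno.document.base import Document
from agno.document.chunking.row import RowChunking
from agno.document.reader.base import Reader


@dataclass
class InMemoryReader(Reader):
    """Hypothetical reader that treats the input object as raw text."""

    def read(self, obj: Any) -> List[Document]:
        doc = Document(name="inline", content=str(obj))
        # Delegate splitting to whatever strategy was set on the inherited dataclass field.
        return self.chunking_strategy.chunk(doc) if self.chunking_strategy else [doc]


# No hand-written __init__ anywhere: the dataclass-generated initializer wires the fields.
reader = InMemoryReader(chunking_strategy=RowChunking())
documents = reader.read("symbol,price\nAAPL,228.1\nMSFT,414.5")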
agno/embedder/jina.py
ADDED
@@ -0,0 +1,73 @@
+from dataclasses import dataclass
+from os import getenv
+from typing import Any, Dict, List, Optional, Tuple
+
+from typing_extensions import Literal
+
+from agno.embedder.base import Embedder
+from agno.utils.log import logger
+
+try:
+    import requests
+except ImportError:
+    raise ImportError("requests not installed, use pip install requests")
+
+
+@dataclass
+class JinaEmbedder(Embedder):
+    id: str = "jina-embeddings-v3"
+    dimensions: int = 1024
+    embedding_type: Literal["float", "base64", "int8"] = "float"
+    late_chunking: bool = False
+    user: Optional[str] = None
+    api_key: Optional[str] = getenv("JINA_API_KEY")
+    base_url: str = "https://api.jina.ai/v1/embeddings"
+    headers: Optional[Dict[str, str]] = None
+    request_params: Optional[Dict[str, Any]] = None
+    timeout: Optional[float] = None
+
+    def _get_headers(self) -> Dict[str, str]:
+        if not self.api_key:
+            raise ValueError(
+                "API key is required for Jina embedder. Set JINA_API_KEY environment variable or pass api_key parameter."
+            )
+
+        headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}"}
+        if self.headers:
+            headers.update(self.headers)
+        return headers
+
+    def _response(self, text: str) -> Dict[str, Any]:
+        data = {
+            "model": self.id,
+            "late_chunking": self.late_chunking,
+            "dimensions": self.dimensions,
+            "embedding_type": self.embedding_type,
+            "input": [text],  # Jina API expects a list
+        }
+        if self.user is not None:
+            data["user"] = self.user
+        if self.request_params:
+            data.update(self.request_params)
+
+        response = requests.post(self.base_url, headers=self._get_headers(), json=data, timeout=self.timeout)
+        response.raise_for_status()
+        return response.json()
+
+    def get_embedding(self, text: str) -> List[float]:
+        try:
+            result = self._response(text)
+            return result["data"][0]["embedding"]
+        except Exception as e:
+            logger.warning(f"Failed to get embedding: {e}")
+            return []
+
+    def get_embedding_and_usage(self, text: str) -> Tuple[List[float], Optional[Dict]]:
+        try:
+            result = self._response(text)
+            embedding = result["data"][0]["embedding"]
+            usage = result.get("usage")
+            return embedding, usage
+        except Exception as e:
+            logger.warning(f"Failed to get embedding and usage: {e}")
+            return [], None
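Usage mirrors the other Embedder implementations; a short sketch (the key is read from the JINA_API_KEY environment variable unless api_key is passed explicitly, and the input text is just an example):

from agno.embedder.jina import JinaEmbedder

embedder = JinaEmbedder(dimensions=1024)  # api_key defaults to getenv("JINA_API_KEY")

vector = embedder.get_embedding("Agno is a framework for building multi-agent systems.")
vector, usage = embedder.get_embedding_and_usage("Agno is a framework for building multi-agent systems.")
print(len(vector), usage)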
agno/memory/agent.py
CHANGED
@@ -273,7 +273,7 @@ class AgentMemory(BaseModel):

         self.classifier.existing_memories = self.memories
         classifier_response = self.classifier.run(input)
-        if classifier_response == "yes":
+        if classifier_response and classifier_response.lower() == "yes":
             return True
         return False

@@ -286,7 +286,7 @@ class AgentMemory(BaseModel):

         self.classifier.existing_memories = self.memories
         classifier_response = await self.classifier.arun(input)
-        if classifier_response == "yes":
+        if classifier_response and classifier_response.lower() == "yes":
             return True
         return False
agno/memory/team.py
CHANGED
@@ -313,7 +313,7 @@ class TeamMemory:

         self.classifier.existing_memories = self.memories
         classifier_response = self.classifier.run(input)
-        if classifier_response == "yes":
+        if classifier_response and classifier_response.lower() == "yes":
             return True
         return False

@@ -326,7 +326,7 @@ class TeamMemory:

         self.classifier.existing_memories = self.memories
         classifier_response = await self.classifier.arun(input)
-        if classifier_response == "yes":
+        if classifier_response and classifier_response.lower() == "yes":
             return True
         return False
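Both memory classes now harden the classifier check: a None response is guarded before .lower() is called, and mixed-case answers such as "Yes" now count as a yes. A standalone sketch of the new behaviour (classifier_says_yes is an illustrative helper, not the actual method name):

def classifier_says_yes(classifier_response):
    # Old check: classifier_response == "yes" matched only the exact lowercase string.
    # New check: tolerate None and casing differences before comparing.
    return bool(classifier_response and classifier_response.lower() == "yes")


assert classifier_says_yes("Yes") is True
assert classifier_says_yes("yes") is True
assert classifier_says_yes(None) is False
assert classifier_says_yes("no") is False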