agno 1.7.9__py3-none-any.whl → 1.7.11__py3-none-any.whl
This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as published.
- agno/agent/agent.py +1 -1
- agno/app/fastapi/app.py +3 -1
- agno/app/fastapi/async_router.py +1 -1
- agno/app/playground/app.py +1 -0
- agno/document/chunking/semantic.py +1 -3
- agno/document/reader/markdown_reader.py +2 -7
- agno/document/reader/pdf_reader.py +69 -13
- agno/document/reader/text_reader.py +2 -2
- agno/knowledge/agent.py +70 -75
- agno/knowledge/markdown.py +15 -2
- agno/knowledge/pdf.py +32 -8
- agno/knowledge/pdf_url.py +13 -5
- agno/knowledge/website.py +4 -1
- agno/media.py +2 -0
- agno/models/aws/bedrock.py +51 -21
- agno/models/dashscope/__init__.py +5 -0
- agno/models/dashscope/dashscope.py +81 -0
- agno/models/openai/chat.py +3 -0
- agno/models/openai/responses.py +53 -7
- agno/models/qwen/__init__.py +5 -0
- agno/run/response.py +4 -0
- agno/run/team.py +4 -0
- agno/storage/in_memory.py +234 -0
- agno/team/team.py +25 -9
- agno/tools/brandfetch.py +210 -0
- agno/tools/github.py +46 -18
- agno/tools/trafilatura.py +372 -0
- agno/vectordb/clickhouse/clickhousedb.py +1 -1
- agno/vectordb/milvus/milvus.py +89 -1
- agno/vectordb/weaviate/weaviate.py +84 -18
- agno/workflow/workflow.py +3 -0
- {agno-1.7.9.dist-info → agno-1.7.11.dist-info}/METADATA +5 -1
- {agno-1.7.9.dist-info → agno-1.7.11.dist-info}/RECORD +37 -31
- {agno-1.7.9.dist-info → agno-1.7.11.dist-info}/WHEEL +0 -0
- {agno-1.7.9.dist-info → agno-1.7.11.dist-info}/entry_points.txt +0 -0
- {agno-1.7.9.dist-info → agno-1.7.11.dist-info}/licenses/LICENSE +0 -0
- {agno-1.7.9.dist-info → agno-1.7.11.dist-info}/top_level.txt +0 -0
agno/storage/in_memory.py
ADDED
@@ -0,0 +1,234 @@
+import time
+from dataclasses import asdict
+from typing import Dict, List, Literal, Optional
+
+from agno.storage.base import Storage
+from agno.storage.session import Session
+from agno.storage.session.agent import AgentSession
+from agno.storage.session.team import TeamSession
+from agno.storage.session.v2.workflow import WorkflowSession as WorkflowSessionV2
+from agno.storage.session.workflow import WorkflowSession
+from agno.utils.log import logger
+
+
+class InMemoryStorage(Storage):
+    def __init__(
+        self,
+        mode: Optional[Literal["agent", "team", "workflow", "workflow_v2"]] = "agent",
+        storage_dict: Optional[Dict[str, Dict]] = None,
+    ):
+        super().__init__(mode)
+        self.storage: Dict[str, Dict] = storage_dict if storage_dict is not None else {}
+
+    def create(self) -> None:
+        """Create the storage if it doesn't exist."""
+        # No-op for in-memory storage
+        pass
+
+    def read(self, session_id: str, user_id: Optional[str] = None) -> Optional[Session]:
+        """Read a Session from storage."""
+        try:
+            data = self.storage.get(session_id)
+            if data is None:
+                return None
+            if user_id and data["user_id"] != user_id:
+                return None
+            if self.mode == "agent":
+                return AgentSession.from_dict(data)
+            elif self.mode == "team":
+                return TeamSession.from_dict(data)
+            elif self.mode == "workflow":
+                return WorkflowSession.from_dict(data)
+            elif self.mode == "workflow_v2":
+                return WorkflowSessionV2.from_dict(data)
+
+        except Exception as e:
+            logger.error(f"Error reading session {session_id}: {e}")
+            return None
+
+    def get_all_session_ids(self, user_id: Optional[str] = None, entity_id: Optional[str] = None) -> List[str]:
+        """Get all session IDs, optionally filtered by user_id and/or entity_id."""
+        session_ids = []
+        for _, data in self.storage.items():
+            if user_id or entity_id:
+                if user_id and entity_id:
+                    if self.mode == "agent" and data["agent_id"] == entity_id and data["user_id"] == user_id:
+                        session_ids.append(data["session_id"])
+                    elif self.mode == "team" and data["team_id"] == entity_id and data["user_id"] == user_id:
+                        session_ids.append(data["session_id"])
+                    elif self.mode == "workflow" and data["workflow_id"] == entity_id and data["user_id"] == user_id:
+                        session_ids.append(data["session_id"])
+
+                elif user_id and data["user_id"] == user_id:
+                    session_ids.append(data["session_id"])
+
+                elif entity_id:
+                    if self.mode == "agent" and data["agent_id"] == entity_id:
+                        session_ids.append(data["session_id"])
+                    elif self.mode == "team" and data["team_id"] == entity_id:
+                        session_ids.append(data["session_id"])
+                    elif self.mode == "workflow" and data["workflow_id"] == entity_id:
+                        session_ids.append(data["session_id"])
+                    elif self.mode == "workflow_v2" and data["workflow_id"] == entity_id:
+                        session_ids.append(data["session_id"])
+
+            else:
+                # No filters applied, add all session_ids
+                session_ids.append(data["session_id"])
+
+        return session_ids
+
+    def get_all_sessions(self, user_id: Optional[str] = None, entity_id: Optional[str] = None) -> List[Session]:
+        """Get all sessions, optionally filtered by user_id and/or entity_id."""
+        sessions: List[Session] = []
+        for _, data in self.storage.items():
+            if user_id or entity_id:
+                _session: Optional[Session] = None
+
+                if user_id and entity_id:
+                    if self.mode == "agent" and data["agent_id"] == entity_id and data["user_id"] == user_id:
+                        _session = AgentSession.from_dict(data)
+                    elif self.mode == "team" and data["team_id"] == entity_id and data["user_id"] == user_id:
+                        _session = TeamSession.from_dict(data)
+                    elif self.mode == "workflow" and data["workflow_id"] == entity_id and data["user_id"] == user_id:
+                        _session = WorkflowSession.from_dict(data)
+
+                elif user_id and data["user_id"] == user_id:
+                    if self.mode == "agent":
+                        _session = AgentSession.from_dict(data)
+                    elif self.mode == "team":
+                        _session = TeamSession.from_dict(data)
+                    elif self.mode == "workflow":
+                        _session = WorkflowSession.from_dict(data)
+
+                elif entity_id:
+                    if self.mode == "agent" and data["agent_id"] == entity_id:
+                        _session = AgentSession.from_dict(data)
+                    elif self.mode == "team" and data["team_id"] == entity_id:
+                        _session = TeamSession.from_dict(data)
+                    elif self.mode == "workflow" and data["workflow_id"] == entity_id:
+                        _session = WorkflowSession.from_dict(data)
+                    elif self.mode == "workflow_v2" and data["workflow_id"] == entity_id:
+                        _session = WorkflowSessionV2.from_dict(data)
+
+                if _session:
+                    sessions.append(_session)
+
+            else:
+                # No filters applied, add all sessions
+                if self.mode == "agent":
+                    _session = AgentSession.from_dict(data)
+                elif self.mode == "team":
+                    _session = TeamSession.from_dict(data)
+                elif self.mode == "workflow":
+                    _session = WorkflowSession.from_dict(data)
+                elif self.mode == "workflow_v2":
+                    _session = WorkflowSessionV2.from_dict(data)
+
+                if _session:
+                    sessions.append(_session)
+
+        return sessions
+
+    def get_recent_sessions(
+        self,
+        user_id: Optional[str] = None,
+        entity_id: Optional[str] = None,
+        limit: Optional[int] = 2,
+    ) -> List[Session]:
+        """Get the last N sessions, ordered by created_at descending.
+
+        Args:
+            limit: Number of most recent sessions to return
+            user_id: Filter by user ID
+            entity_id: Filter by entity ID (agent_id, team_id, or workflow_id)
+
+        Returns:
+            List[Session]: List of most recent sessions
+        """
+        sessions: List[Session] = []
+        # List of (created_at, data) tuples for sorting
+        session_data: List[tuple[int, dict]] = []
+
+        # First pass: collect and filter sessions
+        for session_id, data in self.storage.items():
+            try:
+                if user_id and data["user_id"] != user_id:
+                    continue
+
+                if entity_id:
+                    if self.mode == "agent" and data["agent_id"] != entity_id:
+                        continue
+                    elif self.mode == "team" and data["team_id"] != entity_id:
+                        continue
+                    elif self.mode == "workflow" and data["workflow_id"] != entity_id:
+                        continue
+                    elif self.mode == "workflow_v2" and data["workflow_id"] != entity_id:
+                        continue
+
+                # Store with created_at for sorting
+                created_at = data.get("created_at", 0)
+                session_data.append((created_at, data))
+
+            except Exception as e:
+                logger.error(f"Error processing session {session_id}: {e}")
+                continue
+
+        # Sort by created_at descending and take only limit sessions
+        session_data.sort(key=lambda x: x[0], reverse=True)
+        if limit is not None:
+            session_data = session_data[:limit]
+
+        # Convert filtered and sorted data to Session objects
+        for _, data in session_data:
+            session: Optional[Session] = None
+            if self.mode == "agent":
+                session = AgentSession.from_dict(data)
+            elif self.mode == "team":
+                session = TeamSession.from_dict(data)
+            elif self.mode == "workflow":
+                session = WorkflowSession.from_dict(data)
+            elif self.mode == "workflow_v2":
+                session = WorkflowSessionV2.from_dict(data)
+            if session is not None:
+                sessions.append(session)
+
+        return sessions
+
+    def upsert(self, session: Session) -> Optional[Session]:
+        """Insert or update a Session in storage."""
+        try:
+            if self.mode == "workflow_v2":
+                data = session.to_dict()
+            else:
+                data = asdict(session)
+
+            data["updated_at"] = int(time.time())
+            if not data.get("created_at", None):
+                data["created_at"] = data["updated_at"]
+
+            self.storage[session.session_id] = data
+            return session
+
+        except Exception as e:
+            logger.error(f"Error upserting session: {e}")
+            return None
+
+    def delete_session(self, session_id: Optional[str] = None):
+        """Delete a session from storage."""
+        if session_id is None:
+            return
+
+        try:
+            self.storage.pop(session_id, None)
+
+        except Exception as e:
+            logger.error(f"Error deleting session: {e}")
+
+    def drop(self) -> None:
+        """Drop all sessions from storage."""
+        self.storage.clear()
+
+    def upgrade_schema(self) -> None:
+        """Upgrade the schema of the storage."""
+        pass
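To make the new backend concrete, here is a minimal usage sketch exercising only the methods shown above. The import path follows the file's location in the wheel; the AgentSession constructor arguments (session_id, agent_id, user_id) are assumptions inferred from the keys the filtering code reads, not something this diff confirms.

    from agno.storage.in_memory import InMemoryStorage
    from agno.storage.session.agent import AgentSession

    # Sessions live in a plain dict, so they vanish with the process.
    # Passing your own dict via storage_dict lets components share one store.
    storage = InMemoryStorage(mode="agent")

    session = AgentSession(session_id="s1", agent_id="agent-1", user_id="u1")
    storage.upsert(session)  # stamps created_at/updated_at with int(time.time())

    print(storage.get_all_session_ids(user_id="u1"))   # -> ["s1"]
    print(storage.read("s1", user_id="someone-else"))  # -> None (user filter)
    storage.drop()                                     # clears everything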
agno/team/team.py
CHANGED
@@ -310,6 +310,7 @@ class Team:
         model: Optional[Model] = None,
         name: Optional[str] = None,
         team_id: Optional[str] = None,
+        role: Optional[str] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
         session_name: Optional[str] = None,
@@ -390,6 +391,7 @@ class Team:
 
         self.name = name
         self.team_id = team_id
+        self.role = role
 
         self.user_id = user_id
         self.session_id = session_id
@@ -1351,6 +1353,10 @@ class Team:
             **kwargs,
         )
 
+        self.run_messages = run_messages
+        if len(run_messages.messages) == 0:
+            log_error("No messages to be sent to the model.")
+
         if stream:
             response_iterator = self._arun_stream(
                 run_response=self.run_response,
@@ -5337,7 +5343,7 @@ class Team:
         system_message_content += "\n<how_to_respond>\n"
         if self.mode == "coordinate":
             system_message_content += (
-                "-
+                "- Your role is to forward tasks to members in your team with the highest likelihood of completing the user's request.\n"
                 "- Carefully analyze the tools available to the members and their roles before transferring tasks.\n"
                 "- You cannot use a member tool directly. You can only transfer tasks to members.\n"
                 "- When you transfer a task to another member, make sure to include:\n"
@@ -5348,15 +5354,19 @@ class Team:
                 "- You must always analyze the responses from members before responding to the user.\n"
                 "- After analyzing the responses from the members, if you feel the task has been completed, you can stop and respond to the user.\n"
                 "- If you are not satisfied with the responses from the members, you should re-assign the task.\n"
+                "- For simple greetings, thanks, or questions about the team itself, you should respond directly.\n"
+                "- For all work requests, tasks, or questions requiring expertise, route to appropriate team members.\n"
             )
         elif self.mode == "route":
             system_message_content += (
-                "-
+                "- Your role is to forward tasks to members in your team with the highest likelihood of completing the user's request.\n"
                 "- Carefully analyze the tools available to the members and their roles before forwarding tasks.\n"
                 "- When you forward a task to another Agent, make sure to include:\n"
                 "  - member_id (str): The ID of the member to forward the task to. Use only the ID of the member, not the ID of the team followed by the ID of the member.\n"
                 "  - expected_output (str): The expected output.\n"
                 "- You can forward tasks to multiple members at once.\n"
+                "- For simple greetings, thanks, or questions about the team itself, you should respond directly.\n"
+                "- For all work requests, tasks, or questions requiring expertise, route to appropriate team members.\n"
            )
         elif self.mode == "collaborate":
             system_message_content += (
@@ -5453,6 +5463,10 @@ class Team:
         if self.description is not None:
             system_message_content += f"<description>\n{self.description}\n</description>\n\n"
 
+        # 3.3.4 Then add the Team role if provided
+        if self.role is not None:
+            system_message_content += f"\n<your_role>\n{self.role}\n</your_role>\n\n"
+
         # 3.3.5 Then add instructions for the Agent
         if len(instructions) > 0:
             system_message_content += "<instructions>"
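The new role parameter gives the team leader itself a persona, separate from the roles of its members. A minimal sketch, assuming the usual Team/Agent wiring from agno's docs (the member definition below is hypothetical):

    from agno.agent import Agent
    from agno.team import Team

    researcher = Agent(name="Researcher", role="Finds and summarizes sources")
    team = Team(
        name="Research Team",
        mode="coordinate",
        members=[researcher],
        role="You are the research lead; delegate, then answer concisely.",  # new in 1.7.11
    )
    # The string lands in the system message wrapped in <your_role> tags,
    # right after the <description> block (step 3.3.4 above).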
@@ -5645,7 +5659,7 @@ class Team:
         if isinstance(message, str):
             user_message_content = message
         else:
-            user_message_content = "\n".join(message)
+            user_message_content = "\n".join(str(message))
 
         # Add references to user message
         if (
@@ -7152,9 +7166,10 @@ class Team:
                 # If the team_session_state is already set, merge the team_session_state from the database with the current team_session_state
                 if self.team_session_state is not None and len(self.team_session_state) > 0:
                     # This updates team_session_state_from_db
-                    merge_dictionaries(
-
-
+                    merge_dictionaries(self.team_session_state, team_session_state_from_db)
+                else:
+                    # Update the current team_session_state
+                    self.team_session_state = team_session_state_from_db
 
             if "workflow_session_state" in session.session_data:
                 workflow_session_state_from_db = session.session_data.get("workflow_session_state")
@@ -7166,9 +7181,10 @@ class Team:
                 # If the workflow_session_state is already set, merge the workflow_session_state from the database with the current workflow_session_state
                 if self.workflow_session_state is not None and len(self.workflow_session_state) > 0:
                     # This updates workflow_session_state_from_db
-                    merge_dictionaries(
-
-
+                    merge_dictionaries(self.workflow_session_state, workflow_session_state_from_db)
+                else:
+                    # Update the current workflow_session_state
+                    self.workflow_session_state = workflow_session_state_from_db
 
             # Get the session_metrics from the database
             if "session_metrics" in session.session_data:
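For context, the last two hunks complete an if/else around session-state restoration. A simplified sketch of that resume path (the helper name below is hypothetical; the attribute and function names come from the diff):

    # What happens when a Team resumes a session from storage (simplified):
    def _restore_team_session_state(self, team_session_state_from_db: dict) -> None:
        if self.team_session_state is not None and len(self.team_session_state) > 0:
            # Live state exists: merge it with the state loaded from storage
            merge_dictionaries(self.team_session_state, team_session_state_from_db)
        else:
            # No live state: adopt the stored state wholesale (the new else branch)
            self.team_session_state = team_session_state_from_db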
agno/tools/brandfetch.py
ADDED
@@ -0,0 +1,210 @@
+"""
+Going to contribute this to agno toolkits.
+"""
+
+from os import getenv
+from typing import Any, Optional
+
+try:
+    import httpx
+except ImportError:
+    raise ImportError("`httpx` not installed.")
+
+from agno.tools import Toolkit
+
+
+class BrandfetchTools(Toolkit):
+    """
+    Brandfetch API toolkit for retrieving brand data and searching brands.
+
+    Supports both the Brand API (retrieve comprehensive brand data) and the
+    Brand Search API (find and search brands by name).
+
+    -- Brand API
+
+    api_key: str - your Brandfetch API key
+
+    -- Brand Search API
+
+    client_id: str - your Brandfetch Client ID
+
+    async_tools: bool = False - if True, use async tools; if False, use sync tools
+    brand: bool = True - if True, register the Brand API tools; if False, do not
+    search: bool = False - if True, register the Brand Search API tools; if False, do not
+    """
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        client_id: Optional[str] = None,
+        base_url: str = "https://api.brandfetch.io/v2",
+        timeout: Optional[float] = 20.0,
+        async_tools: bool = False,
+        brand: bool = True,
+        search: bool = False,
+        **kwargs,
+    ):
+        self.api_key = api_key or getenv("BRANDFETCH_API_KEY")
+        self.client_id = client_id or getenv("BRANDFETCH_CLIENT_ID")
+        self.base_url = base_url
+        self.timeout = httpx.Timeout(timeout)
+        self.async_tools = async_tools
+        self.search_url = f"{self.base_url}/search"
+        self.brand_url = f"{self.base_url}/brands"
+
+        tools: list[Any] = []
+        if self.async_tools:
+            if brand:
+                tools.append(self.asearch_by_identifier)
+            if search:
+                tools.append(self.asearch_by_brand)
+        else:
+            if brand:
+                tools.append(self.search_by_identifier)
+            if search:
+                tools.append(self.search_by_brand)
+        name = kwargs.pop("name", "brandfetch_tools")
+        super().__init__(name=name, tools=tools, **kwargs)
+
+    async def asearch_by_identifier(self, identifier: str) -> dict[str, Any]:
+        """
+        Search for brand data by identifier (domain, brand id, ISIN, stock ticker).
+
+        Args:
+            identifier: Options you can use: Domain (nike.com), Brand ID (id_0dwKPKT), ISIN (US6541061031), Stock Ticker (NKE)
+        Returns:
+            Dict containing brand data including logos, colors, fonts, and other brand assets
+
+        Raises:
+            ValueError: If no API key is provided
+        """
+        if not self.api_key:
+            raise ValueError("API key is required for brand search by identifier")
+
+        url = f"{self.brand_url}/{identifier}"
+        headers = {"Authorization": f"Bearer {self.api_key}"}
+
+        try:
+            async with httpx.AsyncClient(timeout=self.timeout) as client:
+                response = await client.get(url, headers=headers)
+                response.raise_for_status()
+                return response.json()
+        except httpx.HTTPStatusError as e:
+            if e.response.status_code == 404:
+                return {"error": f"Brand not found for identifier: {identifier}"}
+            elif e.response.status_code == 401:
+                return {"error": "Invalid API key"}
+            elif e.response.status_code == 429:
+                return {"error": "Rate limit exceeded"}
+            else:
+                return {"error": f"API error: {e.response.status_code}"}
+        except httpx.RequestError as e:
+            return {"error": f"Request failed: {str(e)}"}
+
+    def search_by_identifier(self, identifier: str) -> dict[str, Any]:
+        """
+        Search for brand data by identifier (domain, brand id, ISIN, stock ticker).
+
+        Args:
+            identifier: Options you can use: Domain (nike.com), Brand ID (id_0dwKPKT), ISIN (US6541061031), Stock Ticker (NKE)
+
+        Returns:
+            Dict containing brand data including logos, colors, fonts, and other brand assets
+
+        Raises:
+            ValueError: If no API key is provided
+        """
+        if not self.api_key:
+            raise ValueError("API key is required for brand search by identifier")
+
+        url = f"{self.brand_url}/{identifier}"
+        headers = {"Authorization": f"Bearer {self.api_key}"}
+
+        try:
+            with httpx.Client(timeout=self.timeout) as client:
+                response = client.get(url, headers=headers)
+                response.raise_for_status()
+                return response.json()
+        except httpx.HTTPStatusError as e:
+            if e.response.status_code == 404:
+                return {"error": f"Brand not found for identifier: {identifier}"}
+            elif e.response.status_code == 401:
+                return {"error": "Invalid API key"}
+            elif e.response.status_code == 429:
+                return {"error": "Rate limit exceeded"}
+            else:
+                return {"error": f"API error: {e.response.status_code}"}
+        except httpx.RequestError as e:
+            return {"error": f"Request failed: {str(e)}"}
+
+    async def asearch_by_brand(self, name: str) -> dict[str, Any]:
+        """
+        Search for brands by name using the Brand Search API - can give you the right brand id to use for the Brand API.
+
+        Args:
+            name: Brand name to search for (e.g., 'Google', 'Apple')
+
+        Returns:
+            Dict containing search results with brand matches
+
+        Raises:
+            ValueError: If no client ID is provided
+        """
+        if not self.client_id:
+            raise ValueError("Client ID is required for brand search by name")
+
+        url = f"{self.search_url}/{name}"
+        params = {"c": self.client_id}
+
+        try:
+            async with httpx.AsyncClient(timeout=self.timeout) as client:
+                response = await client.get(url, params=params)
+                response.raise_for_status()
+                return response.json()
+        except httpx.HTTPStatusError as e:
+            if e.response.status_code == 404:
+                return {"error": f"No brands found for name: {name}"}
+            elif e.response.status_code == 401:
+                return {"error": "Invalid client ID"}
+            elif e.response.status_code == 429:
+                return {"error": "Rate limit exceeded"}
+            else:
+                return {"error": f"API error: {e.response.status_code}"}
+        except httpx.RequestError as e:
+            return {"error": f"Request failed: {str(e)}"}
+
+    def search_by_brand(self, name: str) -> dict[str, Any]:
+        """
+        Search for brands by name using the Brand Search API - can give you the right brand id to use for the Brand API.
+
+        Args:
+            name: Brand name to search for (e.g., 'Google', 'Apple')
+
+        Returns:
+            Dict containing search results with brand matches
+
+        Raises:
+            ValueError: If no client ID is provided
+        """
+        if not self.client_id:
+            raise ValueError("Client ID is required for brand search by name")
+
+        url = f"{self.search_url}/{name}"
+        params = {"c": self.client_id}
+
+        try:
+            with httpx.Client(timeout=self.timeout) as client:
+                response = client.get(url, params=params)
+                response.raise_for_status()
+                return response.json()
+        except httpx.HTTPStatusError as e:
+            if e.response.status_code == 404:
+                return {"error": f"No brands found for name: {name}"}
+            elif e.response.status_code == 401:
+                return {"error": "Invalid client ID"}
+            elif e.response.status_code == 429:
+                return {"error": "Rate limit exceeded"}
+            else:
+                return {"error": f"API error: {e.response.status_code}"}
+        except httpx.RequestError as e:
+            return {"error": f"Request failed: {str(e)}"}
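A short usage sketch of the new toolkit: keys fall back to the BRANDFETCH_API_KEY / BRANDFETCH_CLIENT_ID environment variables, and (per the code above) API failures come back as {"error": ...} dicts rather than raised exceptions. The identifier and brand name are just examples.

    from agno.tools.brandfetch import BrandfetchTools

    # Sync tools by default; search=True also registers search_by_brand.
    tools = BrandfetchTools(search=True)

    hits = tools.search_by_brand("Nike")           # Brand Search API: find brand ids by name
    data = tools.search_by_identifier("nike.com")  # Brand API: domain, brand id, ISIN, or ticker
    if "error" in data:
        print(data["error"])                       # e.g. "Rate limit exceeded"

Returning error dicts instead of raising means an agent consuming the tool sees the failure as readable tool output it can react to.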
agno/tools/github.py
CHANGED
@@ -458,35 +458,63 @@ class GithubTools(Toolkit):
             logger.error(f"Error creating issue: {e}")
             return json.dumps({"error": str(e)})
 
-    def list_issues(self, repo_name: str, state: str = "open",
-        """List issues for a repository.
+    def list_issues(self, repo_name: str, state: str = "open", page: int = 1, per_page: int = 20) -> str:
+        """List issues for a repository with pagination.
 
         Args:
             repo_name (str): The full name of the repository (e.g., 'owner/repo').
             state (str, optional): The state of issues to list ('open', 'closed', 'all'). Defaults to 'open'.
-
+            page (int, optional): Page number of results to return, counting from 1. Defaults to 1.
+            per_page (int, optional): Number of results per page. Defaults to 20.
         Returns:
-            A JSON-formatted string containing a list of issues.
+            A JSON-formatted string containing a list of issues with pagination metadata.
         """
-        log_debug(f"Listing issues for repository: {repo_name} with state: {state}")
+        log_debug(f"Listing issues for repository: {repo_name} with state: {state}, page: {page}, per_page: {per_page}")
         try:
             repo = self.g.get_repo(repo_name)
+
             issues = repo.get_issues(state=state)
+
             # Filter out pull requests after fetching issues
-
-
+            total_issues = 0
+            all_issues = []
+            for issue in issues:
+                if not issue.pull_request:
+                    all_issues.append(issue)
+                    total_issues += 1
+
+            # Calculate pagination metadata
+            total_pages = (total_issues + per_page - 1) // per_page
+
+            # Validate page number
+            if page < 1:
+                page = 1
+            elif page > total_pages and total_pages > 0:
+                page = total_pages
+
+            # Get the specified page of results
             issue_list = []
-
-
-
-
-
-
-
-
-
-
-
+            page_start = (page - 1) * per_page
+            page_end = page_start + per_page
+
+            for i in range(page_start, min(page_end, total_issues)):
+                if i < len(all_issues):
+                    issue = all_issues[i]
+                    issue_info = {
+                        "number": issue.number,
+                        "title": issue.title,
+                        "user": issue.user.login,
+                        "created_at": issue.created_at.isoformat(),
+                        "state": issue.state,
+                        "url": issue.html_url,
+                    }
+                    issue_list.append(issue_info)
+
+            meta = {"current_page": page, "per_page": per_page, "total_items": total_issues, "total_pages": total_pages}
+
+            response = {"data": issue_list, "meta": meta}
+
+            return json.dumps(response, indent=2)
         except GithubException as e:
            logger.error(f"Error listing issues: {e}")
            return json.dumps({"error": str(e)})