agno-2.0.11-py3-none-any.whl → agno-2.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +606 -175
- agno/db/in_memory/in_memory_db.py +42 -29
- agno/db/postgres/postgres.py +6 -4
- agno/exceptions.py +62 -1
- agno/guardrails/__init__.py +6 -0
- agno/guardrails/base.py +19 -0
- agno/guardrails/openai.py +144 -0
- agno/guardrails/pii.py +94 -0
- agno/guardrails/prompt_injection.py +51 -0
- agno/knowledge/embedder/aws_bedrock.py +9 -4
- agno/knowledge/embedder/azure_openai.py +54 -0
- agno/knowledge/embedder/base.py +2 -0
- agno/knowledge/embedder/cohere.py +184 -5
- agno/knowledge/embedder/google.py +79 -1
- agno/knowledge/embedder/huggingface.py +9 -4
- agno/knowledge/embedder/jina.py +63 -0
- agno/knowledge/embedder/mistral.py +78 -11
- agno/knowledge/embedder/ollama.py +5 -0
- agno/knowledge/embedder/openai.py +18 -54
- agno/knowledge/embedder/voyageai.py +69 -16
- agno/knowledge/knowledge.py +5 -4
- agno/knowledge/reader/pdf_reader.py +4 -3
- agno/knowledge/reader/website_reader.py +3 -2
- agno/models/base.py +125 -32
- agno/models/cerebras/cerebras.py +1 -0
- agno/models/cerebras/cerebras_openai.py +1 -0
- agno/models/dashscope/dashscope.py +1 -0
- agno/models/google/gemini.py +27 -5
- agno/models/openai/chat.py +13 -4
- agno/models/perplexity/perplexity.py +2 -3
- agno/models/requesty/__init__.py +5 -0
- agno/models/requesty/requesty.py +49 -0
- agno/models/vllm/vllm.py +1 -0
- agno/models/xai/xai.py +1 -0
- agno/os/app.py +98 -126
- agno/os/interfaces/whatsapp/router.py +2 -0
- agno/os/mcp.py +1 -1
- agno/os/middleware/__init__.py +7 -0
- agno/os/middleware/jwt.py +233 -0
- agno/os/router.py +181 -45
- agno/os/routers/home.py +2 -2
- agno/os/routers/memory/memory.py +23 -1
- agno/os/routers/memory/schemas.py +1 -1
- agno/os/routers/session/session.py +20 -3
- agno/os/utils.py +74 -8
- agno/run/agent.py +120 -77
- agno/run/team.py +115 -72
- agno/run/workflow.py +5 -15
- agno/session/summary.py +9 -10
- agno/session/team.py +2 -1
- agno/team/team.py +720 -168
- agno/tools/firecrawl.py +4 -4
- agno/tools/function.py +42 -2
- agno/tools/knowledge.py +3 -3
- agno/tools/searxng.py +2 -2
- agno/tools/serper.py +2 -2
- agno/tools/spider.py +2 -2
- agno/tools/workflow.py +4 -5
- agno/utils/events.py +66 -1
- agno/utils/hooks.py +57 -0
- agno/utils/media.py +11 -9
- agno/utils/print_response/agent.py +43 -5
- agno/utils/print_response/team.py +48 -12
- agno/vectordb/cassandra/cassandra.py +44 -4
- agno/vectordb/chroma/chromadb.py +79 -8
- agno/vectordb/clickhouse/clickhousedb.py +43 -6
- agno/vectordb/couchbase/couchbase.py +76 -5
- agno/vectordb/lancedb/lance_db.py +38 -3
- agno/vectordb/milvus/milvus.py +76 -4
- agno/vectordb/mongodb/mongodb.py +76 -4
- agno/vectordb/pgvector/pgvector.py +50 -6
- agno/vectordb/pineconedb/pineconedb.py +39 -2
- agno/vectordb/qdrant/qdrant.py +76 -26
- agno/vectordb/singlestore/singlestore.py +77 -4
- agno/vectordb/upstashdb/upstashdb.py +42 -2
- agno/vectordb/weaviate/weaviate.py +39 -3
- agno/workflow/types.py +1 -0
- agno/workflow/workflow.py +58 -2
- {agno-2.0.11.dist-info → agno-2.1.0.dist-info}/METADATA +4 -3
- {agno-2.0.11.dist-info → agno-2.1.0.dist-info}/RECORD +83 -73
- {agno-2.0.11.dist-info → agno-2.1.0.dist-info}/WHEEL +0 -0
- {agno-2.0.11.dist-info → agno-2.1.0.dist-info}/licenses/LICENSE +0 -0
- {agno-2.0.11.dist-info → agno-2.1.0.dist-info}/top_level.txt +0 -0
agno/run/agent.py
CHANGED
@@ -14,6 +14,96 @@ from agno.run.base import BaseRunOutputEvent, MessageReferences, RunStatus
 from agno.utils.log import logger


+@dataclass
+class RunInput:
+    """Container for the raw input data passed to Agent.run().
+
+    This captures the original input exactly as provided by the user,
+    separate from the processed messages that go to the model.
+
+    Attributes:
+        input_content: The literal input message/content passed to run()
+        images: Images directly passed to run()
+        videos: Videos directly passed to run()
+        audios: Audio files directly passed to run()
+        files: Files directly passed to run()
+    """
+
+    input_content: Union[str, List, Dict, Message, BaseModel, List[Message]]
+    images: Optional[Sequence[Image]] = None
+    videos: Optional[Sequence[Video]] = None
+    audios: Optional[Sequence[Audio]] = None
+    files: Optional[Sequence[File]] = None
+
+    def input_content_string(self) -> str:
+        import json
+
+        if isinstance(self.input_content, (str)):
+            return self.input_content
+        elif isinstance(self.input_content, BaseModel):
+            return self.input_content.model_dump_json(exclude_none=True)
+        elif isinstance(self.input_content, Message):
+            return json.dumps(self.input_content.to_dict())
+        elif isinstance(self.input_content, list) and self.input_content and isinstance(self.input_content[0], Message):
+            return json.dumps([m.to_dict() for m in self.input_content])
+        else:
+            return str(self.input_content)
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary representation"""
+        result: Dict[str, Any] = {}
+
+        if self.input_content is not None:
+            if isinstance(self.input_content, (str)):
+                result["input_content"] = self.input_content
+            elif isinstance(self.input_content, BaseModel):
+                result["input_content"] = self.input_content.model_dump(exclude_none=True)
+            elif isinstance(self.input_content, Message):
+                result["input_content"] = self.input_content.to_dict()
+            elif (
+                isinstance(self.input_content, list)
+                and self.input_content
+                and isinstance(self.input_content[0], Message)
+            ):
+                result["input_content"] = [m.to_dict() for m in self.input_content]
+            else:
+                result["input_content"] = self.input_content
+
+        if self.images:
+            result["images"] = [img.to_dict() for img in self.images]
+        if self.videos:
+            result["videos"] = [vid.to_dict() for vid in self.videos]
+        if self.audios:
+            result["audios"] = [aud.to_dict() for aud in self.audios]
+        if self.files:
+            result["files"] = [file.to_dict() for file in self.files]
+
+        return result
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "RunInput":
+        """Create RunInput from dictionary"""
+        images = None
+        if data.get("images"):
+            images = [Image.model_validate(img_data) for img_data in data["images"]]
+
+        videos = None
+        if data.get("videos"):
+            videos = [Video.model_validate(vid_data) for vid_data in data["videos"]]
+
+        audios = None
+        if data.get("audios"):
+            audios = [Audio.model_validate(aud_data) for aud_data in data["audios"]]
+
+        files = None
+        if data.get("files"):
+            files = [File.model_validate(file_data) for file_data in data["files"]]
+
+        return cls(
+            input_content=data.get("input_content", ""), images=images, videos=videos, audios=audios, files=files
+        )
+
+
 class RunEvent(str, Enum):
     """Events that can be sent by the run() functions"""

@@ -27,6 +117,9 @@ class RunEvent(str, Enum):
     run_paused = "RunPaused"
     run_continued = "RunContinued"

+    pre_hook_started = "PreHookStarted"
+    pre_hook_completed = "PreHookCompleted"
+
     tool_call_started = "ToolCallStarted"
     tool_call_completed = "ToolCallCompleted"

@@ -53,6 +146,7 @@ class BaseAgentRunEvent(BaseRunOutputEvent):
     agent_id: str = ""
     agent_name: str = ""
     run_id: Optional[str] = None
+    parent_run_id: Optional[str] = None
     session_id: Optional[str] = None

     # Step context for workflow execution
@@ -153,6 +247,11 @@ class RunErrorEvent(BaseAgentRunEvent):
     event: str = RunEvent.run_error.value
     content: Optional[str] = None

+    # From exceptions
+    error_type: Optional[str] = None
+    error_id: Optional[str] = None
+    additional_data: Optional[Dict[str, Any]] = None
+

 @dataclass
 class RunCancelledEvent(BaseAgentRunEvent):
@@ -164,6 +263,20 @@ class RunCancelledEvent(BaseAgentRunEvent):
         return True


+@dataclass
+class PreHookStartedEvent(BaseAgentRunEvent):
+    event: str = RunEvent.pre_hook_started.value
+    pre_hook_name: Optional[str] = None
+    run_input: Optional[RunInput] = None
+
+
+@dataclass
+class PreHookCompletedEvent(BaseAgentRunEvent):
+    event: str = RunEvent.pre_hook_completed.value
+    pre_hook_name: Optional[str] = None
+    run_input: Optional[RunInput] = None
+
+
 @dataclass
 class MemoryUpdateStartedEvent(BaseAgentRunEvent):
     event: str = RunEvent.memory_update_started.value
@@ -244,6 +357,8 @@ RunOutputEvent = Union[
     RunCancelledEvent,
     RunPausedEvent,
     RunContinuedEvent,
+    PreHookStartedEvent,
+    PreHookCompletedEvent,
     ReasoningStartedEvent,
     ReasoningStepEvent,
     ReasoningCompletedEvent,
@@ -269,6 +384,8 @@ RUN_EVENT_TYPE_REGISTRY = {
     RunEvent.run_cancelled.value: RunCancelledEvent,
     RunEvent.run_paused.value: RunPausedEvent,
     RunEvent.run_continued.value: RunContinuedEvent,
+    RunEvent.pre_hook_started.value: PreHookStartedEvent,
+    RunEvent.pre_hook_completed.value: PreHookCompletedEvent,
     RunEvent.reasoning_started.value: ReasoningStartedEvent,
     RunEvent.reasoning_step.value: ReasoningStepEvent,
     RunEvent.reasoning_completed.value: ReasoningCompletedEvent,
@@ -292,80 +409,6 @@ def run_output_event_from_dict(data: dict) -> BaseRunOutputEvent:
     return cls.from_dict(data)  # type: ignore


-@dataclass
-class RunInput:
-    """Container for the raw input data passed to Agent.run().
-
-    This captures the original input exactly as provided by the user,
-    separate from the processed messages that go to the model.
-
-    Attributes:
-        input_content: The literal input message/content passed to run()
-        images: Images directly passed to run()
-        videos: Videos directly passed to run()
-        audios: Audio files directly passed to run()
-        files: Files directly passed to run()
-    """
-
-    input_content: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None
-    images: Optional[Sequence[Image]] = None
-    videos: Optional[Sequence[Video]] = None
-    audios: Optional[Sequence[Audio]] = None
-    files: Optional[Sequence[File]] = None
-
-    def to_dict(self) -> Dict[str, Any]:
-        """Convert to dictionary representation"""
-        result: Dict[str, Any] = {}
-
-        if self.input_content is not None:
-            if isinstance(self.input_content, (str)):
-                result["input_content"] = self.input_content
-            elif isinstance(self.input_content, BaseModel):
-                result["input_content"] = self.input_content.model_dump(exclude_none=True)
-            elif isinstance(self.input_content, Message):
-                result["input_content"] = self.input_content.to_dict()
-            elif (
-                isinstance(self.input_content, list)
-                and self.input_content
-                and isinstance(self.input_content[0], Message)
-            ):
-                result["input_content"] = [m.to_dict() for m in self.input_content]
-            else:
-                result["input_content"] = self.input_content
-
-        if self.images:
-            result["images"] = [img.to_dict() for img in self.images]
-        if self.videos:
-            result["videos"] = [vid.to_dict() for vid in self.videos]
-        if self.audios:
-            result["audios"] = [aud.to_dict() for aud in self.audios]
-        if self.files:
-            result["files"] = [file.to_dict() for file in self.files]
-
-        return result
-
-    @classmethod
-    def from_dict(cls, data: Dict[str, Any]) -> "RunInput":
-        """Create RunInput from dictionary"""
-        images = None
-        if data.get("images"):
-            images = [Image.model_validate(img_data) for img_data in data["images"]]
-
-        videos = None
-        if data.get("videos"):
-            videos = [Video.model_validate(vid_data) for vid_data in data["videos"]]
-
-        audios = None
-        if data.get("audios"):
-            audios = [Audio.model_validate(aud_data) for aud_data in data["audios"]]
-
-        files = None
-        if data.get("files"):
-            files = [File.model_validate(file_data) for file_data in data["files"]]
-
-        return cls(input_content=data.get("input_content"), images=images, videos=videos, audios=audios, files=files)
-
-
 @dataclass
 class RunOutput:
     """Response returned by Agent.run() or Workflow.run() functions"""
@@ -378,6 +421,9 @@ class RunOutput:
     workflow_id: Optional[str] = None
     user_id: Optional[str] = None

+    # Input media and messages from user
+    input: Optional[RunInput] = None
+
     content: Optional[Any] = None
     content_type: str = "str"

@@ -401,9 +447,6 @@ class RunOutput:
     files: Optional[List[File]] = None  # Files attached to the response
     response_audio: Optional[Audio] = None  # Model audio response

-    # Input media and messages from user
-    input: Optional[RunInput] = None
-
     citations: Optional[Citations] = None
     references: Optional[List[MessageReferences]] = None

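For reference, a minimal sketch of how the relocated RunInput container behaves, based only on the methods visible in this diff (to_dict, from_dict, and the new input_content_string); the import path simply follows the module being changed, so treat it as an assumption:

    from agno.run.agent import RunInput

    # Plain-string input: input_content_string() returns it unchanged
    run_input = RunInput(input_content="What is the capital of France?")
    assert run_input.input_content_string() == "What is the capital of France?"

    # to_dict()/from_dict() round-trip the raw input and any attached media
    payload = run_input.to_dict()  # {"input_content": "What is the capital of France?"}
    restored = RunInput.from_dict(payload)
    assert restored.input_content == run_input.input_content

Note that input_content is now a required field, and from_dict falls back to an empty string when the key is missing (previously it defaulted to None).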
agno/run/team.py
CHANGED
@@ -15,6 +15,94 @@ from agno.run.base import BaseRunOutputEvent, MessageReferences, RunStatus
 from agno.utils.log import log_error


+@dataclass
+class TeamRunInput:
+    """Container for the raw input data passed to Agent.run().
+    This captures the original input exactly as provided by the user,
+    separate from the processed messages that go to the model.
+    Attributes:
+        input_content: The literal input message/content passed to run()
+        images: Images directly passed to run()
+        videos: Videos directly passed to run()
+        audios: Audio files directly passed to run()
+        files: Files directly passed to run()
+    """
+
+    input_content: Union[str, List, Dict, Message, BaseModel, List[Message]]
+    images: Optional[Sequence[Image]] = None
+    videos: Optional[Sequence[Video]] = None
+    audios: Optional[Sequence[Audio]] = None
+    files: Optional[Sequence[File]] = None
+
+    def input_content_string(self) -> str:
+        import json
+
+        if isinstance(self.input_content, (str)):
+            return self.input_content
+        elif isinstance(self.input_content, BaseModel):
+            return self.input_content.model_dump_json(exclude_none=True)
+        elif isinstance(self.input_content, Message):
+            return json.dumps(self.input_content.to_dict())
+        elif isinstance(self.input_content, list) and self.input_content and isinstance(self.input_content[0], Message):
+            return json.dumps([m.to_dict() for m in self.input_content])
+        else:
+            return str(self.input_content)
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary representation"""
+        result: Dict[str, Any] = {}
+
+        if self.input_content is not None:
+            if isinstance(self.input_content, (str)):
+                result["input_content"] = self.input_content
+            elif isinstance(self.input_content, BaseModel):
+                result["input_content"] = self.input_content.model_dump(exclude_none=True)
+            elif isinstance(self.input_content, Message):
+                result["input_content"] = self.input_content.to_dict()
+            elif (
+                isinstance(self.input_content, list)
+                and self.input_content
+                and isinstance(self.input_content[0], Message)
+            ):
+                result["input_content"] = [m.to_dict() for m in self.input_content]
+            else:
+                result["input_content"] = self.input_content
+
+        if self.images:
+            result["images"] = [img.to_dict() for img in self.images]
+        if self.videos:
+            result["videos"] = [vid.to_dict() for vid in self.videos]
+        if self.audios:
+            result["audios"] = [aud.to_dict() for aud in self.audios]
+        if self.files:
+            result["files"] = [file.to_dict() for file in self.files]
+
+        return result
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "TeamRunInput":
+        """Create TeamRunInput from dictionary"""
+        images = None
+        if data.get("images"):
+            images = [Image.model_validate(img_data) for img_data in data["images"]]
+
+        videos = None
+        if data.get("videos"):
+            videos = [Video.model_validate(vid_data) for vid_data in data["videos"]]
+
+        audios = None
+        if data.get("audios"):
+            audios = [Audio.model_validate(aud_data) for aud_data in data["audios"]]
+
+        files = None
+        if data.get("files"):
+            files = [File.model_validate(file_data) for file_data in data["files"]]
+
+        return cls(
+            input_content=data.get("input_content", ""), images=images, videos=videos, audios=audios, files=files
+        )
+
+
 class TeamRunEvent(str, Enum):
     """Events that can be sent by the run() functions"""

@@ -25,6 +113,9 @@ class TeamRunEvent(str, Enum):
     run_error = "TeamRunError"
     run_cancelled = "TeamRunCancelled"

+    pre_hook_started = "TeamPreHookStarted"
+    pre_hook_completed = "TeamPreHookCompleted"
+
     tool_call_started = "TeamToolCallStarted"
     tool_call_completed = "TeamToolCallCompleted"

@@ -51,6 +142,7 @@ class BaseTeamRunEvent(BaseRunOutputEvent):
     team_id: str = ""
     team_name: str = ""
     run_id: Optional[str] = None
+    parent_run_id: Optional[str] = None
     session_id: Optional[str] = None

     workflow_id: Optional[str] = None
@@ -141,6 +233,11 @@ class RunErrorEvent(BaseTeamRunEvent):
     event: str = TeamRunEvent.run_error.value
     content: Optional[str] = None

+    # From exceptions
+    error_type: Optional[str] = None
+    error_id: Optional[str] = None
+    additional_data: Optional[Dict[str, Any]] = None
+

 @dataclass
 class RunCancelledEvent(BaseTeamRunEvent):
@@ -152,6 +249,20 @@ class RunCancelledEvent(BaseTeamRunEvent):
         return True


+@dataclass
+class PreHookStartedEvent(BaseTeamRunEvent):
+    event: str = TeamRunEvent.pre_hook_started.value
+    pre_hook_name: Optional[str] = None
+    run_input: Optional[TeamRunInput] = None
+
+
+@dataclass
+class PreHookCompletedEvent(BaseTeamRunEvent):
+    event: str = TeamRunEvent.pre_hook_completed.value
+    pre_hook_name: Optional[str] = None
+    run_input: Optional[TeamRunInput] = None
+
+
 @dataclass
 class MemoryUpdateStartedEvent(BaseTeamRunEvent):
     event: str = TeamRunEvent.memory_update_started.value
@@ -230,6 +341,8 @@ TeamRunOutputEvent = Union[
     RunCompletedEvent,
     RunErrorEvent,
     RunCancelledEvent,
+    PreHookStartedEvent,
+    PreHookCompletedEvent,
     ReasoningStartedEvent,
     ReasoningStepEvent,
     ReasoningCompletedEvent,
@@ -252,6 +365,8 @@ TEAM_RUN_EVENT_TYPE_REGISTRY = {
     TeamRunEvent.run_completed.value: RunCompletedEvent,
     TeamRunEvent.run_error.value: RunErrorEvent,
     TeamRunEvent.run_cancelled.value: RunCancelledEvent,
+    TeamRunEvent.pre_hook_started.value: PreHookStartedEvent,
+    TeamRunEvent.pre_hook_completed.value: PreHookCompletedEvent,
     TeamRunEvent.reasoning_started.value: ReasoningStartedEvent,
     TeamRunEvent.reasoning_step.value: ReasoningStepEvent,
     TeamRunEvent.reasoning_completed.value: ReasoningCompletedEvent,
@@ -278,78 +393,6 @@ def team_run_output_event_from_dict(data: dict) -> BaseTeamRunEvent:
     return event_class.from_dict(data)  # type: ignore


-@dataclass
-class TeamRunInput:
-    """Container for the raw input data passed to Agent.run().
-    This captures the original input exactly as provided by the user,
-    separate from the processed messages that go to the model.
-    Attributes:
-        input_content: The literal input message/content passed to run()
-        images: Images directly passed to run()
-        videos: Videos directly passed to run()
-        audios: Audio files directly passed to run()
-        files: Files directly passed to run()
-    """
-
-    input_content: Optional[Union[str, List, Dict, Message, BaseModel, List[Message]]] = None
-    images: Optional[Sequence[Image]] = None
-    videos: Optional[Sequence[Video]] = None
-    audios: Optional[Sequence[Audio]] = None
-    files: Optional[Sequence[File]] = None
-
-    def to_dict(self) -> Dict[str, Any]:
-        """Convert to dictionary representation"""
-        result: Dict[str, Any] = {}
-
-        if self.input_content is not None:
-            if isinstance(self.input_content, (str)):
-                result["input_content"] = self.input_content
-            elif isinstance(self.input_content, BaseModel):
-                result["input_content"] = self.input_content.model_dump(exclude_none=True)
-            elif isinstance(self.input_content, Message):
-                result["input_content"] = self.input_content.to_dict()
-            elif (
-                isinstance(self.input_content, list)
-                and self.input_content
-                and isinstance(self.input_content[0], Message)
-            ):
-                result["input_content"] = [m.to_dict() for m in self.input_content]
-            else:
-                result["input_content"] = self.input_content
-
-        if self.images:
-            result["images"] = [img.to_dict() for img in self.images]
-        if self.videos:
-            result["videos"] = [vid.to_dict() for vid in self.videos]
-        if self.audios:
-            result["audios"] = [aud.to_dict() for aud in self.audios]
-        if self.files:
-            result["files"] = [file.to_dict() for file in self.files]
-
-        return result
-
-    @classmethod
-    def from_dict(cls, data: Dict[str, Any]) -> "TeamRunInput":
-        """Create TeamRunInput from dictionary"""
-        images = None
-        if data.get("images"):
-            images = [Image.model_validate(img_data) for img_data in data["images"]]
-
-        videos = None
-        if data.get("videos"):
-            videos = [Video.model_validate(vid_data) for vid_data in data["videos"]]
-
-        audios = None
-        if data.get("audios"):
-            audios = [Audio.model_validate(aud_data) for aud_data in data["audios"]]
-
-        files = None
-        if data.get("files"):
-            files = [File.model_validate(file_data) for file_data in data["files"]]
-
-        return cls(input_content=data.get("input_content"), images=images, videos=videos, audios=audios, files=files)
-
-
 @dataclass
 class TeamRunOutput:
     """Response returned by Team.run() functions"""
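A similar sketch for the new team-level pre-hook events, using only names introduced in this diff and assuming the inherited event fields keep their defaults (as the fragments above suggest); the hook name is a hypothetical placeholder:

    from agno.run.team import (
        TEAM_RUN_EVENT_TYPE_REGISTRY,
        PreHookStartedEvent,
        TeamRunEvent,
        TeamRunInput,
    )

    # Hypothetical pre-hook name, used only for illustration
    event = PreHookStartedEvent(
        pre_hook_name="validate_input",
        run_input=TeamRunInput(input_content="Summarize the meeting notes"),
    )

    # The event string defaults to the new enum value, and the registry maps it
    # back to the event dataclass for deserialization
    assert event.event == TeamRunEvent.pre_hook_started.value  # "TeamPreHookStarted"
    assert TEAM_RUN_EVENT_TYPE_REGISTRY[event.event] is PreHookStartedEvent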
agno/run/workflow.py
CHANGED
@@ -9,7 +9,6 @@ from agno.media import Audio, Image, Video
 from agno.run.agent import RunOutput
 from agno.run.base import BaseRunOutputEvent, RunStatus
 from agno.run.team import TeamRunOutput
-from agno.utils.log import log_error

 if TYPE_CHECKING:
     from agno.workflow.types import StepOutput, WorkflowMetrics
@@ -95,20 +94,6 @@ class BaseWorkflowRunOutputEvent(BaseRunOutputEvent):

         return _dict

-    def to_json(self, separators=(", ", ": "), indent: Optional[int] = 2) -> str:
-        import json
-
-        try:
-            _dict = self.to_dict()
-        except Exception:
-            log_error("Failed to convert response to json", exc_info=True)
-            raise
-
-        if indent is None:
-            return json.dumps(_dict, separators=separators)
-        else:
-            return json.dumps(_dict, indent=indent, separators=separators)
-
     @property
     def is_cancelled(self):
         return False
@@ -155,6 +140,11 @@ class WorkflowErrorEvent(BaseWorkflowRunOutputEvent):
     event: str = WorkflowRunEvent.workflow_error.value
     error: Optional[str] = None

+    # From exceptions
+    error_type: Optional[str] = None
+    error_id: Optional[str] = None
+    additional_data: Optional[Dict[str, Any]] = None
+

 @dataclass
 class WorkflowCancelledEvent(BaseWorkflowRunOutputEvent):
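A short sketch of the extra error metadata that WorkflowErrorEvent now carries, mirroring the agent and team error events above; the field values are hypothetical and the construction assumes the inherited event fields keep their defaults:

    from agno.run.workflow import WorkflowErrorEvent

    # Hypothetical error metadata, for illustration only
    event = WorkflowErrorEvent(
        error="Input failed a guardrail check",
        error_type="InputCheckError",          # assumed exception class name
        error_id="run_7f3a",                   # assumed identifier format
        additional_data={"check": "prompt_injection"},
    )
    assert event.additional_data["check"] == "prompt_injection"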
agno/session/summary.py
CHANGED
@@ -90,17 +90,16 @@ class SessionSummaryManager:
         response_format: Union[Dict[str, Any], Type[BaseModel]],
     ) -> Message:
         if self.session_summary_prompt is not None:
-
-
-
-
-
-
-
-
-            <conversation>
-            """)
+            system_prompt = self.session_summary_prompt
+        else:
+            system_prompt = dedent("""\
+            Analyze the following conversation between a user and an assistant, and extract the following details:
+            - Summary (str): Provide a concise summary of the session, focusing on important information that would be helpful for future interactions.
+            - Topics (Optional[List[str]]): List the topics discussed in the session.
+            Keep the summary concise and to the point. Only include relevant information.
+            """)
         conversation_messages = []
+        system_prompt += "<conversation>"
         for message in conversation:
             if message.role == "user":
                 conversation_messages.append(f"User: {message.content}")
agno/session/team.py
CHANGED
@@ -140,6 +140,7 @@ class TeamSession:
         skip_status = [RunStatus.paused, RunStatus.cancelled, RunStatus.error]

         session_runs = self.runs
+
         # Filter by agent_id and team_id
         if agent_id:
             session_runs = [run for run in session_runs if hasattr(run, "agent_id") and run.agent_id == agent_id]  # type: ignore
@@ -149,7 +150,6 @@ class TeamSession:
         if not member_runs:
             # Filter for the main team runs
             session_runs = [run for run in session_runs if run.parent_run_id is None]  # type: ignore
-
         # Filter by status
         session_runs = [run for run in session_runs if hasattr(run, "status") and run.status not in skip_status]  # type: ignore

@@ -157,6 +157,7 @@ class TeamSession:
         runs_to_process = session_runs[-last_n:] if last_n is not None else session_runs
         messages_from_history = []
         system_message = None
+
         for run_response in runs_to_process:
             if not (run_response and run_response.messages):
                 continue