agno 1.7.2__py3-none-any.whl → 1.7.4__py3-none-any.whl

This diff compares the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. agno/agent/agent.py +264 -155
  2. agno/api/schemas/agent.py +1 -0
  3. agno/api/schemas/team.py +1 -0
  4. agno/app/base.py +0 -22
  5. agno/app/discord/client.py +134 -56
  6. agno/app/fastapi/app.py +0 -11
  7. agno/app/playground/app.py +3 -24
  8. agno/app/playground/async_router.py +97 -28
  9. agno/app/playground/operator.py +25 -19
  10. agno/app/playground/schemas.py +1 -0
  11. agno/app/playground/sync_router.py +93 -26
  12. agno/document/reader/gcs/__init__.py +0 -0
  13. agno/document/reader/gcs/pdf_reader.py +44 -0
  14. agno/embedder/langdb.py +9 -5
  15. agno/knowledge/document.py +199 -8
  16. agno/knowledge/gcs/__init__.py +0 -0
  17. agno/knowledge/gcs/base.py +39 -0
  18. agno/knowledge/gcs/pdf.py +21 -0
  19. agno/models/langdb/langdb.py +8 -5
  20. agno/run/base.py +2 -0
  21. agno/run/response.py +4 -4
  22. agno/run/team.py +6 -6
  23. agno/run/v2/__init__.py +0 -0
  24. agno/run/v2/workflow.py +563 -0
  25. agno/storage/base.py +4 -4
  26. agno/storage/dynamodb.py +74 -10
  27. agno/storage/firestore.py +6 -1
  28. agno/storage/gcs_json.py +8 -2
  29. agno/storage/json.py +20 -5
  30. agno/storage/mongodb.py +14 -5
  31. agno/storage/mysql.py +56 -17
  32. agno/storage/postgres.py +55 -13
  33. agno/storage/redis.py +25 -5
  34. agno/storage/session/__init__.py +3 -1
  35. agno/storage/session/agent.py +3 -0
  36. agno/storage/session/team.py +3 -0
  37. agno/storage/session/v2/__init__.py +5 -0
  38. agno/storage/session/v2/workflow.py +89 -0
  39. agno/storage/singlestore.py +74 -12
  40. agno/storage/sqlite.py +64 -18
  41. agno/storage/yaml.py +26 -6
  42. agno/team/team.py +198 -243
  43. agno/tools/scrapegraph.py +8 -10
  44. agno/utils/log.py +12 -0
  45. agno/utils/message.py +5 -1
  46. agno/utils/openai.py +20 -5
  47. agno/utils/pprint.py +32 -8
  48. agno/workflow/v2/__init__.py +21 -0
  49. agno/workflow/v2/condition.py +554 -0
  50. agno/workflow/v2/loop.py +602 -0
  51. agno/workflow/v2/parallel.py +659 -0
  52. agno/workflow/v2/router.py +521 -0
  53. agno/workflow/v2/step.py +861 -0
  54. agno/workflow/v2/steps.py +465 -0
  55. agno/workflow/v2/types.py +347 -0
  56. agno/workflow/v2/workflow.py +3134 -0
  57. agno/workflow/workflow.py +15 -147
  58. {agno-1.7.2.dist-info → agno-1.7.4.dist-info}/METADATA +1 -1
  59. {agno-1.7.2.dist-info → agno-1.7.4.dist-info}/RECORD +63 -45
  60. {agno-1.7.2.dist-info → agno-1.7.4.dist-info}/WHEEL +0 -0
  61. {agno-1.7.2.dist-info → agno-1.7.4.dist-info}/entry_points.txt +0 -0
  62. {agno-1.7.2.dist-info → agno-1.7.4.dist-info}/licenses/LICENSE +0 -0
  63. {agno-1.7.2.dist-info → agno-1.7.4.dist-info}/top_level.txt +0 -0
agno/app/playground/sync_router.py CHANGED

@@ -38,11 +38,13 @@ from agno.memory.agent import AgentMemory
 from agno.memory.v2 import Memory
 from agno.run.response import RunResponseErrorEvent, RunResponseEvent
 from agno.run.team import RunResponseErrorEvent as TeamRunResponseErrorEvent
+from agno.run.v2.workflow import WorkflowErrorEvent
 from agno.storage.session.agent import AgentSession
 from agno.storage.session.team import TeamSession
 from agno.storage.session.workflow import WorkflowSession
 from agno.team.team import Team
 from agno.utils.log import logger
+from agno.workflow.v2.workflow import Workflow as WorkflowV2
 from agno.workflow.workflow import Workflow
@@ -147,6 +149,31 @@ def team_chat_response_streamer(
         return
 
 
+def workflow_response_streamer(
+    workflow: WorkflowV2,
+    body: WorkflowRunRequest,
+) -> Generator:
+    try:
+        run_response = workflow.run(
+            **body.input,
+            user_id=body.user_id,
+            session_id=body.session_id or str(uuid4()),
+            stream=True,
+            stream_intermediate_steps=True,
+        )
+        for run_response_chunk in run_response:
+            yield run_response_chunk.to_json()
+    except Exception as e:
+        import traceback
+
+        traceback.print_exc(limit=3)
+        error_response = WorkflowErrorEvent(
+            error=str(e),
+        )
+        yield error_response.to_json()
+        return
+
+
 def get_sync_playground_router(
     agents: Optional[List[Agent]] = None,
     workflows: Optional[List[Workflow]] = None,
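The streamer converts exceptions into a terminal WorkflowErrorEvent chunk instead of raising, so a client can read the stream to the end and then inspect the final event. A minimal consumer sketch; the exact serialized shape beyond the error field set above is an assumption:

    import json

    def consume(chunks):
        """Parse streamed JSON chunks, surfacing a terminal error event."""
        for chunk in chunks:
            event = json.loads(chunk)
            # WorkflowErrorEvent is constructed with error=str(e) above;
            # any other keys in the payload are not guaranteed here.
            if event.get("error"):
                raise RuntimeError(f"Workflow failed: {event['error']}")
            yield event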
@@ -615,13 +642,22 @@ def get_sync_playground_router(
         if workflow is None:
             raise HTTPException(status_code=404, detail="Workflow not found")
 
-        return WorkflowGetResponse(
-            workflow_id=workflow.workflow_id,
-            name=workflow.name,
-            description=workflow.description,
-            parameters=workflow._run_parameters or {},
-            storage=workflow.storage.__class__.__name__ if workflow.storage else None,
-        )
+        if isinstance(workflow, Workflow):
+            return WorkflowGetResponse(
+                workflow_id=workflow.workflow_id,
+                name=workflow.name,
+                description=workflow.description,
+                parameters=workflow._run_parameters or {},
+                storage=workflow.storage.__class__.__name__ if workflow.storage else None,
+            )
+        else:
+            return WorkflowGetResponse(
+                workflow_id=workflow.workflow_id,
+                name=workflow.name,
+                description=workflow.description,
+                parameters=workflow.run_parameters,
+                storage=workflow.storage.__class__.__name__ if workflow.storage else None,
+            )
 
     @playground_router.post("/workflows/{workflow_id}/runs")
     def create_workflow_run(workflow_id: str, body: WorkflowRunRequest):
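Both branches serve the same WorkflowGetResponse shape; only the parameters source differs (legacy workflows expose the private `_run_parameters` derived from the run() signature, v2 workflows a public `run_parameters`). A hedged sketch of fetching this metadata; the host, port, and route prefix are assumptions about a typical Playground deployment:

    import httpx  # assumed client; any HTTP library works

    resp = httpx.get("http://localhost:7777/v1/playground/workflows/my-workflow")
    resp.raise_for_status()
    meta = resp.json()
    # Identical keys regardless of workflow version
    print(meta["name"], meta["parameters"], meta["storage"])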
@@ -631,24 +667,52 @@ def get_sync_playground_router(
             raise HTTPException(status_code=404, detail="Workflow not found")
 
         # Create a new instance of this workflow
-        new_workflow_instance = workflow.deep_copy(update={"workflow_id": workflow_id})
-        new_workflow_instance.user_id = body.user_id
-        new_workflow_instance.session_name = None
+        if isinstance(workflow, Workflow):
+            new_workflow_instance = workflow.deep_copy(
+                update={"workflow_id": workflow_id, "session_id": body.session_id}
+            )
+            new_workflow_instance.user_id = body.user_id
+            new_workflow_instance.session_name = None
 
-        # Return based on the response type
-        try:
-            if new_workflow_instance._run_return_type == "RunResponse":
-                # Return as a normal response
-                return new_workflow_instance.run(**body.input)
-            else:
-                # Return as a streaming response
-                return StreamingResponse(
-                    (result.to_json() for result in new_workflow_instance.run(**body.input)),
-                    media_type="text/event-stream",
-                )
-        except Exception as e:
-            # Handle unexpected runtime errors
-            raise HTTPException(status_code=500, detail=f"Error running workflow: {str(e)}")
+            # Return based on the response type
+            try:
+                if new_workflow_instance._run_return_type == "RunResponse":
+                    # Return as a normal response
+                    return new_workflow_instance.run(**body.input)
+                else:
+                    # Return as a streaming response
+                    return StreamingResponse(
+                        (result.to_json() for result in new_workflow_instance.run(**body.input)),
+                        media_type="text/event-stream",
+                        headers={
+                            "Access-Control-Allow-Origin": "*",
+                            "Access-Control-Allow-Methods": "POST, OPTIONS",
+                            "Access-Control-Allow-Headers": "Content-Type, Authorization",
+                        },
+                    )
+            except Exception as e:
+                # Handle unexpected runtime errors
+                raise HTTPException(status_code=500, detail=f"Error running workflow: {str(e)}")
+        else:
+            # Return based on the response type
+            try:
+                if body.stream:
+                    # Return as a streaming response
+                    return StreamingResponse(
+                        workflow_response_streamer(workflow, body),
+                        media_type="text/event-stream",
+                        headers={
+                            "Access-Control-Allow-Origin": "*",
+                            "Access-Control-Allow-Methods": "POST, OPTIONS",
+                            "Access-Control-Allow-Headers": "Content-Type, Authorization",
+                        },
+                    )
+                else:
+                    # Return as a normal response
+                    return workflow.arun(**body.input, session_id=body.session_id or str(uuid4()), user_id=body.user_id)
+            except Exception as e:
+                # Handle unexpected runtime errors
+                raise HTTPException(status_code=500, detail=f"Error running workflow: {str(e)}")
 
     @playground_router.get("/workflows/{workflow_id}/sessions")
     def get_all_workflow_sessions(workflow_id: str, user_id: Optional[str] = Query(None, min_length=1)):
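A sketch of driving the new v2 branch from a client. The URL and the workflow input keys are illustrative assumptions; `stream=True` routes to workflow_response_streamer, while `stream=False` falls through to `workflow.arun(...)`:

    import httpx  # assumed client

    payload = {
        "input": {"topic": "open source agents"},  # expanded server-side as **body.input
        "user_id": "demo-user",                    # optional
        "session_id": None,                        # server falls back to str(uuid4())
        "stream": True,
    }

    url = "http://localhost:7777/v1/playground/workflows/my-workflow/runs"
    with httpx.stream("POST", url, json=payload) as response:
        for chunk in response.iter_text():
            print(chunk)  # one JSON-serialized workflow event per chunk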
@@ -703,8 +767,11 @@ def get_sync_playground_router(
         if not workflow_session:
             raise HTTPException(status_code=404, detail="Session not found")
 
-        # Return the session
-        return workflow_session
+        workflow_session_dict = workflow_session.to_dict()
+        if "memory" not in workflow_session_dict:
+            workflow_session_dict["memory"] = {"runs": workflow_session_dict.pop("runs", [])}
+
+        return JSONResponse(content=workflow_session_dict)
 
     @playground_router.post("/workflows/{workflow_id}/sessions/{session_id}/rename")
     def rename_workflow_session(
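The shim normalizes v2 sessions, which store `runs` at the top level, into the v1 shape where consumers read them under `memory`. The same transformation applied to a plain dict:

    session = {"session_id": "abc", "runs": [{"run_id": "r1"}]}  # v2-style shape
    if "memory" not in session:
        session["memory"] = {"runs": session.pop("runs", [])}
    assert session["memory"]["runs"][0]["run_id"] == "r1"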
agno/document/reader/gcs/__init__.py (file without changes)
agno/document/reader/gcs/pdf_reader.py ADDED

@@ -0,0 +1,44 @@
+import asyncio
+from io import BytesIO
+from typing import List
+from uuid import uuid4
+
+from agno.document.base import Document
+from agno.document.reader.base import Reader
+from agno.utils.log import log_info
+
+try:
+    from google.cloud import storage
+except ImportError:
+    raise ImportError("`google-cloud-storage` not installed. Please install it via `pip install google-cloud-storage`.")
+
+try:
+    from pypdf import PdfReader as DocumentReader
+except ImportError:
+    raise ImportError("`pypdf` not installed. Please install it via `pip install pypdf`.")
+
+
+class GCSPDFReader(Reader):
+    def read(self, blob: storage.Blob) -> List[Document]:
+        log_info(f"Reading: gs://{blob.bucket.name}/{blob.name}")
+        data = blob.download_as_bytes()
+        doc_name = blob.name.split("/")[-1].split(".")[0].replace("/", "_").replace(" ", "_")
+        doc_reader = DocumentReader(BytesIO(data))
+        documents = [
+            Document(
+                name=doc_name,
+                id=str(uuid4()),
+                meta_data={"page": page_number},
+                content=page.extract_text(),
+            )
+            for page_number, page in enumerate(doc_reader.pages, start=1)
+        ]
+        if self.chunk:
+            chunked_documents = []
+            for document in documents:
+                chunked_documents.extend(self.chunk_document(document))
+            return chunked_documents
+        return documents
+
+    async def async_read(self, blob: storage.Blob) -> List[Document]:
+        return await asyncio.to_thread(self.read, blob)
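A usage sketch for the new reader; it assumes GCP credentials are configured, and the bucket and object names below are placeholders:

    from google.cloud import storage

    from agno.document.reader.gcs.pdf_reader import GCSPDFReader

    client = storage.Client()
    blob = client.bucket("my-bucket").blob("docs/report.pdf")  # placeholder names

    reader = GCSPDFReader()
    documents = reader.read(blob=blob)  # one Document per page, chunked if reader.chunk is set
    print(len(documents), documents[0].meta_data)  # e.g. 12 {'page': 1}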
agno/embedder/langdb.py CHANGED
@@ -20,10 +20,8 @@ class LangDBEmbedder(Embedder):
     user: Optional[str] = None
     api_key: Optional[str] = getenv("LANGDB_API_KEY")
     project_id: Optional[str] = getenv("LANGDB_PROJECT_ID")
-    if not project_id:
-        logger.error("LANGDB_PROJECT_ID not set in the environment")
     organization: Optional[str] = None
-    base_url: Optional[str] = f"https://api.us-east-1.langdb.ai/{project_id}/v1"
+    base_url: Optional[str] = None
     request_params: Optional[Dict[str, Any]] = None
     client_params: Optional[Dict[str, Any]] = None
     openai_client: Optional[OpenAIClient] = None

@@ -33,13 +31,19 @@ class LangDBEmbedder(Embedder):
         if self.openai_client:
             return self.openai_client
 
+        if not self.project_id:
+            raise ValueError("LANGDB_PROJECT_ID not set in the environment")
+
         _client_params: Dict[str, Any] = {}
         if self.api_key:
             _client_params["api_key"] = self.api_key
         if self.organization:
             _client_params["organization"] = self.organization
-        if self.base_url:
-            _client_params["base_url"] = self.base_url
+
+        if not self.base_url:
+            self.base_url = f"https://api.us-east-1.langdb.ai/{self.project_id}/v1"
+        _client_params["base_url"] = self.base_url
+
         if self.client_params:
             _client_params.update(self.client_params)
         return OpenAIClient(**_client_params)
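The effect of the change: `base_url` is no longer computed at class-definition time (when `project_id` may still be unset), and a missing project id now fails fast with a ValueError instead of logging and continuing. A minimal sketch with a placeholder project id, assuming the standard Embedder.get_embedding interface:

    from agno.embedder.langdb import LangDBEmbedder

    embedder = LangDBEmbedder(project_id="my-project-id")  # or set LANGDB_PROJECT_ID
    # base_url is derived lazily as https://api.us-east-1.langdb.ai/<project_id>/v1
    embedding = embedder.get_embedding("hello world")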
agno/knowledge/document.py CHANGED

@@ -1,11 +1,12 @@
-from typing import AsyncIterator, Iterator, List
+from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union
 
 from agno.document import Document
 from agno.knowledge.agent import AgentKnowledge
+from agno.utils.log import log_info, logger
 
 
 class DocumentKnowledgeBase(AgentKnowledge):
-    documents: List[Document]
+    documents: Optional[Union[List[Document], List[Dict[str, Any]]]] = None
 
     @property
     def document_lists(self) -> Iterator[List[Document]]:

@@ -15,18 +16,208 @@ class DocumentKnowledgeBase(AgentKnowledge):
         Returns:
             Iterator[List[Document]]: Iterator yielding list of documents
         """
+        if self.documents is None:
+            # Return empty iterator when no documents are set
+            return
 
-        for _document in self.documents:
-            yield [_document]
+        for item in self.documents:
+            if isinstance(item, dict) and "document" in item:
+                # Handle document with metadata
+                document: Document = item["document"]
+                config = item.get("metadata", {})
+                if config:
+                    log_info(f"Adding metadata {config} to document: {document.name}")
+                    # Create a copy of the document with updated metadata
+                    updated_document = Document(
+                        content=document.content,
+                        id=document.id,
+                        name=document.name,
+                        meta_data={**document.meta_data, **config},
+                        embedder=document.embedder,
+                        embedding=document.embedding,
+                        usage=document.usage,
+                        reranking_score=document.reranking_score,
+                    )
+                    yield [updated_document]
+                else:
+                    yield [document]
+            elif isinstance(item, Document):
+                # Handle direct document
+                yield [item]
+            else:
+                raise ValueError(f"Invalid document format: {type(item)}")
 
     @property
     async def async_document_lists(self) -> AsyncIterator[List[Document]]:
-        """Iterate over documents and yield lists of documents.
+        """Iterate over documents and yield lists of documents asynchronously.
         Each object yielded by the iterator is a list of documents.
 
         Returns:
-            Iterator[List[Document]]: Iterator yielding list of documents
+            AsyncIterator[List[Document]]: Iterator yielding list of documents
+        """
+        if self.documents is None:
+            # Return empty iterator when no documents are set
+            return
+
+        for item in self.documents:
+            if isinstance(item, dict) and "document" in item:
+                # Handle document with metadata
+                document: Document = item["document"]
+                config = item.get("metadata", {})
+                if config:
+                    log_info(f"Adding metadata {config} to document: {document.name}")
+                    # Create a copy of the document with updated metadata
+                    updated_document = Document(
+                        content=document.content,
+                        id=document.id,
+                        name=document.name,
+                        meta_data={**document.meta_data, **config},
+                        embedder=document.embedder,
+                        embedding=document.embedding,
+                        usage=document.usage,
+                        reranking_score=document.reranking_score,
+                    )
+                    yield [updated_document]
+                else:
+                    yield [document]
+            elif isinstance(item, Document):
+                # Handle direct document
+                yield [item]
+            else:
+                raise ValueError(f"Invalid document format: {type(item)}")
+
+    def _prepare_document_load(
+        self,
+        metadata: Optional[Dict[str, Any]] = None,
+        recreate: bool = False,
+    ) -> bool:
+        """Prepare collection for loading documents (no file validation needed).
+        Args:
+            metadata (Optional[Dict[str, Any]]): Metadata to track
+            recreate (bool): Whether to recreate the collection
+        Returns:
+            bool: True if preparation succeeded, False otherwise
+        """
+        # 1. Track metadata
+        if metadata:
+            self._track_metadata_structure(metadata)
+
+        # 2. Prepare vector DB
+        if self.vector_db is None:
+            logger.warning("Cannot load document: No vector db provided.")
+            return False
+
+        # Recreate collection if requested
+        if recreate:
+            self.vector_db.drop()
+
+        # Create collection if it doesn't exist
+        if not self.vector_db.exists():
+            self.vector_db.create()
+
+        return True
+
+    async def _aprepare_document_load(
+        self,
+        metadata: Optional[Dict[str, Any]] = None,
+        recreate: bool = False,
+    ) -> bool:
+        """Prepare collection for loading documents asynchronously (no file validation needed).
+        Args:
+            metadata (Optional[Dict[str, Any]]): Metadata to track
+            recreate (bool): Whether to recreate the collection
+        Returns:
+            bool: True if preparation succeeded, False otherwise
         """
+        # 1. Track metadata
+        if metadata:
+            self._track_metadata_structure(metadata)
+
+        # 2. Prepare vector DB
+        if self.vector_db is None:
+            logger.warning("Cannot load document: No vector db provided.")
+            return False
+
+        # Recreate collection if requested
+        if recreate:
+            await self.vector_db.async_drop()
+
+        # Create collection if it doesn't exist
+        if not await self.vector_db.async_exists():
+            await self.vector_db.async_create()
+
+        return True
+
+    def load_document(
+        self,
+        document: Document,
+        metadata: Optional[Dict[str, Any]] = None,
+        recreate: bool = False,
+        upsert: bool = False,
+        skip_existing: bool = True,
+    ) -> None:
+        """Load a single document with specific metadata into the vector DB."""
+
+        # Use our document-specific preparation method
+        if not self._prepare_document_load(metadata, recreate):
+            return
+
+        # Apply metadata if provided
+        if metadata:
+            # Create a copy of the document with updated metadata
+            document = Document(
+                content=document.content,
+                id=document.id,
+                name=document.name,
+                meta_data={**document.meta_data, **metadata},
+                embedder=document.embedder,
+                embedding=document.embedding,
+                usage=document.usage,
+                reranking_score=document.reranking_score,
+            )
+
+        # Process documents
+        self.process_documents(
+            documents=[document],
+            metadata=metadata,
+            upsert=upsert,
+            skip_existing=skip_existing,
+            source_info=f"document: {document.name or document.id}",
+        )
+
+    async def aload_document(
+        self,
+        document: Document,
+        metadata: Optional[Dict[str, Any]] = None,
+        recreate: bool = False,
+        upsert: bool = False,
+        skip_existing: bool = True,
+    ) -> None:
+        """Load a single document with specific metadata into the vector DB asynchronously."""
+
+        # Use our document-specific preparation method
+        if not await self._aprepare_document_load(metadata, recreate):
+            return
+
+        # Apply metadata if provided
+        if metadata:
+            # Create a copy of the document with updated metadata
+            document = Document(
+                content=document.content,
+                id=document.id,
+                name=document.name,
+                meta_data={**document.meta_data, **metadata},
+                embedder=document.embedder,
+                embedding=document.embedding,
+                usage=document.usage,
+                reranking_score=document.reranking_score,
+            )
 
-        for _document in self.documents:
-            yield [_document]
+        # Process documents
+        await self.aprocess_documents(
+            documents=[document],
+            metadata=metadata,
+            upsert=upsert,
+            skip_existing=skip_existing,
+            source_info=f"document: {document.name or document.id}",
+        )
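The `documents` field now also accepts dict entries that bundle a Document with metadata to merge into `meta_data`. A usage sketch (a vector_db is only needed once you call load/aload):

    from agno.document import Document
    from agno.knowledge.document import DocumentKnowledgeBase

    kb = DocumentKnowledgeBase(
        documents=[
            {
                "document": Document(content="Quarterly report"),
                "metadata": {"source": "finance", "year": 2024},
            },
        ],
    )

    for batch in kb.document_lists:
        print(batch[0].meta_data)  # {'source': 'finance', 'year': 2024}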
agno/knowledge/gcs/__init__.py (file without changes)
agno/knowledge/gcs/base.py ADDED

@@ -0,0 +1,39 @@
+from typing import AsyncIterator, Iterator, List, Optional
+
+from google.cloud import storage
+
+from agno.document import Document
+from agno.knowledge.agent import AgentKnowledge
+
+
+class GCSKnowledgeBase(AgentKnowledge):
+    bucket: Optional[storage.Bucket] = None
+    bucket_name: Optional[str] = None
+    blob_name: Optional[str] = None
+    prefix: Optional[str] = None
+
+    @property
+    def gcs_blobs(self) -> List[storage.Blob]:
+        if self.bucket is None and self.bucket_name is None:
+            raise ValueError("No bucket or bucket_name provided")
+        if self.bucket is not None and self.bucket_name is not None:
+            raise ValueError("Provide either bucket or bucket_name")
+        if self.bucket_name is not None:
+            client = storage.Client()
+            self.bucket = client.bucket(self.bucket_name)
+        blobs_to_read = []
+        if self.blob_name is not None:
+            blobs_to_read.append(self.bucket.blob(self.blob_name))  # type: ignore
+        elif self.prefix is not None:
+            blobs_to_read.extend(self.bucket.list_blobs(prefix=self.prefix))  # type: ignore
+        else:
+            blobs_to_read.extend(self.bucket.list_blobs())  # type: ignore
+        return list(blobs_to_read)
+
+    @property
+    def document_lists(self) -> Iterator[List[Document]]:
+        raise NotImplementedError
+
+    @property
+    def async_document_lists(self) -> AsyncIterator[List[Document]]:
+        raise NotImplementedError
agno/knowledge/gcs/pdf.py ADDED

@@ -0,0 +1,21 @@
+from typing import AsyncIterator, Iterator, List
+
+from agno.document import Document
+from agno.document.reader.gcs.pdf_reader import GCSPDFReader
+from agno.knowledge.gcs.base import GCSKnowledgeBase
+
+
+class GCSPDFKnowledgeBase(GCSKnowledgeBase):
+    reader: GCSPDFReader = GCSPDFReader()
+
+    @property
+    def document_lists(self) -> Iterator[List[Document]]:
+        for blob in self.gcs_blobs:
+            if blob.name.endswith(".pdf"):
+                yield self.reader.read(blob=blob)
+
+    @property
+    async def async_document_lists(self) -> AsyncIterator[List[Document]]:
+        for blob in self.gcs_blobs:
+            if blob.name.endswith(".pdf"):
+                yield await self.reader.async_read(blob=blob)
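A usage sketch for the new knowledge base, assuming GCP credentials; the bucket name and prefix are placeholders, and a vector_db must be attached before loading:

    from agno.knowledge.gcs.pdf import GCSPDFKnowledgeBase

    kb = GCSPDFKnowledgeBase(
        bucket_name="my-company-docs",  # or pass an existing storage.Bucket via `bucket`
        prefix="manuals/",              # only blobs ending in .pdf are read
    )
    # kb.load(recreate=False)  # requires kb.vector_db to be configured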
agno/models/langdb/langdb.py CHANGED

@@ -3,7 +3,6 @@ from os import getenv
 from typing import Any, Dict, Optional
 
 from agno.models.openai.like import OpenAILike
-from agno.utils.log import logger
 
 
 @dataclass

@@ -25,21 +24,25 @@ class LangDB(OpenAILike):
 
     api_key: Optional[str] = getenv("LANGDB_API_KEY")
     project_id: Optional[str] = getenv("LANGDB_PROJECT_ID")
-    if not project_id:
-        logger.warning("LANGDB_PROJECT_ID not set in the environment")
 
     base_host_url: str = getenv("LANGDB_API_BASE_URL", "https://api.us-east-1.langdb.ai")
 
-    base_url: str = f"{base_host_url}/{project_id}/v1"
+    base_url: Optional[str] = None
     label: Optional[str] = None
     default_headers: Optional[dict] = None
 
     def _get_client_params(self) -> Dict[str, Any]:
+        if not self.project_id:
+            raise ValueError("LANGDB_PROJECT_ID not set in the environment")
+
+        if not self.base_url:
+            self.base_url = f"{self.base_host_url}/{self.project_id}/v1"
+
         # Initialize headers with label if present
         if self.label and not self.default_headers:
            self.default_headers = {
                 "x-label": self.label,
             }
-        client_params = super()._get_client_params()
 
+        client_params = super()._get_client_params()
         return client_params
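Same fix as the embedder: the URL is resolved per instance inside _get_client_params rather than at class-definition time. A minimal sketch with placeholder ids (the import path assumes the usual agno re-export):

    from agno.models.langdb import LangDB

    model = LangDB(id="gpt-4o-mini", project_id="my-project-id")
    # On first client construction, base_url becomes
    # f"{base_host_url}/{project_id}/v1"; a missing project_id raises ValueError.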
agno/run/base.py CHANGED
@@ -206,7 +206,9 @@ class RunResponseExtraData:
 class RunStatus(str, Enum):
     """State of the main run response"""
 
+    pending = "PENDING"
     running = "RUNNING"
+    completed = "COMPLETED"
     paused = "PAUSED"
     cancelled = "CANCELLED"
     error = "ERROR"
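RunStatus is a str-backed enum, so the new members serialize directly. A trivial check of the expanded lifecycle:

    from agno.run.base import RunStatus

    assert RunStatus.pending.value == "PENDING"
    assert RunStatus.completed.value == "COMPLETED"
    print([status.value for status in RunStatus])
    # ['PENDING', 'RUNNING', 'COMPLETED', 'PAUSED', 'CANCELLED', 'ERROR']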
agno/run/response.py CHANGED
@@ -379,16 +379,16 @@ class RunResponse:
         messages = data.pop("messages", None)
         messages = [Message.model_validate(message) for message in messages] if messages else None
 
-        tools = data.pop("tools", None)
+        tools = data.pop("tools", [])
         tools = [ToolExecution.from_dict(tool) for tool in tools] if tools else None
 
-        images = data.pop("images", None)
+        images = data.pop("images", [])
         images = [ImageArtifact.model_validate(image) for image in images] if images else None
 
-        videos = data.pop("videos", None)
+        videos = data.pop("videos", [])
         videos = [VideoArtifact.model_validate(video) for video in videos] if videos else None
 
-        audio = data.pop("audio", None)
+        audio = data.pop("audio", [])
         audio = [AudioArtifact.model_validate(audio) for audio in audio] if audio else None
 
         response_audio = data.pop("response_audio", None)
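The default change from None to [] (here and in the TeamRunResponse hunk below) is behavior-preserving, because each value still passes through an `if value else None` guard; it simply makes the pop tolerant of explicit nulls and missing keys alike. A sketch of the pattern in isolation:

    def parse_tools(data: dict):
        tools = data.pop("tools", [])
        return list(tools) if tools else None

    assert parse_tools({}) is None                  # missing key
    assert parse_tools({"tools": None}) is None     # explicit null
    assert parse_tools({"tools": [{"name": "x"}]}) == [{"name": "x"}]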
agno/run/team.py CHANGED
@@ -361,9 +361,9 @@ class TeamRunResponse:
         messages = data.pop("messages", None)
         messages = [Message.model_validate(message) for message in messages] if messages else None
 
-        member_responses = data.pop("member_responses", None)
+        member_responses = data.pop("member_responses", [])
         parsed_member_responses: List[Union["TeamRunResponse", RunResponse]] = []
-        if member_responses is not None:
+        if member_responses:
             for response in member_responses:
                 if "agent_id" in response:
                     parsed_member_responses.append(RunResponse.from_dict(response))

@@ -374,16 +374,16 @@ class TeamRunResponse:
         if extra_data is not None:
             extra_data = RunResponseExtraData.from_dict(extra_data)
 
-        images = data.pop("images", None)
+        images = data.pop("images", [])
         images = [ImageArtifact.model_validate(image) for image in images] if images else None
 
-        videos = data.pop("videos", None)
+        videos = data.pop("videos", [])
         videos = [VideoArtifact.model_validate(video) for video in videos] if videos else None
 
-        audio = data.pop("audio", None)
+        audio = data.pop("audio", [])
         audio = [AudioArtifact.model_validate(audio) for audio in audio] if audio else None
 
-        tools = data.pop("tools", None)
+        tools = data.pop("tools", [])
         tools = [ToolExecution.from_dict(tool) for tool in tools] if tools else None
 
         response_audio = data.pop("response_audio", None)
  response_audio = data.pop("response_audio", None)
File without changes