agno-2.0.5-py3-none-any.whl → agno-2.0.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +67 -17
- agno/db/dynamo/dynamo.py +7 -5
- agno/db/firestore/firestore.py +4 -2
- agno/db/gcs_json/gcs_json_db.py +4 -2
- agno/db/json/json_db.py +8 -4
- agno/db/mongo/mongo.py +6 -4
- agno/db/mysql/mysql.py +2 -1
- agno/db/postgres/postgres.py +2 -1
- agno/db/redis/redis.py +1 -1
- agno/db/singlestore/singlestore.py +2 -2
- agno/db/sqlite/sqlite.py +1 -1
- agno/knowledge/chunking/semantic.py +33 -6
- agno/knowledge/embedder/openai.py +19 -11
- agno/knowledge/knowledge.py +4 -3
- agno/knowledge/reader/website_reader.py +33 -16
- agno/media.py +72 -0
- agno/models/aimlapi/aimlapi.py +2 -2
- agno/models/base.py +68 -12
- agno/models/cerebras/cerebras_openai.py +2 -2
- agno/models/deepinfra/deepinfra.py +2 -2
- agno/models/deepseek/deepseek.py +2 -2
- agno/models/fireworks/fireworks.py +2 -2
- agno/models/internlm/internlm.py +2 -2
- agno/models/langdb/langdb.py +4 -4
- agno/models/litellm/litellm_openai.py +2 -2
- agno/models/llama_cpp/__init__.py +5 -0
- agno/models/llama_cpp/llama_cpp.py +22 -0
- agno/models/message.py +26 -0
- agno/models/meta/llama_openai.py +2 -2
- agno/models/nebius/nebius.py +2 -2
- agno/models/nexus/__init__.py +3 -0
- agno/models/nexus/nexus.py +22 -0
- agno/models/nvidia/nvidia.py +2 -2
- agno/models/openrouter/openrouter.py +2 -2
- agno/models/perplexity/perplexity.py +2 -2
- agno/models/portkey/portkey.py +3 -3
- agno/models/response.py +2 -1
- agno/models/sambanova/sambanova.py +2 -2
- agno/models/together/together.py +2 -2
- agno/models/vercel/v0.py +2 -2
- agno/models/xai/xai.py +2 -2
- agno/os/app.py +4 -10
- agno/os/router.py +3 -2
- agno/os/routers/evals/evals.py +1 -1
- agno/os/routers/memory/memory.py +1 -1
- agno/os/schema.py +3 -4
- agno/os/utils.py +47 -12
- agno/run/agent.py +20 -0
- agno/run/team.py +18 -1
- agno/run/workflow.py +10 -0
- agno/team/team.py +58 -18
- agno/tools/decorator.py +4 -2
- agno/tools/e2b.py +14 -7
- agno/tools/file_generation.py +350 -0
- agno/tools/function.py +2 -0
- agno/tools/mcp.py +1 -1
- agno/tools/memori.py +1 -53
- agno/utils/events.py +7 -1
- agno/utils/gemini.py +24 -4
- agno/vectordb/chroma/chromadb.py +66 -25
- agno/vectordb/lancedb/lance_db.py +15 -4
- agno/vectordb/milvus/milvus.py +6 -0
- agno/workflow/workflow.py +32 -0
- {agno-2.0.5.dist-info → agno-2.0.7.dist-info}/METADATA +4 -1
- {agno-2.0.5.dist-info → agno-2.0.7.dist-info}/RECORD +68 -63
- {agno-2.0.5.dist-info → agno-2.0.7.dist-info}/WHEEL +0 -0
- {agno-2.0.5.dist-info → agno-2.0.7.dist-info}/licenses/LICENSE +0 -0
- {agno-2.0.5.dist-info → agno-2.0.7.dist-info}/top_level.txt +0 -0
agno/tools/memori.py
CHANGED
```diff
@@ -1,7 +1,6 @@
 import json
 from typing import Any, Dict, List, Optional

-from agno.agent import Agent
 from agno.tools.toolkit import Toolkit
 from agno.utils.log import log_debug, log_error, log_info, log_warning

@@ -122,7 +121,6 @@ class MemoriTools(Toolkit):

     def search_memory(
         self,
-        agent: Agent,
         query: str,
         limit: Optional[int] = None,
     ) -> str:

@@ -180,7 +178,7 @@ class MemoriTools(Toolkit):
             log_error(f"Error searching memory: {e}")
             return json.dumps({"success": False, "error": f"Memory search error: {str(e)}"})

-    def record_conversation(self, agent: Agent, content: str) -> str:
+    def record_conversation(self, content: str) -> str:
         """
         Add important information or facts to memory.

@@ -222,7 +220,6 @@ class MemoriTools(Toolkit):

     def get_memory_stats(
         self,
-        agent: Agent,
     ) -> str:
         """
         Get statistics about the memory system.

@@ -340,52 +337,3 @@ class MemoriTools(Toolkit):
         except Exception as e:
             log_error(f"Failed to disable memory system: {e}")
             return False
-
-
-def create_memori_search_tool(memori_toolkit: MemoriTools):
-    """
-    Create a standalone memory search function for use with Agno agents.
-
-    This is a convenience function that creates a memory search tool similar
-    to the pattern shown in the Memori example code.
-
-    Args:
-        memori_toolkit: An initialized MemoriTools instance
-
-    Returns:
-        Callable: A memory search function that can be used as an agent tool
-
-    Example:
-        ```python
-        memori_tools = MemoriTools(database_connect="sqlite:///memory.db")
-        search_tool = create_memori_search_tool(memori_tools)
-
-        agent = Agent(
-            model=OpenAIChat(),
-            tools=[search_tool],
-            description="Agent with memory search capability"
-        )
-        ```
-    """
-
-    def search_memory(query: str) -> str:
-        """
-        Search the agent's memory for past conversations and information.
-
-        Args:
-            query: What to search for in memory
-
-        Returns:
-            str: Search results or error message
-        """
-        try:
-            if not query.strip():
-                return "Please provide a search query"
-
-            result = memori_toolkit._memory_tool.execute(query=query.strip())
-            return str(result) if result else "No relevant memories found"
-
-        except Exception as e:
-            return f"Memory search error: {str(e)}"
-
-    return search_memory
```
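The upshot of these hunks: the toolkit methods no longer take the calling `Agent`, and the standalone `create_memori_search_tool` helper is removed because registering the toolkit directly covers the same use case. A minimal post-2.0.7 usage sketch; the connection string comes from the removed docstring example, and the model argument is omitted for brevity:

```python
from agno.agent import Agent
from agno.tools.memori import MemoriTools

# Registering the toolkit directly replaces the removed
# create_memori_search_tool() helper.
memori_tools = MemoriTools(database_connect="sqlite:///memory.db")
agent = Agent(tools=[memori_tools])

# Tool methods are now called without an Agent argument:
memori_tools.search_memory(query="project deadlines")
memori_tools.get_memory_stats()
```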
agno/utils/events.py
CHANGED
```diff
@@ -1,4 +1,4 @@
-from typing import Any, List, Optional
+from typing import Any, Dict, List, Optional

 from agno.media import Audio, Image
 from agno.models.message import Citations

@@ -76,6 +76,7 @@ def create_team_run_completed_event(from_run_response: TeamRunOutput) -> TeamRun
         content_type=from_run_response.content_type,  # type: ignore
         reasoning_content=from_run_response.reasoning_content,  # type: ignore
         citations=from_run_response.citations,  # type: ignore
+        model_provider_data=from_run_response.model_provider_data,  # type: ignore
         images=from_run_response.images,  # type: ignore
         videos=from_run_response.videos,  # type: ignore
         audio=from_run_response.audio,  # type: ignore

@@ -100,6 +101,7 @@ def create_run_completed_event(from_run_response: RunOutput) -> RunCompletedEven
         content_type=from_run_response.content_type,  # type: ignore
         reasoning_content=from_run_response.reasoning_content,  # type: ignore
         citations=from_run_response.citations,  # type: ignore
+        model_provider_data=from_run_response.model_provider_data,  # type: ignore
         images=from_run_response.images,  # type: ignore
         videos=from_run_response.videos,  # type: ignore
         audio=from_run_response.audio,  # type: ignore

@@ -343,6 +345,7 @@ def create_run_output_content_event(
     content_type: Optional[str] = None,
     reasoning_content: Optional[str] = None,
     redacted_reasoning_content: Optional[str] = None,
+    model_provider_data: Optional[Dict[str, Any]] = None,
     citations: Optional[Citations] = None,
     response_audio: Optional[Audio] = None,
     image: Optional[Image] = None,

@@ -364,6 +367,7 @@ def create_run_output_content_event(
         additional_input=from_run_response.additional_input,
         reasoning_steps=from_run_response.reasoning_steps,
         reasoning_messages=from_run_response.reasoning_messages,
+        model_provider_data=model_provider_data,
     )


@@ -374,6 +378,7 @@ def create_team_run_output_content_event(
     reasoning_content: Optional[str] = None,
     redacted_reasoning_content: Optional[str] = None,
     citations: Optional[Citations] = None,
+    model_provider_data: Optional[Dict[str, Any]] = None,
     response_audio: Optional[Audio] = None,
     image: Optional[Image] = None,
 ) -> TeamRunContentEvent:

@@ -388,6 +393,7 @@ def create_team_run_output_content_event(
         content_type=content_type or "str",
         reasoning_content=thinking_combined,
         citations=citations,
+        model_provider_data=model_provider_data,
         response_audio=response_audio,
         image=image,
         references=from_run_response.references,  # type: ignore
```
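All four builders now thread an optional `model_provider_data` dictionary from the run output onto the emitted event. A standalone mirror of that change, using stub types since the real `RunOutput` and event classes are not shown in full here:

```python
from dataclasses import dataclass
from typing import Any, Dict, Optional

@dataclass
class RunOutputStub:  # stand-in for agno's RunOutput
    model_provider_data: Optional[Dict[str, Any]] = None

@dataclass
class RunContentEventStub:  # stand-in for the emitted event class
    model_provider_data: Optional[Dict[str, Any]] = None

def build_event(out: RunOutputStub) -> RunContentEventStub:
    # The new keyword defaults to None, so pre-2.0.7 callers are unaffected.
    return RunContentEventStub(model_provider_data=out.model_provider_data)

assert build_event(RunOutputStub({"response_id": "resp_123"})).model_provider_data == {"response_id": "resp_123"}
assert build_event(RunOutputStub()).model_provider_data is None
```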
agno/utils/gemini.py
CHANGED
```diff
@@ -146,13 +146,24 @@ def convert_schema(schema_dict: Dict[str, Any], root_schema: Optional[Dict[str,
         # For Gemini, we need to represent Dict[str, T] as an object with at least one property
         # to avoid the "properties should be non-empty" error.
         # We'll create a generic property that represents the dictionary structure
-
+
+        # Handle both single types and union types (arrays) from Zod schemas
+        type_value = additional_props.get("type", "string")
+        if isinstance(type_value, list):
+            value_type = type_value[0].upper() if type_value else "STRING"
+            union_types = ", ".join(type_value)
+            type_description_suffix = f" (supports union types: {union_types})"
+        else:
+            # Single type
+            value_type = type_value.upper()
+            type_description_suffix = ""
+
         # Create a placeholder property to satisfy Gemini's requirements
         # This is a workaround since Gemini doesn't support additionalProperties directly
         placeholder_properties = {
             "example_key": Schema(
                 type=value_type,
-                description=f"Example key-value pair. This object can contain any number of keys with {value_type.lower()} values.",
+                description=f"Example key-value pair. This object can contain any number of keys with {value_type.lower()} values{type_description_suffix}.",
             )
         }
         if value_type == "ARRAY":

@@ -162,7 +173,7 @@ def convert_schema(schema_dict: Dict[str, Any], root_schema: Optional[Dict[str,
             type=Type.OBJECT,
             properties=placeholder_properties,
             description=description
-            or f"Dictionary with {value_type.lower()} values. Can contain any number of key-value pairs.",
+            or f"Dictionary with {value_type.lower()} values{type_description_suffix}. Can contain any number of key-value pairs.",
             default=default,
         )
     else:

@@ -174,7 +185,10 @@ def convert_schema(schema_dict: Dict[str, Any], root_schema: Optional[Dict[str,
         return Schema(type=Type.OBJECT, description=description, default=default)

     elif schema_type == "array" and "items" in schema_dict:
-        items = convert_schema(schema_dict["items"], root_schema)
+        if not schema_dict["items"]:  # Handle empty {}
+            items = Schema(type=Type.STRING)
+        else:
+            items = convert_schema(schema_dict["items"], root_schema)
         min_items = schema_dict.get("minItems")
         max_items = schema_dict.get("maxItems")
         return Schema(

@@ -233,6 +247,12 @@ def convert_schema(schema_dict: Dict[str, Any], root_schema: Optional[Dict[str,
             default=default,
         )
     else:
+        if isinstance(schema_type, list):
+            non_null_types = [t for t in schema_type if t != "null"]
+            if non_null_types:
+                schema_type = non_null_types[0]
+            else:
+                schema_type = ""
         # Only convert to uppercase if schema_type is not empty
         if schema_type:
             schema_type = schema_type.upper()
```
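Both new branches address the same input shape: a JSON Schema `type` that arrives as a list (a union, as Zod emits for nullable or union fields), where Gemini's `Schema` needs a single type. A standalone sketch of the fallback branch's collapsing rule, without the google-genai types:

```python
def collapse_union(schema_type):
    """Mirror of the new fallback branch: drop "null" members,
    keep the first remaining type, uppercase if non-empty."""
    if isinstance(schema_type, list):
        non_null_types = [t for t in schema_type if t != "null"]
        schema_type = non_null_types[0] if non_null_types else ""
    return schema_type.upper() if schema_type else schema_type

assert collapse_union(["string", "null"]) == "STRING"
assert collapse_union("boolean") == "BOOLEAN"
assert collapse_union(["null"]) == ""
```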
agno/vectordb/chroma/chromadb.py
CHANGED
```diff
@@ -1,4 +1,5 @@
 import asyncio
+import json
 from hashlib import md5
 from typing import Any, Dict, List, Mapping, Optional, Union, cast

@@ -60,6 +61,44 @@ class ChromaDb(VectorDb):
         # Chroma client kwargs
         self.kwargs = kwargs

+    def _flatten_metadata(self, metadata: Dict[str, Any]) -> Dict[str, Union[str, int, float, bool]]:
+        """
+        Flatten nested metadata to ChromaDB-compatible format.
+
+        Args:
+            metadata: Dictionary that may contain nested structures
+
+        Returns:
+            Flattened dictionary with only primitive values
+        """
+        flattened: Dict[str, Any] = {}
+
+        def _flatten_recursive(obj: Any, prefix: str = "") -> None:
+            if isinstance(obj, dict):
+                if len(obj) == 0:
+                    # Handle empty dictionaries by converting to JSON string
+                    flattened[prefix] = json.dumps(obj)
+                else:
+                    for key, value in obj.items():
+                        new_key = f"{prefix}.{key}" if prefix else key
+                        _flatten_recursive(value, new_key)
+            elif isinstance(obj, (list, tuple)):
+                # Convert lists/tuples to JSON strings
+                flattened[prefix] = json.dumps(obj)
+            elif isinstance(obj, (str, int, float, bool)) or obj is None:
+                if obj is not None:  # ChromaDB doesn't accept None values
+                    flattened[prefix] = obj
+            else:
+                # Convert other complex types to JSON strings
+                try:
+                    flattened[prefix] = json.dumps(obj)
+                except (TypeError, ValueError):
+                    # If it can't be serialized, convert to string
+                    flattened[prefix] = str(obj)
+
+        _flatten_recursive(metadata)
+        return flattened
+
     @property
     def client(self) -> ClientAPI:
         if self._client is None:

@@ -147,11 +186,14 @@ class ChromaDb(VectorDb):

             metadata["content_hash"] = content_hash

+            # Flatten metadata for ChromaDB compatibility
+            flattened_metadata = self._flatten_metadata(metadata)
+
             docs_embeddings.append(document.embedding)
             docs.append(cleaned_content)
             ids.append(doc_id)
-            docs_metadata.append(metadata)
-            log_debug(f"Prepared document: {document.id} | {document.name} | {metadata}")
+            docs_metadata.append(flattened_metadata)
+            log_debug(f"Prepared document: {document.id} | {document.name} | {flattened_metadata}")

         if self._collection is None:
             logger.warning("Collection does not exist")

@@ -196,11 +238,14 @@ class ChromaDb(VectorDb):

             metadata["content_hash"] = content_hash

+            # Flatten metadata for ChromaDB compatibility
+            flattened_metadata = self._flatten_metadata(metadata)
+
             docs_embeddings.append(document.embedding)
             docs.append(cleaned_content)
             ids.append(doc_id)
-            docs_metadata.append(metadata)
-            log_debug(f"Prepared document: {document.id} | {document.name} | {metadata}")
+            docs_metadata.append(flattened_metadata)
+            log_debug(f"Prepared document: {document.id} | {document.name} | {flattened_metadata}")

         if self._collection is None:
             logger.warning("Collection does not exist")

@@ -262,11 +307,14 @@ class ChromaDb(VectorDb):

             metadata["content_hash"] = content_hash

+            # Flatten metadata for ChromaDB compatibility
+            flattened_metadata = self._flatten_metadata(metadata)
+
             docs_embeddings.append(document.embedding)
             docs.append(cleaned_content)
             ids.append(doc_id)
-            docs_metadata.append(metadata)
-            log_debug(f"Upserted document: {document.id} | {document.name} | {metadata}")
+            docs_metadata.append(flattened_metadata)
+            log_debug(f"Upserted document: {document.id} | {document.name} | {flattened_metadata}")

         if self._collection is None:
             logger.warning("Collection does not exist")

@@ -313,11 +361,14 @@ class ChromaDb(VectorDb):

             metadata["content_hash"] = content_hash

+            # Flatten metadata for ChromaDB compatibility
+            flattened_metadata = self._flatten_metadata(metadata)
+
             docs_embeddings.append(document.embedding)
             docs.append(cleaned_content)
             ids.append(doc_id)
-            docs_metadata.append(metadata)
-            log_debug(f"Upserted document: {document.id} | {document.name} | {metadata}")
+            docs_metadata.append(flattened_metadata)
+            log_debug(f"Upserted document: {document.id} | {document.name} | {flattened_metadata}")

         if self._collection is None:
             logger.warning("Collection does not exist")

@@ -747,6 +798,9 @@ class ChromaDb(VectorDb):
             logger.debug(f"No documents found with content_id: {content_id}")
             return

+        # Flatten the new metadata first
+        flattened_new_metadata = self._flatten_metadata(metadata)
+
         # Merge metadata for each document
         updated_metadatas = []
         for i, current_meta in enumerate(current_metadatas or []):

@@ -754,26 +808,13 @@ class ChromaDb(VectorDb):
                 meta_dict: Dict[str, Any] = {}
             else:
                 meta_dict = dict(current_meta)  # Convert Mapping to dict
-            updated_meta: Dict[str, Any] = meta_dict.copy()
-            updated_meta.update(metadata)
-
-            if "filters" not in updated_meta:
-                updated_meta["filters"] = {}
-            if isinstance(updated_meta["filters"], dict):
-                updated_meta["filters"].update(metadata)
-            else:
-                updated_meta["filters"] = metadata
-            updated_metadatas.append(updated_meta)

-
-
-
-        for meta in updated_metadatas:
-            cleaned_meta = {k: v for k, v in meta.items() if v is not None}
-            cleaned_metadatas.append(cleaned_meta)
+            # Update with flattened metadata
+            meta_dict.update(flattened_new_metadata)
+            updated_metadatas.append(meta_dict)

         # Convert to the expected type for ChromaDB
-        chroma_metadatas = cast(List[Mapping[str, Union[str, int, float, bool]]], cleaned_metadatas)
+        chroma_metadatas = cast(List[Mapping[str, Union[str, int, float, bool]]], updated_metadatas)
         collection.update(ids=ids, metadatas=chroma_metadatas)  # type: ignore
         logger.debug(f"Updated metadata for {len(ids)} documents with content_id: {content_id}")
```
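For reference, the behavior the new helper gives you, shown as a hedged sketch (the constructor argument is illustrative and assumed minimal; the values are made up, and the rules follow the method body above):

```python
from agno.vectordb.chroma import ChromaDb  # requires the chromadb extra

db = ChromaDb(collection="demo")  # assumed minimal constructor call
flat = db._flatten_metadata(
    {"source": {"url": "https://example.com", "tags": ["docs", "v2"]},
     "page": 3, "draft": None, "extra": {}}
)
# flat == {
#     "source.url": "https://example.com",  # nested dicts -> dotted keys
#     "source.tags": '["docs", "v2"]',      # lists -> JSON strings
#     "page": 3,                            # primitives pass through
#     "extra": "{}",                        # empty dicts -> "{}"
# }                                         # None values are dropped
```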
agno/vectordb/lancedb/lance_db.py
CHANGED

```diff
@@ -950,17 +950,28 @@ class LanceDb(VectorDb):
             logger.error("Table not initialized")
             return

-        #
-
-        results = self.table.search().
+        # Get all documents and filter in Python (LanceDB doesn't support JSON operators)
+        total_count = self.table.count_rows()
+        results = self.table.search().select(["id", "payload"]).limit(total_count).to_pandas()

         if results.empty:
+            logger.debug("No documents found")
+            return
+
+        # Find matching documents with the given content_id
+        matching_rows = []
+        for _, row in results.iterrows():
+            payload = json.loads(row["payload"])
+            if payload.get("content_id") == content_id:
+                matching_rows.append(row)
+
+        if not matching_rows:
             logger.debug(f"No documents found with content_id: {content_id}")
             return

         # Update each matching document
         updated_count = 0
-        for
+        for row in matching_rows:
             row_id = row["id"]
             current_payload = json.loads(row["payload"])
```
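The replacement path is notable: instead of pushing a `content_id` predicate into LanceDB, whose filter layer cannot reach into the JSON `payload` column, it fetches every row and filters in Python. That step, isolated as a runnable sketch:

```python
import json

import pandas as pd

def rows_for_content_id(results: pd.DataFrame, content_id: str) -> list:
    """Python-side filter over a LanceDB result frame with a JSON payload column."""
    matching_rows = []
    for _, row in results.iterrows():
        if json.loads(row["payload"]).get("content_id") == content_id:
            matching_rows.append(row)
    return matching_rows

frame = pd.DataFrame({"id": ["a", "b"], "payload": ['{"content_id": "c1"}', '{"content_id": "c2"}']})
assert [r["id"] for r in rows_for_content_id(frame, "c1")] == ["a"]
```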
agno/vectordb/milvus/milvus.py
CHANGED
```diff
@@ -423,6 +423,9 @@ class Milvus(VectorDb):
         else:
             for document in documents:
                 document.embed(embedder=self.embedder)
+                if not document.embedding:
+                    log_debug(f"Skipping document without embedding: {document.name} ({document.meta_data})")
+                    continue
                 cleaned_content = document.content.replace("\x00", "\ufffd")
                 doc_id = md5(cleaned_content.encode()).hexdigest()

@@ -465,6 +468,9 @@ class Milvus(VectorDb):

         async def process_document(document):
             document.embed(embedder=self.embedder)
+            if not document.embedding:
+                log_debug(f"Skipping document without embedding: {document.name} ({document.meta_data})")
+                return None
             cleaned_content = document.content.replace("\x00", "\ufffd")
             doc_id = md5(cleaned_content.encode()).hexdigest()
```
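Both insert paths gain the same guard, since an embedder can fail quietly and leave `embedding` unset. Isolated below with a stand-in document type:

```python
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class Doc:  # stand-in for agno's Document
    name: str
    embedding: Optional[List[float]] = None

def embeddable(docs: List[Doc]) -> List[Doc]:
    """Keep only documents that actually received an embedding."""
    kept = []
    for d in docs:
        if not d.embedding:
            print(f"Skipping document without embedding: {d.name}")  # log_debug in the real code
            continue
        kept.append(d)
    return kept

assert [d.name for d in embeddable([Doc("a", [0.1]), Doc("b")])] == ["a"]
```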
agno/workflow/workflow.py
CHANGED
```diff
@@ -1708,6 +1708,7 @@ class Workflow:
         # Create workflow run response with PENDING status
         workflow_run_response = WorkflowRunOutput(
             run_id=run_id,
+            input=input,
             session_id=session_id,
             workflow_id=self.id,
             workflow_name=self.name,

@@ -1798,6 +1799,7 @@ class Workflow:
         # Create workflow run response with PENDING status
         workflow_run_response = WorkflowRunOutput(
             run_id=run_id,
+            input=input,
             session_id=session_id,
             workflow_id=self.id,
             workflow_name=self.name,

@@ -1971,6 +1973,7 @@ class Workflow:
         # Create workflow run response that will be updated by reference
         workflow_run_response = WorkflowRunOutput(
             run_id=run_id,
+            input=input,
             session_id=session_id,
             workflow_id=self.id,
             workflow_name=self.name,

@@ -2139,6 +2142,7 @@ class Workflow:
         # Create workflow run response that will be updated by reference
         workflow_run_response = WorkflowRunOutput(
             run_id=run_id,
+            input=input,
             session_id=session_id,
             workflow_id=self.id,
             workflow_name=self.name,

@@ -2367,6 +2371,34 @@ class Workflow:
         """Convert workflow to dictionary representation"""

         def serialize_step(step):
+            # Handle callable functions (not wrapped in Step objects)
+            if callable(step) and hasattr(step, "__name__"):
+                step_dict = {
+                    "name": step.__name__,
+                    "description": "User-defined callable step",
+                    "type": StepType.STEP.value,
+                }
+                return step_dict
+
+            # Handle Agent and Team objects directly
+            if isinstance(step, Agent):
+                step_dict = {
+                    "name": step.name or "unnamed_agent",
+                    "description": step.description or "Agent step",
+                    "type": StepType.STEP.value,
+                    "agent": step,
+                }
+                return step_dict
+
+            if isinstance(step, Team):
+                step_dict = {
+                    "name": step.name or "unnamed_team",
+                    "description": step.description or "Team step",
+                    "type": StepType.STEP.value,
+                    "team": step,
+                }
+                return step_dict
+
             step_dict = {
                 "name": step.name if hasattr(step, "name") else f"unnamed_{type(step).__name__.lower()}",
                 "description": step.description if hasattr(step, "description") else "User-defined callable step",
```
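With these branches, `to_dict()` no longer relies on the `hasattr` fallback for bare functions, agents, and teams used as steps. A standalone mirror of the new callable branch; the literal value of `StepType.STEP.value` is not shown in this diff, so a placeholder string stands in:

```python
def serialize_callable_step(step):
    """Mirror of the new first branch in serialize_step."""
    if callable(step) and hasattr(step, "__name__"):
        return {
            "name": step.__name__,
            "description": "User-defined callable step",
            "type": "step",  # placeholder for StepType.STEP.value
        }
    return None

def fetch_data(step_input):  # hypothetical user-defined step
    return step_input

assert serialize_callable_step(fetch_data)["name"] == "fetch_data"
```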
{agno-2.0.5.dist-info → agno-2.0.7.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: agno
-Version: 2.0.5
+Version: 2.0.7
 Summary: Agno: a lightweight library for building Multi-Agent Systems
 Author-email: Ashpreet Bedi <ashpreet@agno.com>
 Project-URL: homepage, https://agno.com

@@ -161,6 +161,8 @@ Requires-Dist: opencv-python; extra == "opencv"
 Provides-Extra: psycopg
 Requires-Dist: psycopg-binary; extra == "psycopg"
 Requires-Dist: psycopg; extra == "psycopg"
+Provides-Extra: reportlab
+Requires-Dist: reportlab; extra == "reportlab"
 Provides-Extra: todoist
 Requires-Dist: todoist-api-python; extra == "todoist"
 Provides-Extra: valyu

@@ -306,6 +308,7 @@ Requires-Dist: agno[mem0]; extra == "tools"
 Requires-Dist: agno[memori]; extra == "tools"
 Requires-Dist: agno[google_bigquery]; extra == "tools"
 Requires-Dist: agno[psycopg]; extra == "tools"
+Requires-Dist: agno[reportlab]; extra == "tools"
 Requires-Dist: agno[trafilatura]; extra == "tools"
 Requires-Dist: agno[neo4j]; extra == "tools"
 Provides-Extra: storage
```
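In practice this adds an installable extra: `pip install "agno[reportlab]"` pulls in reportlab, presumably backing the PDF output of the new agno/tools/file_generation.py module, and the umbrella `agno[tools]` extra now includes it as well.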
|