agno 2.3.24__py3-none-any.whl → 2.3.25__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +297 -11
- agno/db/base.py +214 -0
- agno/db/dynamo/dynamo.py +47 -0
- agno/db/firestore/firestore.py +47 -0
- agno/db/gcs_json/gcs_json_db.py +47 -0
- agno/db/in_memory/in_memory_db.py +47 -0
- agno/db/json/json_db.py +47 -0
- agno/db/mongo/async_mongo.py +229 -0
- agno/db/mongo/mongo.py +47 -0
- agno/db/mongo/schemas.py +16 -0
- agno/db/mysql/async_mysql.py +47 -0
- agno/db/mysql/mysql.py +47 -0
- agno/db/postgres/async_postgres.py +231 -0
- agno/db/postgres/postgres.py +239 -0
- agno/db/postgres/schemas.py +19 -0
- agno/db/redis/redis.py +47 -0
- agno/db/singlestore/singlestore.py +47 -0
- agno/db/sqlite/async_sqlite.py +242 -0
- agno/db/sqlite/schemas.py +18 -0
- agno/db/sqlite/sqlite.py +239 -0
- agno/db/surrealdb/surrealdb.py +47 -0
- agno/knowledge/chunking/code.py +90 -0
- agno/knowledge/chunking/document.py +62 -2
- agno/knowledge/chunking/strategy.py +14 -0
- agno/knowledge/knowledge.py +7 -1
- agno/knowledge/reader/arxiv_reader.py +1 -0
- agno/knowledge/reader/csv_reader.py +1 -0
- agno/knowledge/reader/docx_reader.py +1 -0
- agno/knowledge/reader/firecrawl_reader.py +1 -0
- agno/knowledge/reader/json_reader.py +1 -0
- agno/knowledge/reader/markdown_reader.py +1 -0
- agno/knowledge/reader/pdf_reader.py +1 -0
- agno/knowledge/reader/pptx_reader.py +1 -0
- agno/knowledge/reader/s3_reader.py +1 -0
- agno/knowledge/reader/tavily_reader.py +1 -0
- agno/knowledge/reader/text_reader.py +1 -0
- agno/knowledge/reader/web_search_reader.py +1 -0
- agno/knowledge/reader/website_reader.py +1 -0
- agno/knowledge/reader/wikipedia_reader.py +1 -0
- agno/knowledge/reader/youtube_reader.py +1 -0
- agno/knowledge/utils.py +1 -0
- agno/learn/__init__.py +65 -0
- agno/learn/config.py +463 -0
- agno/learn/curate.py +185 -0
- agno/learn/machine.py +690 -0
- agno/learn/schemas.py +1043 -0
- agno/learn/stores/__init__.py +35 -0
- agno/learn/stores/entity_memory.py +3275 -0
- agno/learn/stores/learned_knowledge.py +1583 -0
- agno/learn/stores/protocol.py +117 -0
- agno/learn/stores/session_context.py +1217 -0
- agno/learn/stores/user_memory.py +1495 -0
- agno/learn/stores/user_profile.py +1220 -0
- agno/learn/utils.py +209 -0
- agno/models/base.py +59 -0
- agno/os/routers/knowledge/knowledge.py +7 -0
- agno/tools/browserbase.py +78 -6
- agno/tools/google_bigquery.py +11 -2
- agno/utils/agent.py +30 -1
- {agno-2.3.24.dist-info → agno-2.3.25.dist-info}/METADATA +24 -2
- {agno-2.3.24.dist-info → agno-2.3.25.dist-info}/RECORD +64 -50
- {agno-2.3.24.dist-info → agno-2.3.25.dist-info}/WHEEL +0 -0
- {agno-2.3.24.dist-info → agno-2.3.25.dist-info}/licenses/LICENSE +0 -0
- {agno-2.3.24.dist-info → agno-2.3.25.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
from typing import Any, Dict, List, Literal, Optional, Union
|
|
2
|
+
|
|
3
|
+
try:
|
|
4
|
+
from chonkie import CodeChunker
|
|
5
|
+
from chonkie.tokenizer import TokenizerProtocol
|
|
6
|
+
except ImportError:
|
|
7
|
+
raise ImportError(
|
|
8
|
+
"`chonkie` is required for code chunking. "
|
|
9
|
+
"Please install it using `pip install chonkie[code]` to use CodeChunking."
|
|
10
|
+
)
|
|
11
|
+
|
|
12
|
+
from agno.knowledge.chunking.strategy import ChunkingStrategy
|
|
13
|
+
from agno.knowledge.document.base import Document
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class CodeChunking(ChunkingStrategy):
    """Splits code into chunks based on its structure, leveraging Abstract Syntax Trees (ASTs) to create contextually relevant segments using Chonkie.

    Args:
        tokenizer: The tokenizer to use. Can be a string name or a TokenizerProtocol instance.
        chunk_size: The size of the chunks to create.
        language: The language to parse. Use "auto" for detection or specify a tree-sitter-language-pack language.
        include_nodes: Whether to include AST nodes (Note: Chonkie's base Chunk type does not store node information).
        chunker_params: Additional parameters to pass to Chonkie's CodeChunker.
    """

    def __init__(
        self,
        tokenizer: Union[str, TokenizerProtocol] = "character",
        chunk_size: int = 2048,
        language: Union[Literal["auto"], Any] = "auto",
        include_nodes: bool = False,
        chunker_params: Optional[Dict[str, Any]] = None,
    ):
        self.tokenizer = tokenizer
        self.chunk_size = chunk_size
        self.language = language
        self.include_nodes = include_nodes
        self.chunker_params = chunker_params
        # The underlying Chonkie chunker is built lazily on first use.
        self.chunker: Optional[CodeChunker] = None

    def _initialize_chunker(self):
        """Lazily initialize the chunker with Chonkie dependency."""
        if self.chunker is not None:
            return

        # Base configuration from the constructor; user-supplied
        # chunker_params take precedence over these defaults.
        params: Dict[str, Any] = dict(
            tokenizer=self.tokenizer,
            chunk_size=self.chunk_size,
            language=self.language,
            include_nodes=self.include_nodes,
        )
        if self.chunker_params:
            params.update(self.chunker_params)

        try:
            self.chunker = CodeChunker(**params)
        except ValueError as e:
            # Chonkie reports a missing tokenizer backend as a ValueError;
            # surface it as an ImportError with installation guidance.
            if "Tokenizer not found" in str(e):
                raise ImportError(
                    f"Missing dependencies for tokenizer `{self.tokenizer}`. "
                    f"Please install using `pip install tiktoken`, `pip install transformers`, or `pip install tokenizers`"
                ) from e
            raise

    def chunk(self, document: Document) -> List[Document]:
        """Split document into code chunks using Chonkie."""
        # Empty documents are returned untouched.
        if not document.content:
            return [document]

        # Ensure chunker is initialized (will raise ImportError if Chonkie is missing)
        self._initialize_chunker()
        if self.chunker is None:
            raise RuntimeError("Chunker failed to initialize")

        # Delegate the AST-aware splitting to Chonkie, then wrap each
        # resulting chunk in an agno Document carrying chunk metadata.
        pieces = self.chunker.chunk(document.content)

        results: List[Document] = []
        for index, piece in enumerate(pieces, 1):
            piece_meta = document.meta_data.copy()
            piece_meta["chunk"] = index
            piece_meta["chunk_size"] = len(piece.text)
            results.append(
                Document(
                    # Chunk ids are derived from the parent id when one exists.
                    id=f"{document.id}_{index}" if document.id else None,
                    name=document.name,
                    meta_data=piece_meta,
                    content=piece.text,
                )
            )

        return results
|
@@ -20,16 +20,75 @@ class DocumentChunking(ChunkingStrategy):
|
|
|
20
20
|
raw_paragraphs = document.content.split("\n\n")
|
|
21
21
|
paragraphs = [self.clean_text(para) for para in raw_paragraphs]
|
|
22
22
|
chunks: List[Document] = []
|
|
23
|
-
current_chunk = []
|
|
23
|
+
current_chunk: List[str] = []
|
|
24
24
|
current_size = 0
|
|
25
25
|
chunk_meta_data = document.meta_data
|
|
26
26
|
chunk_number = 1
|
|
27
27
|
|
|
28
28
|
for para in paragraphs:
|
|
29
29
|
para = para.strip()
|
|
30
|
+
if not para:
|
|
31
|
+
continue
|
|
32
|
+
|
|
30
33
|
para_size = len(para)
|
|
31
34
|
|
|
32
|
-
|
|
35
|
+
# If paragraph itself is larger than chunk_size, split it by sentences
|
|
36
|
+
if para_size > self.chunk_size:
|
|
37
|
+
# Save current chunk first
|
|
38
|
+
if current_chunk:
|
|
39
|
+
meta_data = chunk_meta_data.copy()
|
|
40
|
+
meta_data["chunk"] = chunk_number
|
|
41
|
+
chunk_id = None
|
|
42
|
+
if document.id:
|
|
43
|
+
chunk_id = f"{document.id}_{chunk_number}"
|
|
44
|
+
elif document.name:
|
|
45
|
+
chunk_id = f"{document.name}_{chunk_number}"
|
|
46
|
+
meta_data["chunk_size"] = len("\n\n".join(current_chunk))
|
|
47
|
+
chunks.append(
|
|
48
|
+
Document(
|
|
49
|
+
id=chunk_id, name=document.name, meta_data=meta_data, content="\n\n".join(current_chunk)
|
|
50
|
+
)
|
|
51
|
+
)
|
|
52
|
+
chunk_number += 1
|
|
53
|
+
current_chunk = []
|
|
54
|
+
current_size = 0
|
|
55
|
+
|
|
56
|
+
# Split oversized paragraph by sentences
|
|
57
|
+
import re
|
|
58
|
+
|
|
59
|
+
sentences = re.split(r"(?<=[.!?])\s+", para)
|
|
60
|
+
for sentence in sentences:
|
|
61
|
+
sentence = sentence.strip()
|
|
62
|
+
if not sentence:
|
|
63
|
+
continue
|
|
64
|
+
sentence_size = len(sentence)
|
|
65
|
+
|
|
66
|
+
if current_size + sentence_size <= self.chunk_size:
|
|
67
|
+
current_chunk.append(sentence)
|
|
68
|
+
current_size += sentence_size
|
|
69
|
+
else:
|
|
70
|
+
if current_chunk:
|
|
71
|
+
meta_data = chunk_meta_data.copy()
|
|
72
|
+
meta_data["chunk"] = chunk_number
|
|
73
|
+
chunk_id = None
|
|
74
|
+
if document.id:
|
|
75
|
+
chunk_id = f"{document.id}_{chunk_number}"
|
|
76
|
+
elif document.name:
|
|
77
|
+
chunk_id = f"{document.name}_{chunk_number}"
|
|
78
|
+
meta_data["chunk_size"] = len(" ".join(current_chunk))
|
|
79
|
+
chunks.append(
|
|
80
|
+
Document(
|
|
81
|
+
id=chunk_id,
|
|
82
|
+
name=document.name,
|
|
83
|
+
meta_data=meta_data,
|
|
84
|
+
content=" ".join(current_chunk),
|
|
85
|
+
)
|
|
86
|
+
)
|
|
87
|
+
chunk_number += 1
|
|
88
|
+
current_chunk = [sentence]
|
|
89
|
+
current_size = sentence_size
|
|
90
|
+
|
|
91
|
+
elif current_size + para_size <= self.chunk_size:
|
|
33
92
|
current_chunk.append(para)
|
|
34
93
|
current_size += para_size
|
|
35
94
|
else:
|
|
@@ -47,6 +106,7 @@ class DocumentChunking(ChunkingStrategy):
|
|
|
47
106
|
id=chunk_id, name=document.name, meta_data=meta_data, content="\n\n".join(current_chunk)
|
|
48
107
|
)
|
|
49
108
|
)
|
|
109
|
+
chunk_number += 1
|
|
50
110
|
current_chunk = [para]
|
|
51
111
|
current_size = para_size
|
|
52
112
|
|
|
@@ -36,6 +36,7 @@ class ChunkingStrategyType(str, Enum):
|
|
|
36
36
|
"""Enumeration of available chunking strategies."""
|
|
37
37
|
|
|
38
38
|
AGENTIC_CHUNKER = "AgenticChunker"
|
|
39
|
+
CODE_CHUNKER = "CodeChunker"
|
|
39
40
|
DOCUMENT_CHUNKER = "DocumentChunker"
|
|
40
41
|
RECURSIVE_CHUNKER = "RecursiveChunker"
|
|
41
42
|
SEMANTIC_CHUNKER = "SemanticChunker"
|
|
@@ -70,6 +71,7 @@ class ChunkingStrategyFactory:
|
|
|
70
71
|
"""Create an instance of the chunking strategy with the given parameters."""
|
|
71
72
|
strategy_map = {
|
|
72
73
|
ChunkingStrategyType.AGENTIC_CHUNKER: cls._create_agentic_chunking,
|
|
74
|
+
ChunkingStrategyType.CODE_CHUNKER: cls._create_code_chunking,
|
|
73
75
|
ChunkingStrategyType.DOCUMENT_CHUNKER: cls._create_document_chunking,
|
|
74
76
|
ChunkingStrategyType.RECURSIVE_CHUNKER: cls._create_recursive_chunking,
|
|
75
77
|
ChunkingStrategyType.SEMANTIC_CHUNKER: cls._create_semantic_chunking,
|
|
@@ -91,6 +93,18 @@ class ChunkingStrategyFactory:
|
|
|
91
93
|
# Remove overlap since AgenticChunking doesn't support it
|
|
92
94
|
return AgenticChunking(**kwargs)
|
|
93
95
|
|
|
96
|
+
@classmethod
def _create_code_chunking(
    cls, chunk_size: Optional[int] = None, overlap: Optional[int] = None, **kwargs
) -> ChunkingStrategy:
    """Instantiate a CodeChunking strategy from factory parameters."""
    from agno.knowledge.chunking.code import CodeChunking

    # Forward chunk_size when provided; overlap is deliberately dropped
    # because CodeChunking does not support it.
    if chunk_size is not None:
        kwargs["chunk_size"] = chunk_size
    return CodeChunking(**kwargs)
|
|
107
|
+
|
|
94
108
|
@classmethod
|
|
95
109
|
def _create_document_chunking(
|
|
96
110
|
cls, chunk_size: Optional[int] = None, overlap: Optional[int] = None, **kwargs
|
agno/knowledge/knowledge.py
CHANGED
|
@@ -2034,7 +2034,13 @@ class Knowledge:
|
|
|
2034
2034
|
self.vector_db.update_metadata(content_id=content.id, metadata=content.metadata or {})
|
|
2035
2035
|
|
|
2036
2036
|
return content_row.to_dict()
|
|
2037
|
-
|
|
2037
|
+
|
|
2038
|
+
else:
|
|
2039
|
+
if self.name:
|
|
2040
|
+
log_debug(f"Contents DB not found for knowledge base: {self.name}")
|
|
2041
|
+
else:
|
|
2042
|
+
log_debug("Contents DB not found for knowledge base")
|
|
2043
|
+
return None
|
|
2038
2044
|
|
|
2039
2045
|
async def _aupdate_content(self, content: Content) -> Optional[Dict[str, Any]]:
|
|
2040
2046
|
if self.contents_db:
|
|
@@ -20,6 +20,7 @@ class ArxivReader(Reader):
|
|
|
20
20
|
def get_supported_chunking_strategies(self) -> List[ChunkingStrategyType]:
|
|
21
21
|
"""Get the list of supported chunking strategies for Arxiv readers."""
|
|
22
22
|
return [
|
|
23
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
23
24
|
ChunkingStrategyType.FIXED_SIZE_CHUNKER,
|
|
24
25
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
25
26
|
ChunkingStrategyType.DOCUMENT_CHUNKER,
|
|
@@ -29,6 +29,7 @@ class CSVReader(Reader):
|
|
|
29
29
|
"""Get the list of supported chunking strategies for CSV readers."""
|
|
30
30
|
return [
|
|
31
31
|
ChunkingStrategyType.ROW_CHUNKER,
|
|
32
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
32
33
|
ChunkingStrategyType.FIXED_SIZE_CHUNKER,
|
|
33
34
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
34
35
|
ChunkingStrategyType.DOCUMENT_CHUNKER,
|
|
@@ -27,6 +27,7 @@ class DocxReader(Reader):
|
|
|
27
27
|
"""Get the list of supported chunking strategies for DOCX readers."""
|
|
28
28
|
return [
|
|
29
29
|
ChunkingStrategyType.DOCUMENT_CHUNKER,
|
|
30
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
30
31
|
ChunkingStrategyType.FIXED_SIZE_CHUNKER,
|
|
31
32
|
ChunkingStrategyType.SEMANTIC_CHUNKER,
|
|
32
33
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
@@ -46,6 +46,7 @@ class FirecrawlReader(Reader):
|
|
|
46
46
|
def get_supported_chunking_strategies(self) -> List[ChunkingStrategyType]:
|
|
47
47
|
"""Get the list of supported chunking strategies for Firecrawl readers."""
|
|
48
48
|
return [
|
|
49
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
49
50
|
ChunkingStrategyType.SEMANTIC_CHUNKER,
|
|
50
51
|
ChunkingStrategyType.FIXED_SIZE_CHUNKER,
|
|
51
52
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
@@ -24,6 +24,7 @@ class JSONReader(Reader):
|
|
|
24
24
|
def get_supported_chunking_strategies(self) -> List[ChunkingStrategyType]:
|
|
25
25
|
"""Get the list of supported chunking strategies for JSON readers."""
|
|
26
26
|
return [
|
|
27
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
27
28
|
ChunkingStrategyType.FIXED_SIZE_CHUNKER,
|
|
28
29
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
29
30
|
ChunkingStrategyType.DOCUMENT_CHUNKER,
|
|
@@ -31,6 +31,7 @@ class MarkdownReader(Reader):
|
|
|
31
31
|
def get_supported_chunking_strategies(self) -> List[ChunkingStrategyType]:
|
|
32
32
|
"""Get the list of supported chunking strategies for Markdown readers."""
|
|
33
33
|
strategies = [
|
|
34
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
34
35
|
ChunkingStrategyType.DOCUMENT_CHUNKER,
|
|
35
36
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
36
37
|
ChunkingStrategyType.RECURSIVE_CHUNKER,
|
|
@@ -204,6 +204,7 @@ class BasePDFReader(Reader):
|
|
|
204
204
|
"""Get the list of supported chunking strategies for PDF readers."""
|
|
205
205
|
return [
|
|
206
206
|
ChunkingStrategyType.DOCUMENT_CHUNKER,
|
|
207
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
207
208
|
ChunkingStrategyType.FIXED_SIZE_CHUNKER,
|
|
208
209
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
209
210
|
ChunkingStrategyType.SEMANTIC_CHUNKER,
|
|
@@ -27,6 +27,7 @@ class PPTXReader(Reader):
|
|
|
27
27
|
"""Get the list of supported chunking strategies for PPTX readers."""
|
|
28
28
|
return [
|
|
29
29
|
ChunkingStrategyType.DOCUMENT_CHUNKER,
|
|
30
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
30
31
|
ChunkingStrategyType.FIXED_SIZE_CHUNKER,
|
|
31
32
|
ChunkingStrategyType.SEMANTIC_CHUNKER,
|
|
32
33
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
@@ -38,6 +38,7 @@ class S3Reader(Reader):
|
|
|
38
38
|
def get_supported_chunking_strategies(self) -> List[ChunkingStrategyType]:
|
|
39
39
|
"""Get the list of supported chunking strategies for S3 readers."""
|
|
40
40
|
return [
|
|
41
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
41
42
|
ChunkingStrategyType.FIXED_SIZE_CHUNKER,
|
|
42
43
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
43
44
|
ChunkingStrategyType.DOCUMENT_CHUNKER,
|
|
@@ -65,6 +65,7 @@ class TavilyReader(Reader):
|
|
|
65
65
|
def get_supported_chunking_strategies(self) -> List[ChunkingStrategyType]:
|
|
66
66
|
"""Get the list of supported chunking strategies for Tavily readers."""
|
|
67
67
|
return [
|
|
68
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
68
69
|
ChunkingStrategyType.SEMANTIC_CHUNKER,
|
|
69
70
|
ChunkingStrategyType.FIXED_SIZE_CHUNKER,
|
|
70
71
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
@@ -21,6 +21,7 @@ class TextReader(Reader):
|
|
|
21
21
|
def get_supported_chunking_strategies(self) -> List[ChunkingStrategyType]:
|
|
22
22
|
"""Get the list of supported chunking strategies for Text readers."""
|
|
23
23
|
return [
|
|
24
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
24
25
|
ChunkingStrategyType.FIXED_SIZE_CHUNKER,
|
|
25
26
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
26
27
|
ChunkingStrategyType.DOCUMENT_CHUNKER,
|
|
@@ -56,6 +56,7 @@ class WebSearchReader(Reader):
|
|
|
56
56
|
def get_supported_chunking_strategies(self) -> List[ChunkingStrategyType]:
|
|
57
57
|
"""Get the list of supported chunking strategies for Web Search readers."""
|
|
58
58
|
return [
|
|
59
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
59
60
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
60
61
|
ChunkingStrategyType.DOCUMENT_CHUNKER,
|
|
61
62
|
ChunkingStrategyType.RECURSIVE_CHUNKER,
|
|
@@ -52,6 +52,7 @@ class WebsiteReader(Reader):
|
|
|
52
52
|
def get_supported_chunking_strategies(self) -> List[ChunkingStrategyType]:
|
|
53
53
|
"""Get the list of supported chunking strategies for Website readers."""
|
|
54
54
|
return [
|
|
55
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
55
56
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
56
57
|
ChunkingStrategyType.DOCUMENT_CHUNKER,
|
|
57
58
|
ChunkingStrategyType.RECURSIVE_CHUNKER,
|
|
@@ -27,6 +27,7 @@ class WikipediaReader(Reader):
|
|
|
27
27
|
def get_supported_chunking_strategies(self) -> List[ChunkingStrategyType]:
|
|
28
28
|
"""Get the list of supported chunking strategies for Wikipedia readers."""
|
|
29
29
|
return [
|
|
30
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
30
31
|
ChunkingStrategyType.FIXED_SIZE_CHUNKER,
|
|
31
32
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
32
33
|
ChunkingStrategyType.DOCUMENT_CHUNKER,
|
|
@@ -27,6 +27,7 @@ class YouTubeReader(Reader):
|
|
|
27
27
|
"""Get the list of supported chunking strategies for YouTube readers."""
|
|
28
28
|
return [
|
|
29
29
|
ChunkingStrategyType.RECURSIVE_CHUNKER,
|
|
30
|
+
ChunkingStrategyType.CODE_CHUNKER,
|
|
30
31
|
ChunkingStrategyType.AGENTIC_CHUNKER,
|
|
31
32
|
ChunkingStrategyType.DOCUMENT_CHUNKER,
|
|
32
33
|
ChunkingStrategyType.SEMANTIC_CHUNKER,
|
agno/knowledge/utils.py
CHANGED
|
@@ -15,6 +15,7 @@ def _get_chunker_class(strategy_type):
|
|
|
15
15
|
ChunkingStrategyType.AGENTIC_CHUNKER: lambda: _import_class(
|
|
16
16
|
"agno.knowledge.chunking.agentic", "AgenticChunking"
|
|
17
17
|
),
|
|
18
|
+
ChunkingStrategyType.CODE_CHUNKER: lambda: _import_class("agno.knowledge.chunking.code", "CodeChunking"),
|
|
18
19
|
ChunkingStrategyType.DOCUMENT_CHUNKER: lambda: _import_class(
|
|
19
20
|
"agno.knowledge.chunking.document", "DocumentChunking"
|
|
20
21
|
),
|
agno/learn/__init__.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Agno Learning Module
|
|
3
|
+
====================
|
|
4
|
+
Gives agents the ability to learn and remember.
|
|
5
|
+
|
|
6
|
+
Main Components:
|
|
7
|
+
- LearningMachine: Unified learning system
|
|
8
|
+
- Config: Configuration for learning types
|
|
9
|
+
- Schemas: Data structures for learning types
|
|
10
|
+
- Stores: Storage backends for learning types
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from agno.learn.config import (
|
|
14
|
+
EntityMemoryConfig,
|
|
15
|
+
LearnedKnowledgeConfig,
|
|
16
|
+
LearningMode,
|
|
17
|
+
MemoriesConfig,
|
|
18
|
+
SessionContextConfig,
|
|
19
|
+
UserMemoryConfig,
|
|
20
|
+
UserProfileConfig,
|
|
21
|
+
)
|
|
22
|
+
from agno.learn.machine import LearningMachine
|
|
23
|
+
from agno.learn.schemas import (
|
|
24
|
+
EntityMemory,
|
|
25
|
+
LearnedKnowledge,
|
|
26
|
+
Memories,
|
|
27
|
+
SessionContext,
|
|
28
|
+
UserProfile,
|
|
29
|
+
)
|
|
30
|
+
from agno.learn.stores import (
|
|
31
|
+
EntityMemoryStore,
|
|
32
|
+
LearnedKnowledgeStore,
|
|
33
|
+
LearningStore,
|
|
34
|
+
MemoriesStore,
|
|
35
|
+
SessionContextStore,
|
|
36
|
+
UserMemoryStore,
|
|
37
|
+
UserProfileStore,
|
|
38
|
+
)
|
|
39
|
+
|
|
40
|
+
__all__ = [
|
|
41
|
+
# Main class
|
|
42
|
+
"LearningMachine",
|
|
43
|
+
# Configs
|
|
44
|
+
"LearningMode",
|
|
45
|
+
"UserProfileConfig",
|
|
46
|
+
"UserMemoryConfig",
|
|
47
|
+
"MemoriesConfig", # Backwards compatibility alias
|
|
48
|
+
"EntityMemoryConfig",
|
|
49
|
+
"SessionContextConfig",
|
|
50
|
+
"LearnedKnowledgeConfig",
|
|
51
|
+
# Schemas
|
|
52
|
+
"UserProfile",
|
|
53
|
+
"Memories",
|
|
54
|
+
"EntityMemory",
|
|
55
|
+
"SessionContext",
|
|
56
|
+
"LearnedKnowledge",
|
|
57
|
+
# Stores
|
|
58
|
+
"LearningStore",
|
|
59
|
+
"UserProfileStore",
|
|
60
|
+
"UserMemoryStore",
|
|
61
|
+
"MemoriesStore", # Backwards compatibility alias
|
|
62
|
+
"SessionContextStore",
|
|
63
|
+
"LearnedKnowledgeStore",
|
|
64
|
+
"EntityMemoryStore",
|
|
65
|
+
]
|