agno 2.4.5__py3-none-any.whl → 2.4.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -823,7 +823,13 @@ class Knowledge:
                 log_warning(f"Invalid filter key: {key} - not present in knowledge base")

         elif isinstance(filters, List):
-            # Validate that list contains FilterExpr instances
+            # Validate list filters against known metadata keys
+            if valid_metadata_filters is None or not valid_metadata_filters:
+                # Can't validate keys without metadata - return original list
+                log_warning("No valid metadata filters tracked yet. Cannot validate list filter keys.")
+                return filters, []
+
+            valid_list_filters: List[FilterExpr] = []
             for i, filter_item in enumerate(filters):
                 if not isinstance(filter_item, FilterExpr):
                     log_warning(
@@ -832,9 +838,23 @@ class Knowledge:
                         f"Use filter expressions like EQ('key', 'value'), IN('key', [values]), "
                         f"AND(...), OR(...), NOT(...) from agno.filters"
                     )
-            # Filter expressions are already validated, return empty dict/list
-            # The actual filtering happens in the vector_db layer
-            return filters, []
+                    continue
+
+                # Check if filter has a key attribute and validate it
+                if hasattr(filter_item, "key"):
+                    key = filter_item.key
+                    base_key = key.split(".")[-1] if "." in key else key
+                    if base_key in valid_metadata_filters or key in valid_metadata_filters:
+                        valid_list_filters.append(filter_item)
+                    else:
+                        invalid_keys.append(key)
+                        log_warning(f"Invalid filter key: {key} - not present in knowledge base")
+                else:
+                    # Complex filters (AND, OR, NOT) - keep them as-is
+                    # They contain nested filters that will be validated by the vector DB
+                    valid_list_filters.append(filter_item)
+
+            return valid_list_filters, invalid_keys

         return valid_filters, invalid_keys

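With this change, list filters are actively validated: expressions whose key is not among the tracked metadata keys are dropped and reported, while composite expressions (`AND`, `OR`, `NOT`) pass through for the vector-DB layer to check. A minimal sketch of the resulting behavior, assuming a knowledge base that has tracked the metadata keys `author` and `year` (the validation entry point is internal, so the call below is a hypothetical stand-in):

```python
from agno.filters import AND, EQ, IN

filters = [
    EQ("author", "jane"),      # known key -> kept
    IN("genre", ["sci-fi"]),   # unknown key -> dropped, reported in invalid_keys
    AND(EQ("author", "jane"), EQ("year", 2024)),  # complex -> kept as-is;
                               # nested keys are validated in the vector-DB layer
]

valid, invalid = knowledge.validate_filters(filters)  # hypothetical call site
# valid   -> [EQ("author", "jane"), AND(...)]
# invalid -> ["genre"]
```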
@@ -1541,7 +1561,49 @@ class Knowledge:
         # 6. Chunk documents if needed
         if reader and not reader.chunk:
             read_documents = await reader.chunk_documents_async(read_documents)
-        # 7. Prepare and insert the content in the vector database
+
+        # 7. Group documents by source URL for multi-page readers (like WebsiteReader)
+        docs_by_source: Dict[str, List[Document]] = {}
+        for doc in read_documents:
+            source_url = doc.meta_data.get("url", content.url) if doc.meta_data else content.url
+            source_url = source_url or "unknown"
+            if source_url not in docs_by_source:
+                docs_by_source[source_url] = []
+            docs_by_source[source_url].append(doc)
+
+        # 8. Process each source separately if multiple sources exist
+        if len(docs_by_source) > 1:
+            for source_url, source_docs in docs_by_source.items():
+                # Compute per-document hash based on actual source URL
+                doc_hash = self._build_document_content_hash(source_docs[0], content)
+
+                # Check skip_if_exists for each source individually
+                if self._should_skip(doc_hash, skip_if_exists):
+                    log_debug(f"Skipping already indexed: {source_url}")
+                    continue
+
+                doc_id = generate_id(doc_hash)
+                self._prepare_documents_for_insert(source_docs, doc_id, calculate_sizes=True)
+
+                # Insert with per-document hash
+                if self.vector_db.upsert_available() and upsert:
+                    try:
+                        await self.vector_db.async_upsert(doc_hash, source_docs, content.metadata)
+                    except Exception as e:
+                        log_error(f"Error upserting document from {source_url}: {e}")
+                        continue
+                else:
+                    try:
+                        await self.vector_db.async_insert(doc_hash, documents=source_docs, filters=content.metadata)
+                    except Exception as e:
+                        log_error(f"Error inserting document from {source_url}: {e}")
+                        continue
+
+            content.status = ContentStatus.COMPLETED
+            await self._aupdate_content(content)
+            return
+
+        # 9. Single source - use existing logic with original content hash
         if not content.id:
             content.id = generate_id(content.content_hash or "")
         self._prepare_documents_for_insert(read_documents, content.id, calculate_sizes=True)
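For context on steps 7-8: a multi-page reader such as WebsiteReader emits one `Document` per crawled page (or chunk), each carrying its page URL in `meta_data`, and that URL is the grouping key. A hedged sketch of the grouping, with the `Document` shape inferred from the diff's `doc.meta_data.get("url", ...)` access:

```python
from typing import Dict, List

from agno.knowledge.document import Document

read_documents = [
    Document(content="...", meta_data={"url": "https://example.com/a"}),
    Document(content="...", meta_data={"url": "https://example.com/a"}),  # second chunk of page a
    Document(content="...", meta_data={"url": "https://example.com/b"}),
]

docs_by_source: Dict[str, List[Document]] = {}
for doc in read_documents:
    docs_by_source.setdefault(doc.meta_data.get("url", "unknown"), []).append(doc)

# Two groups ("…/a" with two chunks, "…/b" with one), each hashed and
# inserted on its own, so skip_if_exists applies per page rather than per crawl.
```

The sync loader below receives the identical grouping logic.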
@@ -1648,7 +1710,48 @@ class Knowledge:
         if reader:
             read_documents = self._chunk_documents_sync(reader, read_documents)

-        # 7. Prepare and insert the content in the vector database
+        # 7. Group documents by source URL for multi-page readers (like WebsiteReader)
+        docs_by_source: Dict[str, List[Document]] = {}
+        for doc in read_documents:
+            source_url = doc.meta_data.get("url", content.url) if doc.meta_data else content.url
+            source_url = source_url or "unknown"
+            if source_url not in docs_by_source:
+                docs_by_source[source_url] = []
+            docs_by_source[source_url].append(doc)
+
+        # 8. Process each source separately if multiple sources exist
+        if len(docs_by_source) > 1:
+            for source_url, source_docs in docs_by_source.items():
+                # Compute per-document hash based on actual source URL
+                doc_hash = self._build_document_content_hash(source_docs[0], content)
+
+                # Check skip_if_exists for each source individually
+                if self._should_skip(doc_hash, skip_if_exists):
+                    log_debug(f"Skipping already indexed: {source_url}")
+                    continue
+
+                doc_id = generate_id(doc_hash)
+                self._prepare_documents_for_insert(source_docs, doc_id, calculate_sizes=True)
+
+                # Insert with per-document hash
+                if self.vector_db.upsert_available() and upsert:
+                    try:
+                        self.vector_db.upsert(doc_hash, source_docs, content.metadata)
+                    except Exception as e:
+                        log_error(f"Error upserting document from {source_url}: {e}")
+                        continue
+                else:
+                    try:
+                        self.vector_db.insert(doc_hash, documents=source_docs, filters=content.metadata)
+                    except Exception as e:
+                        log_error(f"Error inserting document from {source_url}: {e}")
+                        continue
+
+            content.status = ContentStatus.COMPLETED
+            self._update_content(content)
+            return
+
+        # 9. Single source - use existing logic with original content hash
         if not content.id:
             content.id = generate_id(content.content_hash or "")
         self._prepare_documents_for_insert(read_documents, content.id, calculate_sizes=True)
@@ -1900,11 +2003,11 @@ class Knowledge:
             if self._should_skip(content.content_hash, skip_if_exists):
                 content.status = ContentStatus.COMPLETED
                 await self._aupdate_content(content)
-                return
+                continue  # Skip to next topic, don't exit loop

             if self.vector_db.__class__.__name__ == "LightRag":
                 await self._aprocess_lightrag_content(content, KnowledgeContentOrigin.TOPIC)
-                return
+                continue  # Skip to next topic, don't exit loop

             if self.vector_db and self.vector_db.content_hash_exists(content.content_hash) and skip_if_exists:
                 log_info(f"Content {content.content_hash} already exists, skipping")
@@ -1961,11 +2064,11 @@ class Knowledge:
             if self._should_skip(content.content_hash, skip_if_exists):
                 content.status = ContentStatus.COMPLETED
                 self._update_content(content)
-                return
+                continue  # Skip to next topic, don't exit loop

             if self.vector_db.__class__.__name__ == "LightRag":
                 self._process_lightrag_content(content, KnowledgeContentOrigin.TOPIC)
-                return
+                continue  # Skip to next topic, don't exit loop

             if self.vector_db and self.vector_db.content_hash_exists(content.content_hash) and skip_if_exists:
                 log_info(f"Content {content.content_hash} already exists, skipping")
@@ -3896,6 +3999,42 @@ class Knowledge:
         hash_input = ":".join(hash_parts)
         return hashlib.sha256(hash_input.encode()).hexdigest()

+    def _build_document_content_hash(self, document: Document, content: Content) -> str:
+        """
+        Build content hash for a specific document.
+
+        Used for multi-page readers (like WebsiteReader) where each crawled page
+        should have its own unique content hash based on its actual URL.
+
+        Args:
+            document: The document to build the hash for
+            content: The original content object (for fallback name/description)
+
+        Returns:
+            A unique hash string for this specific document
+        """
+        hash_parts = []
+
+        if content.name:
+            hash_parts.append(content.name)
+        if content.description:
+            hash_parts.append(content.description)
+
+        # Use document's own URL if available (set by WebsiteReader)
+        doc_url = document.meta_data.get("url") if document.meta_data else None
+        if doc_url:
+            hash_parts.append(str(doc_url))
+        elif content.url:
+            hash_parts.append(content.url)
+        elif content.path:
+            hash_parts.append(str(content.path))
+        else:
+            # Fallback: use content hash for uniqueness
+            hash_parts.append(hashlib.sha256(document.content.encode()).hexdigest()[:16])
+
+        hash_input = ":".join(hash_parts)
+        return hashlib.sha256(hash_input.encode()).hexdigest()
+
     def _ensure_string_field(self, value: Any, field_name: str, default: str = "") -> str:
         """
         Safely ensure a field is a string, handling various edge cases.
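The new helper reuses the colon-joined-parts, SHA-256 shape of the existing content hash, but prefers the document's own URL, so two pages from one crawl no longer collide on the parent content's hash. A standalone sketch of the arithmetic (names and URLs illustrative):

```python
import hashlib

def page_hash(name: str, url: str) -> str:
    # Same shape as _build_document_content_hash: join parts with ":", then SHA-256
    return hashlib.sha256(f"{name}:{url}".encode()).hexdigest()

h_a = page_hash("docs", "https://example.com/a")
h_b = page_hash("docs", "https://example.com/b")
assert h_a != h_b  # distinct per page, so skip_if_exists can skip page a alone
```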
@@ -4625,7 +4764,12 @@ Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUC
         retrieval_timer = Timer()
         retrieval_timer.start()

-        docs = self.search(query=query, filters=knowledge_filters)
+        try:
+            docs = self.search(query=query, filters=knowledge_filters)
+        except Exception as e:
+            retrieval_timer.stop()
+            log_warning(f"Knowledge search failed: {e}")
+            return f"Error searching knowledge base: {type(e).__name__}"

         if run_response is not None and docs:
             references = MessageReferences(
@@ -4657,7 +4801,12 @@ Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUC
         retrieval_timer = Timer()
         retrieval_timer.start()

-        docs = await self.asearch(query=query, filters=knowledge_filters)
+        try:
+            docs = await self.asearch(query=query, filters=knowledge_filters)
+        except Exception as e:
+            retrieval_timer.stop()
+            log_warning(f"Knowledge search failed: {e}")
+            return f"Error searching knowledge base: {type(e).__name__}"

         if run_response is not None and docs:
             references = MessageReferences(
@@ -4735,7 +4884,12 @@ Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUC
         retrieval_timer = Timer()
         retrieval_timer.start()

-        docs = self.search(query=query, filters=search_filters)
+        try:
+            docs = self.search(query=query, filters=search_filters)
+        except Exception as e:
+            retrieval_timer.stop()
+            log_warning(f"Knowledge search failed: {e}")
+            return f"Error searching knowledge base: {type(e).__name__}"

         if run_response is not None and docs:
             references = MessageReferences(
@@ -4789,7 +4943,12 @@ Make sure to pass the filters as [Dict[str: Any]] to the tool. FOLLOW THIS STRUC
         retrieval_timer = Timer()
         retrieval_timer.start()

-        docs = await self.asearch(query=query, filters=search_filters)
+        try:
+            docs = await self.asearch(query=query, filters=search_filters)
+        except Exception as e:
+            retrieval_timer.stop()
+            log_warning(f"Knowledge search failed: {e}")
+            return f"Error searching knowledge base: {type(e).__name__}"

         if run_response is not None and docs:
             references = MessageReferences(
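All four retrieval paths (sync and async, both tool variants) now catch search failures, stop the timer, and hand the model a short error string; only the exception class name is surfaced, not its message. A sketch of the observable behavior with a simulated vector-DB outage:

```python
def search_knowledge(query: str) -> str:
    try:
        raise ConnectionError("vector db unreachable")  # simulated backend failure
    except Exception as e:
        # Mirrors the new handler: class name only, no backend details leaked
        return f"Error searching knowledge base: {type(e).__name__}"

print(search_knowledge("what is agno?"))
# -> Error searching knowledge base: ConnectionError
```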
@@ -110,7 +110,7 @@ class TextReader(Reader):
             chunked_documents = self.chunk_document(document)

             if not chunked_documents:
-                return [document]
+                return []

             tasks = [process_chunk(chunk_doc) for chunk_doc in chunked_documents]
             return await asyncio.gather(*tasks)
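The async `TextReader` path now follows "no chunks in, no documents out" instead of falling back to the single unchunked document. A tiny sketch of the contract change:

```python
document = "raw text"          # stands in for the unchunked Document
chunked_documents: list = []   # chunking produced nothing

before = chunked_documents or [document]  # old fallback returned [document]
after = chunked_documents or []           # new behavior returns []
assert before == [document] and after == []
```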
agno/knowledge/reranker/aws_bedrock.py ADDED
@@ -0,0 +1,299 @@
+from os import getenv
+from typing import Any, Dict, List, Literal, Optional
+
+from pydantic import ConfigDict, Field
+
+from agno.knowledge.document import Document
+from agno.knowledge.reranker.base import Reranker
+from agno.utils.log import logger
+
+try:
+    from boto3 import client as AwsClient
+    from boto3.session import Session
+    from botocore.exceptions import ClientError
+except ImportError:
+    raise ImportError("`boto3` not installed. Please install it via `pip install boto3`.")
+
+
+# Model ID constants
+AMAZON_RERANK_V1 = "amazon.rerank-v1:0"
+COHERE_RERANK_V3_5 = "cohere.rerank-v3-5:0"
+
+# Type alias for supported models
+RerankerModel = Literal["amazon.rerank-v1:0", "cohere.rerank-v3-5:0"]
+
+
+class AwsBedrockReranker(Reranker):
+    """
+    AWS Bedrock reranker supporting Amazon Rerank 1.0 and Cohere Rerank 3.5 models.
+
+    This reranker uses the unified Bedrock Rerank API (bedrock-agent-runtime)
+    which provides a consistent interface for both model providers.
+
+    To use this reranker, you need to either:
+    1. Set the following environment variables:
+       - AWS_ACCESS_KEY_ID
+       - AWS_SECRET_ACCESS_KEY
+       - AWS_REGION
+    2. Or provide a boto3 Session object
+
+    Args:
+        model (str): The model ID to use. Options:
+            - 'amazon.rerank-v1:0' (Amazon Rerank 1.0)
+            - 'cohere.rerank-v3-5:0' (Cohere Rerank 3.5)
+            Default is 'cohere.rerank-v3-5:0'.
+        top_n (Optional[int]): Number of top results to return after reranking.
+            If None, returns all documents reranked.
+        aws_region (Optional[str]): The AWS region to use.
+        aws_access_key_id (Optional[str]): The AWS access key ID to use.
+        aws_secret_access_key (Optional[str]): The AWS secret access key to use.
+        session (Optional[Session]): A boto3 Session object for authentication.
+        additional_model_request_fields (Optional[Dict]): Additional model-specific
+            parameters to pass in the request (e.g., Cohere-specific options).
+
+    Example:
+        ```python
+        from agno.knowledge.reranker.aws_bedrock import AwsBedrockReranker
+
+        # Using Cohere Rerank 3.5 (default)
+        reranker = AwsBedrockReranker(
+            model="cohere.rerank-v3-5:0",
+            top_n=5,
+            aws_region="us-west-2",
+        )
+
+        # Using Amazon Rerank 1.0
+        reranker = AwsBedrockReranker(
+            model="amazon.rerank-v1:0",
+            top_n=10,
+            aws_region="us-west-2",
+        )
+
+        # Rerank documents
+        reranked_docs = reranker.rerank(query="What is machine learning?", documents=docs)
+        ```
+
+    Note:
+        - Amazon Rerank 1.0 is NOT available in us-east-1 (N. Virginia).
+          Use Cohere Rerank 3.5 in that region.
+        - Maximum 1000 documents per request.
+    """
+
+    model_config = ConfigDict(arbitrary_types_allowed=True, populate_by_name=True)
+
+    model: str = Field(default=COHERE_RERANK_V3_5, description="Reranker model ID")
+    top_n: Optional[int] = Field(default=None, description="Number of top results to return")
+
+    aws_region: Optional[str] = Field(default=None, description="AWS region")
+    aws_access_key_id: Optional[str] = Field(default=None, description="AWS access key ID")
+    aws_secret_access_key: Optional[str] = Field(default=None, description="AWS secret access key")
+    session: Optional[Session] = Field(default=None, description="Boto3 session", exclude=True)
+
+    additional_model_request_fields: Optional[Dict[str, Any]] = Field(
+        default=None,
+        description="Additional model-specific request parameters",
+    )
+
+    _client: Optional[AwsClient] = None
+
+    @property
+    def client(self) -> AwsClient:
+        """
+        Returns a bedrock-agent-runtime client for the Rerank API.
+
+        Returns:
+            AwsClient: An instance of the bedrock-agent-runtime client.
+        """
+        if self._client is not None:
+            return self._client
+
+        if self.session:
+            self._client = self.session.client("bedrock-agent-runtime")
+            return self._client
+
+        aws_access_key_id = self.aws_access_key_id or getenv("AWS_ACCESS_KEY_ID")
+        aws_secret_access_key = self.aws_secret_access_key or getenv("AWS_SECRET_ACCESS_KEY")
+        aws_region = self.aws_region or getenv("AWS_REGION")
+
+        if not aws_access_key_id or not aws_secret_access_key:
+            # Fall back to default credential chain
+            self._client = AwsClient(
+                service_name="bedrock-agent-runtime",
+                region_name=aws_region,
+            )
+        else:
+            self._client = AwsClient(
+                service_name="bedrock-agent-runtime",
+                region_name=aws_region,
+                aws_access_key_id=aws_access_key_id,
+                aws_secret_access_key=aws_secret_access_key,
+            )
+
+        return self._client
+
+    def _get_model_arn(self) -> str:
+        """
+        Constructs the full model ARN for the reranker model.
+
+        Returns:
+            str: The model ARN.
+        """
+        region = self.aws_region or getenv("AWS_REGION", "us-west-2")
+        return f"arn:aws:bedrock:{region}::foundation-model/{self.model}"
+
+    def _build_sources(self, documents: List[Document]) -> List[Dict[str, Any]]:
+        """
+        Convert Document objects to Bedrock Rerank API source format.
+
+        Args:
+            documents: List of Document objects to convert.
+
+        Returns:
+            List of RerankSource objects for the API.
+        """
+        sources = []
+        for doc in documents:
+            # Use text format for document content
+            source = {
+                "type": "INLINE",
+                "inlineDocumentSource": {
+                    "type": "TEXT",
+                    "textDocument": {
+                        "text": doc.content,
+                    },
+                },
+            }
+            sources.append(source)
+        return sources
+
+    def _rerank(self, query: str, documents: List[Document]) -> List[Document]:
+        """
+        Internal method to perform reranking via Bedrock Rerank API.
+
+        Args:
+            query: The query string to rank documents against.
+            documents: List of Document objects to rerank.
+
+        Returns:
+            List of Document objects sorted by relevance score.
+        """
+        if not documents:
+            return []
+
+        # Validate top_n
+        top_n = self.top_n
+        if top_n is not None and top_n <= 0:
+            logger.warning(f"top_n should be a positive integer, got {self.top_n}, setting top_n to None")
+            top_n = None
+
+        # Build the request
+        rerank_request: Dict[str, Any] = {
+            "queries": [
+                {
+                    "type": "TEXT",
+                    "textQuery": {
+                        "text": query,
+                    },
+                }
+            ],
+            "sources": self._build_sources(documents),
+            "rerankingConfiguration": {
+                "type": "BEDROCK_RERANKING_MODEL",
+                "bedrockRerankingConfiguration": {
+                    "modelConfiguration": {
+                        "modelArn": self._get_model_arn(),
+                    },
+                },
+            },
+        }
+
+        # Add numberOfResults if top_n is specified
+        if top_n is not None:
+            rerank_request["rerankingConfiguration"]["bedrockRerankingConfiguration"]["numberOfResults"] = top_n
+
+        # Add additional model request fields if provided
+        if self.additional_model_request_fields:
+            rerank_request["rerankingConfiguration"]["bedrockRerankingConfiguration"]["modelConfiguration"][
+                "additionalModelRequestFields"
+            ] = self.additional_model_request_fields
+
+        # Call the Rerank API
+        response = self.client.rerank(**rerank_request)
+
+        # Process results
+        reranked_docs: List[Document] = []
+        results = response.get("results", [])
+
+        for result in results:
+            index = result.get("index")
+            relevance_score = result.get("relevanceScore")
+
+            if index is not None and index < len(documents):
+                doc = documents[index]
+                doc.reranking_score = relevance_score
+                reranked_docs.append(doc)
+
+        # Results from API are already sorted by relevance, but ensure sorting
+        reranked_docs.sort(
+            key=lambda x: x.reranking_score if x.reranking_score is not None else float("-inf"),
+            reverse=True,
+        )
+
+        return reranked_docs
+
+    def rerank(self, query: str, documents: List[Document]) -> List[Document]:
+        """
+        Rerank documents based on their relevance to a query.
+
+        Args:
+            query: The query string to rank documents against.
+            documents: List of Document objects to rerank.
+
+        Returns:
+            List of Document objects sorted by relevance score (highest first).
+            Each document will have its `reranking_score` attribute set.
+        """
+        try:
+            return self._rerank(query=query, documents=documents)
+        except ClientError as e:
+            error_code = e.response.get("Error", {}).get("Code", "Unknown")
+            error_message = e.response.get("Error", {}).get("Message", str(e))
+            logger.error(f"AWS Bedrock Rerank API error ({error_code}): {error_message}. Returning original documents.")
+            return documents
+        except Exception as e:
+            logger.error(f"Error reranking documents: {e}. Returning original documents.")
+            return documents
+
+
+class CohereBedrockReranker(AwsBedrockReranker):
+    """
+    Convenience class for Cohere Rerank 3.5 on AWS Bedrock.
+
+    This is a pre-configured AwsBedrockReranker using the Cohere Rerank 3.5 model.
+
+    Example:
+        ```python
+        reranker = CohereBedrockReranker(top_n=5, aws_region="us-west-2")
+        reranked_docs = reranker.rerank(query="What is AI?", documents=docs)
+        ```
+    """
+
+    model: str = Field(default=COHERE_RERANK_V3_5)
+
+
+class AmazonReranker(AwsBedrockReranker):
+    """
+    Convenience class for Amazon Rerank 1.0 on AWS Bedrock.
+
+    This is a pre-configured AwsBedrockReranker using the Amazon Rerank 1.0 model.
+
+    Note: Amazon Rerank 1.0 is NOT available in us-east-1 (N. Virginia).
+
+    Example:
+        ```python
+        reranker = AmazonReranker(top_n=5, aws_region="us-west-2")
+        reranked_docs = reranker.rerank(query="What is AI?", documents=docs)
+        ```
+    """
+
+    model: str = Field(default=AMAZON_RERANK_V1)
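Since the convenience subclasses only pin the `model` field, the shortest path to reranked results follows the docstring pattern. A sketch (the region is illustrative, credentials are assumed to come from the environment or the default AWS chain, and the `Document(content=...)` construction is inferred from the module's use of `doc.content`):

```python
from agno.knowledge.document import Document
from agno.knowledge.reranker.aws_bedrock import CohereBedrockReranker

docs = [
    Document(content="Agno is an agent framework."),
    Document(content="Bedrock hosts foundation models."),
]

reranker = CohereBedrockReranker(top_n=2, aws_region="us-west-2")
reranked = reranker.rerank(query="What is agno?", documents=docs)
for doc in reranked:
    print(doc.reranking_score, doc.content)
```

On failure, `rerank` logs the error and returns the documents unreranked, so callers do not need their own try/except around it.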
agno/learn/machine.py CHANGED
@@ -645,12 +645,11 @@ class LearningMachine:
         for name, store in self.stores.items():
             try:
                 result = await store.arecall(**context)
-                if result is not None:
-                    results[name] = result
-                    try:
-                        log_debug(f"Recalled from {name}: {result}")
-                    except Exception:
-                        pass
+                results[name] = result
+                try:
+                    log_debug(f"Recalled from {name}: {result}")
+                except Exception:
+                    pass
             except Exception as e:
                 log_warning(f"Error recalling from {name}: {e}")

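With `None` recalls now recorded, a missing key in `results` unambiguously means the store raised (and was logged), while an explicit `None` means it ran and found nothing. A sketch of how a caller can tell the cases apart (store names illustrative):

```python
recalls = {"episodic": ["met Ada at PyCon"], "semantic": None}  # illustrative outputs

for name in ("episodic", "semantic", "procedural"):
    if name not in recalls:
        print(name, "-> store errored; see the warning log")
    elif recalls[name] is None:
        print(name, "-> ran, recalled nothing")
    else:
        print(name, "->", recalls[name])
```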