pycityagent 2.0.0a21__py3-none-any.whl → 2.0.0a24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,6 @@
1
1
  """Memory."""
2
2
 
3
+ from .faiss_query import FaissQuery
3
4
  from .memory import Memory
4
5
  from .memory_base import MemoryBase, MemoryUnit
5
6
  from .profile import ProfileMemory, ProfileMemoryUnit
@@ -8,4 +9,5 @@ from .state import StateMemory
8
9
 
9
10
  __all__ = [
10
11
  "Memory",
12
+ "FaissQuery",
11
13
  ]
@@ -0,0 +1,302 @@
1
+ import asyncio
2
+ from collections.abc import Sequence
3
+ from typing import Any, Literal, Optional, Union
4
+
5
+ import faiss
6
+ import numpy as np
7
+ from langchain_community.docstore.in_memory import InMemoryDocstore
8
+ from langchain_community.vectorstores import FAISS
9
+ from langchain_core.documents import Document
10
+ from langchain_core.embeddings import Embeddings
11
+
12
+ from ..utils.decorators import lock_decorator
13
+
14
+
15
class FaissQuery:
    """Asynchronous, per-agent similarity search on top of a LangChain FAISS store.

    Every stored document carries the owning agent's id under the ``_id``
    metadata key, so each query can be restricted to a single agent's
    documents. All public coroutines are serialized through ``lock_decorator``
    (which uses ``self._lock``).
    """

    def __init__(
        self,
        embeddings: Optional[Embeddings] = None,
        index_type: Any = faiss.IndexFlatL2,
        dimension: Optional[int] = None,
    ) -> None:
        """Initialize the FAISS-backed vector store.

        Args:
            embeddings (Embeddings, optional): Embedding model used to vectorize
                documents. When None, the index and vector store are left
                uninitialized (properties will raise until configured).
            index_type (Any): FAISS index class/factory. Defaults to the exact
                L2-distance index ``faiss.IndexFlatL2``.
            dimension (int, optional): Embedding dimension. When None, it is
                inferred by embedding a short probe sentence once.
        """
        self._embeddings = embeddings
        self._lock = asyncio.Lock()
        if embeddings is None:
            self._index = None
            self._vectors_store = None
        else:
            if dimension is None:
                # One-off probe query to discover the embedding dimension.
                dimension = len(embeddings.embed_query("hello world"))
            self._index = index_type(dimension)
            self._vectors_store = FAISS(
                embedding_function=embeddings,
                index=self._index,
                docstore=InMemoryDocstore(),
                index_to_docstore_id={},
            )

    @property
    def embeddings(
        self,
    ) -> Embeddings:
        """The configured embedding model; raises RuntimeError if unset."""
        if self._embeddings is None:
            # NOTE(review): this class has no `set_embeddings` method — the
            # embedding model can only be supplied via __init__; the message
            # is kept for backward compatibility.
            raise RuntimeError("No embedding set, please `set_embeddings` first!")
        return self._embeddings

    @property
    def vectors_store(
        self,
    ) -> FAISS:
        """The underlying FAISS vector store; raises RuntimeError if unset."""
        if self._vectors_store is None:
            raise RuntimeError("No embedding set, thus no vector stores initialized!")
        return self._vectors_store

    @lock_decorator
    async def add_documents(
        self,
        agent_id: int,
        documents: Union[str, Sequence[str]],
        extra_tags: Optional[dict] = None,
    ) -> list[str]:
        """Add one or more documents tagged with the owning agent's id.

        Args:
            agent_id (int): Identifier of the agent owning the documents;
                stored under the ``_id`` metadata key.
            documents (Union[str, Sequence[str]]): A single document or a
                sequence of documents to embed and store.
            extra_tags (dict, optional): Extra metadata merged into every
                document's metadata.

        Returns:
            list[str]: The docstore ids of the added documents.
        """
        if isinstance(documents, str):
            documents = [documents]
        _metadata = {"_id": agent_id}
        if extra_tags is not None:
            _metadata.update(extra_tags)
        # Give each Document its own metadata copy: sharing one dict means a
        # later in-place mutation would silently leak into every document.
        to_add_documents = [
            Document(page_content=doc, metadata=dict(_metadata)) for doc in documents
        ]
        return await self.vectors_store.aadd_documents(
            documents=to_add_documents,
        )

    @lock_decorator
    async def delete_documents(
        self,
        to_delete_ids: list[str],
    ):
        """Delete documents from the store by their docstore ids.

        Args:
            to_delete_ids (list[str]): Ids previously returned by
                `add_documents`.
        """
        await self.vectors_store.adelete(
            ids=to_delete_ids,
        )

    @lock_decorator
    async def similarity_search(
        self,
        query: str,
        agent_id: int,
        k: int = 4,
        fetch_k: int = 20,
        return_score_type: Union[
            Literal["none"], Literal["similarity_score"], Literal["L2-distance"]
        ] = "none",
        filter: Optional[dict] = None,
    ) -> Union[list[tuple[str, dict]], list[tuple[str, float, dict]]]:
        """
        Return content most similar to the given query.

        Args:
            query (str): The text to look up documents similar to.
            agent_id (int): The identifier of the agent to filter specific documents. Only documents associated with this agent will be considered.
            k (int, optional): The number of top similar contents to return. Defaults to 4.
            fetch_k (int, optional): The number of documents to fetch before applying any filters. Defaults to 20.
            return_score_type (Union[Literal["none"], Literal["similarity_score"], Literal["L2-distance"]], optional):
                Specifies whether and how to return similarity scores with the results:
                - "none": Do not return scores; only return the contents (default).
                - "similarity_score": Return a tuple of content and its similarity score.
                - "L2-distance": Return a tuple of content and its L2 distance from the query.
            filter (dict, optional): The filter dict for metadata.

        Returns:
            Union[list[tuple[str, dict]], list[tuple[str, float, dict]]]:
                With ``return_score_type="none"``: ``(content, metadata)`` tuples;
                otherwise ``(content, score, metadata)`` tuples.

        Raises:
            ValueError: If `return_score_type` is not one of the supported values.
        """
        # Always restrict the search to this agent's documents.
        _filter = {
            "_id": agent_id,
        }
        if filter is not None:
            _filter.update(filter)
        if return_score_type == "L2-distance":
            _result = await self.vectors_store.asimilarity_search_with_score(
                query=query,
                k=k,
                filter=_filter,
                fetch_k=fetch_k,
            )
            return [(r.page_content, s, r.metadata) for r, s in _result]
        elif return_score_type == "none":
            _result = await self.vectors_store.asimilarity_search(
                query=query,
                k=k,
                filter=_filter,
                fetch_k=fetch_k,
            )
            return [(r.page_content, r.metadata) for r in _result]
        elif return_score_type == "similarity_score":
            _result = await self.vectors_store.asimilarity_search_with_relevance_scores(
                query=query,
                k=k,
                filter=_filter,
                fetch_k=fetch_k,
            )
            return [(r.page_content, s, r.metadata) for r, s in _result]
        else:
            raise ValueError(f"Invalid `return_score_type` {return_score_type}!")

    @lock_decorator
    async def similarity_search_by_embedding(
        self,
        embedding: list[float],
        agent_id: int,
        k: int = 4,
        fetch_k: int = 20,
        return_score_type: Union[Literal["none"], Literal["L2-distance"]] = "none",
        filter: Optional[dict] = None,
    ) -> Union[list[tuple[str, dict]], list[tuple[str, float, dict]]]:
        """
        Return content most similar to the given embedding vector.

        Args:
            embedding (list[float]): The vector to look up documents similar to.
            agent_id (int): The identifier of the agent to filter specific documents. Only documents associated with this agent will be considered.
            k (int, optional): The number of top similar contents to return. Defaults to 4.
            fetch_k (int, optional): The number of documents to fetch before applying any filters. Defaults to 20.
            return_score_type (Union[Literal["none"], Literal["L2-distance"]], optional):
                Specifies whether and how to return similarity scores with the results:
                - "none": Do not return scores; only return the contents (default).
                - "L2-distance": Return a tuple of content and its L2 distance from the query.
            filter (dict, optional): The filter dict for metadata.

        Returns:
            Union[list[tuple[str, dict]], list[tuple[str, float, dict]]]:
                With ``return_score_type="none"``: ``(content, metadata)`` tuples;
                with ``"L2-distance"``: ``(content, distance, metadata)`` tuples.

        Raises:
            ValueError: If `return_score_type` is not one of the supported values.
        """
        _filter = {
            "_id": agent_id,
        }
        if filter is not None:
            _filter.update(filter)
        if return_score_type == "L2-distance":
            _result = await self.vectors_store.asimilarity_search_with_score_by_vector(
                embedding=embedding,
                k=k,
                filter=_filter,
                fetch_k=fetch_k,
            )
            return [(r.page_content, s, r.metadata) for r, s in _result]
        elif return_score_type == "none":
            _result = await self.vectors_store.asimilarity_search_by_vector(
                embedding=embedding,
                k=k,
                filter=_filter,
                fetch_k=fetch_k,
            )
            return [(r.page_content, r.metadata) for r in _result]
        else:
            raise ValueError(f"Invalid `return_score_type` {return_score_type}!")

    @lock_decorator
    async def marginal_relevance_search(
        self,
        query: str,
        agent_id: int,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        return_score_type: Literal["none"] = "none",
        filter: Optional[dict] = None,
    ) -> list[tuple[str, dict]]:
        """
        Return contents selected using the maximal marginal relevance asynchronously.

        Args:
            query (str): The text to look up documents similar to.
            agent_id (int): The identifier of the agent to filter specific documents. Only documents associated with this agent will be considered.
            k (int, optional): The number of top similar contents to return. Defaults to 4.
            fetch_k (int, optional): The number of documents to fetch before applying any filters. Defaults to 20.
            lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
            return_score_type (Literal["none"], optional):
                Only "none" is supported: return contents without scores (default).
            filter (dict, optional): The filter dict for metadata.

        Returns:
            list[tuple[str, dict]]: The ``(content, metadata)`` result tuples.

        Raises:
            ValueError: If `return_score_type` is not "none".
        """
        _filter = {
            "_id": agent_id,
        }
        if filter is not None:
            _filter.update(filter)

        if return_score_type == "none":
            _result = await self.vectors_store.amax_marginal_relevance_search(
                query=query,
                k=k,
                filter=_filter,
                fetch_k=fetch_k,
                lambda_mult=lambda_mult,
            )
            return [(r.page_content, r.metadata) for r in _result]
        else:
            raise ValueError(f"Invalid `return_score_type` {return_score_type}!")

    @lock_decorator
    async def marginal_relevance_search_by_embedding(
        self,
        embedding: list[float],
        agent_id: int,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        return_score_type: Union[Literal["none"], Literal["similarity_score"]] = "none",
        filter: Optional[dict] = None,
    ) -> Union[list[tuple[str, dict]], list[tuple[str, float, dict]]]:
        """
        Return contents selected using the maximal marginal relevance asynchronously.

        Args:
            embedding (list[float]): The vector to look up documents similar to.
            agent_id (int): The identifier of the agent to filter specific documents. Only documents associated with this agent will be considered.
            k (int, optional): The number of top similar contents to return. Defaults to 4.
            fetch_k (int, optional): The number of documents to fetch before applying any filters. Defaults to 20.
            lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5.
            return_score_type (Union[Literal["none"], Literal["similarity_score"]], optional):
                Specifies whether and how to return similarity scores with the results:
                - "none": Do not return scores; only return the contents (default).
                - "similarity_score": Return a tuple of content and its similarity score.
            filter (dict, optional): The filter dict for metadata.

        Returns:
            Union[list[tuple[str, dict]], list[tuple[str, float, dict]]]:
                With ``return_score_type="none"``: ``(content, metadata)`` tuples;
                with ``"similarity_score"``: ``(content, score, metadata)`` tuples.

        Raises:
            ValueError: If `return_score_type` is not one of the supported values.
        """

        _filter = {
            "_id": agent_id,
        }
        if filter is not None:
            _filter.update(filter)
        if return_score_type == "none":
            _result = await self.vectors_store.amax_marginal_relevance_search_by_vector(
                embedding=embedding,
                k=k,
                filter=_filter,
                fetch_k=fetch_k,
                lambda_mult=lambda_mult,
            )
            return [(r.page_content, r.metadata) for r in _result]
        elif return_score_type == "similarity_score":
            # NOTE(review): relies on FAISS exposing an async
            # `amax_marginal_relevance_search_with_score_by_vector` — confirm
            # against the installed langchain_community version.
            _result = await self.vectors_store.amax_marginal_relevance_search_with_score_by_vector(
                embedding=embedding,
                k=k,
                filter=_filter,
                fetch_k=fetch_k,
                lambda_mult=lambda_mult,
            )
            return [(r.page_content, s, r.metadata) for r, s in _result]

        else:
            raise ValueError(f"Invalid `return_score_type` {return_score_type}!")
@@ -1,21 +1,25 @@
1
1
  import asyncio
2
2
  import logging
3
+ from collections import defaultdict
4
+ from collections.abc import Callable, Sequence
3
5
  from copy import deepcopy
4
6
  from datetime import datetime
5
- from typing import Any, Literal, Optional, Union
6
- from collections.abc import Sequence,Callable
7
+ from typing import Any, Literal, Optional, Union
7
8
 
8
9
  import numpy as np
10
+ from langchain_core.embeddings import Embeddings
9
11
  from pyparsing import deque
10
12
 
11
13
  from ..utils.decorators import lock_decorator
12
14
  from .const import *
15
+ from .faiss_query import FaissQuery
13
16
  from .profile import ProfileMemory
14
17
  from .self_define import DynamicMemory
15
18
  from .state import StateMemory
16
19
 
17
20
  logger = logging.getLogger("pycityagent")
18
21
 
22
+
19
23
  class Memory:
20
24
  """
21
25
  A class to manage different types of memory (state, profile, dynamic).
@@ -33,7 +37,8 @@ class Memory:
33
37
  base: Optional[dict[Any, Any]] = None,
34
38
  motion: Optional[dict[Any, Any]] = None,
35
39
  activate_timestamp: bool = False,
36
- embedding_model: Any = None,
40
+ embedding_model: Optional[Embeddings] = None,
41
+ faiss_query: Optional[FaissQuery] = None,
37
42
  ) -> None:
38
43
  """
39
44
  Initializes the Memory with optional configuration.
@@ -51,20 +56,21 @@ class Memory:
51
56
  base (Optional[dict[Any, Any]], optional): base attribute dict from City Simulator.
52
57
  motion (Optional[dict[Any, Any]], optional): motion attribute dict from City Simulator.
53
58
  activate_timestamp (bool): Whether activate timestamp storage in MemoryUnit
54
- embedding_model (Any): The embedding model for memory search.
59
+ embedding_model (Embeddings): The embedding model for memory search.
60
+ faiss_query (FaissQuery): The faiss_query of the agent. Defaults to None.
55
61
  """
56
62
  self.watchers: dict[str, list[Callable]] = {}
57
63
  self._lock = asyncio.Lock()
58
- self.embedding_model = embedding_model
59
-
60
- # 初始化embedding存储
61
- self._embeddings = {"state": {}, "profile": {}, "dynamic": {}}
64
+ self._agent_id: int = -1
65
+ self._embedding_model = embedding_model
62
66
 
63
67
  _dynamic_config: dict[Any, Any] = {}
64
68
  _state_config: dict[Any, Any] = {}
65
69
  _profile_config: dict[Any, Any] = {}
66
70
  # 记录哪些字段需要embedding
67
71
  self._embedding_fields: dict[str, bool] = {}
72
+ self._embedding_field_to_doc_id: dict[Any, str] = defaultdict(str)
73
+ self._faiss_query = faiss_query
68
74
 
69
75
  if config is not None:
70
76
  for k, v in config.items():
@@ -135,8 +141,55 @@ class Memory:
135
141
  self._profile = ProfileMemory(
136
142
  msg=_profile_config, activate_timestamp=activate_timestamp
137
143
  )
138
- self.memories = [] # 存储记忆内容
139
- self.embeddings = [] # 存储记忆的向量表示
144
+ # self.memories = [] # 存储记忆内容
145
+ # self.embeddings = [] # 存储记忆的向量表示
146
+
147
+ def set_embedding_model(
148
+ self,
149
+ embedding_model: Embeddings,
150
+ ):
151
+ self._embedding_model = embedding_model
152
+
153
+ @property
154
+ def embedding_model(
155
+ self,
156
+ ):
157
+ if self._embedding_model is None:
158
+ raise RuntimeError(
159
+ f"embedding_model before assignment, please `set_embedding_model` first!"
160
+ )
161
+ return self._embedding_model
162
+
163
+ def set_faiss_query(self, faiss_query: FaissQuery):
164
+ """
165
+ Set the FaissQuery of the agent.
166
+ """
167
+ self._faiss_query = faiss_query
168
+
169
+ @property
170
+ def agent_id(
171
+ self,
172
+ ):
173
+ if self._agent_id < 0:
174
+ raise RuntimeError(
175
+ f"agent_id before assignment, please `set_agent_id` first!"
176
+ )
177
+ return self._agent_id
178
+
179
+ def set_agent_id(self, agent_id: int):
180
+ """
181
+ Set the FaissQuery of the agent.
182
+ """
183
+ self._agent_id = agent_id
184
+
185
+ @property
186
+ def faiss_query(self) -> FaissQuery:
187
+ """FaissQuery"""
188
+ if self._faiss_query is None:
189
+ raise RuntimeError(
190
+ f"FaissQuery access before assignment, please `set_faiss_query` first!"
191
+ )
192
+ return self._faiss_query
140
193
 
141
194
  @lock_decorator
142
195
  async def get(
@@ -192,11 +245,23 @@ class Memory:
192
245
  if mode == "replace":
193
246
  await _mem.update(key, value, store_snapshot)
194
247
  # 如果字段需要embedding,则更新embedding
195
- if self.embedding_model and self._embedding_fields.get(key, False):
248
+ if self._embedding_fields.get(key, False) and self.embedding_model:
196
249
  memory_type = self._get_memory_type(_mem)
197
- self._embeddings[memory_type][key] = (
198
- await self._generate_embedding(f"{key}: {str(value)}")
250
+ # 覆盖更新删除原vector
251
+ orig_doc_id = self._embedding_field_to_doc_id[key]
252
+ if orig_doc_id:
253
+ await self.faiss_query.delete_documents(
254
+ to_delete_ids=[orig_doc_id],
255
+ )
256
+ doc_ids: list[str] = await self.faiss_query.add_documents(
257
+ agent_id=self.agent_id,
258
+ documents=f"{key}: {str(value)}",
259
+ extra_tags={
260
+ "type": memory_type,
261
+ "key": key,
262
+ },
199
263
  )
264
+ self._embedding_field_to_doc_id[key] = doc_ids[0]
200
265
  if key in self.watchers:
201
266
  for callback in self.watchers[key]:
202
267
  asyncio.create_task(callback())
@@ -214,13 +279,17 @@ class Memory:
214
279
  f"Type of {type(original_value)} does not support mode `merge`, using `replace` instead!"
215
280
  )
216
281
  await _mem.update(key, value, store_snapshot)
217
- if self.embedding_model and self._embedding_fields.get(key, False):
282
+ if self._embedding_fields.get(key, False) and self.embedding_model:
218
283
  memory_type = self._get_memory_type(_mem)
219
- self._embeddings[memory_type][key] = (
220
- await self._generate_embedding(
221
- f"{key}: {str(original_value)}"
222
- )
284
+ doc_ids = await self.faiss_query.add_documents(
285
+ agent_id=self.agent_id,
286
+ documents=f"{key}: {str(original_value)}",
287
+ extra_tags={
288
+ "type": memory_type,
289
+ "key": key,
290
+ },
223
291
  )
292
+ self._embedding_field_to_doc_id[key] = doc_ids[0]
224
293
  if key in self.watchers:
225
294
  for callback in self.watchers[key]:
226
295
  asyncio.create_task(callback())
@@ -240,68 +309,6 @@ class Memory:
240
309
  else:
241
310
  return "dynamic"
242
311
 
243
- async def _generate_embedding(self, text: str) -> np.ndarray:
244
- """生成文本的向量表示
245
-
246
- Args:
247
- text: 输入文本
248
-
249
- Returns:
250
- np.ndarray: 文本的向量表示
251
-
252
- Raises:
253
- ValueError: 如果embedding_model未初始化
254
- """
255
- if not self.embedding_model:
256
- raise RuntimeError("Embedding model not initialized")
257
-
258
- return await self.embedding_model.embed(text)
259
-
260
- async def search(self, query: str, top_k: int = 3) -> str:
261
- """搜索相关记忆
262
-
263
- Args:
264
- query: 查询文本
265
- top_k: 返回最相关的记忆数量
266
-
267
- Returns:
268
- str: 格式化的相关记忆文本
269
- """
270
- if not self.embedding_model:
271
- return "Embedding model not initialized"
272
-
273
- query_embedding = await self._generate_embedding(query)
274
- all_results = []
275
-
276
- # 搜索所有记忆类型中启用了embedding的字段
277
- for memory_type, embeddings in self._embeddings.items():
278
- for key, embedding in embeddings.items():
279
- similarity = self._cosine_similarity(query_embedding, embedding)
280
- value = await self.get(key)
281
-
282
- all_results.append(
283
- {
284
- "type": memory_type,
285
- "key": key,
286
- "content": f"{key}: {str(value)}",
287
- "similarity": similarity,
288
- }
289
- )
290
-
291
- # 按相似度排序
292
- all_results.sort(key=lambda x: x["similarity"], reverse=True)
293
- top_results = all_results[:top_k]
294
-
295
- # 格式化输出
296
- formatted_results = []
297
- for result in top_results:
298
- formatted_results.append(
299
- f"- [{result['type']}] {result['content']} "
300
- f"(相关度: {result['similarity']:.2f})"
301
- )
302
-
303
- return "\n".join(formatted_results)
304
-
305
312
  async def update_batch(
306
313
  self,
307
314
  content: Union[dict, Sequence[tuple[Any, Any]]],
@@ -388,67 +395,54 @@ class Memory:
388
395
  if _snapshot:
389
396
  await _mem.load(snapshots=_snapshot, reset_memory=reset_memory)
390
397
 
398
+ # async def add(self, content: str, metadata: Optional[dict] = None) -> None:
399
+ # """添加新的记忆
400
+
401
+ # Args:
402
+ # content: 记忆内容
403
+ # metadata: 相关元数据,如时间、地点等
404
+ # """
405
+ # embedding = await self.embedding_model.aembed_query(content)
406
+ # self.memories.append(
407
+ # {
408
+ # "content": content,
409
+ # "metadata": metadata or {},
410
+ # "timestamp": datetime.now(),
411
+ # "embedding": embedding,
412
+ # }
413
+ # )
414
+ # self.embeddings.append(embedding)
415
+
391
416
  @lock_decorator
392
- async def get_top_k(
393
- self,
394
- key: Any,
395
- metric: Callable[[Any], Any],
396
- top_k: Optional[int] = None,
397
- mode: Union[Literal["read only"], Literal["read and write"]] = "read only",
398
- preserve_order: bool = True,
399
- ) -> Any:
400
- """
401
- Retrieves the top-k items from the memory based on the given key and metric.
417
+ async def search(
418
+ self, query: str, top_k: int = 3, filter: Optional[dict] = None
419
+ ) -> str:
420
+ """搜索相关记忆
402
421
 
403
422
  Args:
404
- key (Any): The key of the item to retrieve.
405
- metric (Callable[[Any], Any]): A callable function that defines the metric for ranking the items.
406
- top_k (Optional[int], optional): The number of top items to retrieve. Defaults to None (all items).
407
- mode (Union[Literal["read only"], Literal["read and write"]], optional): Access mode for the item. Defaults to "read only".
408
- preserve_order (bool): Whether preserve original order in output values.
423
+ query: 查询文本
424
+ top_k: 返回最相关的记忆数量
425
+ filter (dict, optional): 记忆的筛选条件,如 {"type":"dynamic", "key":"self_define_1",},默认为空
409
426
 
410
427
  Returns:
411
- Any: The top-k items based on the specified metric.
412
-
413
- Raises:
414
- ValueError: If an invalid mode is provided.
415
- KeyError: If the key is not found in any of the memory sections.
416
- """
417
- if mode == "read only":
418
- process_func = deepcopy
419
- elif mode == "read and write":
420
- process_func = lambda x: x
421
- else:
422
- raise ValueError(f"Invalid get mode `{mode}`!")
423
- for _mem in [self._state, self._profile, self._dynamic]:
424
- try:
425
- value = await _mem.get_top_k(key, metric, top_k, preserve_order)
426
- return process_func(value)
427
- except KeyError as e:
428
- continue
429
- raise KeyError(f"No attribute `{key}` in memories!")
430
-
431
- async def add(self, content: str, metadata: Optional[dict] = None) -> None:
432
- """添加新的记忆
433
-
434
- Args:
435
- content: 记忆内容
436
- metadata: 相关元数据,如时间、地点等
428
+ str: 格式化的相关记忆文本
437
429
  """
438
- embedding = await self.embedding_model.embed(content)
439
- self.memories.append(
440
- {
441
- "content": content,
442
- "metadata": metadata or {},
443
- "timestamp": datetime.now(),
444
- "embedding": embedding,
445
- }
430
+ if not self._embedding_model:
431
+ return "Embedding model not initialized"
432
+ top_results: list[tuple[str, float, dict]] = (
433
+ await self.faiss_query.similarity_search( # type:ignore
434
+ query=query,
435
+ agent_id=self.agent_id,
436
+ k=top_k,
437
+ return_score_type="similarity_score",
438
+ filter=filter,
439
+ )
446
440
  )
447
- self.embeddings.append(embedding)
448
-
449
- def _cosine_similarity(self, v1: np.ndarray, v2: np.ndarray) -> float:
450
- """计算余弦相似度"""
451
- dot_product = np.dot(v1, v2)
452
- norm_v1 = np.linalg.norm(v1)
453
- norm_v2 = np.linalg.norm(v2)
454
- return dot_product / (norm_v1 * norm_v2)
441
+ # 格式化输出
442
+ formatted_results = []
443
+ for content, score, metadata in top_results:
444
+ formatted_results.append(
445
+ f"- [{metadata['type']}] {content} " f"(相关度: {score:.2f})"
446
+ )
447
+
448
+ return "\n".join(formatted_results)