MemoryOS 1.0.1-py3-none-any.whl → 1.1.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (82)
  1. {memoryos-1.0.1.dist-info → memoryos-1.1.2.dist-info}/METADATA +7 -2
  2. {memoryos-1.0.1.dist-info → memoryos-1.1.2.dist-info}/RECORD +79 -65
  3. {memoryos-1.0.1.dist-info → memoryos-1.1.2.dist-info}/WHEEL +1 -1
  4. memos/__init__.py +1 -1
  5. memos/api/client.py +109 -0
  6. memos/api/config.py +11 -9
  7. memos/api/context/dependencies.py +15 -55
  8. memos/api/middleware/request_context.py +9 -40
  9. memos/api/product_api.py +2 -3
  10. memos/api/product_models.py +91 -16
  11. memos/api/routers/product_router.py +23 -16
  12. memos/api/start_api.py +10 -0
  13. memos/configs/graph_db.py +4 -0
  14. memos/configs/mem_scheduler.py +38 -3
  15. memos/context/context.py +255 -0
  16. memos/embedders/factory.py +2 -0
  17. memos/graph_dbs/nebular.py +230 -232
  18. memos/graph_dbs/neo4j.py +35 -1
  19. memos/graph_dbs/neo4j_community.py +7 -0
  20. memos/llms/factory.py +2 -0
  21. memos/llms/openai.py +74 -2
  22. memos/log.py +27 -15
  23. memos/mem_cube/general.py +3 -1
  24. memos/mem_os/core.py +60 -22
  25. memos/mem_os/main.py +3 -6
  26. memos/mem_os/product.py +35 -11
  27. memos/mem_reader/factory.py +2 -0
  28. memos/mem_reader/simple_struct.py +127 -74
  29. memos/mem_scheduler/analyzer/__init__.py +0 -0
  30. memos/mem_scheduler/analyzer/mos_for_test_scheduler.py +569 -0
  31. memos/mem_scheduler/analyzer/scheduler_for_eval.py +280 -0
  32. memos/mem_scheduler/base_scheduler.py +126 -56
  33. memos/mem_scheduler/general_modules/dispatcher.py +2 -2
  34. memos/mem_scheduler/general_modules/misc.py +99 -1
  35. memos/mem_scheduler/general_modules/scheduler_logger.py +17 -11
  36. memos/mem_scheduler/general_scheduler.py +40 -88
  37. memos/mem_scheduler/memory_manage_modules/__init__.py +5 -0
  38. memos/mem_scheduler/memory_manage_modules/memory_filter.py +308 -0
  39. memos/mem_scheduler/{general_modules → memory_manage_modules}/retriever.py +34 -7
  40. memos/mem_scheduler/monitors/dispatcher_monitor.py +9 -8
  41. memos/mem_scheduler/monitors/general_monitor.py +119 -39
  42. memos/mem_scheduler/optimized_scheduler.py +124 -0
  43. memos/mem_scheduler/orm_modules/__init__.py +0 -0
  44. memos/mem_scheduler/orm_modules/base_model.py +635 -0
  45. memos/mem_scheduler/orm_modules/monitor_models.py +261 -0
  46. memos/mem_scheduler/scheduler_factory.py +2 -0
  47. memos/mem_scheduler/schemas/monitor_schemas.py +96 -29
  48. memos/mem_scheduler/utils/config_utils.py +100 -0
  49. memos/mem_scheduler/utils/db_utils.py +33 -0
  50. memos/mem_scheduler/utils/filter_utils.py +1 -1
  51. memos/mem_scheduler/webservice_modules/__init__.py +0 -0
  52. memos/memories/activation/kv.py +2 -1
  53. memos/memories/textual/item.py +95 -16
  54. memos/memories/textual/naive.py +1 -1
  55. memos/memories/textual/tree.py +27 -3
  56. memos/memories/textual/tree_text_memory/organize/handler.py +4 -2
  57. memos/memories/textual/tree_text_memory/organize/manager.py +28 -14
  58. memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +1 -2
  59. memos/memories/textual/tree_text_memory/organize/reorganizer.py +75 -23
  60. memos/memories/textual/tree_text_memory/retrieve/bochasearch.py +7 -5
  61. memos/memories/textual/tree_text_memory/retrieve/internet_retriever.py +6 -2
  62. memos/memories/textual/tree_text_memory/retrieve/internet_retriever_factory.py +2 -0
  63. memos/memories/textual/tree_text_memory/retrieve/recall.py +70 -22
  64. memos/memories/textual/tree_text_memory/retrieve/searcher.py +101 -33
  65. memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py +5 -4
  66. memos/memos_tools/singleton.py +174 -0
  67. memos/memos_tools/thread_safe_dict.py +22 -0
  68. memos/memos_tools/thread_safe_dict_segment.py +382 -0
  69. memos/parsers/factory.py +2 -0
  70. memos/reranker/concat.py +59 -0
  71. memos/reranker/cosine_local.py +1 -0
  72. memos/reranker/factory.py +5 -0
  73. memos/reranker/http_bge.py +225 -12
  74. memos/templates/mem_scheduler_prompts.py +242 -0
  75. memos/types.py +4 -1
  76. memos/api/context/context.py +0 -147
  77. memos/api/context/context_thread.py +0 -96
  78. memos/mem_scheduler/mos_for_test_scheduler.py +0 -146
  79. {memoryos-1.0.1.dist-info → memoryos-1.1.2.dist-info}/entry_points.txt +0 -0
  80. {memoryos-1.0.1.dist-info → memoryos-1.1.2.dist-info/licenses}/LICENSE +0 -0
  81. /memos/mem_scheduler/{general_modules → webservice_modules}/rabbitmq_service.py +0 -0
  82. /memos/mem_scheduler/{general_modules → webservice_modules}/redis_service.py +0 -0
memos/memories/textual/tree_text_memory/retrieve/bochasearch.py
@@ -2,15 +2,17 @@
 
 import json
 
-from concurrent.futures import ThreadPoolExecutor, as_completed
+from concurrent.futures import as_completed
 from datetime import datetime
+from typing import Any
 
 import requests
 
+from memos.context.context import ContextThreadPoolExecutor
 from memos.embedders.factory import OllamaEmbedder
 from memos.log import get_logger
 from memos.mem_reader.base import BaseMemReader
-from memos.memories.textual.item import TextualMemoryItem
+from memos.memories.textual.item import SourceMessage, TextualMemoryItem
 
 
 logger = get_logger(__name__)
@@ -177,7 +179,7 @@ class BochaAISearchRetriever:
         if not info:
             info = {"user_id": "", "session_id": ""}
 
-        with ThreadPoolExecutor(max_workers=8) as executor:
+        with ContextThreadPoolExecutor(max_workers=8) as executor:
             futures = [
                 executor.submit(self._process_result, r, query, parsed_goal, info)
                 for r in search_results
@@ -193,7 +195,7 @@ class BochaAISearchRetriever:
         return list(unique_memory_items.values())
 
     def _process_result(
-        self, result: dict, query: str, parsed_goal: str, info: None
+        self, result: dict, query: str, parsed_goal: str, info: dict[str, Any]
    ) -> list[TextualMemoryItem]:
         """Process one Bocha search result into TextualMemoryItem."""
         title = result.get("name", "")
@@ -225,7 +227,7 @@ class BochaAISearchRetriever:
             )
             read_item_i.metadata.source = "web"
             read_item_i.metadata.memory_type = "OuterMemory"
-            read_item_i.metadata.sources = [url] if url else []
+            read_item_i.metadata.sources = [SourceMessage(type="web", url=url)] if url else []
             read_item_i.metadata.visibility = "public"
             memory_items.append(read_item_i)
         return memory_items
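Throughout this release, bare `ThreadPoolExecutor`s are swapped for `memos.context.context.ContextThreadPoolExecutor` (from the new `memos/context/context.py`, file 15 above). The name suggests an executor that carries request-scoped context into worker threads. A minimal sketch of how such a class is typically built with `contextvars` — illustrative only, not the package's actual implementation:

import contextvars
from concurrent.futures import ThreadPoolExecutor


class ContextThreadPoolExecutor(ThreadPoolExecutor):
    """Executor that replays the submitter's contextvars in the worker thread
    (sketch; the real memos implementation may differ)."""

    def submit(self, fn, /, *args, **kwargs):
        ctx = contextvars.copy_context()  # snapshot request-scoped vars at submit time
        return super().submit(ctx.run, fn, *args, **kwargs)

With a wrapper like this, per-request values such as the user or trace IDs set by the API middleware stay visible inside `_process_result` even though it runs on a pool thread.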
memos/memories/textual/tree_text_memory/retrieve/internet_retriever.py
@@ -7,7 +7,11 @@ from datetime import datetime
 import requests
 
 from memos.embedders.factory import OllamaEmbedder
-from memos.memories.textual.item import TextualMemoryItem, TreeNodeTextualMemoryMetadata
+from memos.memories.textual.item import (
+    SourceMessage,
+    TextualMemoryItem,
+    TreeNodeTextualMemoryMetadata,
+)
 
 
 class GoogleCustomSearchAPI:
@@ -172,7 +176,7 @@ class InternetGoogleRetriever:
                 visibility="public",
                 memory_type="LongTermMemory",  # Internet search results as working memory
                 key=title,
-                sources=[link] if link else [],
+                sources=[SourceMessage(type="web", url=link)] if link else [],
                 embedding=self.embedder.embed([memory_content])[0],  # Can add embedding later
                 created_at=datetime.now().isoformat(),
                 usage=[],
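All three internet retrievers now wrap provenance in `SourceMessage` objects instead of storing raw URL strings in `metadata.sources`. A plausible shape for the model, inferred only from the call sites (`type="web", url=...`); the actual class in `memos.memories.textual.item` may carry more fields:

from pydantic import BaseModel


class SourceMessage(BaseModel):
    """Structured provenance entry (sketch inferred from usage in this diff)."""

    type: str  # e.g. "web"
    url: str | None = None


# before: metadata.sources = [link] if link else []
# after:  metadata.sources = [SourceMessage(type="web", url=link)] if link else []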
memos/memories/textual/tree_text_memory/retrieve/internet_retriever_factory.py
@@ -10,6 +10,7 @@ from memos.memories.textual.tree_text_memory.retrieve.internet_retriever import
     InternetGoogleRetriever,
 )
 from memos.memories.textual.tree_text_memory.retrieve.xinyusearch import XinyuSearchRetriever
+from memos.memos_tools.singleton import singleton_factory
 
 
 class InternetRetrieverFactory:
@@ -23,6 +24,7 @@ class InternetRetrieverFactory:
     }
 
     @classmethod
+    @singleton_factory()
     def from_config(
         cls, config_factory: InternetRetrieverConfigFactory, embedder: BaseEmbedder
     ) -> InternetGoogleRetriever | None:
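`from_config` gains a `@singleton_factory()` decorator from the new `memos/memos_tools/singleton.py` (file 66 above), presumably so repeated calls with the same config reuse one retriever instead of rebuilding HTTP clients. A minimal sketch of the pattern — hypothetical signature; the real 174-line module is certainly more thorough:

import functools
import threading


def singleton_factory(key_fn=repr):
    """Cache a factory's result per argument key (illustrative sketch)."""

    def decorator(fn):
        cache: dict[str, object] = {}
        lock = threading.Lock()

        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            key = key_fn((args, kwargs))
            with lock:  # serialize creation so concurrent callers share one instance
                if key not in cache:
                    cache[key] = fn(*args, **kwargs)
            return cache[key]

        return wrapper

    return decorator

The same decorator appears on the embedder, LLM, mem-reader, and parser factories in this release (files 16, 20, 27, 69).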
memos/memories/textual/tree_text_memory/retrieve/recall.py
@@ -1,11 +1,16 @@
 import concurrent.futures
 
+from memos.context.context import ContextThreadPoolExecutor
 from memos.embedders.factory import OllamaEmbedder
 from memos.graph_dbs.neo4j import Neo4jGraphDB
+from memos.log import get_logger
 from memos.memories.textual.item import TextualMemoryItem
 from memos.memories.textual.tree_text_memory.retrieve.retrieval_mid_structs import ParsedTaskGoal
 
 
+logger = get_logger(__name__)
+
+
 class GraphMemoryRetriever:
     """
     Unified memory retriever that combines both graph-based and vector-based retrieval logic.
@@ -14,6 +19,8 @@ class GraphMemoryRetriever:
     def __init__(self, graph_store: Neo4jGraphDB, embedder: OllamaEmbedder):
         self.graph_store = graph_store
         self.embedder = embedder
+        self.max_workers = 10
+        self.filter_weight = 0.6
 
     def retrieve(
         self,
@@ -22,6 +29,7 @@
         top_k: int,
         memory_scope: str,
         query_embedding: list[list[float]] | None = None,
+        search_filter: dict | None = None,
     ) -> list[TextualMemoryItem]:
         """
         Perform hybrid memory retrieval:
@@ -35,7 +43,7 @@
             top_k (int): Number of candidates to return.
             memory_scope (str): One of ['working', 'long_term', 'user'].
             query_embedding(list of embedding): list of embedding of query
-
+            search_filter (dict, optional): Optional metadata filters for search results.
         Returns:
             list: Combined memory items.
         """
@@ -45,16 +53,20 @@
         if memory_scope == "WorkingMemory":
             # For working memory, retrieve all entries (no filtering)
             working_memories = self.graph_store.get_all_memory_items(
-                scope="WorkingMemory", include_embedding=True
+                scope="WorkingMemory", include_embedding=False
             )
             return [TextualMemoryItem.from_dict(record) for record in working_memories]
 
-        with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
+        with ContextThreadPoolExecutor(max_workers=2) as executor:
             # Structured graph-based retrieval
             future_graph = executor.submit(self._graph_recall, parsed_goal, memory_scope)
             # Vector similarity search
             future_vector = executor.submit(
-                self._vector_recall, query_embedding, memory_scope, top_k
+                self._vector_recall,
+                query_embedding or [],
+                memory_scope,
+                top_k,
+                search_filter=search_filter,
             )
 
             graph_results = future_graph.result()
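The hunk above keeps the two-future fan-out but hardens it: `query_embedding or []` avoids handing `None` to the vector path, and `search_filter` rides along as a keyword argument. Reduced to its skeleton (a hypothetical standalone helper, not the package's API):

from memos.context.context import ContextThreadPoolExecutor  # import path as used in this diff


def hybrid_recall(graph_recall, vector_recall, parsed_goal, scope, embedding, top_k, search_filter=None):
    """Run graph recall and vector recall concurrently, then concatenate."""
    with ContextThreadPoolExecutor(max_workers=2) as executor:
        future_graph = executor.submit(graph_recall, parsed_goal, scope)
        future_vector = executor.submit(
            vector_recall, embedding or [], scope, top_k, search_filter=search_filter
        )
        # .result() re-raises worker exceptions in the caller, so failures are not swallowed
        return future_graph.result() + future_vector.result()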
@@ -153,7 +165,7 @@
             return []
 
         # Load nodes and post-filter
-        node_dicts = self.graph_store.get_nodes(list(candidate_ids), include_embedding=True)
+        node_dicts = self.graph_store.get_nodes(list(candidate_ids), include_embedding=False)
 
         final_nodes = []
         for node in node_dicts:
@@ -181,34 +193,70 @@
         top_k: int = 20,
         max_num: int = 3,
         cube_name: str | None = None,
+        search_filter: dict | None = None,
     ) -> list[TextualMemoryItem]:
         """
-        # TODO: tackle with post-filter and pre-filter(5.18+) better.
         Perform vector-based similarity retrieval using query embedding.
+        # TODO: tackle with post-filter and pre-filter(5.18+) better.
         """
-        all_matches = []
+        if not query_embedding:
+            return []
 
-        def search_single(vec):
+        def search_single(vec, filt=None):
             return (
                 self.graph_store.search_by_embedding(
-                    vector=vec, top_k=top_k, scope=memory_scope, cube_name=cube_name
+                    vector=vec,
+                    top_k=top_k,
+                    scope=memory_scope,
+                    cube_name=cube_name,
+                    search_filter=filt,
                 )
                 or []
             )
 
-        with concurrent.futures.ThreadPoolExecutor() as executor:
-            futures = [executor.submit(search_single, vec) for vec in query_embedding[:max_num]]
-            for future in concurrent.futures.as_completed(futures):
-                result = future.result()
-                all_matches.extend(result)
-
-        if not all_matches:
+        def search_path_a():
+            """Path A: search without filter"""
+            path_a_hits = []
+            with ContextThreadPoolExecutor() as executor:
+                futures = [
+                    executor.submit(search_single, vec, None) for vec in query_embedding[:max_num]
+                ]
+                for f in concurrent.futures.as_completed(futures):
+                    path_a_hits.extend(f.result() or [])
+            return path_a_hits
+
+        def search_path_b():
+            """Path B: search with filter"""
+            if not search_filter:
+                return []
+            path_b_hits = []
+            with ContextThreadPoolExecutor() as executor:
+                futures = [
+                    executor.submit(search_single, vec, search_filter)
+                    for vec in query_embedding[:max_num]
+                ]
+                for f in concurrent.futures.as_completed(futures):
+                    path_b_hits.extend(f.result() or [])
+            return path_b_hits
+
+        # Execute both paths concurrently
+        all_hits = []
+        with ContextThreadPoolExecutor(max_workers=2) as executor:
+            path_a_future = executor.submit(search_path_a)
+            path_b_future = executor.submit(search_path_b)
+
+            all_hits.extend(path_a_future.result())
+            all_hits.extend(path_b_future.result())
+
+        if not all_hits:
             return []
 
-        # Step 3: Extract matched IDs and retrieve full nodes
-        unique_ids = set({r["id"] for r in all_matches})
-        node_dicts = self.graph_store.get_nodes(
-            list(unique_ids), include_embedding=True, cube_name=cube_name
+        # merge and deduplicate
+        unique_ids = {r["id"] for r in all_hits if r.get("id")}
+        node_dicts = (
+            self.graph_store.get_nodes(
                list(unique_ids), include_embedding=False, cube_name=cube_name
+            )
+            or []
         )
-
-        return [TextualMemoryItem.from_dict(record) for record in node_dicts]
+        return [TextualMemoryItem.from_dict(n) for n in node_dicts]
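The rewritten `_vector_recall` is the heart of the filter feature: Path A always searches unfiltered, Path B repeats the search with `search_filter` when one is given, and the union is deduplicated by node id. That trades extra queries for recall that degrades gracefully when a filter is too strict to return enough hits. A hypothetical call shape (filter keys depend on what the graph store indexes):

# hypothetical invocation of the method shown above
hits = retriever._vector_recall(
    query_embedding=[vec],               # one or more query vectors
    memory_scope="LongTermMemory",
    top_k=20,
    search_filter={"user_id": "u-123"},  # Path B only runs when this is set
)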
memos/memories/textual/tree_text_memory/retrieve/searcher.py
@@ -1,8 +1,9 @@
-import concurrent.futures
 import json
+import traceback
 
 from datetime import datetime
 
+from memos.context.context import ContextThreadPoolExecutor
 from memos.embedders.factory import OllamaEmbedder
 from memos.graph_dbs.factory import Neo4jGraphDB
 from memos.llms.factory import AzureLLM, OllamaLLM, OpenAILLM
@@ -42,13 +43,17 @@ class Searcher:
         self.internet_retriever = internet_retriever
         self.moscube = moscube
 
-        self._usage_executor = concurrent.futures.ThreadPoolExecutor(
-            max_workers=4, thread_name_prefix="usage"
-        )
+        self._usage_executor = ContextThreadPoolExecutor(max_workers=4, thread_name_prefix="usage")
 
     @timed
     def search(
-        self, query: str, top_k: int, info=None, mode="fast", memory_type="All"
+        self,
+        query: str,
+        top_k: int,
+        info=None,
+        mode="fast",
+        memory_type="All",
+        search_filter: dict | None = None,
     ) -> list[TextualMemoryItem]:
         """
         Search for memories based on a query.
@@ -63,6 +68,7 @@
                 - 'fine': Uses a more detailed search process, invoking large models for higher precision, but slower performance.
             memory_type (str): Type restriction for search.
                 ['All', 'WorkingMemory', 'LongTermMemory', 'UserMemory']
+            search_filter (dict, optional): Optional metadata filters for search results.
         Returns:
             list[TextualMemoryItem]: List of matching memories.
         """
@@ -78,9 +84,11 @@
         else:
             logger.debug(f"[SEARCH] Received info dict: {info}")
 
-        parsed_goal, query_embedding, context, query = self._parse_task(query, info, mode)
+        parsed_goal, query_embedding, context, query = self._parse_task(
+            query, info, mode, search_filter=search_filter
+        )
         results = self._retrieve_paths(
-            query, parsed_goal, query_embedding, info, top_k, mode, memory_type
+            query, parsed_goal, query_embedding, info, top_k, mode, memory_type, search_filter
         )
         deduped = self._deduplicate_results(results)
         final_results = self._sort_and_trim(deduped, top_k)
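In the `_parse_task` hunk below, `context` changes from a set comprehension to `list(dict.fromkeys(memories))`. Since dicts preserve insertion order (Python 3.7+), this deduplicates while keeping retrieval order, which a set would scramble:

memories = ["went hiking", "bought a bike", "went hiking"]
list(dict.fromkeys(memories))  # ['went hiking', 'bought a bike'] - order preserved
list({m for m in memories})    # same elements, arbitrary order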
@@ -96,7 +104,7 @@
         return final_results
 
     @timed
-    def _parse_task(self, query, info, mode, top_k=5):
+    def _parse_task(self, query, info, mode, top_k=5, search_filter: dict | None = None):
         """Parse user query, do embedding search and create context"""
         context = []
         query_embedding = None
@@ -109,9 +117,24 @@
             # retrieve related nodes by embedding
             related_nodes = [
                 self.graph_store.get_node(n["id"])
-                for n in self.graph_store.search_by_embedding(query_embedding, top_k=top_k)
+                for n in self.graph_store.search_by_embedding(
+                    query_embedding, top_k=top_k, search_filter=search_filter
+                )
             ]
-            context = list({node["memory"] for node in related_nodes})
+            memories = []
+            for node in related_nodes:
+                try:
+                    m = (
+                        node.get("memory")
+                        if isinstance(node, dict)
+                        else (getattr(node, "memory", None))
+                    )
+                    if isinstance(m, str) and m:
+                        memories.append(m)
+                except Exception:
+                    logger.error(f"[SEARCH] Error during search: {traceback.format_exc()}")
+                    continue
+            context = list(dict.fromkeys(memories))
 
             # optional: supplement context with internet knowledge
             """if self.internet_retriever:
@@ -135,10 +158,20 @@
         return parsed_goal, query_embedding, context, query
 
     @timed
-    def _retrieve_paths(self, query, parsed_goal, query_embedding, info, top_k, mode, memory_type):
+    def _retrieve_paths(
+        self,
+        query,
+        parsed_goal,
+        query_embedding,
+        info,
+        top_k,
+        mode,
+        memory_type,
+        search_filter: dict | None = None,
+    ):
         """Run A/B/C retrieval paths in parallel"""
         tasks = []
-        with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
+        with ContextThreadPoolExecutor(max_workers=3) as executor:
             tasks.append(
                 executor.submit(
                     self._retrieve_from_working_memory,
@@ -147,6 +180,7 @@
                     query_embedding,
                     top_k,
                     memory_type,
+                    search_filter,
                 )
             )
             tasks.append(
@@ -157,6 +191,7 @@
                     query_embedding,
                     top_k,
                     memory_type,
+                    search_filter,
                 )
             )
             tasks.append(
@@ -193,14 +228,24 @@
     # --- Path A
     @timed
     def _retrieve_from_working_memory(
-        self, query, parsed_goal, query_embedding, top_k, memory_type
+        self,
+        query,
+        parsed_goal,
+        query_embedding,
+        top_k,
+        memory_type,
+        search_filter: dict | None = None,
     ):
         """Retrieve and rerank from WorkingMemory"""
         if memory_type not in ["All", "WorkingMemory"]:
             logger.info(f"[PATH-A] '{query}'Skipped (memory_type does not match)")
             return []
         items = self.graph_retriever.retrieve(
-            query=query, parsed_goal=parsed_goal, top_k=top_k, memory_scope="WorkingMemory"
+            query=query,
+            parsed_goal=parsed_goal,
+            top_k=top_k,
+            memory_scope="WorkingMemory",
+            search_filter=search_filter,
         )
         return self.reranker.rerank(
             query=query,
@@ -208,37 +253,61 @@
             graph_results=items,
             top_k=top_k,
             parsed_goal=parsed_goal,
+            search_filter=search_filter,
         )
 
     # --- Path B
     @timed
     def _retrieve_from_long_term_and_user(
-        self, query, parsed_goal, query_embedding, top_k, memory_type
+        self,
+        query,
+        parsed_goal,
+        query_embedding,
+        top_k,
+        memory_type,
+        search_filter: dict | None = None,
     ):
         """Retrieve and rerank from LongTermMemory and UserMemory"""
         results = []
-        if memory_type in ["All", "LongTermMemory"]:
-            results += self.graph_retriever.retrieve(
-                query=query,
-                parsed_goal=parsed_goal,
-                query_embedding=query_embedding,
-                top_k=top_k * 2,
-                memory_scope="LongTermMemory",
-            )
-        if memory_type in ["All", "UserMemory"]:
-            results += self.graph_retriever.retrieve(
-                query=query,
-                parsed_goal=parsed_goal,
-                query_embedding=query_embedding,
-                top_k=top_k * 2,
-                memory_scope="UserMemory",
-            )
+        tasks = []
+
+        with ContextThreadPoolExecutor(max_workers=2) as executor:
+            if memory_type in ["All", "LongTermMemory"]:
+                tasks.append(
+                    executor.submit(
+                        self.graph_retriever.retrieve,
+                        query=query,
+                        parsed_goal=parsed_goal,
+                        query_embedding=query_embedding,
+                        top_k=top_k * 2,
+                        memory_scope="LongTermMemory",
+                        search_filter=search_filter,
+                    )
+                )
+            if memory_type in ["All", "UserMemory"]:
+                tasks.append(
+                    executor.submit(
+                        self.graph_retriever.retrieve,
+                        query=query,
+                        parsed_goal=parsed_goal,
+                        query_embedding=query_embedding,
+                        top_k=top_k * 2,
+                        memory_scope="UserMemory",
+                        search_filter=search_filter,
+                    )
+                )
+
+            # Collect results from all tasks
+            for task in tasks:
+                results.extend(task.result())
+
         return self.reranker.rerank(
             query=query,
             query_embedding=query_embedding[0],
            graph_results=results,
             top_k=top_k,
             parsed_goal=parsed_goal,
+            search_filter=search_filter,
         )
 
     @timed
@@ -300,8 +369,7 @@
         final_items = []
         for item, score in sorted_results:
             meta_data = item.metadata.model_dump()
-            if "relativity" not in meta_data:
-                meta_data["relativity"] = score
+            meta_data["relativity"] = score
             final_items.append(
                 TextualMemoryItem(
                     id=item.id,
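One behavioral change hides at the end of searcher.py: `_sort_and_trim` previously kept any `relativity` already present in the stored metadata and only filled it in when missing; it now overwrites the field with the current rerank score unconditionally, so the value returned to callers always describes this query's ranking rather than a stale one.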
memos/memories/textual/tree_text_memory/retrieve/xinyusearch.py
@@ -3,15 +3,16 @@
 import json
 import uuid
 
-from concurrent.futures import ThreadPoolExecutor, as_completed
+from concurrent.futures import as_completed
 from datetime import datetime
 
 import requests
 
+from memos.context.context import ContextThreadPoolExecutor
 from memos.embedders.factory import OllamaEmbedder
 from memos.log import get_logger
 from memos.mem_reader.base import BaseMemReader
-from memos.memories.textual.item import TextualMemoryItem
+from memos.memories.textual.item import SourceMessage, TextualMemoryItem
 
 
 logger = get_logger(__name__)
@@ -150,7 +151,7 @@ class XinyuSearchRetriever:
         # Convert to TextualMemoryItem format
         memory_items: list[TextualMemoryItem] = []
 
-        with ThreadPoolExecutor(max_workers=8) as executor:
+        with ContextThreadPoolExecutor(max_workers=8) as executor:
             futures = [
                 executor.submit(self._process_result, result, query, parsed_goal, info)
                 for result in search_results
@@ -332,7 +333,7 @@ class XinyuSearchRetriever:
             )
             read_item_i.metadata.source = "web"
             read_item_i.metadata.memory_type = "OuterMemory"
-            read_item_i.metadata.sources = [url] if url else []
+            read_item_i.metadata.sources = [SourceMessage(type="web", url=url)] if url else []
             read_item_i.metadata.visibility = "public"
 
             memory_items.append(read_item_i)
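Taken together, the new `search_filter` parameter threads from `Searcher.search` through `_retrieve_paths`, the graph retriever, `_vector_recall`, and the reranker. A hypothetical end-to-end call under this release (identifiers and filter keys are illustrative):

# hypothetical usage; filter keys must match fields the graph store indexes
results = searcher.search(
    query="what bikes does the user own?",
    top_k=10,
    info={"user_id": "u-123", "session_id": "s-9"},
    mode="fast",
    memory_type="All",
    search_filter={"user_id": "u-123"},
)
for item in results:
    # each item's metadata now carries this query's rerank score as "relativity"
    print(item.memory)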