MemoryOS 0.1.13__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of MemoryOS might be problematic.

Files changed (84)
  1. {memoryos-0.1.13.dist-info → memoryos-0.2.1.dist-info}/METADATA +78 -49
  2. memoryos-0.2.1.dist-info/RECORD +152 -0
  3. memoryos-0.2.1.dist-info/entry_points.txt +3 -0
  4. memos/__init__.py +1 -1
  5. memos/api/config.py +471 -0
  6. memos/api/exceptions.py +28 -0
  7. memos/api/mcp_serve.py +502 -0
  8. memos/api/product_api.py +35 -0
  9. memos/api/product_models.py +159 -0
  10. memos/api/routers/__init__.py +1 -0
  11. memos/api/routers/product_router.py +358 -0
  12. memos/chunkers/sentence_chunker.py +8 -2
  13. memos/cli.py +113 -0
  14. memos/configs/embedder.py +27 -0
  15. memos/configs/graph_db.py +83 -2
  16. memos/configs/llm.py +48 -0
  17. memos/configs/mem_cube.py +1 -1
  18. memos/configs/mem_reader.py +4 -0
  19. memos/configs/mem_scheduler.py +91 -5
  20. memos/configs/memory.py +10 -4
  21. memos/dependency.py +52 -0
  22. memos/embedders/ark.py +92 -0
  23. memos/embedders/factory.py +4 -0
  24. memos/embedders/sentence_transformer.py +8 -2
  25. memos/embedders/universal_api.py +32 -0
  26. memos/graph_dbs/base.py +2 -2
  27. memos/graph_dbs/factory.py +2 -0
  28. memos/graph_dbs/item.py +46 -0
  29. memos/graph_dbs/neo4j.py +377 -101
  30. memos/graph_dbs/neo4j_community.py +300 -0
  31. memos/llms/base.py +9 -0
  32. memos/llms/deepseek.py +54 -0
  33. memos/llms/factory.py +10 -1
  34. memos/llms/hf.py +170 -13
  35. memos/llms/hf_singleton.py +114 -0
  36. memos/llms/ollama.py +4 -0
  37. memos/llms/openai.py +68 -1
  38. memos/llms/qwen.py +63 -0
  39. memos/llms/vllm.py +153 -0
  40. memos/mem_cube/general.py +77 -16
  41. memos/mem_cube/utils.py +102 -0
  42. memos/mem_os/core.py +131 -41
  43. memos/mem_os/main.py +93 -11
  44. memos/mem_os/product.py +1098 -35
  45. memos/mem_os/utils/default_config.py +352 -0
  46. memos/mem_os/utils/format_utils.py +1154 -0
  47. memos/mem_reader/simple_struct.py +13 -8
  48. memos/mem_scheduler/base_scheduler.py +467 -36
  49. memos/mem_scheduler/general_scheduler.py +125 -244
  50. memos/mem_scheduler/modules/base.py +9 -0
  51. memos/mem_scheduler/modules/dispatcher.py +68 -2
  52. memos/mem_scheduler/modules/misc.py +39 -0
  53. memos/mem_scheduler/modules/monitor.py +228 -49
  54. memos/mem_scheduler/modules/rabbitmq_service.py +317 -0
  55. memos/mem_scheduler/modules/redis_service.py +32 -22
  56. memos/mem_scheduler/modules/retriever.py +250 -23
  57. memos/mem_scheduler/modules/schemas.py +189 -7
  58. memos/mem_scheduler/mos_for_test_scheduler.py +143 -0
  59. memos/mem_scheduler/utils.py +51 -2
  60. memos/mem_user/persistent_user_manager.py +260 -0
  61. memos/memories/activation/item.py +25 -0
  62. memos/memories/activation/kv.py +10 -3
  63. memos/memories/activation/vllmkv.py +219 -0
  64. memos/memories/factory.py +2 -0
  65. memos/memories/textual/general.py +7 -5
  66. memos/memories/textual/item.py +3 -1
  67. memos/memories/textual/tree.py +14 -6
  68. memos/memories/textual/tree_text_memory/organize/conflict.py +198 -0
  69. memos/memories/textual/tree_text_memory/organize/manager.py +72 -23
  70. memos/memories/textual/tree_text_memory/organize/redundancy.py +193 -0
  71. memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py +233 -0
  72. memos/memories/textual/tree_text_memory/organize/reorganizer.py +606 -0
  73. memos/memories/textual/tree_text_memory/retrieve/recall.py +0 -1
  74. memos/memories/textual/tree_text_memory/retrieve/reranker.py +2 -2
  75. memos/memories/textual/tree_text_memory/retrieve/searcher.py +6 -5
  76. memos/parsers/markitdown.py +8 -2
  77. memos/templates/mem_reader_prompts.py +105 -36
  78. memos/templates/mem_scheduler_prompts.py +96 -47
  79. memos/templates/tree_reorganize_prompts.py +223 -0
  80. memos/vec_dbs/base.py +12 -0
  81. memos/vec_dbs/qdrant.py +46 -20
  82. memoryos-0.1.13.dist-info/RECORD +0 -122
  83. {memoryos-0.1.13.dist-info → memoryos-0.2.1.dist-info}/LICENSE +0 -0
  84. {memoryos-0.1.13.dist-info → memoryos-0.2.1.dist-info}/WHEEL +0 -0
memos/memories/textual/tree_text_memory/organize/relation_reason_detector.py
@@ -0,0 +1,233 @@
+import json
+
+from memos.embedders.factory import OllamaEmbedder
+from memos.graph_dbs.item import GraphDBNode
+from memos.graph_dbs.neo4j import Neo4jGraphDB
+from memos.llms.base import BaseLLM
+from memos.log import get_logger
+from memos.memories.textual.item import TreeNodeTextualMemoryMetadata
+from memos.templates.tree_reorganize_prompts import (
+    AGGREGATE_PROMPT,
+    INFER_FACT_PROMPT,
+    PAIRWISE_RELATION_PROMPT,
+)
+
+
+logger = get_logger(__name__)
+
+
+class RelationAndReasoningDetector:
+    def __init__(self, graph_store: Neo4jGraphDB, llm: BaseLLM, embedder: OllamaEmbedder):
+        self.graph_store = graph_store
+        self.llm = llm
+        self.embedder = embedder
+
+    def process_node(self, node: GraphDBNode, exclude_ids: list[str], top_k: int = 5):
+        """
+        Unified pipeline for:
+        1) Pairwise relations (cause, condition, conflict, relate)
+        2) Inferred nodes
+        3) Sequence links
+        4) Aggregate concepts
+        """
+        if node.metadata.type == "reasoning":
+            logger.info(f"Skip reasoning for inferred node {node.id}")
+            return {
+                "relations": [],
+                "inferred_nodes": [],
+                "sequence_links": [],
+                "aggregate_nodes": [],
+            }
+
+        results = {
+            "relations": [],
+            "inferred_nodes": [],
+            "sequence_links": [],
+            "aggregate_nodes": [],
+        }
+
+        nearest = self.graph_store.get_neighbors_by_tag(
+            tags=node.metadata.tags,
+            exclude_ids=exclude_ids,
+            top_k=top_k,
+            min_overlap=2,
+        )
+        nearest = [GraphDBNode(**cand_data) for cand_data in nearest]
+
+        """
+        # 1) Pairwise relations (including CAUSE/CONDITION/CONFLICT)
+        pairwise = self._detect_pairwise_causal_condition_relations(node, nearest)
+        results["relations"].extend(pairwise["relations"])
+        """
+
+        """
+        # 2) Inferred nodes (from causal/condition)
+        inferred = self._infer_fact_nodes_from_relations(pairwise)
+        results["inferred_nodes"].extend(inferred)
+        """
+
+        """
+        3) Sequence (optional, if you have timestamps)
+        seq = self._detect_sequence_links(node, nearest)
+        results["sequence_links"].extend(seq)
+        """
+
+        # 4) Aggregate
+        agg = self._detect_aggregate_node_for_group(node, nearest, min_group_size=5)
+        if agg:
+            results["aggregate_nodes"].append(agg)
+
+        return results
+
+    def _detect_pairwise_causal_condition_relations(
+        self, node: GraphDBNode, nearest_nodes: list[GraphDBNode]
+    ):
+        """
+        Vector/tag search ➜ For each candidate, use LLM to decide:
+        - CAUSE
+        - CONDITION
+        - RELATE
+        - CONFLICT
+        """
+        results = {"relations": []}
+
+        for candidate in nearest_nodes:
+            prompt = PAIRWISE_RELATION_PROMPT.format(
+                node1=node.memory,
+                node2=candidate.memory,
+            )
+            response_text = self._call_llm(prompt)
+            relation_type = self._parse_relation_result(response_text)
+            if relation_type != "NONE":
+                results["relations"].append(
+                    {
+                        "source_id": node.id,
+                        "target_id": candidate.id,
+                        "relation_type": relation_type,
+                    }
+                )
+
+        return results
+
+    def _infer_fact_nodes_from_relations(self, pairwise_results: dict):
+        inferred_nodes = []
+        for rel in pairwise_results["relations"]:
+            if rel["relation_type"] in ("CAUSE", "CONDITION"):
+                src = self.graph_store.get_node(rel["source_id"])
+                tgt = self.graph_store.get_node(rel["target_id"])
+                if not src or not tgt:
+                    continue
+
+                prompt = INFER_FACT_PROMPT.format(
+                    source=src["memory"], target=tgt["memory"], relation_type=rel["relation_type"]
+                )
+                response_text = self._call_llm(prompt).strip()
+                if not response_text:
+                    continue
+                embedding = self.embedder.embed([response_text])[0]
+
+                inferred_nodes.append(
+                    GraphDBNode(
+                        memory=response_text,
+                        metadata=src["metadata"].__class__(
+                            user_id="",
+                            session_id="",
+                            memory_type="LongTermMemory",
+                            status="activated",
+                            key=f"InferredFact:{rel['relation_type']}",
+                            tags=["inferred"],
+                            embedding=embedding,
+                            usage=[],
+                            sources=[src["id"], tgt["id"]],
+                            background=f"Inferred from {rel['relation_type']}",
+                            confidence=0.9,
+                            type="reasoning",
+                        ),
+                    )
+                )
+        return inferred_nodes
+
+    def _detect_sequence_links(self, node: GraphDBNode, nearest_nodes: list[GraphDBNode]):
+        """
+        If node has timestamp, find other nodes to link FOLLOWS edges.
+        """
+        results = []
+        # Pseudo: find older/newer events with same tags
+        # TODO: add time sequence recall
+        neighbors = nearest_nodes
+        for cand in neighbors:
+            # Compare timestamps
+            if cand.metadata.updated_at < node.metadata.updated_at:
+                results.append({"from_id": cand.id, "to_id": node.id})
+            elif cand.metadata.updated_at > node.metadata.updated_at:
+                results.append({"from_id": node.id, "to_id": cand.id})
+        return results
+
+    def _detect_aggregate_node_for_group(
+        self, node: GraphDBNode, nearest_nodes: list[GraphDBNode], min_group_size: int = 3
+    ):
+        """
+        If nodes share overlapping tags, LLM checks if they should be summarized into a new concept.
+        """
+        if len(nearest_nodes) < min_group_size:
+            return None
+        combined_nodes = [node, *nearest_nodes]
+
+        joined = "\n".join(f"- {n.memory}" for n in combined_nodes)
+        prompt = AGGREGATE_PROMPT.replace("{joined}", joined)
+        response_text = self._call_llm(prompt)
+        response_json = self._parse_json_result(response_text)
+        if not response_json:
+            return None
+        summary = json.loads(response_text)
+        embedding = self.embedder.embed([summary["value"]])[0]
+
+        parent_node = GraphDBNode(
+            memory=summary["value"],
+            metadata=TreeNodeTextualMemoryMetadata(
+                user_id="",  # TODO: summarized node: no user_id
+                session_id="",  # TODO: summarized node: no session_id
+                memory_type=node.metadata.memory_type,
+                status="activated",
+                key=summary["key"],
+                tags=summary.get("tags", []),
+                embedding=embedding,
+                usage=[],
+                sources=[n.id for n in nearest_nodes],
+                background=summary.get("background", ""),
+                confidence=0.99,
+                type="reasoning",
+            ),
+        )
+        return parent_node
+
+    def _call_llm(self, prompt: str) -> str:
+        messages = [{"role": "user", "content": prompt}]
+        try:
+            response = self.llm.generate(messages).strip()
+            logger.debug(f"[LLM Raw] {response}")
+            return response
+        except Exception as e:
+            logger.warning(f"[LLM Error] {e}")
+            return ""
+
+    def _parse_json_result(self, response_text):
+        try:
+            response_text = response_text.replace("```", "").replace("json", "")
+            response_json = json.loads(response_text)
+            return response_json
+        except json.JSONDecodeError:
+            return {}
+
+    def _parse_relation_result(self, response_text: str) -> str:
+        """
+        Normalize and validate the LLM relation type output.
+        """
+        relation = response_text.strip().upper()
+        valid = {"CAUSE", "CONDITION", "RELATE", "CONFLICT", "NONE"}
+        if relation not in valid:
+            logger.warning(
+                f"[RelationDetector] Unexpected relation type: {relation}. Fallback to NONE."
+            )
+            return "NONE"
+        return relation
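
For context, a minimal sketch of how a caller might drive this new detector. The graph_store, llm, and embedder objects, the enrich_node helper, and the printout are illustrative assumptions only; the RelationAndReasoningDetector constructor, the process_node signature, the result keys, and the module path come from the diff and file list above.

# Hypothetical usage sketch -- not part of the package diff.
# Assumes already-configured Neo4jGraphDB, BaseLLM, and OllamaEmbedder instances.
from memos.graph_dbs.item import GraphDBNode
from memos.memories.textual.tree_text_memory.organize.relation_reason_detector import (
    RelationAndReasoningDetector,
)


def enrich_node(graph_store, llm, embedder, node: GraphDBNode) -> dict:
    detector = RelationAndReasoningDetector(graph_store=graph_store, llm=llm, embedder=embedder)

    # Exclude the node itself so the tag search does not return it as its own neighbor.
    results = detector.process_node(node, exclude_ids=[node.id], top_k=5)

    # `results` always carries the four keys shown in process_node; in 0.2.1 only the
    # aggregate step is active (the pairwise/inferred/sequence steps are commented out).
    for parent in results["aggregate_nodes"]:
        print(f"Proposed aggregate concept: {parent.memory}")
    return results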