MemoryOS 0.1.13__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of MemoryOS might be problematic; see the linked advisory for more details.

@@ -0,0 +1,235 @@
1
+ import json
2
+
3
+ from memos.embedders.factory import OllamaEmbedder
4
+ from memos.graph_dbs.item import GraphDBNode
5
+ from memos.graph_dbs.neo4j import Neo4jGraphDB
6
+ from memos.llms.base import BaseLLM
7
+ from memos.log import get_logger
8
+ from memos.memories.textual.item import TreeNodeTextualMemoryMetadata
9
+ from memos.templates.tree_reorganize_prompts import (
10
+ AGGREGATE_PROMPT,
11
+ INFER_FACT_PROMPT,
12
+ PAIRWISE_RELATION_PROMPT,
13
+ )
14
+
15
+
16
+ logger = get_logger(__name__)
17
+
18
+
19
+ class RelationAndReasoningDetector:
20
+ def __init__(self, graph_store: Neo4jGraphDB, llm: BaseLLM, embedder: OllamaEmbedder):
21
+ self.graph_store = graph_store
22
+ self.llm = llm
23
+ self.embedder = embedder
24
+
25
+ def process_node(self, node: GraphDBNode, exclude_ids: list[str], top_k: int = 5):
26
+ """
27
+ Unified pipeline for:
28
+ 1) Pairwise relations (cause, condition, conflict, relate)
29
+ 2) Inferred nodes
30
+ 3) Sequence links
31
+ 4) Aggregate concepts
32
+ """
33
+ if node.metadata.type == "reasoning":
34
+ logger.info(f"Skip reasoning for inferred node {node.id}")
35
+ return {
36
+ "relations": [],
37
+ "inferred_nodes": [],
38
+ "sequence_links": [],
39
+ "aggregate_nodes": [],
40
+ }
41
+
42
+ results = {
43
+ "relations": [],
44
+ "inferred_nodes": [],
45
+ "sequence_links": [],
46
+ "aggregate_nodes": [],
47
+ }
48
+
49
+ nearest = self.graph_store.get_neighbors_by_tag(
50
+ tags=node.metadata.tags,
51
+ exclude_ids=exclude_ids,
52
+ top_k=top_k,
53
+ min_overlap=2,
54
+ )
55
+ nearest = [GraphDBNode(**cand_data) for cand_data in nearest]
56
+
57
+ # 1) Pairwise relations (including CAUSE/CONDITION/CONFLICT)
58
+ pairwise = self._detect_pairwise_causal_condition_relations(node, nearest)
59
+ results["relations"].extend(pairwise["relations"])
60
+
61
+ # 2) Inferred nodes (from causal/condition)
62
+ inferred = self._infer_fact_nodes_from_relations(pairwise)
63
+ results["inferred_nodes"].extend(inferred)
64
+
65
+ # 3) Sequence (optional, if you have timestamps)
66
+ seq = self._detect_sequence_links(node, nearest)
67
+ results["sequence_links"].extend(seq)
68
+
69
+ # 4) Aggregate
70
+ agg = self._detect_aggregate_node_for_group(node, nearest, min_group_size=3)
71
+ if agg:
72
+ results["aggregate_nodes"].append(agg)
73
+
74
+ return results
75
+
76
+ def _detect_pairwise_causal_condition_relations(
77
+ self, node: GraphDBNode, nearest_nodes: list[GraphDBNode]
78
+ ):
79
+ """
80
+ Vector/tag search ➜ For each candidate, use LLM to decide:
81
+ - CAUSE
82
+ - CONDITION
83
+ - RELATE_TO
84
+ - CONFLICT
85
+ """
86
+ results = {"relations": []}
87
+
88
+ for candidate in nearest_nodes:
89
+ prompt = PAIRWISE_RELATION_PROMPT.format(
90
+ node1=node.memory,
91
+ node2=candidate.memory,
92
+ )
93
+ response_text = self._call_llm(prompt)
94
+ relation_type = self._parse_relation_result(response_text)
95
+ if relation_type != "NONE":
96
+ results["relations"].append(
97
+ {
98
+ "source_id": node.id,
99
+ "target_id": candidate.id,
100
+ "relation_type": relation_type,
101
+ }
102
+ )
103
+
104
+ return results
105
+
106
+ def _infer_fact_nodes_from_relations(self, pairwise_results: dict):
107
+ inferred_nodes = []
108
+ for rel in pairwise_results["relations"]:
109
+ if rel["relation_type"] in ("CAUSE", "CONDITION"):
110
+ src = self.graph_store.get_node(rel["source_id"])
111
+ tgt = self.graph_store.get_node(rel["target_id"])
112
+ if not src or not tgt:
113
+ continue
114
+
115
+ prompt = INFER_FACT_PROMPT.format(
116
+ source=src["memory"], target=tgt["memory"], relation_type=rel["relation_type"]
117
+ )
118
+ response_text = self._call_llm(prompt).strip()
119
+ if not response_text:
120
+ continue
121
+ embedding = self.embedder.embed([response_text])[0]
122
+
123
+ inferred_nodes.append(
124
+ GraphDBNode(
125
+ memory=response_text,
126
+ metadata=src["metadata"].__class__(
127
+ user_id="",
128
+ session_id="",
129
+ memory_type="LongTermMemory",
130
+ status="activated",
131
+ key=f"InferredFact:{rel['relation_type']}",
132
+ tags=["inferred"],
133
+ embedding=embedding,
134
+ usage=[],
135
+ sources=[src["id"], tgt["id"]],
136
+ background=f"Inferred from {rel['relation_type']}",
137
+ confidence=0.9,
138
+ type="reasoning",
139
+ ),
140
+ )
141
+ )
142
+ return inferred_nodes
143
+
144
+ def _detect_sequence_links(self, node: GraphDBNode, nearest_nodes: list[GraphDBNode]):
145
+ """
146
+ If node has timestamp, find other nodes to link FOLLOWS edges.
147
+ """
148
+ results = []
149
+ # Pseudo: find older/newer events with same tags
150
+ # TODO: add time sequence recall
151
+ neighbors = nearest_nodes
152
+ for cand in neighbors:
153
+ # Compare timestamps
154
+ if cand.metadata.updated_at < node.metadata.updated_at:
155
+ results.append({"from_id": cand.id, "to_id": node.id})
156
+ elif cand.metadata.updated_at > node.metadata.updated_at:
157
+ results.append({"from_id": node.id, "to_id": cand.id})
158
+ return results
159
+
160
+ def _detect_aggregate_node_for_group(
161
+ self, node: GraphDBNode, nearest_nodes: list[GraphDBNode], min_group_size: int = 3
162
+ ):
163
+ """
164
+ If nodes share overlapping tags, LLM checks if they should be summarized into a new concept.
165
+ """
166
+ if len(nearest_nodes) < min_group_size:
167
+ return None
168
+ combined_nodes = [node, *nearest_nodes]
169
+
170
+ joined = "\n".join(f"- {n.memory}" for n in combined_nodes)
171
+ prompt = AGGREGATE_PROMPT.format(joined=joined)
172
+ response_text = self._call_llm(prompt)
173
+ response_json = self._parse_json_result(response_text)
174
+ if not response_json:
175
+ return None
176
+ summary = json.loads(response_text)
177
+ embedding = self.embedder.embed([summary["value"]])[0]
178
+
179
+ parent_node = GraphDBNode(
180
+ memory=summary["value"],
181
+ metadata=TreeNodeTextualMemoryMetadata(
182
+ user_id="", # TODO: summarized node: no user_id
183
+ session_id="", # TODO: summarized node: no session_id
184
+ memory_type=node.metadata.memory_type,
185
+ status="activated",
186
+ key=summary["key"],
187
+ tags=summary.get("tags", []),
188
+ embedding=embedding,
189
+ usage=[],
190
+ sources=[n.id for n in nearest_nodes],
191
+ background=summary.get("background", ""),
192
+ confidence=0.99,
193
+ type="reasoning",
194
+ ),
195
+ )
196
+ return parent_node
197
+
198
+ def _call_llm(self, prompt: str) -> str:
199
+ messages = [{"role": "user", "content": prompt}]
200
+ try:
201
+ response = self.llm.generate(messages).strip()
202
+ logger.debug(f"[LLM Raw] {response}")
203
+ return response
204
+ except Exception as e:
205
+ logger.warning(f"[LLM Error] {e}")
206
+ return ""
207
+
208
+ def _parse_relation_result(self, response_text: str) -> str:
209
+ relation = response_text.strip().upper()
210
+ valid = {"CAUSE", "CONDITION", "RELATE_TO", "CONFLICT", "NONE"}
211
+ if relation not in valid:
212
+ logger.warning(f"[RelationDetector] Unexpected relation: {relation}. Fallback NONE.")
213
+ return "NONE"
214
+ return relation
215
+
216
+ def _parse_json_result(self, response_text):
217
+ try:
218
+ response_text = response_text.replace("```", "").replace("json", "")
219
+ response_json = json.loads(response_text)
220
+ return response_json
221
+ except json.JSONDecodeError:
222
+ return {}
223
+
224
+ def _parse_relation_result(self, response_text: str) -> str:
225
+ """
226
+ Normalize and validate the LLM relation type output.
227
+ """
228
+ relation = response_text.strip().upper()
229
+ valid = {"CAUSE", "CONDITION", "RELATE_TO", "CONFLICT", "NONE"}
230
+ if relation not in valid:
231
+ logger.warning(
232
+ f"[RelationDetector] Unexpected relation type: {relation}. Fallback to NONE."
233
+ )
234
+ return "NONE"
235
+ return relation