agentrun_mem0ai-0.0.11-py3-none-any.whl
- agentrun_mem0/__init__.py +6 -0
- agentrun_mem0/client/__init__.py +0 -0
- agentrun_mem0/client/main.py +1747 -0
- agentrun_mem0/client/project.py +931 -0
- agentrun_mem0/client/utils.py +115 -0
- agentrun_mem0/configs/__init__.py +0 -0
- agentrun_mem0/configs/base.py +90 -0
- agentrun_mem0/configs/embeddings/__init__.py +0 -0
- agentrun_mem0/configs/embeddings/base.py +110 -0
- agentrun_mem0/configs/enums.py +7 -0
- agentrun_mem0/configs/llms/__init__.py +0 -0
- agentrun_mem0/configs/llms/anthropic.py +56 -0
- agentrun_mem0/configs/llms/aws_bedrock.py +192 -0
- agentrun_mem0/configs/llms/azure.py +57 -0
- agentrun_mem0/configs/llms/base.py +62 -0
- agentrun_mem0/configs/llms/deepseek.py +56 -0
- agentrun_mem0/configs/llms/lmstudio.py +59 -0
- agentrun_mem0/configs/llms/ollama.py +56 -0
- agentrun_mem0/configs/llms/openai.py +79 -0
- agentrun_mem0/configs/llms/vllm.py +56 -0
- agentrun_mem0/configs/prompts.py +459 -0
- agentrun_mem0/configs/rerankers/__init__.py +0 -0
- agentrun_mem0/configs/rerankers/base.py +17 -0
- agentrun_mem0/configs/rerankers/cohere.py +15 -0
- agentrun_mem0/configs/rerankers/config.py +12 -0
- agentrun_mem0/configs/rerankers/huggingface.py +17 -0
- agentrun_mem0/configs/rerankers/llm.py +48 -0
- agentrun_mem0/configs/rerankers/sentence_transformer.py +16 -0
- agentrun_mem0/configs/rerankers/zero_entropy.py +28 -0
- agentrun_mem0/configs/vector_stores/__init__.py +0 -0
- agentrun_mem0/configs/vector_stores/alibabacloud_mysql.py +64 -0
- agentrun_mem0/configs/vector_stores/aliyun_tablestore.py +32 -0
- agentrun_mem0/configs/vector_stores/azure_ai_search.py +57 -0
- agentrun_mem0/configs/vector_stores/azure_mysql.py +84 -0
- agentrun_mem0/configs/vector_stores/baidu.py +27 -0
- agentrun_mem0/configs/vector_stores/chroma.py +58 -0
- agentrun_mem0/configs/vector_stores/databricks.py +61 -0
- agentrun_mem0/configs/vector_stores/elasticsearch.py +65 -0
- agentrun_mem0/configs/vector_stores/faiss.py +37 -0
- agentrun_mem0/configs/vector_stores/langchain.py +30 -0
- agentrun_mem0/configs/vector_stores/milvus.py +42 -0
- agentrun_mem0/configs/vector_stores/mongodb.py +25 -0
- agentrun_mem0/configs/vector_stores/neptune.py +27 -0
- agentrun_mem0/configs/vector_stores/opensearch.py +41 -0
- agentrun_mem0/configs/vector_stores/pgvector.py +52 -0
- agentrun_mem0/configs/vector_stores/pinecone.py +55 -0
- agentrun_mem0/configs/vector_stores/qdrant.py +47 -0
- agentrun_mem0/configs/vector_stores/redis.py +24 -0
- agentrun_mem0/configs/vector_stores/s3_vectors.py +28 -0
- agentrun_mem0/configs/vector_stores/supabase.py +44 -0
- agentrun_mem0/configs/vector_stores/upstash_vector.py +34 -0
- agentrun_mem0/configs/vector_stores/valkey.py +15 -0
- agentrun_mem0/configs/vector_stores/vertex_ai_vector_search.py +28 -0
- agentrun_mem0/configs/vector_stores/weaviate.py +41 -0
- agentrun_mem0/embeddings/__init__.py +0 -0
- agentrun_mem0/embeddings/aws_bedrock.py +100 -0
- agentrun_mem0/embeddings/azure_openai.py +55 -0
- agentrun_mem0/embeddings/base.py +31 -0
- agentrun_mem0/embeddings/configs.py +30 -0
- agentrun_mem0/embeddings/gemini.py +39 -0
- agentrun_mem0/embeddings/huggingface.py +44 -0
- agentrun_mem0/embeddings/langchain.py +35 -0
- agentrun_mem0/embeddings/lmstudio.py +29 -0
- agentrun_mem0/embeddings/mock.py +11 -0
- agentrun_mem0/embeddings/ollama.py +53 -0
- agentrun_mem0/embeddings/openai.py +49 -0
- agentrun_mem0/embeddings/together.py +31 -0
- agentrun_mem0/embeddings/vertexai.py +64 -0
- agentrun_mem0/exceptions.py +503 -0
- agentrun_mem0/graphs/__init__.py +0 -0
- agentrun_mem0/graphs/configs.py +105 -0
- agentrun_mem0/graphs/neptune/__init__.py +0 -0
- agentrun_mem0/graphs/neptune/base.py +497 -0
- agentrun_mem0/graphs/neptune/neptunedb.py +511 -0
- agentrun_mem0/graphs/neptune/neptunegraph.py +474 -0
- agentrun_mem0/graphs/tools.py +371 -0
- agentrun_mem0/graphs/utils.py +97 -0
- agentrun_mem0/llms/__init__.py +0 -0
- agentrun_mem0/llms/anthropic.py +87 -0
- agentrun_mem0/llms/aws_bedrock.py +665 -0
- agentrun_mem0/llms/azure_openai.py +141 -0
- agentrun_mem0/llms/azure_openai_structured.py +91 -0
- agentrun_mem0/llms/base.py +131 -0
- agentrun_mem0/llms/configs.py +34 -0
- agentrun_mem0/llms/deepseek.py +107 -0
- agentrun_mem0/llms/gemini.py +201 -0
- agentrun_mem0/llms/groq.py +88 -0
- agentrun_mem0/llms/langchain.py +94 -0
- agentrun_mem0/llms/litellm.py +87 -0
- agentrun_mem0/llms/lmstudio.py +114 -0
- agentrun_mem0/llms/ollama.py +117 -0
- agentrun_mem0/llms/openai.py +147 -0
- agentrun_mem0/llms/openai_structured.py +52 -0
- agentrun_mem0/llms/sarvam.py +89 -0
- agentrun_mem0/llms/together.py +88 -0
- agentrun_mem0/llms/vllm.py +107 -0
- agentrun_mem0/llms/xai.py +52 -0
- agentrun_mem0/memory/__init__.py +0 -0
- agentrun_mem0/memory/base.py +63 -0
- agentrun_mem0/memory/graph_memory.py +698 -0
- agentrun_mem0/memory/kuzu_memory.py +713 -0
- agentrun_mem0/memory/main.py +2229 -0
- agentrun_mem0/memory/memgraph_memory.py +689 -0
- agentrun_mem0/memory/setup.py +56 -0
- agentrun_mem0/memory/storage.py +218 -0
- agentrun_mem0/memory/telemetry.py +90 -0
- agentrun_mem0/memory/utils.py +208 -0
- agentrun_mem0/proxy/__init__.py +0 -0
- agentrun_mem0/proxy/main.py +189 -0
- agentrun_mem0/reranker/__init__.py +9 -0
- agentrun_mem0/reranker/base.py +20 -0
- agentrun_mem0/reranker/cohere_reranker.py +85 -0
- agentrun_mem0/reranker/huggingface_reranker.py +147 -0
- agentrun_mem0/reranker/llm_reranker.py +142 -0
- agentrun_mem0/reranker/sentence_transformer_reranker.py +107 -0
- agentrun_mem0/reranker/zero_entropy_reranker.py +96 -0
- agentrun_mem0/utils/factory.py +283 -0
- agentrun_mem0/utils/gcp_auth.py +167 -0
- agentrun_mem0/vector_stores/__init__.py +0 -0
- agentrun_mem0/vector_stores/alibabacloud_mysql.py +547 -0
- agentrun_mem0/vector_stores/aliyun_tablestore.py +252 -0
- agentrun_mem0/vector_stores/azure_ai_search.py +396 -0
- agentrun_mem0/vector_stores/azure_mysql.py +463 -0
- agentrun_mem0/vector_stores/baidu.py +368 -0
- agentrun_mem0/vector_stores/base.py +58 -0
- agentrun_mem0/vector_stores/chroma.py +332 -0
- agentrun_mem0/vector_stores/configs.py +67 -0
- agentrun_mem0/vector_stores/databricks.py +761 -0
- agentrun_mem0/vector_stores/elasticsearch.py +237 -0
- agentrun_mem0/vector_stores/faiss.py +479 -0
- agentrun_mem0/vector_stores/langchain.py +180 -0
- agentrun_mem0/vector_stores/milvus.py +250 -0
- agentrun_mem0/vector_stores/mongodb.py +310 -0
- agentrun_mem0/vector_stores/neptune_analytics.py +467 -0
- agentrun_mem0/vector_stores/opensearch.py +292 -0
- agentrun_mem0/vector_stores/pgvector.py +404 -0
- agentrun_mem0/vector_stores/pinecone.py +382 -0
- agentrun_mem0/vector_stores/qdrant.py +270 -0
- agentrun_mem0/vector_stores/redis.py +295 -0
- agentrun_mem0/vector_stores/s3_vectors.py +176 -0
- agentrun_mem0/vector_stores/supabase.py +237 -0
- agentrun_mem0/vector_stores/upstash_vector.py +293 -0
- agentrun_mem0/vector_stores/valkey.py +824 -0
- agentrun_mem0/vector_stores/vertex_ai_vector_search.py +635 -0
- agentrun_mem0/vector_stores/weaviate.py +343 -0
- agentrun_mem0ai-0.0.11.data/data/README.md +205 -0
- agentrun_mem0ai-0.0.11.dist-info/METADATA +277 -0
- agentrun_mem0ai-0.0.11.dist-info/RECORD +150 -0
- agentrun_mem0ai-0.0.11.dist-info/WHEEL +4 -0
- agentrun_mem0ai-0.0.11.dist-info/licenses/LICENSE +201 -0
agentrun_mem0/memory/graph_memory.py
@@ -0,0 +1,698 @@
import logging

from agentrun_mem0.memory.utils import format_entities, sanitize_relationship_for_cypher

try:
    from langchain_neo4j import Neo4jGraph
except ImportError:
    raise ImportError("langchain_neo4j is not installed. Please install it using pip install langchain-neo4j")

try:
    from rank_bm25 import BM25Okapi
except ImportError:
    raise ImportError("rank_bm25 is not installed. Please install it using pip install rank-bm25")

from agentrun_mem0.graphs.tools import (
    DELETE_MEMORY_STRUCT_TOOL_GRAPH,
    DELETE_MEMORY_TOOL_GRAPH,
    EXTRACT_ENTITIES_STRUCT_TOOL,
    EXTRACT_ENTITIES_TOOL,
    RELATIONS_STRUCT_TOOL,
    RELATIONS_TOOL,
)
from agentrun_mem0.graphs.utils import EXTRACT_RELATIONS_PROMPT, get_delete_messages
from agentrun_mem0.utils.factory import EmbedderFactory, LlmFactory

logger = logging.getLogger(__name__)


class MemoryGraph:
    def __init__(self, config):
        self.config = config
        self.graph = Neo4jGraph(
            self.config.graph_store.config.url,
            self.config.graph_store.config.username,
            self.config.graph_store.config.password,
            self.config.graph_store.config.database,
            refresh_schema=False,
            driver_config={"notifications_min_severity": "OFF"},
        )
        self.embedding_model = EmbedderFactory.create(
            self.config.embedder.provider, self.config.embedder.config, self.config.vector_store.config
        )
        self.node_label = ":`__Entity__`" if self.config.graph_store.config.base_label else ""

        if self.config.graph_store.config.base_label:
            # Safely add user_id index
            try:
                self.graph.query(f"CREATE INDEX entity_single IF NOT EXISTS FOR (n {self.node_label}) ON (n.user_id)")
            except Exception:
                pass
            try:  # Safely try to add composite index (Enterprise only)
                self.graph.query(
                    f"CREATE INDEX entity_composite IF NOT EXISTS FOR (n {self.node_label}) ON (n.name, n.user_id)"
                )
            except Exception:
                pass

        # Default to openai if no specific provider is configured
        self.llm_provider = "openai"
        if self.config.llm and self.config.llm.provider:
            self.llm_provider = self.config.llm.provider
        if self.config.graph_store and self.config.graph_store.llm and self.config.graph_store.llm.provider:
            self.llm_provider = self.config.graph_store.llm.provider

        # Get LLM config with proper null checks
        llm_config = None
        if self.config.graph_store and self.config.graph_store.llm and hasattr(self.config.graph_store.llm, "config"):
            llm_config = self.config.graph_store.llm.config
        elif hasattr(self.config.llm, "config"):
            llm_config = self.config.llm.config
        self.llm = LlmFactory.create(self.llm_provider, llm_config)
        self.user_id = None
        self.threshold = 0.7

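    # The write path below is a five-step pipeline:
    #   1. _retrieve_nodes_from_data: an LLM tool call extracts entities from the text
    #   2. _establish_nodes_relations_from_data: the LLM proposes (source, relationship, destination) triples
    #   3. _search_graph_db: embedding search finds existing triples around those entities
    #   4. _get_delete_entities_from_search_output: the LLM flags stale triples to drop
    #   5. _delete_entities / _add_entities: the changes are applied in Neo4j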
    def add(self, data, filters):
        """
        Adds data to the graph.

        Args:
            data (str): The data to add to the graph.
            filters (dict): A dictionary containing filters to be applied during the addition.
        """
        entity_type_map = self._retrieve_nodes_from_data(data, filters)
        to_be_added = self._establish_nodes_relations_from_data(data, filters, entity_type_map)
        search_output = self._search_graph_db(node_list=list(entity_type_map.keys()), filters=filters)
        to_be_deleted = self._get_delete_entities_from_search_output(search_output, data, filters)

        # TODO: Batch queries with APOC plugin
        # TODO: Add more filter support
        deleted_entities = self._delete_entities(to_be_deleted, filters)
        added_entities = self._add_entities(to_be_added, filters, entity_type_map)

        return {"deleted_entities": deleted_entities, "added_entities": added_entities}

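    # Reranking note: BM25Okapi treats each (source, relationship, destination)
    # triple as a three-token document, so get_top_n favors triples whose node
    # and relation names share tokens with the whitespace-split query.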
    def search(self, query, filters, limit=100):
        """
        Search for memories and related graph data.

        Args:
            query (str): Query to search for.
            filters (dict): A dictionary containing filters to be applied during the search.
            limit (int): The maximum number of nodes and relationships to retrieve. Defaults to 100.

        Returns:
            list: The top reranked triples, each a dictionary with:
                - "source": The source node name.
                - "relationship": The relationship type.
                - "destination": The destination node name.
        """
        entity_type_map = self._retrieve_nodes_from_data(query, filters)
        search_output = self._search_graph_db(node_list=list(entity_type_map.keys()), filters=filters)

        if not search_output:
            return []

        search_outputs_sequence = [
            [item["source"], item["relationship"], item["destination"]] for item in search_output
        ]
        bm25 = BM25Okapi(search_outputs_sequence)

        tokenized_query = query.split(" ")
        reranked_results = bm25.get_top_n(tokenized_query, search_outputs_sequence, n=5)

        search_results = []
        for item in reranked_results:
            search_results.append({"source": item[0], "relationship": item[1], "destination": item[2]})

        logger.info(f"Returned {len(search_results)} search results")

        return search_results

    def delete_all(self, filters):
        # Build node properties for filtering
        node_props = ["user_id: $user_id"]
        if filters.get("agent_id"):
            node_props.append("agent_id: $agent_id")
        if filters.get("run_id"):
            node_props.append("run_id: $run_id")
        node_props_str = ", ".join(node_props)

        cypher = f"""
        MATCH (n {self.node_label} {{{node_props_str}}})
        DETACH DELETE n
        """
        params = {"user_id": filters["user_id"]}
        if filters.get("agent_id"):
            params["agent_id"] = filters["agent_id"]
        if filters.get("run_id"):
            params["run_id"] = filters["run_id"]
        self.graph.query(cypher, params=params)

    def get_all(self, filters, limit=100):
        """
        Retrieves all nodes and relationships from the graph database based on optional filtering criteria.

        Args:
            filters (dict): A dictionary containing filters to be applied during the retrieval.
            limit (int): The maximum number of nodes and relationships to retrieve. Defaults to 100.

        Returns:
            list: A list of dictionaries, each containing:
                - "source": The source node name.
                - "relationship": The relationship type.
                - "target": The target node name.
        """
        params = {"user_id": filters["user_id"], "limit": limit}

        # Build node properties based on filters
        node_props = ["user_id: $user_id"]
        if filters.get("agent_id"):
            node_props.append("agent_id: $agent_id")
            params["agent_id"] = filters["agent_id"]
        if filters.get("run_id"):
            node_props.append("run_id: $run_id")
            params["run_id"] = filters["run_id"]
        node_props_str = ", ".join(node_props)

        query = f"""
        MATCH (n {self.node_label} {{{node_props_str}}})-[r]->(m {self.node_label} {{{node_props_str}}})
        RETURN n.name AS source, type(r) AS relationship, m.name AS target
        LIMIT $limit
        """
        results = self.graph.query(query, params=params)

        final_results = []
        for result in results:
            final_results.append(
                {
                    "source": result["source"],
                    "relationship": result["relationship"],
                    "target": result["target"],
                }
            )

        logger.info(f"Retrieved {len(final_results)} relationships")

        return final_results

    def _retrieve_nodes_from_data(self, data, filters):
        """Extracts all the entities mentioned in the query."""
        _tools = [EXTRACT_ENTITIES_TOOL]
        if self.llm_provider in ["azure_openai_structured", "openai_structured"]:
            _tools = [EXTRACT_ENTITIES_STRUCT_TOOL]
        search_results = self.llm.generate_response(
            messages=[
                {
                    "role": "system",
                    "content": f"You are a smart assistant who understands entities and their types in a given text. If user message contains self reference such as 'I', 'me', 'my' etc. then use {filters['user_id']} as the source entity. Extract all the entities from the text. ***DO NOT*** answer the question itself if the given text is a question.",
                },
                {"role": "user", "content": data},
            ],
            tools=_tools,
        )

        entity_type_map = {}

        try:
            for tool_call in search_results["tool_calls"]:
                if tool_call["name"] != "extract_entities":
                    continue
                for item in tool_call["arguments"]["entities"]:
                    entity_type_map[item["entity"]] = item["entity_type"]
        except Exception as e:
            logger.exception(
                f"Error in search tool: {e}, llm_provider={self.llm_provider}, search_results={search_results}"
            )

        entity_type_map = {k.lower().replace(" ", "_"): v.lower().replace(" ", "_") for k, v in entity_type_map.items()}
        logger.debug(f"Entity type map: {entity_type_map}\n search_results={search_results}")
        return entity_type_map

    def _establish_nodes_relations_from_data(self, data, filters, entity_type_map):
        """Establish relations among the extracted nodes."""

        # Compose user identification string for prompt
        user_identity = f"user_id: {filters['user_id']}"
        if filters.get("agent_id"):
            user_identity += f", agent_id: {filters['agent_id']}"
        if filters.get("run_id"):
            user_identity += f", run_id: {filters['run_id']}"

        if self.config.graph_store.custom_prompt:
            system_content = EXTRACT_RELATIONS_PROMPT.replace("USER_ID", user_identity)
            # Add the custom prompt line if configured
            system_content = system_content.replace("CUSTOM_PROMPT", f"4. {self.config.graph_store.custom_prompt}")
            messages = [
                {"role": "system", "content": system_content},
                {"role": "user", "content": data},
            ]
        else:
            system_content = EXTRACT_RELATIONS_PROMPT.replace("USER_ID", user_identity)
            messages = [
                {"role": "system", "content": system_content},
                {"role": "user", "content": f"List of entities: {list(entity_type_map.keys())}. \n\nText: {data}"},
            ]

        _tools = [RELATIONS_TOOL]
        if self.llm_provider in ["azure_openai_structured", "openai_structured"]:
            _tools = [RELATIONS_STRUCT_TOOL]

        extracted_entities = self.llm.generate_response(
            messages=messages,
            tools=_tools,
        )

        entities = []
        if extracted_entities.get("tool_calls"):
            entities = extracted_entities["tool_calls"][0].get("arguments", {}).get("entities", [])

        entities = self._remove_spaces_from_entities(entities)
        logger.debug(f"Extracted entities: {entities}")
        return entities

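    # Similarity note: Neo4j's vector.similarity.cosine() returns a score
    # normalized to [0, 1]; the `2 * x - 1` in the queries below maps it back
    # to the raw cosine range [-1, 1] so that self.threshold (0.7) keeps its
    # pre-normalization meaning -- hence the "denormalize for backward
    # compatibility" remarks.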
    def _search_graph_db(self, node_list, filters, limit=100):
        """Search for similar nodes and their respective incoming and outgoing relations."""
        result_relations = []

        # Build node properties for filtering
        node_props = ["user_id: $user_id"]
        if filters.get("agent_id"):
            node_props.append("agent_id: $agent_id")
        if filters.get("run_id"):
            node_props.append("run_id: $run_id")
        node_props_str = ", ".join(node_props)

        for node in node_list:
            n_embedding = self.embedding_model.embed(node)

            cypher_query = f"""
            MATCH (n {self.node_label} {{{node_props_str}}})
            WHERE n.embedding IS NOT NULL
            WITH n, round(2 * vector.similarity.cosine(n.embedding, $n_embedding) - 1, 4) AS similarity // denormalize for backward compatibility
            WHERE similarity >= $threshold
            CALL {{
                WITH n
                MATCH (n)-[r]->(m {self.node_label} {{{node_props_str}}})
                RETURN n.name AS source, elementId(n) AS source_id, type(r) AS relationship, elementId(r) AS relation_id, m.name AS destination, elementId(m) AS destination_id
                UNION
                WITH n
                MATCH (n)<-[r]-(m {self.node_label} {{{node_props_str}}})
                RETURN m.name AS source, elementId(m) AS source_id, type(r) AS relationship, elementId(r) AS relation_id, n.name AS destination, elementId(n) AS destination_id
            }}
            WITH distinct source, source_id, relationship, relation_id, destination, destination_id, similarity
            RETURN source, source_id, relationship, relation_id, destination, destination_id, similarity
            ORDER BY similarity DESC
            LIMIT $limit
            """

            params = {
                "n_embedding": n_embedding,
                "threshold": self.threshold,
                "user_id": filters["user_id"],
                "limit": limit,
            }
            if filters.get("agent_id"):
                params["agent_id"] = filters["agent_id"]
            if filters.get("run_id"):
                params["run_id"] = filters["run_id"]

            ans = self.graph.query(cypher_query, params=params)
            result_relations.extend(ans)

        return result_relations

    def _get_delete_entities_from_search_output(self, search_output, data, filters):
        """Get the entities to be deleted from the search output."""
        search_output_string = format_entities(search_output)

        # Compose user identification string for prompt
        user_identity = f"user_id: {filters['user_id']}"
        if filters.get("agent_id"):
            user_identity += f", agent_id: {filters['agent_id']}"
        if filters.get("run_id"):
            user_identity += f", run_id: {filters['run_id']}"

        system_prompt, user_prompt = get_delete_messages(search_output_string, data, user_identity)

        _tools = [DELETE_MEMORY_TOOL_GRAPH]
        if self.llm_provider in ["azure_openai_structured", "openai_structured"]:
            _tools = [
                DELETE_MEMORY_STRUCT_TOOL_GRAPH,
            ]

        memory_updates = self.llm.generate_response(
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            tools=_tools,
        )

        to_be_deleted = []
        for item in memory_updates.get("tool_calls", []):
            if item.get("name") == "delete_graph_memory":
                to_be_deleted.append(item.get("arguments"))
        # Clean entities formatting
        to_be_deleted = self._remove_spaces_from_entities(to_be_deleted)
        logger.debug(f"Relationships flagged for deletion: {to_be_deleted}")
        return to_be_deleted

    def _delete_entities(self, to_be_deleted, filters):
        """Delete the entities from the graph."""
        user_id = filters["user_id"]
        agent_id = filters.get("agent_id", None)
        run_id = filters.get("run_id", None)
        results = []

        for item in to_be_deleted:
            source = item["source"]
            destination = item["destination"]
            relationship = item["relationship"]

            # Build the agent filter for the query
            params = {
                "source_name": source,
                "dest_name": destination,
                "user_id": user_id,
            }

            if agent_id:
                params["agent_id"] = agent_id
            if run_id:
                params["run_id"] = run_id

            # Build node properties for filtering
            source_props = ["name: $source_name", "user_id: $user_id"]
            dest_props = ["name: $dest_name", "user_id: $user_id"]
            if agent_id:
                source_props.append("agent_id: $agent_id")
                dest_props.append("agent_id: $agent_id")
            if run_id:
                source_props.append("run_id: $run_id")
                dest_props.append("run_id: $run_id")
            source_props_str = ", ".join(source_props)
            dest_props_str = ", ".join(dest_props)

            # Delete the specific relationship between nodes
            cypher = f"""
            MATCH (n {self.node_label} {{{source_props_str}}})
            -[r:{relationship}]->
            (m {self.node_label} {{{dest_props_str}}})
            DELETE r
            RETURN
                n.name AS source,
                m.name AS target,
                type(r) AS relationship
            """

            result = self.graph.query(cypher, params=params)
            results.append(result)

        return results

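    # _add_entities branches on whether an embedding-similar node (cosine >= 0.9)
    # already exists for the source and/or destination: reuse both by elementId,
    # reuse one and MERGE the other, or MERGE both nodes from scratch.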
    def _add_entities(self, to_be_added, filters, entity_type_map):
        """Add the new entities to the graph. Merge the nodes if they already exist."""
        user_id = filters["user_id"]
        agent_id = filters.get("agent_id", None)
        run_id = filters.get("run_id", None)
        results = []
        for item in to_be_added:
            # entities
            source = item["source"]
            destination = item["destination"]
            relationship = item["relationship"]

            # types
            source_type = entity_type_map.get(source, "__User__")
            source_label = self.node_label if self.node_label else f":`{source_type}`"
            source_extra_set = f", source:`{source_type}`" if self.node_label else ""
            destination_type = entity_type_map.get(destination, "__User__")
            destination_label = self.node_label if self.node_label else f":`{destination_type}`"
            destination_extra_set = f", destination:`{destination_type}`" if self.node_label else ""

            # embeddings
            source_embedding = self.embedding_model.embed(source)
            dest_embedding = self.embedding_model.embed(destination)

            # search for the nodes with the closest embeddings
            source_node_search_result = self._search_source_node(source_embedding, filters, threshold=0.9)
            destination_node_search_result = self._search_destination_node(dest_embedding, filters, threshold=0.9)

            # TODO: Create a cypher query and common params for all the cases
            if not destination_node_search_result and source_node_search_result:
                # Build destination MERGE properties
                merge_props = ["name: $destination_name", "user_id: $user_id"]
                if agent_id:
                    merge_props.append("agent_id: $agent_id")
                if run_id:
                    merge_props.append("run_id: $run_id")
                merge_props_str = ", ".join(merge_props)

                cypher = f"""
                MATCH (source)
                WHERE elementId(source) = $source_id
                SET source.mentions = coalesce(source.mentions, 0) + 1
                WITH source
                MERGE (destination {destination_label} {{{merge_props_str}}})
                ON CREATE SET
                    destination.created = timestamp(),
                    destination.mentions = 1
                    {destination_extra_set}
                ON MATCH SET
                    destination.mentions = coalesce(destination.mentions, 0) + 1
                WITH source, destination
                CALL db.create.setNodeVectorProperty(destination, 'embedding', $destination_embedding)
                WITH source, destination
                MERGE (source)-[r:{relationship}]->(destination)
                ON CREATE SET
                    r.created = timestamp(),
                    r.mentions = 1
                ON MATCH SET
                    r.mentions = coalesce(r.mentions, 0) + 1
                RETURN source.name AS source, type(r) AS relationship, destination.name AS target
                """

                params = {
                    "source_id": source_node_search_result[0]["elementId(source_candidate)"],
                    "destination_name": destination,
                    "destination_embedding": dest_embedding,
                    "user_id": user_id,
                }
                if agent_id:
                    params["agent_id"] = agent_id
                if run_id:
                    params["run_id"] = run_id

            elif destination_node_search_result and not source_node_search_result:
                # Build source MERGE properties
                merge_props = ["name: $source_name", "user_id: $user_id"]
                if agent_id:
                    merge_props.append("agent_id: $agent_id")
                if run_id:
                    merge_props.append("run_id: $run_id")
                merge_props_str = ", ".join(merge_props)

                cypher = f"""
                MATCH (destination)
                WHERE elementId(destination) = $destination_id
                SET destination.mentions = coalesce(destination.mentions, 0) + 1
                WITH destination
                MERGE (source {source_label} {{{merge_props_str}}})
                ON CREATE SET
                    source.created = timestamp(),
                    source.mentions = 1
                    {source_extra_set}
                ON MATCH SET
                    source.mentions = coalesce(source.mentions, 0) + 1
                WITH source, destination
                CALL db.create.setNodeVectorProperty(source, 'embedding', $source_embedding)
                WITH source, destination
                MERGE (source)-[r:{relationship}]->(destination)
                ON CREATE SET
                    r.created = timestamp(),
                    r.mentions = 1
                ON MATCH SET
                    r.mentions = coalesce(r.mentions, 0) + 1
                RETURN source.name AS source, type(r) AS relationship, destination.name AS target
                """

                params = {
                    "destination_id": destination_node_search_result[0]["elementId(destination_candidate)"],
                    "source_name": source,
                    "source_embedding": source_embedding,
                    "user_id": user_id,
                }
                if agent_id:
                    params["agent_id"] = agent_id
                if run_id:
                    params["run_id"] = run_id

            elif source_node_search_result and destination_node_search_result:
                cypher = f"""
                MATCH (source)
                WHERE elementId(source) = $source_id
                SET source.mentions = coalesce(source.mentions, 0) + 1
                WITH source
                MATCH (destination)
                WHERE elementId(destination) = $destination_id
                SET destination.mentions = coalesce(destination.mentions, 0) + 1
                MERGE (source)-[r:{relationship}]->(destination)
                ON CREATE SET
                    r.created_at = timestamp(),
                    r.updated_at = timestamp(),
                    r.mentions = 1
                ON MATCH SET r.mentions = coalesce(r.mentions, 0) + 1
                RETURN source.name AS source, type(r) AS relationship, destination.name AS target
                """

                params = {
                    "source_id": source_node_search_result[0]["elementId(source_candidate)"],
                    "destination_id": destination_node_search_result[0]["elementId(destination_candidate)"],
                    "user_id": user_id,
                }
                if agent_id:
                    params["agent_id"] = agent_id
                if run_id:
                    params["run_id"] = run_id

            else:
                # Build dynamic MERGE props for both source and destination
                source_props = ["name: $source_name", "user_id: $user_id"]
                dest_props = ["name: $dest_name", "user_id: $user_id"]
                if agent_id:
                    source_props.append("agent_id: $agent_id")
                    dest_props.append("agent_id: $agent_id")
                if run_id:
                    source_props.append("run_id: $run_id")
                    dest_props.append("run_id: $run_id")
                source_props_str = ", ".join(source_props)
                dest_props_str = ", ".join(dest_props)

                cypher = f"""
                MERGE (source {source_label} {{{source_props_str}}})
                ON CREATE SET source.created = timestamp(),
                              source.mentions = 1
                              {source_extra_set}
                ON MATCH SET source.mentions = coalesce(source.mentions, 0) + 1
                WITH source
                CALL db.create.setNodeVectorProperty(source, 'embedding', $source_embedding)
                WITH source
                MERGE (destination {destination_label} {{{dest_props_str}}})
                ON CREATE SET destination.created = timestamp(),
                              destination.mentions = 1
                              {destination_extra_set}
                ON MATCH SET destination.mentions = coalesce(destination.mentions, 0) + 1
                WITH source, destination
                CALL db.create.setNodeVectorProperty(destination, 'embedding', $dest_embedding)
                WITH source, destination
                MERGE (source)-[rel:{relationship}]->(destination)
                ON CREATE SET rel.created = timestamp(), rel.mentions = 1
                ON MATCH SET rel.mentions = coalesce(rel.mentions, 0) + 1
                RETURN source.name AS source, type(rel) AS relationship, destination.name AS target
                """

                params = {
                    "source_name": source,
                    "dest_name": destination,
                    "source_embedding": source_embedding,
                    "dest_embedding": dest_embedding,
                    "user_id": user_id,
                }
                if agent_id:
                    params["agent_id"] = agent_id
                if run_id:
                    params["run_id"] = run_id
            result = self.graph.query(cypher, params=params)
            results.append(result)
        return results

    def _remove_spaces_from_entities(self, entity_list):
        for item in entity_list:
            item["source"] = item["source"].lower().replace(" ", "_")
            # Use the sanitization function for relationships to handle special characters
            item["relationship"] = sanitize_relationship_for_cypher(item["relationship"].lower().replace(" ", "_"))
            item["destination"] = item["destination"].lower().replace(" ", "_")
        return entity_list

    def _search_source_node(self, source_embedding, filters, threshold=0.9):
        # Build WHERE conditions
        where_conditions = ["source_candidate.embedding IS NOT NULL", "source_candidate.user_id = $user_id"]
        if filters.get("agent_id"):
            where_conditions.append("source_candidate.agent_id = $agent_id")
        if filters.get("run_id"):
            where_conditions.append("source_candidate.run_id = $run_id")
        where_clause = " AND ".join(where_conditions)

        cypher = f"""
        MATCH (source_candidate {self.node_label})
        WHERE {where_clause}

        WITH source_candidate,
            round(2 * vector.similarity.cosine(source_candidate.embedding, $source_embedding) - 1, 4) AS source_similarity // denormalize for backward compatibility
        WHERE source_similarity >= $threshold

        WITH source_candidate, source_similarity
        ORDER BY source_similarity DESC
        LIMIT 1

        RETURN elementId(source_candidate)
        """

        params = {
            "source_embedding": source_embedding,
            "user_id": filters["user_id"],
            "threshold": threshold,
        }
        if filters.get("agent_id"):
            params["agent_id"] = filters["agent_id"]
        if filters.get("run_id"):
            params["run_id"] = filters["run_id"]

        result = self.graph.query(cypher, params=params)
        return result

    def _search_destination_node(self, destination_embedding, filters, threshold=0.9):
        # Build WHERE conditions
        where_conditions = ["destination_candidate.embedding IS NOT NULL", "destination_candidate.user_id = $user_id"]
        if filters.get("agent_id"):
            where_conditions.append("destination_candidate.agent_id = $agent_id")
        if filters.get("run_id"):
            where_conditions.append("destination_candidate.run_id = $run_id")
        where_clause = " AND ".join(where_conditions)

        cypher = f"""
        MATCH (destination_candidate {self.node_label})
        WHERE {where_clause}

        WITH destination_candidate,
            round(2 * vector.similarity.cosine(destination_candidate.embedding, $destination_embedding) - 1, 4) AS destination_similarity // denormalize for backward compatibility

        WHERE destination_similarity >= $threshold

        WITH destination_candidate, destination_similarity
        ORDER BY destination_similarity DESC
        LIMIT 1

        RETURN elementId(destination_candidate)
        """

        params = {
            "destination_embedding": destination_embedding,
            "user_id": filters["user_id"],
            "threshold": threshold,
        }
        if filters.get("agent_id"):
            params["agent_id"] = filters["agent_id"]
        if filters.get("run_id"):
            params["run_id"] = filters["run_id"]

        result = self.graph.query(cypher, params=params)
        return result

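    # NOTE: unlike delete_all(), reset() ignores filters and removes every node
    # and relationship in the configured database.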
    # Reset is not defined in base.py
    def reset(self):
        """Reset the graph by clearing all nodes and relationships."""
        logger.warning("Clearing graph...")
        cypher_query = """
        MATCH (n) DETACH DELETE n
        """
        return self.graph.query(cypher_query)
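
For orientation, here is a minimal usage sketch of the MemoryGraph class above. It is hypothetical, not taken from the package: it assumes a running Neo4j instance and assumes the fork keeps mem0's pydantic MemoryConfig (presumably exported from agentrun_mem0/configs/base.py in the file list) accepting nested dicts. Only MemoryGraph(config), add(), and search() are confirmed by the diff itself.

    # Hypothetical sketch -- the config construction below is an assumption,
    # inferred from the attribute accesses in MemoryGraph.__init__.
    from agentrun_mem0.configs.base import MemoryConfig  # assumed export
    from agentrun_mem0.memory.graph_memory import MemoryGraph

    config = MemoryConfig(
        graph_store={
            "provider": "neo4j",
            "config": {
                "url": "bolt://localhost:7687",
                "username": "neo4j",
                "password": "secret",
                "database": "neo4j",
            },
        },
    )

    graph = MemoryGraph(config)
    filters = {"user_id": "alice"}  # agent_id / run_id are optional filter keys
    graph.add("I work with Bob on the billing service.", filters)
    print(graph.search("Who does Alice work with?", filters))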