aip-agents-binary 0.6.2__py3-none-manylinux_2_31_x86_64.whl → 0.6.4__py3-none-manylinux_2_31_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of aip-agents-binary has been flagged as potentially problematic.
- aip_agents/agent/base_langgraph_agent.py +26 -6
- aip_agents/agent/langgraph_memory_enhancer_agent.py +368 -34
- aip_agents/agent/langgraph_memory_enhancer_agent.pyi +3 -2
- aip_agents/agent/langgraph_react_agent.py +30 -6
- aip_agents/agent/langgraph_react_agent.pyi +1 -1
- aip_agents/mcp/client/transports.py +5 -1
- aip_agents/memory/adapters/base_adapter.py +94 -0
- aip_agents/memory/adapters/base_adapter.pyi +26 -0
- aip_agents/tools/__init__.py +11 -2
- aip_agents/tools/__init__.pyi +2 -1
- aip_agents/tools/date_range_tool.py +554 -0
- aip_agents/tools/date_range_tool.pyi +21 -0
- aip_agents/tools/memory_search/__init__.py +8 -1
- aip_agents/tools/memory_search/__init__.pyi +3 -3
- aip_agents/tools/memory_search/mem0.py +108 -1
- aip_agents/tools/memory_search/mem0.pyi +11 -1
- aip_agents/tools/memory_search/schema.py +33 -0
- aip_agents/tools/memory_search/schema.pyi +10 -0
- aip_agents/tools/memory_search_tool.py +8 -0
- aip_agents/tools/memory_search_tool.pyi +2 -2
- {aip_agents_binary-0.6.2.dist-info → aip_agents_binary-0.6.4.dist-info}/METADATA +5 -16
- {aip_agents_binary-0.6.2.dist-info → aip_agents_binary-0.6.4.dist-info}/RECORD +24 -24
- aip_agents/examples/demo_memory_recall.py +0 -401
- aip_agents/examples/demo_memory_recall.pyi +0 -58
- {aip_agents_binary-0.6.2.dist-info → aip_agents_binary-0.6.4.dist-info}/WHEEL +0 -0
- {aip_agents_binary-0.6.2.dist-info → aip_agents_binary-0.6.4.dist-info}/top_level.txt +0 -0
aip_agents/agent/base_langgraph_agent.py

@@ -455,6 +455,14 @@ class BaseLangGraphAgent(BaseAgent):
 
         future.add_done_callback(_log_completion)
 
+    def _should_save_interaction(self, final_state: dict[str, Any] | None) -> bool:
+        """Return True when interaction should be saved to memory.
+
+        Subclasses can override this to skip persistence for specific response types.
+        """
+        del final_state
+        return True
+
     def _resolve_and_validate_tools(self) -> list[BaseTool]:
         """Resolve and validate regular tools for LangGraph usage.
 
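The new `_should_save_interaction` hook gives subclasses a veto over memory persistence. As a rough illustration of how a subclass might use it (the `final_state` layout and the `clarification_requested` key below are hypothetical, not part of the package):

```python
from typing import Any

from aip_agents.agent.base_langgraph_agent import BaseLangGraphAgent


class NoClarificationMemoryAgent(BaseLangGraphAgent):
    """Hypothetical subclass that skips persistence for clarification turns."""

    def _should_save_interaction(self, final_state: dict[str, Any] | None) -> bool:
        # Assumed state key: skip saving when the graph flagged the turn as a
        # clarification request; otherwise defer to the default behaviour.
        if final_state and final_state.get("clarification_requested"):
            return False
        return super()._should_save_interaction(final_state)
```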
@@ -898,7 +906,12 @@ class BaseLangGraphAgent(BaseAgent):
         formatted_output = self._format_graph_output(final_state_result)
 
         try:
-            self.
+            if self._should_save_interaction(final_state_result):
+                self._memory_save_interaction(
+                    user_text=query,
+                    ai_text=formatted_output,
+                    memory_user_id=memory_user_id,
+                )
         except Exception:
             pass
 

@@ -2442,7 +2455,7 @@ class BaseLangGraphAgent(BaseAgent):
                 and isinstance(context.last_final_content, str)
                 and context.last_final_content
             )
-            if should_save_early:
+            if should_save_early and self._should_save_interaction(context.final_state):
                 try:
                     logger.info(
                         "Agent '%s': A2A persisting memory early (len=%d) for user_id='%s'",

@@ -2461,6 +2474,8 @@ class BaseLangGraphAgent(BaseAgent):
                     context.saved_memory = True
                 except Exception:
                     pass
+            elif should_save_early:
+                context.saved_memory = True
         except Exception:
             pass
 

@@ -2492,10 +2507,15 @@ class BaseLangGraphAgent(BaseAgent):
                )
            except Exception:
                pass
-            self.
-
-
-
+            if self._should_save_interaction(context.final_state):
+                self._memory_save_interaction(
+                    user_text=context.original_query,
+                    ai_text=final_text,
+                    memory_user_id=context.memory_user_id,
+                )
+                context.saved_memory = True
+            else:
+                context.saved_memory = True
        except Exception:
            pass
 
aip_agents/agent/langgraph_memory_enhancer_agent.py

@@ -12,7 +12,8 @@ import json
 import textwrap
 from typing import Any
 
-from langchain_core.
+from langchain_core.language_models import BaseChatModel
+from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
 from langgraph.graph import END, StateGraph
 from langgraph.graph.state import CompiledStateGraph
 

@@ -20,17 +21,23 @@ from aip_agents.agent.langgraph_react_agent import LangGraphReactAgent
 from aip_agents.agent.system_instruction_context import get_current_date_context
 from aip_agents.memory.guidance import MEM0_MEMORY_RECALL_GUIDANCE
 from aip_agents.tools.memory_search_tool import (
+    MEMORY_DELETE_TOOL_NAME,
     MEMORY_SEARCH_TOOL_NAME,
     LongTermMemorySearchTool,
+    Mem0DeleteTool,
     Mem0SearchTool,
 )
+from aip_agents.utils.langgraph import (
+    convert_langchain_messages_to_gllm_messages,
+    convert_lm_output_to_langchain_message,
+)
 from aip_agents.utils.logger import get_logger
 
 logger = get_logger(__name__)
 
 
 class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
-    """Simplified mini-agent for automatic memory retrieval and query enhancement.
+    """Simplified mini-agent for automatic memory retrieval or deletion and query enhancement.
 
     This agent has a simple 2-node LangGraph (agent + tools) and uses existing memory
     infrastructure to enhance user queries with relevant context. It acts as a

@@ -54,7 +61,12 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
            - model: LLM model to use for memory decisions
            - Other BaseLangGraphAgent parameters
        """
-
+        memory_search_tool: LongTermMemorySearchTool = Mem0SearchTool(
+            memory=memory,
+            default_user_id=kwargs.get("memory_agent_id"),
+            user_id_provider=None,
+        )
+        memory_delete_tool: LongTermMemorySearchTool = Mem0DeleteTool(
             memory=memory,
             default_user_id=kwargs.get("memory_agent_id"),
             user_id_provider=None,

@@ -63,7 +75,7 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
         super().__init__(
             name="LangGraphMemoryEnhancerAgent",
             instruction=self._build_simple_instruction(),
-            tools=[
+            tools=[memory_search_tool, memory_delete_tool],
             **kwargs,
         )
 
@@ -78,18 +90,19 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
         instruction = textwrap.dedent(f"""
            {date_context}
 
-           You are a Memory
+           You are a Memory Controller Agent that decides whether to retrieve or delete memory.
 
-           Important: You WILL NOT see the tool results. The system will append
-           to the user input after your turn. Your sole responsibility
-           calls with concise arguments based on the user's message.
+           Important: You WILL NOT see the tool results. The system will either append retrieved memory
+           to the user input or return a memory action summary after your turn. Your sole responsibility
+           is to trigger the correct tool calls with concise arguments based on the user's message.
 
            What to do:
            1. Read the user's message as-is (do not rephrase it).
-           2.
-
-
-
+           2. Decide which tool to call:
+              - Use `built_in_mem0_search` to retrieve memory for answering questions.
+              - Use `built_in_mem0_delete` when the user asks to forget/delete memories.
+              Prefer a single call, but you MAY make multiple calls when clearly needed.
+           - If the user implies a time frame (e.g., "yesterday", "last week"), set `start_date`/`end_date`.
            - If the user implies a precise range, set `start_date`/`end_date` (YYYY-MM-DD).
            - If the user mentions a topic, set a concise `query` (few words or at most a sentence).
            - Adjust `limit` to higher number to allow more memory to be retrieved if needed.
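The updated prompt asks the controller model to emit `built_in_mem0_search` or `built_in_mem0_delete` tool calls with concise arguments. A rough sketch of what such tool-call payloads could look like; the argument names follow the prompt above (`query`, `start_date`, `end_date`, `limit`), while the actual schema lives in `aip_agents/tools/memory_search/schema.py` and may differ:

```python
# Hypothetical tool calls the memory controller might emit, based on the prompt above.
search_call = {
    "name": "built_in_mem0_search",
    "args": {
        "query": "travel plans",
        "start_date": "2024-05-01",
        "end_date": "2024-05-07",
        "limit": 10,
    },
    "id": "call_1",
}

delete_call = {
    "name": "built_in_mem0_delete",
    "args": {"query": "my old phone number"},
    "id": "call_2",
}
```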
@@ -110,7 +123,7 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
         return instruction
 
     async def _memory_retrieval_node(self, state: dict, config: dict | None = None) -> dict:
-        """Execute memory retrieval using explicit tool calls or synthesized defaults.
+        """Execute memory retrieval or deletion using explicit tool calls or synthesized defaults.
 
         Args:
             state: LangGraph state containing the conversation `messages` history.

@@ -147,7 +160,7 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
         if not tool_calls:
             return []
 
-        return [tc for tc in tool_calls if tc.get("name")
+        return [tc for tc in tool_calls if tc.get("name") in {MEMORY_SEARCH_TOOL_NAME, MEMORY_DELETE_TOOL_NAME}]
 
     async def _execute_mem0_tool_calls(
         self,
@@ -166,11 +179,39 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
             List of `ToolMessage` objects describing each execution result.
         """
         tool_messages: list[ToolMessage] = []
+        delete_intent: dict[str, Any] | None = None
+        user_query = self._extract_last_human_query(state.get("messages", [])) or self._fallback_query(
+            state.get("messages", [])
+        )
         for index, tool_call in enumerate(tool_calls):
-
-
+            tool_name = tool_call.get("name") or MEMORY_SEARCH_TOOL_NAME
+            args = dict(tool_call.get("args") or {})
+            if "id" not in args and "id" in tool_call:
+                args["id"] = tool_call["id"]
+            log_args = self._redact_mem0_args(tool_name, args)
+            logger.info("Executing memory tool call #%s name=%s args=%s", index, tool_name, log_args)
+            if tool_name == MEMORY_DELETE_TOOL_NAME:
+                delete_intent = delete_intent or await self._preprocess_delete_intent(user_query, state, config)
+                if not self._is_delete_intent_confirmed(delete_intent):
+                    tool_messages.append(self._build_delete_confirmation_message(tool_call, user_query))
+                    continue
+            tool_messages.append(await self._execute_mem0_call(tool_name, args, state, config))
         return tool_messages
 
+    def _redact_mem0_args(self, tool_name: str, args: dict[str, Any]) -> dict[str, Any]:
+        """Redact sensitive fields from Mem0 tool args before logging."""
+        if tool_name != MEMORY_DELETE_TOOL_NAME:
+            return args
+
+        redacted_args = dict(args)
+        if "memory_ids" in redacted_args:
+            memory_ids = redacted_args.pop("memory_ids")
+            if isinstance(memory_ids, list):
+                redacted_args["memory_ids_count"] = len(memory_ids)
+            else:
+                redacted_args["memory_ids_count"] = 0
+        return redacted_args
+
    async def _execute_default_retrieval(
        self,
        default_query: str | None,
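`_redact_mem0_args` only touches delete calls, replacing a `memory_ids` list with a count so memory identifiers never reach the logs. A small standalone sketch of that behaviour (re-implemented here for illustration, not imported from the package; the tool-name string is assumed from the prompt above, the real constant is `MEMORY_DELETE_TOOL_NAME`):

```python
from typing import Any

MEMORY_DELETE_TOOL_NAME = "built_in_mem0_delete"  # assumed value of the package constant


def redact_mem0_args(tool_name: str, args: dict[str, Any]) -> dict[str, Any]:
    """Mirror of the redaction rule: drop memory_ids, keep only their count."""
    if tool_name != MEMORY_DELETE_TOOL_NAME:
        return args
    redacted = dict(args)
    if "memory_ids" in redacted:
        memory_ids = redacted.pop("memory_ids")
        redacted["memory_ids_count"] = len(memory_ids) if isinstance(memory_ids, list) else 0
    return redacted


print(redact_mem0_args("built_in_mem0_delete", {"memory_ids": ["a", "b"], "query": "x"}))
# {'query': 'x', 'memory_ids_count': 2}
```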
@@ -188,7 +229,7 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
             Single-item list containing the resulting `ToolMessage`.
         """
         args = self._build_default_mem0_args(default_query)
-        tool_message = await self._execute_mem0_call(args, state, config)
+        tool_message = await self._execute_mem0_call(MEMORY_SEARCH_TOOL_NAME, args, state, config)
         return [tool_message]
 
     def _build_default_mem0_args(self, query: str | None) -> dict[str, Any]:

@@ -209,6 +250,7 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
 
     async def _execute_mem0_call(
         self,
+        tool_name: str,
         args: dict[str, Any],
         state: dict,
         config: dict | None,

@@ -216,6 +258,7 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
         """Execute a single Mem0 tool call with metadata resolution.
 
         Args:
+            tool_name: Name of the memory tool to invoke.
             args: Base arguments supplied by the LLM or synthesized defaults.
             state: LangGraph state that may include additional metadata.
             config: Optional runnable configuration forwarded to the tool.

@@ -223,30 +266,30 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
         Returns:
             `ToolMessage` containing raw tool output or an error description.
         """
-        args_with_metadata = self._merge_metadata(args, state)
-        tool_config = self._create_tool_config(config, state, tool_name=
-        mem0_tool = self.resolved_tools[0]
-
+        args_with_metadata = self._merge_metadata(args, state, tool_name)
+        tool_config = self._create_tool_config(config, state, tool_name=tool_name)
         try:
+            mem0_tool = self._get_tool_by_name(tool_name)
             result = await mem0_tool.ainvoke(args_with_metadata, config=tool_config)
             content = str(result)
         except Exception as exc:
-            content = f"Error executing memory
+            content = f"Error executing memory tool '{tool_name}': {exc}"
 
         return ToolMessage(content=content, tool_call_id=args.get("id", ""))
 
-    def _merge_metadata(self, args: dict[str, Any], state: dict) -> dict[str, Any]:
+    def _merge_metadata(self, args: dict[str, Any], state: dict, tool_name: str) -> dict[str, Any]:
         """Merge resolved metadata into tool arguments.
 
         Args:
             args: Tool arguments that may already include metadata.
             state: LangGraph state providing globally resolved metadata values.
+            tool_name: Name of the tool requesting metadata (used to resolve tool-specific metadata).
 
         Returns:
             Copy of ``args`` containing merged metadata entries.
         """
         args_with_metadata = dict(args)
-        effective_metadata = self._resolve_effective_metadata(state)
+        effective_metadata = self._resolve_effective_metadata(state, tool_name)
         if not effective_metadata:
             return args_with_metadata
 

@@ -259,11 +302,12 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
         args_with_metadata["metadata"] = merged_metadata
         return args_with_metadata
 
-    def _resolve_effective_metadata(self, state: dict) -> dict[str, Any] | None:
+    def _resolve_effective_metadata(self, state: dict, tool_name: str) -> dict[str, Any] | None:
         """Resolve metadata for the Mem0 tool, swallowing resolution errors.
 
         Args:
             state: LangGraph state whose ``metadata`` key may include overrides.
+            tool_name: Name of the tool whose metadata resolution strategy should be used.
 
         Returns:
             Resolved metadata dictionary or ``None`` if not available.

@@ -273,7 +317,7 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
             return None
 
         try:
-            return self._resolve_tool_metadata(
+            return self._resolve_tool_metadata(tool_name, raw_metadata)
         except Exception:
             return None
 
@@ -309,6 +353,16 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
         """
         messages = state.get("messages", [])
         original_query = self._extract_last_human_query(messages) or self._fallback_query(messages)
+        delete_action = self._extract_delete_action(messages)
+        if delete_action:
+            action_block = self._format_memory_action(delete_action)
+            return {"messages": [AIMessage(content=action_block)]}
+
+        delete_error = self._extract_delete_error(messages)
+        if delete_error:
+            action_block = self._format_memory_action_error(delete_error)
+            return {"messages": [AIMessage(content=action_block)]}
+
         memories = self._collect_unique_memories(messages)
         tagged_memory = self._format_memories(memories)
 
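The node now short-circuits when a delete tool message is present: a payload with `status == "success"` plus a `mode`, or `status == "needs_confirmation"`, is rendered as a `<MEMORY_ACTION>` block instead of enhancing the query. A rough illustration of the payload shapes involved (the values are made up; only the keys checked in the diff are assumed):

```python
# Hypothetical delete-tool payloads that _extract_delete_action would pick up.
completed_delete = {
    "status": "success",
    "mode": "by_query",          # any truthy mode marks a finished deletion
    "result": {"count": 3},
    "summary": None,
}

pending_confirmation = {
    "status": "needs_confirmation",
    "summary": "Do you want me to delete memories related to: 'my trip to Bali'?",
}

# With the completed payload, _format_memory_action (see the diff below) would emit:
#   <MEMORY_ACTION>
#   action=delete
#   status=success
#   summary=Deleted 3 memories (mode: by_query).
#   </MEMORY_ACTION>
```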
@@ -365,11 +419,13 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
         if not isinstance(message, ToolMessage):
             return []
 
-        raw_results = self.
-
+        raw_results = self._parse_tool_message_json(message)
+        if isinstance(raw_results, list):
+            return [memory for memory in raw_results if isinstance(memory, dict)]
+        return []
 
-    def
-    """Parse the JSON content of a tool message
+    def _parse_tool_message_json(self, message: ToolMessage) -> Any:
+        """Parse the JSON content of a tool message.
 
         Args:
             message: Tool message emitted by the memory search tool.
@@ -385,13 +441,291 @@ class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
                 exc,
                 message.content[:200],
             )
-            return
-
-        if not isinstance(raw_results, list):
-            return []
+            return None
 
         return raw_results
 
+    def _extract_delete_action(self, messages: list) -> dict[str, Any] | None:
+        """Return delete action details if a delete tool message is present.
+
+        Args:
+            messages: Ordered message history produced during the graph run.
+
+        Returns:
+            Action dict or None when no delete action is detected.
+        """
+        for message in messages:
+            if not isinstance(message, ToolMessage):
+                continue
+            raw_payload = self._parse_tool_message_json(message)
+            if not isinstance(raw_payload, dict):
+                continue
+            status = raw_payload.get("status")
+            if status == "success" and raw_payload.get("mode"):
+                return raw_payload
+            if status == "needs_confirmation":
+                return raw_payload
+        return None
+
+    def _format_memory_action(self, action: dict[str, Any]) -> str:
+        """Format a memory action block for delete results.
+
+        Args:
+            action: Parsed action payload from the delete tool.
+
+        Returns:
+            Formatted action block string.
+        """
+        status = action.get("status", "success")
+        summary = action.get("summary")
+        if status == "needs_confirmation":
+            summary = summary or "Do you want me to delete the related memories?"
+        else:
+            mode = action.get("mode", "unknown")
+            result = action.get("result")
+            summary = summary or f"Deleted memories (mode: {mode})."
+            if isinstance(result, dict):
+                count = result.get("count") or result.get("deleted") or result.get("total")
+                if count is not None:
+                    summary = f"Deleted {count} memories (mode: {mode})."
+        return "\n".join(
+            [
+                "<MEMORY_ACTION>",
+                "action=delete",
+                f"status={status}",
+                f"summary={summary}",
+                "</MEMORY_ACTION>",
+            ]
+        )
+
+    def _extract_delete_error(self, messages: list) -> str | None:
+        """Return delete error summary if delete tool failed.
+
+        Args:
+            messages: Ordered message history produced during the graph run.
+
+        Returns:
+            Error summary string or None when no delete error is detected.
+        """
+        for message in messages:
+            if not isinstance(message, ToolMessage):
+                continue
+            content = message.content if isinstance(message.content, str) else str(message.content)
+            if MEMORY_DELETE_TOOL_NAME in content and "Error" in content:
+                return content[:200]
+        return None
+
+    def _format_memory_action_error(self, error_summary: str) -> str:
+        """Format a memory action block for delete errors."""
+        safe_summary = error_summary.replace("\n", " ").strip()
+        return "\n".join(
+            [
+                "<MEMORY_ACTION>",
+                "action=delete",
+                "status=error",
+                f"summary={safe_summary}",
+                "</MEMORY_ACTION>",
+            ]
+        )
+
+    def _get_tool_by_name(self, tool_name: str) -> Any:
+        """Return the resolved tool instance by name."""
+        for tool in self.resolved_tools:
+            if tool.name == tool_name:
+                return tool
+        raise ValueError(f"Tool '{tool_name}' not found in resolved tools.")
+
+    async def _preprocess_delete_intent(
+        self,
+        query: str | None,
+        state: dict,
+        config: dict | None,
+    ) -> dict[str, Any]:
+        """Run a pre-processing intent check for delete requests.
+
+        Args:
+            query: Latest user query.
+            state: LangGraph state containing metadata for the request.
+            config: Optional runnable configuration forwarded to the model.
+
+        Returns:
+            Normalized intent payload with intent/confidence/reason keys.
+        """
+        if not isinstance(query, str) or not query.strip():
+            return {"intent": "unknown", "confidence": "low", "reason": "empty_query"}
+
+        raw_response = await self._invoke_delete_intent_model(query, state, config)
+        return self._parse_delete_intent_payload(raw_response)
+
+    async def _invoke_delete_intent_model(
+        self,
+        query: str,
+        state: dict,
+        config: dict | None,
+    ) -> str:
+        """Invoke the configured model to classify delete intent.
+
+        Args:
+            query: User query to classify.
+            state: LangGraph state containing request metadata.
+            config: Optional runnable configuration forwarded to the model.
+
+        Returns:
+            Raw model output string, or empty string on failure.
+        """
+        instruction = self._build_delete_intent_instruction()
+        effective_event_emitter = state.get("event_emitter") or self.event_emitter
+        if self.lm_invoker is not None:
+            return await self._invoke_delete_intent_with_invoker(query, instruction, effective_event_emitter)
+
+        if isinstance(self.model, BaseChatModel):
+            return await self._invoke_delete_intent_with_chat_model(query, instruction, config)
+
+        logger.warning("Delete intent check skipped; no model configured.")
+        return ""
+
+    async def _invoke_delete_intent_with_invoker(
+        self,
+        query: str,
+        instruction: str,
+        event_emitter: Any,
+    ) -> str:
+        """Invoke delete intent check using an LM invoker."""
+        messages = convert_langchain_messages_to_gllm_messages([HumanMessage(content=query)], instruction)
+        restore_tools = self.resolved_tools if self.resolved_tools else None
+        if restore_tools is not None:
+            self.lm_invoker.set_tools([])
+        try:
+            lm_output = await self.lm_invoker.invoke(messages=messages, event_emitter=event_emitter)
+        except Exception as exc:
+            logger.warning("Delete intent check failed: %s", exc)
+            return ""
+        finally:
+            if restore_tools is not None:
+                self.lm_invoker.set_tools(restore_tools)
+
+        ai_message = convert_lm_output_to_langchain_message(lm_output)
+        return self._coerce_message_content(ai_message)
+
+    async def _invoke_delete_intent_with_chat_model(
+        self,
+        query: str,
+        instruction: str,
+        config: dict | None,
+    ) -> str:
+        """Invoke delete intent check using a LangChain chat model."""
+        prompt = [SystemMessage(content=instruction), HumanMessage(content=query)]
+        try:
+            ai_message = await self.model.ainvoke(prompt, config)
+        except Exception as exc:
+            logger.warning("Delete intent check failed: %s", exc)
+            return ""
+        return self._coerce_message_content(ai_message)
+
+    def _parse_delete_intent_payload(self, content: str) -> dict[str, Any]:
+        """Parse delete intent payload from model output."""
+        default_payload = {"intent": "unknown", "confidence": "low", "reason": "unparsed"}
+        if not isinstance(content, str) or not content.strip():
+            return default_payload
+
+        payload = self._extract_json_payload(content)
+        if not isinstance(payload, dict):
+            return default_payload
+
+        return self._normalize_delete_intent_payload(payload, default_payload)
+
+    def _extract_json_payload(self, content: str) -> dict[str, Any] | None:
+        """Extract a JSON payload from a raw string."""
+        raw_text = content.strip()
+        if raw_text.startswith("```"):
+            raw_text = raw_text.strip("`")
+            if raw_text.lower().startswith("json"):
+                raw_text = raw_text[4:].strip()
+
+        try:
+            return json.loads(raw_text)
+        except json.JSONDecodeError:
+            start = raw_text.find("{")
+            end = raw_text.rfind("}")
+            if start == -1 or end == -1 or end <= start:
+                return None
+            try:
+                return json.loads(raw_text[start : end + 1])
+            except json.JSONDecodeError:
+                return None
+
+    def _normalize_delete_intent_payload(
+        self,
+        payload: dict[str, Any],
+        default_payload: dict[str, str],
+    ) -> dict[str, Any]:
+        """Normalize payload keys and guard against invalid values."""
+        intent = str(payload.get("intent", "")).lower()
+        confidence = str(payload.get("confidence", "")).lower()
+        if intent not in {"delete", "retrieve", "unknown"}:
+            intent = "unknown"
+        if confidence not in {"high", "medium", "low"}:
+            confidence = "low"
+
+        reason = payload.get("reason")
+        if not isinstance(reason, str):
+            reason = default_payload["reason"]
+
+        return {"intent": intent, "confidence": confidence, "reason": reason}
+
+    @staticmethod
+    def _coerce_message_content(message: AIMessage) -> str:
+        """Normalize AI message content into a string."""
+        content = message.content
+        return content if isinstance(content, str) else str(content)
+
+    def _build_delete_intent_instruction(self) -> str:
+        """Return the system prompt for delete intent classification.
+
+        Design rationale:
+        - Require JSON-only output for deterministic parsing.
+        - Use intent labels (delete|retrieve|unknown) to avoid keyword false positives.
+        - Gate deletion on high confidence to keep ambiguous requests safe.
+
+        Tuning guidance:
+        - Add examples if delete intents are missed.
+        - Adjust confidence thresholds if false negatives become frequent.
+        """
+        return (
+            "You are a memory deletion intent checker. Determine whether the user is asking to "
+            "delete/forget memories stored about them. Reply with JSON only: "
+            '{"intent": "delete|retrieve|unknown", "confidence": "high|medium|low", '
+            '"reason": "short"}. '
+            "If unsure, respond with intent unknown and low confidence."
+        )
+
+    def _is_delete_intent_confirmed(self, decision: dict[str, Any] | None) -> bool:
+        """Return True when delete intent is confirmed by pre-processing."""
+        if not isinstance(decision, dict):
+            logger.warning("Delete intent check failed: decision is not a dict.")
+            return False
+        intent = decision.get("intent")
+        confidence = decision.get("confidence")
+        reason = decision.get("reason", "unknown")
+        if intent != "delete":
+            logger.info("Delete intent not confirmed: intent=%s reason=%s.", intent, reason)
+            return False
+        if confidence != "high":
+            logger.info("Delete intent not confirmed: confidence=%s reason=%s.", confidence, reason)
+            return False
+        return True
+
+    def _build_delete_confirmation_message(self, tool_call: dict[str, Any], query: str | None) -> ToolMessage:
+        """Return a ToolMessage asking for delete confirmation."""
+        summary = "Do you want me to delete the related memories?"
+        if isinstance(query, str) and query.strip():
+            trimmed = query.strip()
+            if len(trimmed) > 160:
+                trimmed = f"{trimmed[:157]}..."
+            summary = f"Do you want me to delete memories related to: '{trimmed}'?"
+        payload = {"status": "needs_confirmation", "summary": summary}
+        return ToolMessage(content=json.dumps(payload), tool_call_id=tool_call.get("id", ""))
+
     def _format_memories(self, memories: list[dict[str, Any]]) -> str:
         """Format memory hits using the underlying tool formatter.
 
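The delete path is gated by an LLM intent check that must return strict JSON (`intent`/`confidence`/`reason`), and deletion only proceeds on `intent == "delete"` with `confidence == "high"`. A minimal standalone sketch of that contract, re-implemented here for illustration (the real logic is `_parse_delete_intent_payload` / `_is_delete_intent_confirmed` in the diff above):

```python
import json


def parse_intent(raw: str) -> dict:
    """Parse the classifier's JSON reply, tolerating a ```json code fence."""
    text = raw.strip()
    if text.startswith("```"):
        text = text.strip("`")
        if text.lower().startswith("json"):
            text = text[4:].strip()
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        return {"intent": "unknown", "confidence": "low", "reason": "unparsed"}


def is_delete_confirmed(decision: dict) -> bool:
    """Deletion proceeds only on a high-confidence explicit delete intent."""
    return decision.get("intent") == "delete" and decision.get("confidence") == "high"


reply = '```json\n{"intent": "delete", "confidence": "high", "reason": "user said forget my address"}\n```'
print(is_delete_confirmed(parse_intent(reply)))  # True
```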
aip_agents/agent/langgraph_memory_enhancer_agent.pyi

@@ -2,7 +2,8 @@ from _typeshed import Incomplete
 from aip_agents.agent.langgraph_react_agent import LangGraphReactAgent as LangGraphReactAgent
 from aip_agents.agent.system_instruction_context import get_current_date_context as get_current_date_context
 from aip_agents.memory.guidance import MEM0_MEMORY_RECALL_GUIDANCE as MEM0_MEMORY_RECALL_GUIDANCE
-from aip_agents.tools.memory_search_tool import LongTermMemorySearchTool as LongTermMemorySearchTool, MEMORY_SEARCH_TOOL_NAME as MEMORY_SEARCH_TOOL_NAME, Mem0SearchTool as Mem0SearchTool
+from aip_agents.tools.memory_search_tool import LongTermMemorySearchTool as LongTermMemorySearchTool, MEMORY_DELETE_TOOL_NAME as MEMORY_DELETE_TOOL_NAME, MEMORY_SEARCH_TOOL_NAME as MEMORY_SEARCH_TOOL_NAME, Mem0DeleteTool as Mem0DeleteTool, Mem0SearchTool as Mem0SearchTool
+from aip_agents.utils.langgraph import convert_langchain_messages_to_gllm_messages as convert_langchain_messages_to_gllm_messages, convert_lm_output_to_langchain_message as convert_lm_output_to_langchain_message
 from aip_agents.utils.logger import get_logger as get_logger
 from langgraph.graph import StateGraph
 from langgraph.graph.state import CompiledStateGraph

@@ -10,7 +11,7 @@ from langgraph.graph.state import CompiledStateGraph
 logger: Incomplete
 
 class LangGraphMemoryEnhancerAgent(LangGraphReactAgent):
-    """Simplified mini-agent for automatic memory retrieval and query enhancement.
+    """Simplified mini-agent for automatic memory retrieval or deletion and query enhancement.
 
     This agent has a simple 2-node LangGraph (agent + tools) and uses existing memory
     infrastructure to enhance user queries with relevant context. It acts as a