agent-runtime-core 0.7.0__py3-none-any.whl → 0.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_runtime_core/__init__.py +108 -1
- agent_runtime_core/agentic_loop.py +254 -0
- agent_runtime_core/config.py +54 -4
- agent_runtime_core/config_schema.py +307 -0
- agent_runtime_core/interfaces.py +106 -0
- agent_runtime_core/json_runtime.py +509 -0
- agent_runtime_core/llm/__init__.py +80 -7
- agent_runtime_core/llm/anthropic.py +133 -12
- agent_runtime_core/llm/models_config.py +180 -0
- agent_runtime_core/memory/__init__.py +70 -0
- agent_runtime_core/memory/manager.py +554 -0
- agent_runtime_core/memory/mixin.py +294 -0
- agent_runtime_core/multi_agent.py +569 -0
- agent_runtime_core/persistence/__init__.py +2 -0
- agent_runtime_core/persistence/file.py +277 -0
- agent_runtime_core/rag/__init__.py +65 -0
- agent_runtime_core/rag/chunking.py +224 -0
- agent_runtime_core/rag/indexer.py +253 -0
- agent_runtime_core/rag/retriever.py +261 -0
- agent_runtime_core/runner.py +193 -15
- agent_runtime_core/tool_calling_agent.py +88 -130
- agent_runtime_core/tools.py +179 -0
- agent_runtime_core/vectorstore/__init__.py +193 -0
- agent_runtime_core/vectorstore/base.py +138 -0
- agent_runtime_core/vectorstore/embeddings.py +242 -0
- agent_runtime_core/vectorstore/sqlite_vec.py +328 -0
- agent_runtime_core/vectorstore/vertex.py +295 -0
- {agent_runtime_core-0.7.0.dist-info → agent_runtime_core-0.7.1.dist-info}/METADATA +202 -1
- agent_runtime_core-0.7.1.dist-info/RECORD +57 -0
- agent_runtime_core-0.7.0.dist-info/RECORD +0 -39
- {agent_runtime_core-0.7.0.dist-info → agent_runtime_core-0.7.1.dist-info}/WHEEL +0 -0
- {agent_runtime_core-0.7.0.dist-info → agent_runtime_core-0.7.1.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,554 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Memory manager for cross-conversation memory.
|
|
3
|
+
|
|
4
|
+
Handles extraction of memories from conversations and recall of relevant
|
|
5
|
+
memories for new conversations.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import logging
|
|
10
|
+
from dataclasses import dataclass, field
|
|
11
|
+
from datetime import datetime
|
|
12
|
+
from typing import Any, Optional
|
|
13
|
+
from uuid import UUID, uuid4
|
|
14
|
+
|
|
15
|
+
from agent_runtime_core.interfaces import LLMClient, Message
|
|
16
|
+
from agent_runtime_core.persistence.base import (
|
|
17
|
+
Fact,
|
|
18
|
+
FactType,
|
|
19
|
+
KnowledgeStore,
|
|
20
|
+
Scope,
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger(__name__)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
# =============================================================================
|
|
27
|
+
# Configuration
|
|
28
|
+
# =============================================================================
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@dataclass
class MemoryConfig:
    """Configuration for memory extraction and recall.

    Tunables for how often memories are extracted from a conversation,
    how many are recalled into context, and where they are stored.
    """

    # Extraction settings
    extract_after_messages: int = 4  # Extract after this many messages
    extract_on_conversation_end: bool = True  # Always extract at end
    max_facts_per_extraction: int = 5  # Max facts to extract at once

    # Recall settings
    max_memories_to_recall: int = 10  # Max memories to include in context
    relevance_threshold: float = 0.5  # Min relevance score (0-1)

    # What to extract
    # NOTE(review): these three flags are not consumed by MemoryManager in
    # this module — presumably read by callers/mixins; verify before relying
    # on them to actually gate extraction.
    extract_user_facts: bool = True  # Name, preferences, etc.
    extract_project_facts: bool = True  # Project-specific info
    extract_preferences: bool = True  # User preferences

    # Storage
    scope: Scope = Scope.GLOBAL  # Where to store memories

    # Model settings (optional override)
    # NOTE(review): not used by MemoryManager in this module — the manager
    # calls its injected LLM client directly; confirm where this is honored.
    extraction_model: Optional[str] = None  # Model for extraction
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
# =============================================================================
|
|
57
|
+
# Data Classes
|
|
58
|
+
# =============================================================================
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
@dataclass
class ExtractedMemory:
    """A memory extracted from a conversation by the extraction LLM call."""

    key: str  # Unique identifier (e.g., "user_name", "preferred_language")
    value: Any  # The memory content
    fact_type: FactType  # Type of fact
    confidence: float = 1.0  # How confident we are (0-1)
    # NOTE(review): never populated by MemoryManager.extract_memories in this
    # module — verify whether any caller sets it before depending on it.
    source_message: Optional[str] = None  # The message it came from
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
@dataclass
class RecalledMemory:
    """A memory recalled for use in a conversation."""

    key: str  # Bare key (any "user:<id>:" prefix is stripped before recall)
    value: Any  # The stored memory content
    fact_type: FactType  # Type of fact, carried over from the stored Fact
    relevance: float = 1.0  # How relevant to current context (0-1)
    created_at: Optional[datetime] = None  # Creation time of the backing Fact
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
# =============================================================================
|
|
84
|
+
# Prompts
|
|
85
|
+
# =============================================================================
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
# System prompt for the extraction call: instructs the model to emit a JSON
# array of durable facts, each with key/value/type/confidence fields.
EXTRACTION_SYSTEM_PROMPT = """You are a memory extraction system. Your job is to identify important facts, preferences, and information from conversations that should be remembered for future interactions.

Extract ONLY information that would be useful to remember across conversations, such as:
- User's name, role, or identity
- User preferences (communication style, formatting, tools, etc.)
- Important project details (tech stack, constraints, goals)
- Recurring topics or interests
- Explicit requests to remember something

Do NOT extract:
- Temporary or session-specific information
- Information that changes frequently
- Sensitive information (passwords, API keys, etc.)
- Generic conversation content

For each fact, provide:
- key: A unique, descriptive identifier (snake_case, e.g., "user_name", "preferred_language")
- value: The actual information (can be string, number, list, or object)
- type: One of "user", "project", "preference", "context"
- confidence: How confident you are this is correct (0.0 to 1.0)

Respond with a JSON array of facts. If no facts should be extracted, respond with an empty array [].
"""

# User prompt template for extraction; {conversation} is filled via
# str.format, so the literal JSON braces in the example are doubled ({{ }}).
EXTRACTION_USER_PROMPT = """Extract memorable facts from this conversation:

{conversation}

Respond with a JSON array of facts to remember. Example format:
[
{{"key": "user_name", "value": "Alice", "type": "user", "confidence": 1.0}},
{{"key": "preferred_theme", "value": "dark", "type": "preference", "confidence": 0.9}}
]

If nothing should be remembered, respond with: []"""


# System prompt for the recall/ranking call: asks the model to score each
# stored memory's relevance to the current context on a 0.0–1.0 scale.
RECALL_SYSTEM_PROMPT = """You are a memory relevance system. Given a set of stored memories and a current conversation context, determine which memories are relevant.

For each memory, assign a relevance score from 0.0 to 1.0:
- 1.0: Directly relevant and should definitely be used
- 0.7-0.9: Likely relevant, good to include
- 0.4-0.6: Possibly relevant, include if space allows
- 0.1-0.3: Tangentially related
- 0.0: Not relevant at all

Respond with a JSON array of objects with "key" and "relevance" fields.
"""

# User prompt template for recall; {context} and {memories} are filled via
# str.format (literal braces in the example are doubled).
RECALL_USER_PROMPT = """Current conversation context:
{context}

Available memories:
{memories}

Which memories are relevant? Respond with JSON array:
[{{"key": "memory_key", "relevance": 0.9}}, ...]"""
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
# =============================================================================
|
|
148
|
+
# Memory Manager
|
|
149
|
+
# =============================================================================
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
class MemoryManager:
    """
    Manages cross-conversation memory extraction and recall.

    Uses an LLM to extract memorable facts from conversations (persisted as
    ``Fact`` records in a ``KnowledgeStore``) and to rank/recall the relevant
    ones when a new conversation starts. All LLM-backed operations are
    best-effort: failures degrade to empty results rather than raising.
    """

    def __init__(
        self,
        knowledge_store: KnowledgeStore,
        llm_client: LLMClient,
        config: Optional[MemoryConfig] = None,
        user_id: Optional[str] = None,
    ):
        """
        Initialize the memory manager.

        Args:
            knowledge_store: Store for persisting memories (facts)
            llm_client: LLM client for extraction/recall
            config: Memory configuration (defaults to MemoryConfig())
            user_id: Optional user ID for scoping memories
        """
        self._store = knowledge_store
        self._llm = llm_client
        self._config = config or MemoryConfig()
        self._user_id = user_id

    def _get_memory_key(self, key: str, user_id: Optional[str] = None) -> str:
        """Return the full storage key, including a ``user:<id>:`` prefix.

        Args:
            key: Bare memory key.
            user_id: Per-call user id override; falls back to the instance
                user id when not given.

        FIX: previously this honored only ``self._user_id``, so a per-call
        ``user_id`` override (as accepted by extract/get/set/delete) produced
        an UNPREFIXED key when the instance user id was None — recall and
        clear filter on the prefix and could never find those facts.
        """
        uid = user_id or self._user_id
        if uid:
            return f"user:{uid}:{key}"
        return key

    @staticmethod
    def _strip_code_fence(content: str) -> str:
        """Strip a surrounding markdown code fence (``` or ```json) from an
        LLM reply and return the trimmed payload.

        Extracted helper: this logic was duplicated verbatim in both
        extract_memories and recall_memories.
        """
        if "```" in content:
            content = content.split("```")[1]
            if content.startswith("json"):
                content = content[4:]
        return content.strip()

    def _format_conversation(self, messages: list[Message]) -> str:
        """Render messages as newline-joined ``ROLE: content`` lines for the
        extraction/recall prompts."""
        lines = []
        for msg in messages:
            role = msg.get("role", "unknown")
            content = msg.get("content", "")
            if isinstance(content, list):
                # Multi-part content: keep only the text parts.
                content = " ".join(
                    p.get("text", "") for p in content if p.get("type") == "text"
                )
            lines.append(f"{role.upper()}: {content}")
        return "\n".join(lines)

    def _parse_fact_type(self, type_str: str) -> FactType:
        """Map a type string from the LLM to a FactType (CUSTOM if unknown)."""
        type_map = {
            "user": FactType.USER,
            "project": FactType.PROJECT,
            "preference": FactType.PREFERENCE,
            "context": FactType.CONTEXT,
        }
        return type_map.get(type_str.lower(), FactType.CUSTOM)

    async def extract_memories(
        self,
        messages: list[Message],
        user_id: Optional[str] = None,
        save: bool = True,
    ) -> list[ExtractedMemory]:
        """
        Extract memorable facts from a conversation.

        Args:
            messages: Conversation messages to analyze
            user_id: Optional user ID (overrides instance user_id)
            save: Whether to save extracted memories to store

        Returns:
            List of extracted memories. Best-effort: returns [] if the LLM
            call fails or its output cannot be parsed as a JSON list.
        """
        if not messages:
            return []

        effective_user_id = user_id or self._user_id
        conversation_text = self._format_conversation(messages)

        # Call the LLM to extract facts. Any failure here (transport error,
        # malformed JSON) degrades to "no memories" rather than propagating.
        try:
            response = await self._llm.generate(
                messages=[
                    {"role": "system", "content": EXTRACTION_SYSTEM_PROMPT},
                    {"role": "user", "content": EXTRACTION_USER_PROMPT.format(
                        conversation=conversation_text
                    )},
                ],
                temperature=0.1,  # Low temperature for consistent extraction
            )
            content = response.message.get("content", "[]")
            facts_data = json.loads(self._strip_code_fence(content))
        except Exception as e:
            # FIX: was `except (json.JSONDecodeError, Exception)` — the tuple
            # was redundant because JSONDecodeError subclasses Exception.
            logger.warning("Failed to extract memories: %s", e)
            return []

        if not isinstance(facts_data, list):
            # The model occasionally returns a bare object; treat as no facts.
            logger.warning("Memory extraction returned non-list JSON; ignoring")
            return []

        # Convert to ExtractedMemory objects, skipping malformed entries
        # instead of letting a bad item crash the whole extraction.
        extracted = []
        for fact_data in facts_data[:self._config.max_facts_per_extraction]:
            if not isinstance(fact_data, dict):
                continue
            try:
                confidence = float(fact_data.get("confidence", 1.0))
            except (TypeError, ValueError):
                confidence = 1.0
            memory = ExtractedMemory(
                key=fact_data.get("key", ""),
                value=fact_data.get("value"),
                fact_type=self._parse_fact_type(str(fact_data.get("type", "custom"))),
                confidence=confidence,
            )
            if memory.key:  # Only include if key is set
                extracted.append(memory)

        # Save to store if requested
        if save and extracted:
            for memory in extracted:
                await self._save_memory(memory, effective_user_id)

        logger.info("Extracted %d memories from conversation", len(extracted))
        return extracted

    async def _save_memory(
        self,
        memory: ExtractedMemory,
        user_id: Optional[str] = None,
    ) -> None:
        """Upsert an extracted memory into the knowledge store.

        Updates the existing Fact in place (preserving its id) when one with
        the same full key already exists in the configured scope.
        """
        effective_user_id = user_id or self._user_id
        # Pass the effective user id down so a per-call override yields the
        # same "user:<id>:" prefix that recall_memories filters on.
        full_key = self._get_memory_key(memory.key, effective_user_id)

        # Check if fact already exists
        existing = await self._store.get_fact_by_key(full_key, self._config.scope)

        if existing:
            # Update existing fact
            existing.value = memory.value
            existing.confidence = memory.confidence
            # NOTE(review): naive UTC timestamp; utcnow() is deprecated in
            # 3.12 — switch to datetime.now(timezone.utc) once the store's
            # timestamps are confirmed tz-aware.
            existing.updated_at = datetime.utcnow()
            await self._store.save_fact(existing, self._config.scope)
        else:
            # Create new fact
            fact = Fact(
                id=uuid4(),
                key=full_key,
                value=memory.value,
                fact_type=memory.fact_type,
                confidence=memory.confidence,
                source=f"user:{effective_user_id}" if effective_user_id else None,
                metadata={"user_id": effective_user_id} if effective_user_id else {},
            )
            await self._store.save_fact(fact, self._config.scope)

    async def recall_memories(
        self,
        query: Optional[str] = None,
        messages: Optional[list[Message]] = None,
        user_id: Optional[str] = None,
        max_memories: Optional[int] = None,
    ) -> list[RecalledMemory]:
        """
        Recall relevant memories for a conversation.

        Args:
            query: Optional query to find relevant memories
            messages: Optional conversation context
            user_id: Optional user ID (overrides instance user_id)
            max_memories: Max memories to return (overrides config)

        Returns:
            List of relevant memories, sorted by descending relevance. When
            neither query nor messages is given, all memories (up to the
            limit) are returned with relevance 1.0. If LLM ranking fails,
            every memory falls back to relevance 1.0.
        """
        effective_user_id = user_id or self._user_id
        max_count = max_memories or self._config.max_memories_to_recall

        # Get all facts for this scope; over-fetch so prefix filtering and
        # relevance thresholding still leave enough candidates.
        all_facts = await self._store.list_facts(
            scope=self._config.scope,
            limit=100,
        )

        # Filter to this user's facts if a user id is in effect.
        if effective_user_id:
            prefix = f"user:{effective_user_id}:"
            all_facts = [f for f in all_facts if f.key.startswith(prefix)]

        if not all_facts:
            return []

        # No context to rank against: return everything up to the limit.
        if not query and not messages:
            return [
                RecalledMemory(
                    key=self._strip_user_prefix(f.key, effective_user_id),
                    value=f.value,
                    fact_type=f.fact_type,
                    relevance=1.0,
                    created_at=f.created_at,
                )
                for f in all_facts[:max_count]
            ]

        # Use the LLM to score each memory's relevance against the context.
        context = query or self._format_conversation(messages or [])
        memories_text = "\n".join(
            f"- {self._strip_user_prefix(f.key, effective_user_id)}: {f.value}"
            for f in all_facts
        )

        try:
            response = await self._llm.generate(
                messages=[
                    {"role": "system", "content": RECALL_SYSTEM_PROMPT},
                    {"role": "user", "content": RECALL_USER_PROMPT.format(
                        context=context,
                        memories=memories_text,
                    )},
                ],
                temperature=0.1,
            )
            content = response.message.get("content", "[]")
            relevance_data = json.loads(self._strip_code_fence(content))
            relevance_map = {r["key"]: float(r["relevance"]) for r in relevance_data}
        except Exception as e:
            # FIX: was `except (json.JSONDecodeError, Exception)` (redundant
            # tuple). Any ranking failure falls back to "everything relevant".
            logger.warning("Failed to rank memories: %s", e)
            relevance_map = {
                self._strip_user_prefix(f.key, effective_user_id): 1.0
                for f in all_facts
            }

        # Build recalled memories, keeping only those above the threshold.
        recalled = []
        for fact in all_facts:
            stripped_key = self._strip_user_prefix(fact.key, effective_user_id)
            relevance = relevance_map.get(stripped_key, 0.0)

            if relevance >= self._config.relevance_threshold:
                recalled.append(RecalledMemory(
                    key=stripped_key,
                    value=fact.value,
                    fact_type=fact.fact_type,
                    relevance=relevance,
                    created_at=fact.created_at,
                ))

        # Sort by relevance and limit
        recalled.sort(key=lambda m: m.relevance, reverse=True)
        return recalled[:max_count]

    def _strip_user_prefix(self, key: str, user_id: Optional[str]) -> str:
        """Strip the ``user:<id>:`` prefix from a key, if present."""
        if user_id:
            return key.removeprefix(f"user:{user_id}:")
        return key

    async def get_memory(
        self,
        key: str,
        user_id: Optional[str] = None,
    ) -> Optional[Any]:
        """
        Get a specific memory by key.

        Args:
            key: Memory key
            user_id: Optional user ID (overrides instance user_id)

        Returns:
            Memory value, or None when no such fact exists
        """
        effective_user_id = user_id or self._user_id
        full_key = self._get_memory_key(key, effective_user_id)

        fact = await self._store.get_fact_by_key(full_key, self._config.scope)
        return fact.value if fact else None

    async def set_memory(
        self,
        key: str,
        value: Any,
        fact_type: FactType = FactType.CUSTOM,
        user_id: Optional[str] = None,
    ) -> None:
        """
        Manually set a memory (upsert, confidence fixed at 1.0).

        Args:
            key: Memory key
            value: Memory value
            fact_type: Type of fact
            user_id: Optional user ID (overrides instance user_id)
        """
        memory = ExtractedMemory(
            key=key,
            value=value,
            fact_type=fact_type,
            confidence=1.0,
        )
        await self._save_memory(memory, user_id or self._user_id)

    async def delete_memory(
        self,
        key: str,
        user_id: Optional[str] = None,
    ) -> bool:
        """
        Delete a specific memory.

        Args:
            key: Memory key
            user_id: Optional user ID (overrides instance user_id)

        Returns:
            True if the memory existed and was deleted
        """
        effective_user_id = user_id or self._user_id
        full_key = self._get_memory_key(key, effective_user_id)

        fact = await self._store.get_fact_by_key(full_key, self._config.scope)
        if fact:
            return await self._store.delete_fact(fact.id, self._config.scope)
        return False

    async def clear_memories(
        self,
        user_id: Optional[str] = None,
    ) -> int:
        """
        Clear all memories for a user (or every fact in scope when no user
        id is in effect).

        Args:
            user_id: Optional user ID (overrides instance user_id)

        Returns:
            Number of memories deleted
        """
        effective_user_id = user_id or self._user_id

        all_facts = await self._store.list_facts(scope=self._config.scope, limit=1000)

        if effective_user_id:
            prefix = f"user:{effective_user_id}:"
            facts_to_delete = [f for f in all_facts if f.key.startswith(prefix)]
        else:
            facts_to_delete = all_facts

        count = 0
        for fact in facts_to_delete:
            if await self._store.delete_fact(fact.id, self._config.scope):
                count += 1

        return count

    def format_memories_for_prompt(
        self,
        memories: list[RecalledMemory],
        format_style: str = "list",
    ) -> str:
        """
        Format recalled memories for inclusion in a prompt.

        Args:
            memories: List of recalled memories
            format_style: "list", "prose", or "structured"

        Returns:
            Formatted string, or "" for empty input / unknown style
        """
        if not memories:
            return ""

        if format_style == "list":
            lines = ["Remembered information about the user:"]
            for m in memories:
                lines.append(f"- {m.key}: {m.value}")
            return "\n".join(lines)

        elif format_style == "prose":
            facts = [f"{m.key} is {m.value}" for m in memories]
            return "What I remember: " + "; ".join(facts) + "."

        elif format_style == "structured":
            data = {m.key: m.value for m in memories}
            return f"User context: {json.dumps(data)}"

        return ""
|