outomem 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- outomem-0.1.0/PKG-INFO +21 -0
- outomem-0.1.0/README.md +82 -0
- outomem-0.1.0/outomem/__init__.py +7 -0
- outomem-0.1.0/outomem/core.py +428 -0
- outomem-0.1.0/outomem/layers.py +519 -0
- outomem-0.1.0/outomem/neo4j_layers.py +438 -0
- outomem-0.1.0/outomem/prompts.py +215 -0
- outomem-0.1.0/outomem/providers/__init__.py +31 -0
- outomem-0.1.0/outomem/providers/anthropic_client.py +35 -0
- outomem-0.1.0/outomem/providers/base.py +14 -0
- outomem-0.1.0/outomem/providers/google_client.py +42 -0
- outomem-0.1.0/outomem/providers/openai_client.py +61 -0
- outomem-0.1.0/outomem/utils.py +204 -0
- outomem-0.1.0/outomem.egg-info/PKG-INFO +21 -0
- outomem-0.1.0/outomem.egg-info/SOURCES.txt +25 -0
- outomem-0.1.0/outomem.egg-info/dependency_links.txt +1 -0
- outomem-0.1.0/outomem.egg-info/requires.txt +17 -0
- outomem-0.1.0/outomem.egg-info/top_level.txt +1 -0
- outomem-0.1.0/pyproject.toml +26 -0
- outomem-0.1.0/setup.cfg +4 -0
- outomem-0.1.0/tests/test_get_context.py +0 -0
- outomem-0.1.0/tests/test_layers.py +0 -0
- outomem-0.1.0/tests/test_neo4j_layers.py +0 -0
- outomem-0.1.0/tests/test_prompts.py +119 -0
- outomem-0.1.0/tests/test_providers.py +46 -0
- outomem-0.1.0/tests/test_remember.py +0 -0
- outomem-0.1.0/tests/test_utils.py +145 -0
outomem-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: outomem
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: The ultimate memory system library for AI agents.
|
|
5
|
+
Requires-Python: >=3.10
|
|
6
|
+
Requires-Dist: lancedb>=0.5.0
|
|
7
|
+
Requires-Dist: fastembed>=0.3.0
|
|
8
|
+
Requires-Dist: anthropic>=0.20.0
|
|
9
|
+
Requires-Dist: openai>=1.0.0
|
|
10
|
+
Requires-Dist: google-genai>=1.0.0
|
|
11
|
+
Requires-Dist: pydantic>=2.0.0
|
|
12
|
+
Requires-Dist: tiktoken>=0.5.0
|
|
13
|
+
Requires-Dist: python-dateutil>=2.8.0
|
|
14
|
+
Requires-Dist: neo4j>=5.0.0
|
|
15
|
+
Requires-Dist: pyarrow>=14.0.0
|
|
16
|
+
Requires-Dist: numpy>=1.24.0
|
|
17
|
+
Provides-Extra: dev
|
|
18
|
+
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
19
|
+
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
|
|
20
|
+
Requires-Dist: pytest-mock>=3.10.0; extra == "dev"
|
|
21
|
+
Requires-Dist: ruff>=0.1.0; extra == "dev"
|
outomem-0.1.0/README.md
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
# outomem
|
|
2
|
+
|
|
3
|
+
<img src="logo.svg" alt="outomem logo" width="400">
|
|
4
|
+
|
|
5
|
+
Outomem is the ultimate memory system library for AI agents. This tool manages user preferences, finds contradictions, and builds context for agents. The system organizes data into four layers: personalization, long term, temporal sessions, and raw facts. It tracks sentiment and detects when a user changes their mind by looking for polarity flips. Memory strength decays over time to keep context fresh. We built it with a Korean first design approach.
|
|
6
|
+
|
|
7
|
+
## Installation
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
pip install outomem
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
Note: You need external database instances running.
|
|
14
|
+
|
|
15
|
+
## Quick Start
|
|
16
|
+
|
|
17
|
+
```python
|
|
18
|
+
from outomem import Outomem
|
|
19
|
+
|
|
20
|
+
# Initialize the memory system
|
|
21
|
+
memory = Outomem(
    provider="openai",
    base_url="https://api.openai.com/v1",
    api_key="your-api-key",
    model="gpt-4o",
    embed_api_url="https://api.openai.com/v1/embeddings",
    embed_api_key="your-api-key",
    embed_model="text-embedding-3-small",
    neo4j_uri="bolt://localhost:7687",
    neo4j_user="neo4j",
    neo4j_password="password",
    db_path="./outomem.lance",
    style_path="./style.md",
)
|
|
39
|
+
|
|
40
|
+
# Store a new memory
|
|
41
|
+
memory.remember("I prefer dark mode for all my applications.")
|
|
42
|
+
|
|
43
|
+
# Get context for a query
|
|
44
|
+
context = memory.get_context("What are the user's UI preferences?")
|
|
45
|
+
print(context)
|
|
46
|
+
```
|
|
47
|
+
|
|
48
|
+
## Philosophy
|
|
49
|
+
|
|
50
|
+
See [Design Philosophy](ai-docs/philosophy.md).
|
|
51
|
+
|
|
52
|
+
## Architecture Overview
|
|
53
|
+
|
|
54
|
+
Outomem uses a layered approach to manage different types of information. The vector store handles semantic retrieval while the graph store manages complex relationships between facts. This hybrid setup allows for both fast similarity search and deep graph traversal.
|
|
55
|
+
|
|
56
|
+
See [Architecture](ai-docs/architecture.md) for more details.
|
|
57
|
+
|
|
58
|
+
## API Overview
|
|
59
|
+
|
|
60
|
+
| Class | Description |
|
|
61
|
+
| :--- | :--- |
|
|
62
|
+
| Outomem | Main API for managing agent memory and context. |
|
|
63
|
+
| LayerManager | Handles vector storage and retrieval. |
|
|
64
|
+
| Neo4jLayerManager | Manages graph database operations for relational facts. |
|
|
65
|
+
|
|
66
|
+
## Documentation Index
|
|
67
|
+
|
|
68
|
+
### Core Concepts
|
|
69
|
+
- [Design Philosophy](ai-docs/philosophy.md)
|
|
70
|
+
- [Architecture](ai-docs/architecture.md)
|
|
71
|
+
|
|
72
|
+
### Project Management
|
|
73
|
+
- [Governance](ai-docs/GOVERNANCE.md)
|
|
74
|
+
|
|
75
|
+
## Requirements
|
|
76
|
+
|
|
77
|
+
- Python >= 3.10
|
|
78
|
+
- External database instances (configurable)
|
|
79
|
+
|
|
80
|
+
## License
|
|
81
|
+
|
|
82
|
+
MIT
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
"""outomem - AI agent memory system with layered LanceDB + Neo4j storage."""
|
|
2
|
+
|
|
3
|
+
from outomem.core import Outomem
|
|
4
|
+
from outomem.layers import LayerManager
|
|
5
|
+
from outomem.neo4j_layers import Neo4jLayerManager
|
|
6
|
+
|
|
7
|
+
__all__ = ["Outomem", "LayerManager", "Neo4jLayerManager"]
|
|
@@ -0,0 +1,428 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
from outomem.layers import LayerManager
|
|
6
|
+
from outomem.neo4j_layers import Neo4jLayerManager
|
|
7
|
+
from outomem.prompts import (
|
|
8
|
+
get_consolidation_prompt,
|
|
9
|
+
get_context_synthesis_prompt,
|
|
10
|
+
get_extraction_prompt,
|
|
11
|
+
)
|
|
12
|
+
from outomem.providers import LLMProvider, create_provider
|
|
13
|
+
from outomem.utils import (
|
|
14
|
+
count_tokens,
|
|
15
|
+
format_conversation,
|
|
16
|
+
load_style_file,
|
|
17
|
+
safe_json_parse,
|
|
18
|
+
truncate_to_token_limit,
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
# Keyword lists used by Outomem._detect_sentiment for naive substring-based
# sentiment scoring (Korean + English; matched against lowercased text).
#
# Fix: "싫어" (Korean "dislike/hate") was previously listed in BOTH tuples.
# Because _detect_sentiment counts matches per list, any text containing it
# scored +1 positive and +1 negative — a guaranteed tie, which falls through
# to "neutral" and disables the polarity-flip (contradiction) detection the
# library is built around. It belongs only in NEGATIVE_KEYWORDS.
POSITIVE_KEYWORDS = (
    "좋아",
    "사랑",
    "최고",
    "잘",
    "재밌",
    "행복",
    "짱",
    "good",
    "like",
    "love",
    "best",
    "great",
    "awesome",
    "amazing",
    "fantastic",
    "wonderful",
    "excellent",
    "prefer",
)
NEGATIVE_KEYWORDS = (
    "싫어",
    "可恶",
    "bad",
    "hate",
    "dislike",
    "worst",
    "terrible",
    "awful",
    "horrible",
    "disgusting",
    "annoying",
)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class Outomem:
    """Facade over the layered agent-memory stores.

    Every personalization/temporal write is mirrored into two backends:
    a ``LayerManager`` (LanceDB vector store) and a ``Neo4jLayerManager``
    (graph store). ``remember()`` runs an LLM extraction prompt over a
    conversation and files the extracted facts into the layers;
    ``get_context()`` retrieves relevant memories and asks the LLM to
    synthesize them into a context string.
    """

    # LLM client used for the extraction / consolidation / synthesis prompts.
    _provider: LLMProvider
    # Vector-store layers ("personalization", "long_term",
    # "temporal_sessions", "raw_facts" — see get_context()).
    _lancedb: LayerManager
    # Graph-store counterpart, kept in sync with _lancedb on most writes.
    _neo4j: Neo4jLayerManager
    # Style text loaded from style_path; passed into every prompt builder.
    _style: str

    def __init__(
        self,
        provider: str,
        base_url: str,
        api_key: str,
        model: str,
        embed_api_url: str,
        embed_api_key: str,
        embed_model: str,
        neo4j_uri: str,
        neo4j_user: str,
        neo4j_password: str,
        db_path: str = "./outomem.lance",
        style_path: str = "./style.md",
    ) -> None:
        """Wire up the LLM provider, both storage backends, and style text.

        Args:
            provider: Provider name understood by ``create_provider``.
            base_url / api_key / model: Chat-completion endpoint settings.
            embed_api_url / embed_api_key / embed_model: Embedding endpoint
                settings; the URL is POSTed to verbatim (see
                ``_create_api_embed_fn``), so it must be the full
                embeddings endpoint.
            neo4j_uri / neo4j_user / neo4j_password: Graph store connection.
            db_path: LanceDB database path.
            style_path: File whose contents become ``self._style``.
        """
        self._provider = create_provider(provider, base_url, api_key, model)
        embed_fn = self._create_api_embed_fn(embed_api_url, embed_api_key, embed_model)
        self._lancedb = LayerManager(db_path, embed_fn=embed_fn)
        self._neo4j = Neo4jLayerManager(
            uri=neo4j_uri,
            auth=(neo4j_user, neo4j_password),
        )
        self._style = load_style_file(style_path)

    def _create_api_embed_fn(self, api_url: str, api_key: str, model: str):
        """Build a batch embedding function for an OpenAI-compatible endpoint.

        Returns a closure mapping a list of texts to a list of embedding
        vectors via one HTTP POST per call.
        """
        # Imported lazily so the dependency is only required when an
        # HTTP embedding backend is actually constructed.
        import requests

        def embed_fn(texts: list[str]) -> list[list[float]]:
            # POSTs to api_url as-is — no path is appended, so the caller
            # must supply the full embeddings endpoint URL.
            response = requests.post(
                api_url,
                headers={
                    "Authorization": f"Bearer {api_key}",
                    "Content-Type": "application/json",
                },
                json={"input": texts, "model": model},
                timeout=30,
            )
            response.raise_for_status()
            data = response.json()
            # OpenAI-style payload: {"data": [{"embedding": [...]}, ...]}.
            if "data" in data:
                return [item["embedding"] for item in data["data"]]
            # Fallback for providers returning {"embeddings": [...]};
            # defaults to a single empty vector when neither key exists.
            return data.get("embeddings", [[]])

        return embed_fn

    def _detect_sentiment(self, text: str) -> str:
        """Classify *text* as "positive"/"negative"/"neutral" by keyword count.

        NOTE(review): matching is plain substring containment on the
        lowercased text, so e.g. "dislike" also matches "like"; ties
        (including zero matches) fall through to "neutral" — confirm this
        is acceptable for the polarity-flip detection that depends on it.
        """
        text_lower = text.lower()
        pos_count = sum(1 for kw in POSITIVE_KEYWORDS if kw in text_lower)
        neg_count = sum(1 for kw in NEGATIVE_KEYWORDS if kw in text_lower)
        if pos_count > neg_count:
            return "positive"
        elif neg_count > pos_count:
            return "negative"
        return "neutral"

    def _is_contradictory(self, sentiment1: str, sentiment2: str) -> bool:
        """True when both sentiments are non-neutral and differ (a polarity flip)."""
        return (
            sentiment1 != "neutral"
            and sentiment2 != "neutral"
            and sentiment1 != sentiment2
        )

    def _recalculate_strengths(self) -> None:
        """Refresh memory-strength (decay) values in both backends."""
        self._lancedb.recalculate_all_strengths()
        self._neo4j.recalculate_all_strengths()

    def remember(self, conversation: list[dict[str, str]] | str) -> None:
        """Extract facts from *conversation* and file them into the layers.

        Pipeline: normalize the conversation → LLM extraction prompt →
        parse the JSON reply into "personal"/"factual"/"temporal" lists →
        dedupe/contradict/store each fact → archive raw facts → run an
        LLM consolidation pass when duplicates were seen. Silently returns
        on empty input or an unparseable LLM reply.
        """
        conv_list = format_conversation(conversation)
        if not conv_list:
            return

        conv_text = self._format_conv_for_llm(conv_list)

        sys_prompt, user_prompt = get_extraction_prompt(conv_text, self._style)
        raw_response = self._provider.complete(user_prompt, sys_prompt)
        parsed = safe_json_parse(raw_response)
        # Best-effort: a malformed extraction reply aborts the whole ingest.
        if not isinstance(parsed, dict):
            return

        # NOTE(review): reaches into LayerManager's private API for the
        # session id — consider promoting _generate_session_id to public.
        session_id = self._lancedb._generate_session_id()

        personal = parsed.get("personal", [])
        factual = parsed.get("factual", [])
        temporal = parsed.get("temporal", [])

        # Set when an incoming personal fact matched an existing one;
        # gates the consolidation pass at the end of this method.
        duplicates_found = False

        for fact in personal:
            if not isinstance(fact, str) or not fact.strip():
                continue
            sentiment = self._detect_sentiment(fact)
            similar = self._lancedb.find_active_similar_personalizations(
                fact, threshold=0.85
            )
            if similar:
                existing = similar[0]
                if self._is_contradictory(existing["sentiment"], sentiment):
                    # Re-query at a stricter 0.95 threshold: the polarity
                    # flip is only recorded when a near-duplicate is active.
                    # NOTE(review): this re-embeds the same fact matched at
                    # 0.85 above — confirm the two-threshold intent.
                    existing_active = (
                        self._lancedb.find_active_similar_personalizations(
                            fact, threshold=0.95
                        )
                    )
                    if existing_active:
                        # Retire the old preference in both stores, then log
                        # the change as a temporal "preference_change" event.
                        self._lancedb.deactivate_personalization(existing["id"])
                        self._neo4j.deactivate_personalization(existing["id"])
                        change_event_content = (
                            f"취향 변화: {existing['content']} → {fact}"
                        )
                        self._lancedb.add_temporal(
                            session_id=session_id,
                            event_type="preference_change",
                            content=change_event_content,
                            metadata="{}",  # empty JSON object, stored as a string
                            related_personalization_id=existing["id"],
                            old_content=existing["content"],
                            new_content=fact,
                        )
                        self._neo4j.add_temporal(
                            session_id=session_id,
                            event_type="preference_change",
                            content=change_event_content,
                            metadata="{}",
                            related_personalization_id=existing["id"],
                            old_content=existing["content"],
                            new_content=fact,
                        )
                        # The replacement starts at 0.8 strength (below the
                        # 1.0 of a brand-new fact) and links back to the
                        # contradicted entry.
                        self._lancedb.add_personalization(
                            content=fact,
                            category="preference",
                            strength=0.8,
                            sentiment=sentiment,
                            contradiction_with=existing["id"],
                        )
                        fact_vector = self._compute_embedding(fact)
                        self._neo4j.add_personalization(
                            content=fact,
                            category="preference",
                            strength=0.8,
                            sentiment=sentiment,
                            contradiction_with=existing["id"],
                            vector=fact_vector,
                        )
                        duplicates_found = True
                    else:
                        # Contradictory but no >=0.95 active match: treat as
                        # reinforcement of the 0.85 match instead.
                        # NOTE(review): unlike the branches around it, this
                        # one does NOT set duplicates_found — confirm.
                        self._lancedb.boost_personalization_strength(
                            existing["id"], boost=0.15
                        )
                        self._neo4j.boost_personalization_strength(
                            existing["id"], boost=0.15
                        )
                else:
                    # Same-polarity near-duplicate: reinforce the existing
                    # memory in both stores.
                    self._lancedb.boost_personalization_strength(
                        existing["id"], boost=0.15
                    )
                    self._neo4j.boost_personalization_strength(
                        existing["id"], boost=0.15
                    )
                    duplicates_found = True
            else:
                # Genuinely new preference: store at full strength.
                self._lancedb.add_personalization(
                    content=fact,
                    category="preference",
                    strength=1.0,
                    sentiment=sentiment,
                )
                fact_vector = self._compute_embedding(fact)
                self._neo4j.add_personalization(
                    content=fact,
                    category="preference",
                    strength=1.0,
                    sentiment=sentiment,
                    vector=fact_vector,
                )

        # Long-term facts: deduped at 0.85 similarity and stored only in
        # LanceDB — there is no Neo4j mirror for this layer here.
        for fact in factual:
            if not isinstance(fact, str) or not fact.strip():
                continue
            similar = self._lancedb.find_similar_long_term(fact, threshold=0.85)
            if not similar:
                self._lancedb.add_long_term(content=fact, source_facts=[])

        # Temporal events go to both stores, tied to this ingest's session.
        for fact in temporal:
            if not isinstance(fact, str) or not fact.strip():
                continue
            self._lancedb.add_temporal(
                session_id=session_id,
                event_type="event",
                content=fact,
            )
            self._neo4j.add_temporal(
                session_id=session_id,
                event_type="event",
                content=fact,
            )

        # Archive every extracted fact verbatim (LanceDB only), keeping the
        # originating conversation text alongside for provenance.
        for fact in temporal + factual + personal:
            if not isinstance(fact, str) or not fact.strip():
                continue
            self._lancedb.add_raw_fact(content=fact, conversation=conv_text)

        if duplicates_found:
            # Duplicates suggest overlapping memories: ask the LLM to merge
            # the full personalization set and apply any proposed merges.
            all_personalizations = self._lancedb.get_all_personalizations()
            memories = [
                {
                    "id": p["id"],
                    "content": p["content"],
                    "strength": p.get("strength", 0),
                }
                for p in all_personalizations
            ]
            sys_p, usr_p = get_consolidation_prompt(memories)
            response = self._provider.complete(usr_p, sys_p)
            result = safe_json_parse(response)
            if isinstance(result, dict):
                for merged in result.get("consolidated", []):
                    # Only apply well-formed merge instructions.
                    if (
                        isinstance(merged, dict)
                        and "original_ids" in merged
                        and "content" in merged
                    ):
                        self._lancedb.merge_personalizations(
                            ids=merged["original_ids"],
                            new_content=merged["content"],
                            boost=merged.get("boost", 0.15),
                        )
                        merged_vector = self._compute_embedding(merged["content"])
                        self._neo4j.merge_personalizations(
                            ids=merged["original_ids"],
                            new_content=merged["content"],
                            boost=merged.get("boost", 0.15),
                            vector=merged_vector,
                        )

    def _format_conv_for_llm(self, conv_list: list[dict[str, str]]) -> str:
        """Render messages as "role: content" lines for prompt embedding."""
        lines = []
        for msg in conv_list:
            role = msg.get("role", "unknown")
            content = msg.get("content", "")
            lines.append(f"{role}: {content}")
        return "\n".join(lines)

    def _compute_embedding(self, text: str) -> list[float]:
        # NOTE(review): delegates to LayerManager's private helper — a
        # public embedding accessor would avoid the underscore reach-in.
        return self._lancedb._compute_embedding(text)

    @staticmethod
    def _build_section(label: str, content: str) -> str:
        """Wrap *content* in "=== LABEL ===" delimiters.

        NOTE(review): not referenced anywhere in this file's visible code —
        possibly dead or used by callers outside this module.
        """
        return f"=== {label} ===\n{content}\n=== END {label} ==="

    def _format_memories_list(
        self, results: list[dict[str, Any]], with_strength: bool
    ) -> str:
        """Format up to five memory rows as "- content" bullet lines.

        When *with_strength* is set, each line is suffixed with the row's
        strength rendered as a percentage (missing strength shows as 0%).
        """
        if not results:
            return ""
        lines = []
        for r in results[:5]:
            content = r.get("content", "")
            if with_strength:
                strength = r.get("strength", 0)
                lines.append(f"- {content} (importance: {strength:.0%})")
            else:
                lines.append(f"- {content}")
        return "\n".join(lines)

    def _format_events_list(self, results: list[dict[str, Any]]) -> str:
        """Format up to five temporal rows, special-casing preference changes."""
        if not results:
            return ""
        lines = []
        for r in results[:5]:
            event_type = r.get("event_type", "")
            if event_type == "preference_change":
                old = r.get("old_content", "")
                new = r.get("new_content", "")
                ts = r.get("timestamp", "")
                if old and new:
                    lines.append(
                        f"[Preference changed] previous: {old}, current: {new}, date: {ts}"
                    )
                else:
                    # Fall back to the stored summary when either side of
                    # the change is missing.
                    lines.append(
                        f"[Preference changed] {r.get('content', '')}, date: {ts}"
                    )
            else:
                ts = r.get("timestamp", "")
                content = r.get("content", "")
                if ts:
                    lines.append(f"[Event] {content}, date: {ts}")
                else:
                    lines.append(f"[Event] {content}")
        return "\n".join(lines)

    def _fallback_context(self, pers: str, lt: str, temp: str, raw: str) -> str:
        """Assemble a plain Korean-labelled context when LLM synthesis fails.

        Newlines inside each section are flattened to comma-separated text
        (chr(10) == "\\n"); returns "(기억 없음)" ("no memories") when every
        section is empty.
        """
        parts = []
        if pers:
            parts.append(f"사용자 취향: {pers.replace(chr(10), ', ')}")
        if lt:
            parts.append(f"핵심 사실: {lt.replace(chr(10), ', ')}")
        if temp:
            parts.append(f"최근 사건: {temp.replace(chr(10), ', ')}")
        if raw:
            parts.append(f"참고: {raw.replace(chr(10), ', ')}")
        return ". ".join(parts) if parts else "(기억 없음)"

    def get_context(
        self,
        full_history: list[dict[str, str]] | str | None = None,
        max_tokens: int = 4096,
    ) -> str:
        """Build a memory context string for *full_history*, capped at *max_tokens*.

        Embeds the conversation, searches all four layers, refreshes decay,
        records accesses, then asks the LLM to synthesize the retrieved
        memories (falling back to ``_fallback_context`` on any synthesis
        error). Returns "" for empty history; returns the (truncated) style
        text alone when no memories are found.
        """
        model = self._provider.model

        if not full_history:
            return ""

        # The whole history is embedded as one query vector.
        conv_list = format_conversation(full_history)
        conv_text = self._format_conv_for_llm(conv_list)
        query_embedding = self._compute_embedding(conv_text)

        pers_results = self._lancedb.search("personalization", query_embedding, limit=5)
        lt_results = self._lancedb.search("long_term", query_embedding, limit=5)
        raw_results = self._lancedb.search("raw_facts", query_embedding, limit=2)
        temp_results = self._lancedb.search(
            "temporal_sessions", query_embedding, limit=5
        )

        # Sort each result list in place by ascending "_distance"; rows
        # without a distance sink to the end.
        for results in [pers_results, lt_results, raw_results, temp_results]:
            results.sort(key=lambda r: r.get("_distance", float("inf")))

        self._recalculate_strengths()

        # Best-effort access bookkeeping for retrieved personalizations;
        # failures in either store are deliberately ignored.
        for p in pers_results:
            try:
                self._lancedb.record_access(p["id"])
                self._neo4j.record_access(p["id"])
            except Exception:
                pass

        pers_text = self._format_memories_list(pers_results, with_strength=True)
        lt_text = self._format_memories_list(lt_results, with_strength=False)
        temp_text = self._format_events_list(temp_results)
        raw_text = self._format_memories_list(raw_results[:2], with_strength=False)

        if not any([pers_text, lt_text, temp_text, raw_text]):
            return (
                truncate_to_token_limit(self._style, max_tokens, model)
                if self._style
                else ""
            )

        # NOTE(review): raw_text is NOT passed to the synthesis prompt —
        # it is only used by the exception fallback below. Confirm whether
        # raw facts were meant to reach the LLM.
        try:
            sys_p, usr_p = get_context_synthesis_prompt(
                conversation=conv_text,
                style=self._style,
                personalization=pers_text,
                long_term=lt_text,
                recent_events=temp_text,
            )
            synthesis = self._provider.complete(usr_p, sys_p).strip()
        except Exception:
            synthesis = self._fallback_context(pers_text, lt_text, temp_text, raw_text)

        context = synthesis

        if count_tokens(context, model) > max_tokens:
            context = truncate_to_token_limit(context, max_tokens, model)

        return context
|