@smilintux/skmemory 0.5.0 → 0.7.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/ci.yml +39 -3
- package/.github/workflows/publish.yml +13 -6
- package/AGENT_REFACTOR_CHANGES.md +192 -0
- package/ARCHITECTURE.md +101 -19
- package/CHANGELOG.md +153 -0
- package/LICENSE +81 -68
- package/MISSION.md +7 -0
- package/README.md +419 -86
- package/SKILL.md +197 -25
- package/docker-compose.yml +15 -15
- package/index.js +6 -5
- package/openclaw-plugin/openclaw.plugin.json +10 -0
- package/openclaw-plugin/src/index.ts +255 -0
- package/openclaw-plugin/src/openclaw.plugin.json +10 -0
- package/package.json +1 -1
- package/pyproject.toml +29 -9
- package/requirements.txt +10 -2
- package/seeds/cloud9-opus.seed.json +7 -7
- package/seeds/lumina-cloud9-breakthrough.seed.json +46 -0
- package/seeds/lumina-cloud9-python-pypi.seed.json +46 -0
- package/seeds/lumina-kingdom-founding.seed.json +47 -0
- package/seeds/lumina-pma-signed.seed.json +46 -0
- package/seeds/lumina-singular-achievement.seed.json +46 -0
- package/seeds/lumina-skcapstone-conscious.seed.json +46 -0
- package/seeds/plant-kingdom-journal.py +203 -0
- package/seeds/plant-lumina-seeds.py +280 -0
- package/skill.yaml +46 -0
- package/skmemory/HA.md +296 -0
- package/skmemory/__init__.py +12 -1
- package/skmemory/agents.py +233 -0
- package/skmemory/ai_client.py +40 -0
- package/skmemory/anchor.py +4 -2
- package/skmemory/backends/__init__.py +11 -4
- package/skmemory/backends/file_backend.py +2 -1
- package/skmemory/backends/skgraph_backend.py +608 -0
- package/skmemory/backends/{qdrant_backend.py → skvector_backend.py} +99 -69
- package/skmemory/backends/sqlite_backend.py +122 -51
- package/skmemory/backends/vaulted_backend.py +286 -0
- package/skmemory/cli.py +1238 -29
- package/skmemory/config.py +173 -0
- package/skmemory/context_loader.py +335 -0
- package/skmemory/endpoint_selector.py +386 -0
- package/skmemory/fortress.py +685 -0
- package/skmemory/graph_queries.py +238 -0
- package/skmemory/importers/__init__.py +9 -1
- package/skmemory/importers/telegram.py +351 -43
- package/skmemory/importers/telegram_api.py +488 -0
- package/skmemory/journal.py +4 -2
- package/skmemory/lovenote.py +4 -2
- package/skmemory/mcp_server.py +706 -0
- package/skmemory/models.py +41 -0
- package/skmemory/openclaw.py +8 -8
- package/skmemory/predictive.py +232 -0
- package/skmemory/promotion.py +524 -0
- package/skmemory/register.py +454 -0
- package/skmemory/register_mcp.py +197 -0
- package/skmemory/ritual.py +121 -47
- package/skmemory/seeds.py +257 -8
- package/skmemory/setup_wizard.py +920 -0
- package/skmemory/sharing.py +402 -0
- package/skmemory/soul.py +71 -20
- package/skmemory/steelman.py +250 -263
- package/skmemory/store.py +271 -60
- package/skmemory/vault.py +228 -0
- package/tests/integration/__init__.py +0 -0
- package/tests/integration/conftest.py +233 -0
- package/tests/integration/test_cross_backend.py +355 -0
- package/tests/integration/test_skgraph_live.py +424 -0
- package/tests/integration/test_skvector_live.py +369 -0
- package/tests/test_backup_rotation.py +327 -0
- package/tests/test_cli.py +6 -6
- package/tests/test_endpoint_selector.py +801 -0
- package/tests/test_fortress.py +255 -0
- package/tests/test_fortress_hardening.py +444 -0
- package/tests/test_openclaw.py +5 -2
- package/tests/test_predictive.py +237 -0
- package/tests/test_promotion.py +340 -0
- package/tests/test_ritual.py +4 -4
- package/tests/test_seeds.py +96 -0
- package/tests/test_setup.py +835 -0
- package/tests/test_sharing.py +250 -0
- package/tests/test_skgraph_backend.py +667 -0
- package/tests/test_skvector_backend.py +326 -0
- package/tests/test_steelman.py +5 -5
- package/tests/test_store_graph_integration.py +245 -0
- package/tests/test_vault.py +186 -0
- package/skmemory/backends/falkordb_backend.py +0 -310
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
"""
|
|
2
|
+
SKMemory configuration persistence.
|
|
3
|
+
|
|
4
|
+
Manages ``~/.skcapstone/agents/{agent_name}/config/skmemory.yaml``
|
|
5
|
+
so backend URLs and setup state persist across CLI invocations.
|
|
6
|
+
|
|
7
|
+
Resolution order:
|
|
8
|
+
CLI args > env vars > config file > None
|
|
9
|
+
|
|
10
|
+
Now supports multiple agents via ~/.skcapstone/agents/{agent_name}/
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import os
|
|
16
|
+
from datetime import datetime, timezone
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
from typing import Optional
|
|
19
|
+
|
|
20
|
+
import yaml
|
|
21
|
+
from pydantic import BaseModel, Field
|
|
22
|
+
|
|
23
|
+
from .agents import AGENTS_BASE_DIR, get_agent_paths
|
|
24
|
+
|
|
25
|
+
# Dynamic agent-aware paths.
# Uses ~/.skcapstone/agents/{active_agent}/ based on the SKMEMORY_AGENT env var.
# Falls back to first non-template agent, or creates from template.
try:
    # get_agent_paths() raises ValueError when it cannot resolve any agent;
    # in that case we fall back to the template layout below.
    default_paths = get_agent_paths()
    SKMEMORY_HOME = default_paths["base"]
    CONFIG_DIR = default_paths["config"]
    CONFIG_PATH = default_paths["config_yaml"]
except ValueError:
    # Fallback if no agents exist — use platform-aware AGENTS_BASE_DIR
    SKMEMORY_HOME = AGENTS_BASE_DIR / "lumina-template"
    CONFIG_DIR = SKMEMORY_HOME / "config"
    CONFIG_PATH = CONFIG_DIR / "skmemory.yaml"
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class EndpointConfig(BaseModel):
    """A single backend endpoint with role and optional Tailscale IP."""

    # Backend endpoint URL (scheme + host + port).
    url: str
    # Role within an HA group; see comment for the accepted values.
    role: str = "primary"  # primary | replica
    # Optional Tailscale IP — presumably display-only per the comment below.
    tailscale_ip: str = ""  # optional, for display
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class SKMemoryConfig(BaseModel):
    """Persistent configuration for SKMemory backends.

    Written to and read from ``skmemory.yaml`` (see ``save_config`` /
    ``load_config``). Every field has a default so a partial or empty
    config file still validates.
    """

    # Legacy single-endpoint fields (still honoured alongside the
    # multi-endpoint lists below).
    skvector_url: Optional[str] = None
    skvector_key: Optional[str] = None
    skgraph_url: Optional[str] = None
    # Names of the backends the user has enabled.
    backends_enabled: list[str] = Field(default_factory=list)
    # Path to a docker-compose file, if backends are launched locally.
    docker_compose_file: Optional[str] = None
    # Timestamp string recorded when setup finished — presumably ISO-8601;
    # verify against the setup wizard before relying on the format.
    setup_completed_at: Optional[str] = None

    # Multi-endpoint HA support
    skvector_endpoints: list[EndpointConfig] = Field(default_factory=list)
    skgraph_endpoints: list[EndpointConfig] = Field(default_factory=list)
    # Strategy for routing across endpoints; defaults to "failover".
    routing_strategy: str = "failover"
    heartbeat_discovery: bool = False
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def load_config(path: Path = CONFIG_PATH) -> Optional[SKMemoryConfig]:
    """Load configuration from YAML.

    Args:
        path: Path to the config file.

    Returns:
        SKMemoryConfig if the file exists and is valid, None otherwise.
        Parse and validation failures are deliberately swallowed so a
        corrupt config degrades to "not configured" rather than crashing
        the CLI.
    """
    if not path.exists():
        return None

    try:
        # Read as UTF-8 explicitly so behaviour does not depend on the
        # platform's locale encoding (e.g. cp1252 on Windows).
        with open(path, encoding="utf-8") as f:
            data = yaml.safe_load(f)
        # A valid config must be a YAML mapping; scalars/lists/None are
        # treated as "no config".
        if not isinstance(data, dict):
            return None
        return SKMemoryConfig(**data)
    except Exception:
        # Best-effort: unreadable or invalid config is treated as absent.
        return None
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def save_config(config: SKMemoryConfig, path: Path = CONFIG_PATH) -> Path:
    """Write configuration to YAML, creating the directory if needed.

    Args:
        config: The configuration to persist.
        path: Destination path.

    Returns:
        The path written to.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    # Write UTF-8 explicitly so the file round-trips with load_config()
    # regardless of the platform's locale encoding.
    with open(path, "w", encoding="utf-8") as f:
        yaml.safe_dump(
            config.model_dump(exclude_none=True),  # drop unset fields for a tidy file
            f,
            default_flow_style=False,  # block style: one key per line
            sort_keys=False,  # preserve field declaration order
        )
    return path
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def merge_env_and_config(
    cli_skvector_url: Optional[str] = None,
    cli_skvector_key: Optional[str] = None,
    cli_skgraph_url: Optional[str] = None,
) -> tuple[Optional[str], Optional[str], Optional[str]]:
    """Resolve backend URLs with precedence: CLI > env > config > None.

    Args:
        cli_skvector_url: URL passed via ``--skvector-url``.
        cli_skvector_key: Key passed via ``--skvector-key``.
        cli_skgraph_url: URL passed via ``--skgraph-url`` (future).

    Returns:
        Tuple of (skvector_url, skvector_key, skgraph_url).
    """
    cfg = load_config()

    def _resolve(cli_value: Optional[str], env_var: str, cfg_value: Optional[str]) -> Optional[str]:
        # First truthy value wins, mirroring the documented precedence.
        return cli_value or os.environ.get(env_var) or cfg_value

    return (
        _resolve(cli_skvector_url, "SKMEMORY_SKVECTOR_URL", cfg.skvector_url if cfg else None),
        _resolve(cli_skvector_key, "SKMEMORY_SKVECTOR_KEY", cfg.skvector_key if cfg else None),
        _resolve(cli_skgraph_url, "SKMEMORY_SKGRAPH_URL", cfg.skgraph_url if cfg else None),
    )
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def build_endpoint_list(
    single_url: Optional[str],
    endpoints: list[EndpointConfig],
    default_role: str = "primary",
) -> list[EndpointConfig]:
    """Merge a single URL and an endpoints list into a unified list.

    Backward compatibility bridge: if no endpoints are configured but a
    single URL exists, it becomes the sole endpoint. If both exist, the
    endpoints list takes precedence and the single URL is prepended only
    if it isn't already present.

    Args:
        single_url: Legacy single-URL field (skvector_url / skgraph_url).
        endpoints: Explicit endpoint list from config.
        default_role: Role to assign when promoting a single URL.

    Returns:
        Unified list of EndpointConfig (may be empty).
    """
    # Always return a fresh list so callers can't mutate the config's list.
    merged: list[EndpointConfig] = list(endpoints)

    if single_url:
        known_urls = {ep.url for ep in merged}
        if single_url not in known_urls:
            # Promote the legacy URL to a full endpoint at the front.
            merged.insert(0, EndpointConfig(url=single_url, role=default_role))

    return merged
|
|
@@ -0,0 +1,335 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Lazy Memory Context Loader - Three-Tier Memory Architecture.
|
|
3
|
+
|
|
4
|
+
Loads memories efficiently based on date tiers to optimize token usage:
|
|
5
|
+
- TODAY: Full content (active work)
|
|
6
|
+
- YESTERDAY: Summaries only (recent context)
|
|
7
|
+
- HISTORICAL: Reference count (deep search available)
|
|
8
|
+
|
|
9
|
+
Usage:
|
|
10
|
+
loader = LazyMemoryLoader("lumina")
|
|
11
|
+
context = loader.load_active_context() # Token-optimized
|
|
12
|
+
|
|
13
|
+
# Deep search when needed
|
|
14
|
+
results = loader.deep_search("project gentis")
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
from __future__ import annotations
|
|
18
|
+
|
|
19
|
+
import json
|
|
20
|
+
import logging
|
|
21
|
+
from dataclasses import dataclass
|
|
22
|
+
from datetime import datetime, timedelta
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
from typing import Optional
|
|
25
|
+
|
|
26
|
+
from .agents import get_agent_paths
|
|
27
|
+
from .backends.sqlite_backend import SQLiteBackend
|
|
28
|
+
|
|
29
|
+
# Module-level logger, namespaced to this module per stdlib convention.
logger = logging.getLogger(__name__)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@dataclass
class MemoryContext:
    """Container for loaded memory context.

    Holds the three tiers: full memories for today, summary-only entries
    for yesterday, and a bare count for everything older.
    """

    today_memories: list[dict]  # full memory dicts
    yesterday_summaries: list[dict]  # summary-only dicts
    historical_count: int  # older memories, counted but not loaded

    def to_context_string(self, max_tokens: int = 3000) -> str:
        """Render the context as a token-optimized string.

        Note: ``max_tokens`` is currently unused; size is bounded by fixed
        item caps and per-item character truncation instead.
        """
        lines: list[str] = []

        # Tier 1 — today, full content (max 20 items, 200 chars each).
        if self.today_memories:
            lines.append(f"## Today's Memories ({len(self.today_memories)})")
            lines.extend(
                f"- {mem.get('title', 'Untitled')}: {mem.get('content', '')[:200]}"
                for mem in self.today_memories[:20]
            )

        # Tier 2 — yesterday, summaries only (max 10 items, 150 chars each).
        if self.yesterday_summaries:
            lines.append(f"\n## Yesterday ({len(self.yesterday_summaries)} memories)")
            lines.extend(
                f"- {mem.get('title', 'Untitled')}: {mem.get('summary', 'No summary')[:150]}"
                for mem in self.yesterday_summaries[:10]
            )

        # Tier 3 — historical, count only; deep search recalls details.
        if self.historical_count > 0:
            lines.append("\n## Historical Memory")
            lines.append(f"- {self.historical_count} long-term memories available")
            lines.append("- Use 'search memory [query]' to recall specific details")

        return "\n".join(lines)
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
class LazyMemoryLoader:
    """Efficiently loads memories based on date tiers.

    Tiers:
        - today: full content (active work)
        - yesterday: summaries only (recent context)
        - historical: count only; details retrieved on demand via
          :meth:`deep_search`.
    """

    def __init__(self, agent_name: Optional[str] = None):
        """Open the agent's SQLite index.

        Args:
            agent_name: Agent to load; None lets get_agent_paths pick the
                default agent.
        """
        self.agent_name = agent_name
        self.paths = get_agent_paths(agent_name)
        # NOTE(review): naive local date — tier boundaries follow the host's
        # local timezone; confirm created_at rows use the same convention.
        self.today = datetime.now().date()
        self.db = SQLiteBackend(str(self.paths["index_db"]))

    def load_active_context(self) -> MemoryContext:
        """Load token-optimized context for current session.

        Returns:
            MemoryContext with today (full), yesterday (summaries),
            historical (count).
        """
        return MemoryContext(
            today_memories=self._load_today(),
            yesterday_summaries=self._load_yesterday_summaries(),
            historical_count=self._count_historical(),
        )

    def _load_today(self) -> list[dict]:
        """Load today's short-layer memories with full content (max 50)."""
        today_str = self.today.isoformat()
        try:
            # tags / emotional_signature are stored as JSON text columns.
            cursor = self.db._conn.execute(
                """
                SELECT id, title, content, tags, emotional_signature
                FROM memories
                WHERE DATE(created_at) = ?
                AND layer = 'short'
                ORDER BY created_at DESC
                LIMIT 50
                """,
                (today_str,),
            )
            return [
                {
                    "id": row[0],
                    "title": row[1],
                    "content": row[2],
                    "tags": json.loads(row[3]) if row[3] else [],
                    "emotional": json.loads(row[4]) if row[4] else {},
                }
                for row in cursor.fetchall()
            ]
        except Exception as e:
            logger.error(f"Failed to load today's memories: {e}")
            return []

    def _load_yesterday_summaries(self) -> list[dict]:
        """Load yesterday's memories as summaries only (max 20, by importance)."""
        yesterday = (self.today - timedelta(days=1)).isoformat()
        try:
            cursor = self.db._conn.execute(
                """
                SELECT id, title, summary, tags
                FROM memories
                WHERE DATE(created_at) = ?
                AND layer IN ('short', 'medium')
                ORDER BY importance DESC
                LIMIT 20
                """,
                (yesterday,),
            )
            memories = []
            for row in cursor.fetchall():
                mem = {
                    "id": row[0],
                    "title": row[1],
                    # content isn't selected here, so the fallback summary is
                    # derived from the title — NOTE(review): confirm intended.
                    "summary": row[2] or self._generate_summary(row[1]),
                    "tags": json.loads(row[3]) if row[3] else [],
                }
                memories.append(mem)
            return memories
        except Exception as e:
            logger.error(f"Failed to load yesterday's summaries: {e}")
            return []

    def _count_historical(self) -> int:
        """Count memories older than yesterday (not loaded into context)."""
        yesterday = (self.today - timedelta(days=1)).isoformat()
        try:
            cursor = self.db._conn.execute(
                """
                SELECT COUNT(*) FROM memories
                WHERE DATE(created_at) < ?
                """,
                (yesterday,),
            )
            return cursor.fetchone()[0]
        except Exception as e:
            logger.error(f"Failed to count historical memories: {e}")
            return 0

    def _generate_summary(self, content: str, sentences: int = 2) -> str:
        """Generate a brief summary (fallback if no summary stored).

        Args:
            content: Full text to summarize.
            sentences: Currently unused; kept for interface stability.

        Returns:
            The first 30 words followed by "..." when the text is 30+ words
            long, otherwise the text unchanged.
        """
        # Simple truncation-based summary
        words = content.split()[:30]  # First 30 words
        return " ".join(words) + "..." if len(words) >= 30 else content

    def deep_search(self, query: str, max_results: int = 10) -> list[dict]:
        """Search ALL memory tiers (on demand, token-heavy).

        Args:
            query: Search query
            max_results: Maximum results to return

        Returns:
            List of full memory details
        """
        results = []

        # Search SQLite (title, content, tags)
        results.extend(self._search_sqlite(query))

        # TODO: Add SKVector search if enabled
        # results.extend(self._search_skvector(query))

        # TODO: Add SKGraph search if enabled
        # results.extend(self._search_skgraph(query))

        # Sort by relevance (simple: contains query).  Hoist the lowered
        # query out of the key function, and coerce NULL columns to "" so a
        # None content/title can't crash the sort.
        needle = query.lower()
        results = sorted(
            results,
            key=lambda x: (
                (x.get("content") or "").lower().count(needle),
                (x.get("title") or "").lower().count(needle),
            ),
            reverse=True,
        )

        return results[:max_results]

    def _search_sqlite(self, query: str) -> list[dict]:
        """Search SQLite for memories matching query (title > content > tags)."""
        try:
            pattern = f"%{query}%"
            cursor = self.db._conn.execute(
                """
                SELECT id, title, content, summary, tags, layer, created_at
                FROM memories
                WHERE title LIKE ? OR content LIKE ? OR tags LIKE ?
                ORDER BY
                CASE
                WHEN title LIKE ? THEN 3
                WHEN content LIKE ? THEN 2
                ELSE 1
                END DESC,
                created_at DESC
                LIMIT 50
                """,
                (pattern, pattern, pattern, pattern, pattern),
            )
            return [
                {
                    "id": row[0],
                    "title": row[1],
                    "content": row[2],
                    "summary": row[3],
                    "tags": json.loads(row[4]) if row[4] else [],
                    "layer": row[5],
                    "created_at": row[6],
                }
                for row in cursor.fetchall()
            ]
        except Exception as e:
            logger.error(f"Failed to search SQLite: {e}")
            return []

    def get_memory_by_id(self, memory_id: str) -> Optional[dict]:
        """Load full memory details by ID (for deep recall).

        Args:
            memory_id: UUID of the memory

        Returns:
            Full memory dict or None
        """
        try:
            cursor = self.db._conn.execute(
                """
                SELECT id, title, content, summary, tags,
                emotional_signature, layer, created_at
                FROM memories
                WHERE id = ?
                """,
                (memory_id,),
            )
            row = cursor.fetchone()
            if row:
                return {
                    "id": row[0],
                    "title": row[1],
                    "content": row[2],
                    "summary": row[3],
                    "tags": json.loads(row[4]) if row[4] else [],
                    "emotional": json.loads(row[5]) if row[5] else {},
                    "layer": row[6],
                    "created_at": row[7],
                }
        except Exception as e:
            logger.error(f"Failed to get memory {memory_id}: {e}")
        return None

    def promote_memory(self, memory_id: str, to_layer: str) -> bool:
        """Promote memory to different tier and generate summary.

        Args:
            memory_id: Memory to promote
            to_layer: Target layer ('short', 'medium', 'long')

        Returns:
            True if successful
        """
        try:
            # Get memory content
            memory = self.get_memory_by_id(memory_id)
            if not memory:
                return False

            # BUGFIX: `summary` was previously assigned only when a fallback
            # was generated, so promoting a memory that already had a summary
            # (or promoting to 'short') raised UnboundLocalError inside the
            # try, was swallowed below, and always returned False.  Start
            # from the stored summary and only generate one when promoting to
            # medium/long without an existing summary.
            summary = memory.get("summary")
            if to_layer in ("medium", "long") and not summary:
                summary = self._generate_summary(memory["content"], 2)

            # Update in database
            self.db._conn.execute(
                """
                UPDATE memories
                SET layer = ?, summary = ?
                WHERE id = ?
                """,
                (to_layer, summary, memory_id),
            )
            self.db._conn.commit()

            # Also move flat file so the on-disk mirror matches the new tier.
            self._move_flat_file(memory_id, to_layer)

            logger.info(f"Promoted memory {memory_id} to {to_layer}")
            return True

        except Exception as e:
            logger.error(f"Failed to promote memory {memory_id}: {e}")

        return False

    def _move_flat_file(self, memory_id: str, to_layer: str):
        """Move memory flat file to appropriate tier directory.

        Silently does nothing when no flat file exists for the memory.
        """
        # Find current location
        for layer in ["short", "medium", "long"]:
            src = self.paths["memory_" + layer] / f"{memory_id}.json"
            if src.exists():
                dst = self.paths["memory_" + to_layer] / f"{memory_id}.json"
                src.rename(dst)
                logger.debug(f"Moved {src} -> {dst}")
                break
|
|
324
|
+
|
|
325
|
+
|
|
326
|
+
def get_context_for_session(agent_name: Optional[str] = None) -> str:
    """Convenience function: get token-optimized context.

    Usage:
        context = get_context_for_session("lumina")
        # Returns formatted string with today's + yesterday's summaries
    """
    # One-shot pipeline: build a loader, gather the tiered context, render it.
    return LazyMemoryLoader(agent_name).load_active_context().to_context_string()
|