@simbimbo/memory-ocmemog 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +59 -0
- package/LICENSE +21 -0
- package/README.md +223 -0
- package/brain/__init__.py +1 -0
- package/brain/runtime/__init__.py +13 -0
- package/brain/runtime/config.py +21 -0
- package/brain/runtime/inference.py +83 -0
- package/brain/runtime/instrumentation.py +17 -0
- package/brain/runtime/memory/__init__.py +13 -0
- package/brain/runtime/memory/api.py +152 -0
- package/brain/runtime/memory/artifacts.py +33 -0
- package/brain/runtime/memory/candidate.py +89 -0
- package/brain/runtime/memory/context_builder.py +87 -0
- package/brain/runtime/memory/conversation_state.py +1825 -0
- package/brain/runtime/memory/distill.py +198 -0
- package/brain/runtime/memory/embedding_engine.py +94 -0
- package/brain/runtime/memory/freshness.py +91 -0
- package/brain/runtime/memory/health.py +42 -0
- package/brain/runtime/memory/integrity.py +170 -0
- package/brain/runtime/memory/interaction_memory.py +57 -0
- package/brain/runtime/memory/memory_consolidation.py +60 -0
- package/brain/runtime/memory/memory_gate.py +38 -0
- package/brain/runtime/memory/memory_graph.py +54 -0
- package/brain/runtime/memory/memory_links.py +109 -0
- package/brain/runtime/memory/memory_salience.py +235 -0
- package/brain/runtime/memory/memory_synthesis.py +33 -0
- package/brain/runtime/memory/memory_taxonomy.py +35 -0
- package/brain/runtime/memory/person_identity.py +83 -0
- package/brain/runtime/memory/person_memory.py +138 -0
- package/brain/runtime/memory/pondering_engine.py +577 -0
- package/brain/runtime/memory/promote.py +237 -0
- package/brain/runtime/memory/provenance.py +356 -0
- package/brain/runtime/memory/reinforcement.py +73 -0
- package/brain/runtime/memory/retrieval.py +153 -0
- package/brain/runtime/memory/semantic_search.py +66 -0
- package/brain/runtime/memory/sentiment_memory.py +67 -0
- package/brain/runtime/memory/store.py +400 -0
- package/brain/runtime/memory/tool_catalog.py +68 -0
- package/brain/runtime/memory/unresolved_state.py +93 -0
- package/brain/runtime/memory/vector_index.py +270 -0
- package/brain/runtime/model_roles.py +11 -0
- package/brain/runtime/model_router.py +22 -0
- package/brain/runtime/providers.py +59 -0
- package/brain/runtime/security/__init__.py +3 -0
- package/brain/runtime/security/redaction.py +14 -0
- package/brain/runtime/state_store.py +25 -0
- package/brain/runtime/storage_paths.py +41 -0
- package/docs/architecture/memory.md +118 -0
- package/docs/release-checklist.md +34 -0
- package/docs/reports/ocmemog-code-audit-2026-03-14.md +155 -0
- package/docs/usage.md +223 -0
- package/index.ts +726 -0
- package/ocmemog/__init__.py +1 -0
- package/ocmemog/sidecar/__init__.py +1 -0
- package/ocmemog/sidecar/app.py +1068 -0
- package/ocmemog/sidecar/compat.py +74 -0
- package/ocmemog/sidecar/transcript_watcher.py +425 -0
- package/openclaw.plugin.json +18 -0
- package/package.json +60 -0
- package/scripts/install-ocmemog.sh +277 -0
- package/scripts/launchagents/com.openclaw.ocmemog.guard.plist +22 -0
- package/scripts/launchagents/com.openclaw.ocmemog.ponder.plist +22 -0
- package/scripts/launchagents/com.openclaw.ocmemog.sidecar.plist +27 -0
- package/scripts/ocmemog-context.sh +15 -0
- package/scripts/ocmemog-continuity-benchmark.py +178 -0
- package/scripts/ocmemog-demo.py +122 -0
- package/scripts/ocmemog-failover-test.sh +17 -0
- package/scripts/ocmemog-guard.sh +11 -0
- package/scripts/ocmemog-install.sh +93 -0
- package/scripts/ocmemog-load-test.py +106 -0
- package/scripts/ocmemog-ponder.sh +30 -0
- package/scripts/ocmemog-recall-test.py +58 -0
- package/scripts/ocmemog-reindex-vectors.py +14 -0
- package/scripts/ocmemog-reliability-soak.py +177 -0
- package/scripts/ocmemog-sidecar.sh +46 -0
- package/scripts/ocmemog-soak-report.py +58 -0
- package/scripts/ocmemog-soak-test.py +44 -0
- package/scripts/ocmemog-test-rig.py +345 -0
- package/scripts/ocmemog-transcript-append.py +45 -0
- package/scripts/ocmemog-transcript-watcher.py +8 -0
- package/scripts/ocmemog-transcript-watcher.sh +7 -0
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Dict, List, Any, Iterable, Tuple
|
|
4
|
+
|
|
5
|
+
from brain.runtime.instrumentation import emit_event
|
|
6
|
+
from brain.runtime import state_store
|
|
7
|
+
from brain.runtime.memory import memory_links, provenance, store, vector_index
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def _match_score(text: str, query: str) -> float:
|
|
11
|
+
if not text:
|
|
12
|
+
return 0.0
|
|
13
|
+
text_l = text.lower()
|
|
14
|
+
query_l = query.lower()
|
|
15
|
+
if query_l in text_l:
|
|
16
|
+
return 1.0
|
|
17
|
+
return 0.0
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
# Category buckets recognised by the retrieval API. Each bucket name doubles
# as a table name in the memory store (retrieve() interpolates it directly
# into its SELECTs), so this tuple is also the whitelist that keeps those
# f-string queries safe.
MEMORY_BUCKETS: Tuple[str, ...] = (
    "knowledge",
    "reflections",
    "directives",
    "tasks",
    "runbooks",
    "lessons",
)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _empty_results() -> Dict[str, List[Dict[str, Any]]]:
    """Build a fresh result mapping holding an empty list for every bucket."""
    fresh: Dict[str, List[Dict[str, Any]]] = {}
    for bucket in MEMORY_BUCKETS:
        fresh[bucket] = []
    return fresh
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def retrieve(prompt: str, limit: int = 5, categories: Iterable[str] | None = None) -> Dict[str, List[Dict[str, Any]]]:
    """Retrieve scored memories matching *prompt*, grouped per bucket.

    Scoring blends an exact-keyword hit (1.0), the averaged reinforcement
    reward for the memory reference (x0.5), and the row's stored confidence
    (x0.3). When keyword matching yields nothing for any selected bucket and
    the prompt is non-empty, falls back to the vector index.

    Args:
        prompt: Free-text query; an empty prompt matches every non-empty row.
        limit: Maximum entries returned per bucket.
        categories: Optional subset of MEMORY_BUCKETS; unknown names are
            ignored, duplicates are dropped while preserving order.

    Returns:
        Mapping of every bucket in MEMORY_BUCKETS to a score-descending list
        of result dicts (content, score, memory_reference, links,
        provenance_preview). Unselected buckets stay empty.
    """
    emit_event(state_store.reports_dir() / "brain_memory.log.jsonl", "brain_memory_retrieval_start", status="ok")
    emit_event(state_store.reports_dir() / "brain_memory.log.jsonl", "brain_memory_retrieval_rank_start", status="ok")

    conn = store.connect()
    results = _empty_results()
    # Preserve caller ordering while dropping duplicates and unknown buckets.
    selected_categories = tuple(dict.fromkeys(category for category in (categories or MEMORY_BUCKETS) if category in MEMORY_BUCKETS))

    try:
        # Aggregate reinforcement signals per memory_reference (mean of
        # reward_score / confidence over all matching experience rows).
        reinf_rows = conn.execute(
            "SELECT memory_reference, reward_score, confidence FROM experiences",
        ).fetchall()
        reinforcement: Dict[str, Dict[str, float]] = {}
        for row in reinf_rows:
            reference = str(row[0] or "")
            if not reference:
                continue
            current = reinforcement.setdefault(reference, {"reward_score": 0.0, "confidence": 0.0, "count": 0.0})
            current["reward_score"] += float(row[1] or 0.0)
            current["confidence"] += float(row[2] or 0.0)
            current["count"] += 1.0
        for current in reinforcement.values():
            count = max(1.0, float(current.get("count") or 1.0))
            current["reward_score"] = float(current.get("reward_score") or 0.0) / count
            current["confidence"] = float(current.get("confidence") or 0.0) / count

        def score_record(content: str, memory_ref: str, promo_conf: float) -> float:
            # Weighted blend of keyword hit, reinforcement reward, and confidence.
            keyword = _match_score(content, prompt)
            reinf = reinforcement.get(memory_ref, {})
            reinf_score = float(reinf.get("reward_score", 0.0)) * 0.5
            promo_score = float(promo_conf) * 0.3
            return round(keyword + reinf_score + promo_score, 3)

        def append_result(bucket: str, row: Any, table: str) -> None:
            # Shared assembly of one result entry (was duplicated between the
            # keyword path and the semantic fallback).
            content = row["content"] if isinstance(row, dict) else row[1]
            mem_ref = f"{table}:{row[0]}"
            promo_conf = row["confidence"] if isinstance(row, dict) else row[2]
            metadata = provenance.fetch_reference(mem_ref)
            results[bucket].append({
                "content": content,
                "score": score_record(content, mem_ref, promo_conf),
                "memory_reference": mem_ref,
                "links": memory_links.get_memory_links(mem_ref),
                "provenance_preview": (metadata or {}).get("provenance_preview") or provenance.preview_from_metadata((metadata or {}).get("metadata")),
            })

        # Keyword pass: scan each bucket table newest-first (over-fetch x10 so
        # ranking has candidates to choose from).
        for table in selected_categories:
            try:
                rows = conn.execute(
                    f"SELECT id, content, confidence, metadata_json FROM {table} ORDER BY id DESC LIMIT ?",
                    (limit * 10,),
                ).fetchall()
            except Exception:
                # Bucket table may not exist yet; retrieval is best-effort.
                continue
            for row in rows:
                content = row["content"] if isinstance(row, dict) else row[1]
                if not _match_score(content, prompt):
                    continue
                append_result(table, row, table)
            results[table] = sorted(results[table], key=lambda x: x["score"], reverse=True)[:limit]

        # Semantic fallback: only when keyword matching found nothing anywhere.
        if prompt.strip() and all(not results.get(bucket) for bucket in selected_categories):
            semantic = vector_index.search_memory(prompt, limit=limit)
            for item in semantic:
                source_type = item.get("source_type") or "knowledge"
                if source_type not in selected_categories:
                    continue
                try:
                    row = conn.execute(
                        f"SELECT id, content, confidence, metadata_json FROM {source_type} WHERE id=?",
                        (int(item.get("source_id") or 0),),
                    ).fetchone()
                except Exception:
                    continue
                if not row:
                    continue
                append_result(source_type, row, source_type)
            for bucket in selected_categories:
                results[bucket] = sorted(results[bucket], key=lambda x: x["score"], reverse=True)[:limit]
    finally:
        # Always release the connection, even when a query raises (the
        # original leaked it on any uncaught exception).
        conn.close()

    emit_event(state_store.reports_dir() / "brain_memory.log.jsonl", "brain_memory_retrieval_rank_complete", status="ok")
    emit_event(state_store.reports_dir() / "brain_memory.log.jsonl", "brain_memory_retrieval_complete", status="ok")
    return results
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def retrieve_for_queries(
    queries: Iterable[str],
    *,
    limit: int = 5,
    categories: Iterable[str] | None = None,
) -> Dict[str, List[Dict[str, Any]]]:
    """Run retrieve() for several queries and merge the results per bucket.

    Duplicate memory references are kept once (first occurrence wins); each
    selected bucket is re-ranked by score and trimmed to *limit* entries.
    When no usable query string is supplied, a single empty-prompt retrieval
    is returned instead.
    """
    selected = tuple(dict.fromkeys(category for category in (categories or MEMORY_BUCKETS) if category in MEMORY_BUCKETS))
    usable = [candidate.strip() for candidate in queries if isinstance(candidate, str) and candidate.strip()]

    if not usable:
        return retrieve("", limit=limit, categories=selected)

    merged = _empty_results()
    already_seen: Dict[str, set] = {bucket: set() for bucket in MEMORY_BUCKETS}

    for query_text in usable:
        per_query = retrieve(query_text, limit=limit, categories=selected)
        for bucket in selected:
            for entry in per_query.get(bucket, []):
                reference = entry.get("memory_reference")
                if reference not in already_seen[bucket]:
                    already_seen[bucket].add(reference)
                    merged[bucket].append(entry)

    for bucket in selected:
        merged[bucket].sort(key=lambda entry: entry["score"], reverse=True)
        merged[bucket] = merged[bucket][:limit]
    return merged
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict, List
|
|
4
|
+
|
|
5
|
+
from brain.runtime import state_store
|
|
6
|
+
from brain.runtime.instrumentation import emit_event
|
|
7
|
+
from brain.runtime.memory import embedding_engine, store, retrieval, freshness
|
|
8
|
+
|
|
9
|
+
# Shared JSONL event log used by the instrumentation emit_event calls below.
LOGFILE = state_store.reports_dir() / "brain_memory.log.jsonl"
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _cosine_similarity(a: List[float], b: List[float]) -> float:
|
|
13
|
+
if not a or not b:
|
|
14
|
+
return 0.0
|
|
15
|
+
size = min(len(a), len(b))
|
|
16
|
+
if size == 0:
|
|
17
|
+
return 0.0
|
|
18
|
+
dot = sum(x * y for x, y in zip(a[:size], b[:size]))
|
|
19
|
+
mag_a = sum(x * x for x in a[:size]) ** 0.5
|
|
20
|
+
mag_b = sum(x * x for x in b[:size]) ** 0.5
|
|
21
|
+
if mag_a == 0 or mag_b == 0:
|
|
22
|
+
return 0.0
|
|
23
|
+
return dot / (mag_a * mag_b)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def semantic_search(query: str, limit: int = 5) -> List[Dict[str, Any]]:
    """Rank every stored vector embedding against *query* and return the top hits.

    Each row's score is the sum of cosine similarity to the query embedding,
    the keyword-retrieval ("reinforcement") score for the same memory
    reference, and a freshness advisory score when one exists.

    Args:
        query: Free-text search string.
        limit: Maximum number of results to return.

    Returns:
        Score-descending list of result dicts with memory_reference, score,
        similarity, freshness, reinforcement_weight and promotion_confidence.
    """
    import json  # hoisted from the row loop, where it was __import__("json") per row

    emit_event(LOGFILE, "brain_semantic_search_start", status="ok")
    query_embedding = embedding_engine.generate_embedding(query)
    conn = store.connect()
    try:
        rows = conn.execute(
            "SELECT id, source_type, source_id, embedding FROM vector_embeddings"
        ).fetchall()
    finally:
        # Close even when the SELECT raises (the original leaked the handle).
        conn.close()

    reinforcement = retrieval.retrieve(query, limit=limit * 2)
    freshness_info = {item["memory_id"]: item for item in freshness.scan_freshness(limit=limit).get("advisories", [])}

    results: List[Dict[str, Any]] = []
    for row in rows:
        try:
            embedding = [float(x) for x in json.loads(row["embedding"])]
        except Exception:
            # Skip malformed or corrupt embedding payloads.
            continue
        similarity = _cosine_similarity(query_embedding or [], embedding)
        memory_ref = f"{row['source_type']}:{row['source_id']}"
        # Last matching retrieval hit wins (no break, mirroring the original).
        reinforcement_weight = 0.0
        for bucket in reinforcement.values():
            for item in bucket:
                if item.get("memory_reference") == memory_ref:
                    reinforcement_weight = item.get("score", 0.0)
        # Freshness advisories are keyed by numeric memory id; non-numeric
        # source ids get no freshness boost.
        freshness_score = (
            freshness_info.get(int(row["source_id"]), {}).get("freshness_score", 0.0)
            if str(row["source_id"]).isdigit()
            else 0.0
        )
        combined = similarity + reinforcement_weight + freshness_score
        results.append(
            {
                "memory_reference": memory_ref,
                "score": round(combined, 6),
                "similarity": round(similarity, 6),
                "freshness": freshness_score,
                "reinforcement_weight": reinforcement_weight,
                "promotion_confidence": 0.0,  # not computed in this path
            }
        )

    results.sort(key=lambda item: item["score"], reverse=True)
    emit_event(LOGFILE, "brain_semantic_search_complete", status="ok", result_count=len(results[:limit]))
    return results[:limit]
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import sqlite3
|
|
4
|
+
import time
|
|
5
|
+
from typing import Dict, List
|
|
6
|
+
|
|
7
|
+
from brain.runtime import state_store
|
|
8
|
+
from brain.runtime.instrumentation import emit_event
|
|
9
|
+
from brain.runtime.memory import person_memory
|
|
10
|
+
|
|
11
|
+
# Shared JSONL event log for memory instrumentation events.
LOGFILE = state_store.reports_dir() / "brain_memory.log.jsonl"

# Closed set of accepted sentiment labels; update_person_sentiment_baseline
# coerces anything outside this set to "neutral" before persisting.
SENTIMENTS = {"positive", "neutral", "negative", "frustrated", "urgent", "excited"}
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def classify_sentiment(text: str) -> str:
    """Classify *text* into one sentiment label via keyword heuristics.

    Rules are evaluated in priority order (urgent > frustrated > excited >
    negative > positive); the first keyword hit wins, and text matching
    nothing — including None/empty input — is "neutral".
    """
    lowered = (text or "").lower()
    rules = (
        ("urgent", ("urgent",)),
        ("frustrated", ("frustrated", "angry")),
        ("excited", ("excited", "great")),
        ("negative", ("bad",)),
        ("positive", ("good",)),
    )
    for label, keywords in rules:
        if any(keyword in lowered for keyword in keywords):
            return label
    return "neutral"
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _connect() -> sqlite3.Connection:
    """Open the sentiment-history database, creating file and table on demand.

    Returns a connection with sqlite3.Row as the row factory; callers are
    responsible for closing it.
    """
    db_file = state_store.data_dir() / "sentiment_memory.db"
    db_file.parent.mkdir(parents=True, exist_ok=True)
    connection = sqlite3.connect(str(db_file))
    connection.row_factory = sqlite3.Row
    connection.execute(
        """
        CREATE TABLE IF NOT EXISTS sentiment_memory (
            person_id TEXT,
            sentiment TEXT,
            timestamp TEXT
        )
        """
    )
    return connection
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def update_person_sentiment_baseline(person_id: str, sentiment: str) -> None:
    """Persist one sentiment observation for *person_id*.

    Labels outside SENTIMENTS are coerced to "neutral" before insertion, and
    an instrumentation event is emitted after the write commits.
    """
    label = sentiment if sentiment in SENTIMENTS else "neutral"
    connection = _connect()
    connection.execute(
        "INSERT INTO sentiment_memory (person_id, sentiment, timestamp) VALUES (?, ?, ?)",
        (person_id, label, time.strftime("%Y-%m-%d %H:%M:%S")),
    )
    connection.commit()
    connection.close()
    emit_event(LOGFILE, "brain_person_sentiment_updated", status="ok", person_id=person_id)
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def list_sentiment(person_id: str, limit: int = 10) -> List[Dict[str, str]]:
    """Return up to *limit* of the most recent sentiment rows for *person_id*."""
    connection = _connect()
    cursor = connection.execute(
        "SELECT sentiment, timestamp FROM sentiment_memory WHERE person_id=? ORDER BY timestamp DESC LIMIT ?",
        (person_id, limit),
    )
    history = [dict(record) for record in cursor.fetchall()]
    connection.close()
    return history
|
|
@@ -0,0 +1,400 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import sqlite3
|
|
4
|
+
import queue
|
|
5
|
+
import threading
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from brain.runtime import state_store
|
|
8
|
+
|
|
9
|
+
# Stamp written into every row's schema_version column.
SCHEMA_VERSION = "v1"

# Idempotent DDL for the unified memory store; init_db() runs this via
# executescript(). Comments cannot go inside the literal without changing the
# executed SQL text, so the table groupings are summarised here instead:
#   - telemetry:         memory_events
#   - content buckets:   environment_cognition, knowledge, runbooks, lessons,
#                        reflections, tasks, directives, memory_index
#                        (all share the source/confidence/metadata/content shape)
#   - lifecycle:         experiences, candidates, promotions, demotions,
#                        cold_storage
#   - vectors/artifacts: vector_embeddings, artifacts
#   - conversation:      conversation_turns, conversation_checkpoints,
#                        conversation_state (+ supporting indexes)
SCHEMA_SQL = """
CREATE TABLE IF NOT EXISTS memory_events (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    event_type TEXT NOT NULL,
    source TEXT,
    details_json TEXT DEFAULT '{}',
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS environment_cognition (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    source TEXT,
    confidence REAL NOT NULL DEFAULT 1.0,
    metadata_json TEXT DEFAULT '{}',
    content TEXT NOT NULL,
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS experiences (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    task_id TEXT,
    outcome TEXT,
    reward_score REAL,
    confidence REAL NOT NULL DEFAULT 1.0,
    memory_reference TEXT,
    experience_type TEXT,
    source_module TEXT,
    metadata_json TEXT DEFAULT '{}',
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS directives (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    source TEXT,
    confidence REAL NOT NULL DEFAULT 1.0,
    metadata_json TEXT DEFAULT '{}',
    content TEXT NOT NULL,
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS candidates (
    candidate_id TEXT PRIMARY KEY,
    source_event_id INTEGER,
    distilled_summary TEXT,
    verification_points TEXT,
    confidence_score REAL,
    status TEXT NOT NULL DEFAULT 'pending',
    verification_status TEXT NOT NULL DEFAULT 'unverified',
    metadata_json TEXT DEFAULT '{}',
    created_at TEXT NOT NULL DEFAULT (datetime('now')),
    updated_at TEXT NOT NULL DEFAULT (datetime('now')),
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS promotions (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    candidate_id TEXT,
    source TEXT,
    confidence REAL NOT NULL DEFAULT 1.0,
    status TEXT NOT NULL DEFAULT 'promoted',
    decision_reason TEXT,
    metadata_json TEXT DEFAULT '{}',
    content TEXT NOT NULL,
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS demotions (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    memory_reference TEXT NOT NULL,
    previous_confidence REAL,
    new_confidence REAL,
    reason TEXT,
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS cold_storage (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    archived_at TEXT NOT NULL DEFAULT (datetime('now')),
    source_table TEXT NOT NULL,
    source_id INTEGER NOT NULL,
    content TEXT NOT NULL,
    metadata_json TEXT DEFAULT '{}',
    reason TEXT,
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS knowledge (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    source TEXT,
    confidence REAL NOT NULL DEFAULT 1.0,
    metadata_json TEXT DEFAULT '{}',
    content TEXT NOT NULL,
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS runbooks (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    source TEXT,
    confidence REAL NOT NULL DEFAULT 1.0,
    metadata_json TEXT DEFAULT '{}',
    content TEXT NOT NULL,
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS lessons (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    source TEXT,
    confidence REAL NOT NULL DEFAULT 1.0,
    metadata_json TEXT DEFAULT '{}',
    content TEXT NOT NULL,
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS reflections (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    source TEXT,
    confidence REAL NOT NULL DEFAULT 1.0,
    metadata_json TEXT DEFAULT '{}',
    content TEXT NOT NULL,
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS tasks (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    source TEXT,
    confidence REAL NOT NULL DEFAULT 1.0,
    metadata_json TEXT DEFAULT '{}',
    content TEXT NOT NULL,
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS memory_index (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    source TEXT,
    confidence REAL NOT NULL DEFAULT 1.0,
    metadata_json TEXT DEFAULT '{}',
    content TEXT NOT NULL,
    schema_version TEXT NOT NULL
);

CREATE TABLE IF NOT EXISTS vector_embeddings (
    id TEXT PRIMARY KEY,
    source_type TEXT NOT NULL,
    source_id TEXT NOT NULL,
    embedding TEXT NOT NULL,
    created_at TEXT NOT NULL DEFAULT (datetime('now'))
);

CREATE TABLE IF NOT EXISTS artifacts (
    artifact_id TEXT PRIMARY KEY,
    artifact_type TEXT,
    source_path TEXT,
    content_hash TEXT,
    metadata TEXT,
    created_at TIMESTAMP DEFAULT (datetime('now'))
);

CREATE TABLE IF NOT EXISTS conversation_turns (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    conversation_id TEXT,
    session_id TEXT,
    thread_id TEXT,
    message_id TEXT,
    role TEXT NOT NULL,
    content TEXT NOT NULL,
    transcript_path TEXT,
    transcript_offset INTEGER,
    transcript_end_offset INTEGER,
    source TEXT,
    metadata_json TEXT DEFAULT '{}',
    schema_version TEXT NOT NULL
);

CREATE INDEX IF NOT EXISTS idx_conversation_turns_conversation ON conversation_turns(conversation_id, id DESC);
CREATE INDEX IF NOT EXISTS idx_conversation_turns_session ON conversation_turns(session_id, id DESC);
CREATE INDEX IF NOT EXISTS idx_conversation_turns_thread ON conversation_turns(thread_id, id DESC);
CREATE INDEX IF NOT EXISTS idx_conversation_turns_message ON conversation_turns(message_id);

CREATE TABLE IF NOT EXISTS conversation_checkpoints (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL DEFAULT (datetime('now')),
    conversation_id TEXT,
    session_id TEXT,
    thread_id TEXT,
    turn_start_id INTEGER,
    turn_end_id INTEGER,
    checkpoint_kind TEXT NOT NULL DEFAULT 'manual',
    summary TEXT NOT NULL,
    latest_user_ask TEXT,
    last_assistant_commitment TEXT,
    open_loops_json TEXT DEFAULT '[]',
    pending_actions_json TEXT DEFAULT '[]',
    parent_checkpoint_id INTEGER,
    root_checkpoint_id INTEGER,
    depth INTEGER NOT NULL DEFAULT 0,
    metadata_json TEXT DEFAULT '{}',
    schema_version TEXT NOT NULL
);

CREATE INDEX IF NOT EXISTS idx_conversation_checkpoints_conversation ON conversation_checkpoints(conversation_id, id DESC);
CREATE INDEX IF NOT EXISTS idx_conversation_checkpoints_session ON conversation_checkpoints(session_id, id DESC);
CREATE INDEX IF NOT EXISTS idx_conversation_checkpoints_thread ON conversation_checkpoints(thread_id, id DESC);

CREATE TABLE IF NOT EXISTS conversation_state (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    scope_type TEXT NOT NULL,
    scope_id TEXT NOT NULL,
    conversation_id TEXT,
    session_id TEXT,
    thread_id TEXT,
    latest_user_turn_id INTEGER,
    latest_assistant_turn_id INTEGER,
    latest_user_ask TEXT,
    last_assistant_commitment TEXT,
    open_loops_json TEXT DEFAULT '[]',
    pending_actions_json TEXT DEFAULT '[]',
    unresolved_state_json TEXT DEFAULT '[]',
    latest_checkpoint_id INTEGER,
    metadata_json TEXT DEFAULT '{}',
    updated_at TEXT NOT NULL DEFAULT (datetime('now')),
    schema_version TEXT NOT NULL,
    UNIQUE(scope_type, scope_id)
);

CREATE INDEX IF NOT EXISTS idx_conversation_state_conversation ON conversation_state(conversation_id);
CREATE INDEX IF NOT EXISTS idx_conversation_state_session ON conversation_state(session_id);
CREATE INDEX IF NOT EXISTS idx_conversation_state_thread ON conversation_state(thread_id);
"""
|
|
252
|
+
|
|
253
|
+
# Single-writer serialization: submit_write() funnels all DB writes through
# one daemon thread so concurrent callers do not contend on SQLite's write
# lock directly. Each queue item is (callable, done-event, result-container).
_WRITE_QUEUE: "queue.Queue[tuple]" = queue.Queue()
# Guards one-time startup of the writer thread in _ensure_write_worker().
_WRITE_LOCK = threading.Lock()
# Flipped to True once the daemon writer thread has been spawned.
_WRITE_WORKER_STARTED = False
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
def is_lock_error(exc: BaseException) -> bool:
    """Return True when *exc* is a SQLite lock/busy OperationalError.

    Matches the standard "database is locked" / "database table is locked" /
    "busy" message fragments, case-insensitively.
    """
    if not isinstance(exc, sqlite3.OperationalError):
        return False
    text = str(exc).lower()
    return any(
        marker in text
        for marker in ("database is locked", "database table is locked", "busy")
    )
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
def _write_worker() -> None:
    """Drain the write queue forever, executing one submitted callable at a time.

    Results and exceptions are stashed in the per-item container dict and the
    submitter's event is signalled either way.
    """
    while True:
        task, done_event, result_box = _WRITE_QUEUE.get()
        try:
            result_box["result"] = task()
        except Exception as exc:  # pragma: no cover
            result_box["error"] = exc
        finally:
            done_event.set()
            _WRITE_QUEUE.task_done()
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
def _ensure_write_worker() -> None:
    """Spawn the daemon writer thread exactly once (safe under concurrency)."""
    global _WRITE_WORKER_STARTED
    with _WRITE_LOCK:
        if not _WRITE_WORKER_STARTED:
            worker = threading.Thread(target=_write_worker, daemon=True)
            worker.start()
            _WRITE_WORKER_STARTED = True
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
def submit_write(fn, timeout: float = 30.0):
    """Execute *fn* on the serialized writer thread and return its result.

    Raises TimeoutError when the worker does not complete within *timeout*
    seconds, and re-raises any exception *fn* itself raised.
    """
    _ensure_write_worker()
    done = threading.Event()
    outcome: dict = {}
    _WRITE_QUEUE.put((fn, done, outcome))
    finished = done.wait(timeout=timeout)
    if not finished:
        raise TimeoutError("write queue timeout")
    if "error" in outcome:
        raise outcome["error"]
    return outcome.get("result")
|
|
299
|
+
|
|
300
|
+
|
|
301
|
+
def db_path() -> Path:
    """Filesystem location of the memory SQLite database (delegates to state_store)."""
    return state_store.memory_db_path()


# Process-wide flag: set once init_db() has run so connect() skips re-running
# the schema script on every call.
_SCHEMA_READY = False
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
def connect(*, ensure_schema: bool = True) -> sqlite3.Connection:
    """Open a connection to the memory database with tuned pragmas.

    When *ensure_schema* is true, the schema is initialised once per process
    (tracked by the module-level _SCHEMA_READY flag) before connecting.
    Returns a connection with sqlite3.Row as the row factory; the caller is
    responsible for closing it.
    """
    global _SCHEMA_READY
    if ensure_schema and not _SCHEMA_READY:
        init_db()
        _SCHEMA_READY = True
    path = db_path()
    path.parent.mkdir(parents=True, exist_ok=True)
    conn = sqlite3.connect(str(path), timeout=30)
    conn.row_factory = sqlite3.Row
    # Concurrency/performance tuning: WAL permits concurrent readers with a
    # single writer; busy_timeout retries instead of failing fast on locks.
    conn.execute("PRAGMA journal_mode=WAL")
    conn.execute("PRAGMA busy_timeout=15000")
    conn.execute("PRAGMA synchronous=NORMAL")
    conn.execute("PRAGMA temp_store=MEMORY")
    conn.execute("PRAGMA cache_size=-20000")  # negative value = size in KiB (~20 MB)
    return conn
|
|
324
|
+
|
|
325
|
+
|
|
326
|
+
def _ensure_column(conn: sqlite3.Connection, table: str, column: str, coltype: str) -> None:
|
|
327
|
+
cols = [row[1] for row in conn.execute(f"PRAGMA table_info({table})").fetchall()]
|
|
328
|
+
if column not in cols:
|
|
329
|
+
conn.execute(f"ALTER TABLE {table} ADD COLUMN {column} {coltype}")
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
def init_db() -> None:
    """Create the memory database schema and apply idempotent migrations.

    Safe to run repeatedly: column additions go through _ensure_column,
    index creation uses IF NOT EXISTS, and the timestamp backfills only
    touch NULL rows. The connection is now closed in a ``finally`` block,
    fixing a leak where an exception during ``executescript`` or any
    migration statement left the sqlite handle open.
    """
    # Idempotent column migrations: (table, column, declared type).
    # Order only matters within a table; backfills/indexes below run after
    # every column they reference has been ensured.
    column_migrations = (
        ("experiences", "experience_type", "TEXT"),
        ("experiences", "source_module", "TEXT"),
        ("experiences", "metadata_json", "TEXT DEFAULT '{}'"),
        ("candidates", "status", "TEXT NOT NULL DEFAULT 'pending'"),
        ("candidates", "verification_status", "TEXT NOT NULL DEFAULT 'unverified'"),
        ("candidates", "created_at", "TEXT"),
        ("candidates", "updated_at", "TEXT"),
        ("promotions", "candidate_id", "TEXT"),
        ("promotions", "status", "TEXT NOT NULL DEFAULT 'promoted'"),
        ("promotions", "decision_reason", "TEXT"),
        ("conversation_turns", "conversation_id", "TEXT"),
        ("conversation_turns", "session_id", "TEXT"),
        ("conversation_turns", "thread_id", "TEXT"),
        ("conversation_turns", "message_id", "TEXT"),
        ("conversation_turns", "role", "TEXT NOT NULL DEFAULT 'unknown'"),
        ("conversation_turns", "content", "TEXT"),
        ("conversation_turns", "transcript_path", "TEXT"),
        ("conversation_turns", "transcript_offset", "INTEGER"),
        ("conversation_turns", "transcript_end_offset", "INTEGER"),
        ("conversation_turns", "source", "TEXT"),
        ("conversation_turns", "metadata_json", "TEXT DEFAULT '{}'"),
        ("conversation_checkpoints", "conversation_id", "TEXT"),
        ("conversation_checkpoints", "session_id", "TEXT"),
        ("conversation_checkpoints", "thread_id", "TEXT"),
        ("conversation_checkpoints", "turn_start_id", "INTEGER"),
        ("conversation_checkpoints", "turn_end_id", "INTEGER"),
        ("conversation_checkpoints", "checkpoint_kind", "TEXT NOT NULL DEFAULT 'manual'"),
        ("conversation_checkpoints", "summary", "TEXT NOT NULL DEFAULT ''"),
        ("conversation_checkpoints", "latest_user_ask", "TEXT"),
        ("conversation_checkpoints", "last_assistant_commitment", "TEXT"),
        ("conversation_checkpoints", "open_loops_json", "TEXT DEFAULT '[]'"),
        ("conversation_checkpoints", "pending_actions_json", "TEXT DEFAULT '[]'"),
        ("conversation_checkpoints", "parent_checkpoint_id", "INTEGER"),
        ("conversation_checkpoints", "root_checkpoint_id", "INTEGER"),
        ("conversation_checkpoints", "depth", "INTEGER NOT NULL DEFAULT 0"),
        ("conversation_checkpoints", "metadata_json", "TEXT DEFAULT '{}'"),
        ("conversation_state", "scope_type", "TEXT"),
        ("conversation_state", "scope_id", "TEXT"),
        ("conversation_state", "conversation_id", "TEXT"),
        ("conversation_state", "session_id", "TEXT"),
        ("conversation_state", "thread_id", "TEXT"),
        ("conversation_state", "latest_user_turn_id", "INTEGER"),
        ("conversation_state", "latest_assistant_turn_id", "INTEGER"),
        ("conversation_state", "latest_user_ask", "TEXT"),
        ("conversation_state", "last_assistant_commitment", "TEXT"),
        ("conversation_state", "open_loops_json", "TEXT DEFAULT '[]'"),
        ("conversation_state", "pending_actions_json", "TEXT DEFAULT '[]'"),
        ("conversation_state", "unresolved_state_json", "TEXT DEFAULT '[]'"),
        ("conversation_state", "latest_checkpoint_id", "INTEGER"),
        ("conversation_state", "metadata_json", "TEXT DEFAULT '{}'"),
        ("conversation_state", "updated_at", "TEXT"),
    )
    follow_up_statements = (
        # Backfill timestamps for candidate rows that predate those columns.
        "UPDATE candidates SET created_at=datetime('now') WHERE created_at IS NULL",
        "UPDATE candidates SET updated_at=datetime('now') WHERE updated_at IS NULL",
        # Lookup indexes; IF NOT EXISTS keeps these idempotent.
        "CREATE INDEX IF NOT EXISTS idx_conversation_turns_conversation ON conversation_turns(conversation_id, id DESC)",
        "CREATE INDEX IF NOT EXISTS idx_conversation_turns_session ON conversation_turns(session_id, id DESC)",
        "CREATE INDEX IF NOT EXISTS idx_conversation_turns_thread ON conversation_turns(thread_id, id DESC)",
        "CREATE INDEX IF NOT EXISTS idx_conversation_turns_message ON conversation_turns(message_id)",
        "CREATE INDEX IF NOT EXISTS idx_conversation_checkpoints_conversation ON conversation_checkpoints(conversation_id, id DESC)",
        "CREATE INDEX IF NOT EXISTS idx_conversation_checkpoints_session ON conversation_checkpoints(session_id, id DESC)",
        "CREATE INDEX IF NOT EXISTS idx_conversation_checkpoints_thread ON conversation_checkpoints(thread_id, id DESC)",
        "CREATE UNIQUE INDEX IF NOT EXISTS idx_conversation_state_scope ON conversation_state(scope_type, scope_id)",
        "CREATE INDEX IF NOT EXISTS idx_conversation_state_conversation ON conversation_state(conversation_id)",
        "CREATE INDEX IF NOT EXISTS idx_conversation_state_session ON conversation_state(session_id)",
        "CREATE INDEX IF NOT EXISTS idx_conversation_state_thread ON conversation_state(thread_id)",
    )
    conn = connect(ensure_schema=False)
    try:
        conn.executescript(SCHEMA_SQL)
        for table, column, coltype in column_migrations:
            _ensure_column(conn, table, column, coltype)
        for sql in follow_up_statements:
            conn.execute(sql)
        conn.commit()
    finally:
        conn.close()