tribalmemory 0.1.1__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tribalmemory/cli.py +147 -4
- tribalmemory/interfaces.py +44 -0
- tribalmemory/mcp/server.py +160 -14
- tribalmemory/server/app.py +53 -2
- tribalmemory/server/config.py +41 -0
- tribalmemory/server/models.py +65 -0
- tribalmemory/server/routes.py +68 -0
- tribalmemory/services/fts_store.py +255 -0
- tribalmemory/services/memory.py +193 -33
- tribalmemory/services/reranker.py +267 -0
- tribalmemory/services/session_store.py +412 -0
- tribalmemory/services/vector_store.py +86 -1
- {tribalmemory-0.1.1.dist-info → tribalmemory-0.2.0.dist-info}/METADATA +1 -1
- {tribalmemory-0.1.1.dist-info → tribalmemory-0.2.0.dist-info}/RECORD +18 -15
- {tribalmemory-0.1.1.dist-info → tribalmemory-0.2.0.dist-info}/WHEEL +0 -0
- {tribalmemory-0.1.1.dist-info → tribalmemory-0.2.0.dist-info}/entry_points.txt +0 -0
- {tribalmemory-0.1.1.dist-info → tribalmemory-0.2.0.dist-info}/licenses/LICENSE +0 -0
- {tribalmemory-0.1.1.dist-info → tribalmemory-0.2.0.dist-info}/top_level.txt +0 -0
tribalmemory/server/models.py
CHANGED
|
@@ -204,3 +204,68 @@ class ImportResponse(BaseModel):
|
|
|
204
204
|
duration_ms: float = 0.0
|
|
205
205
|
error_details: list[str] = Field(default_factory=list)
|
|
206
206
|
error: Optional[str] = None
|
|
207
|
+
|
|
208
|
+
# =============================================================================
|
|
209
|
+
# Session Indexing Models (Issue #38)
|
|
210
|
+
# =============================================================================
|
|
211
|
+
|
|
212
|
+
class SessionMessageRequest(BaseModel):
    """A single message in a session transcript."""

    # These fields are converted 1:1 into services.session_store.SessionMessage
    # by the ingest route before indexing.
    role: str = Field(..., description="Message role (user, assistant, system)")
    content: str = Field(..., description="Message content")
    timestamp: datetime = Field(..., description="When the message was sent")
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
class SessionIngestRequest(BaseModel):
    """Request to ingest session transcript."""

    session_id: str = Field(..., description="Unique session identifier")
    messages: list[SessionMessageRequest] = Field(
        ..., description="Conversation messages to index"
    )
    # When omitted, the ingest route substitutes the server's own
    # instance_id dependency.
    instance_id: Optional[str] = Field(
        default=None,
        description="Override instance ID (defaults to server's instance_id)"
    )
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
class SessionIngestResponse(BaseModel):
    """Response from session ingestion."""

    # Mirrors the result dict returned by SessionStore.ingest(); the route
    # falls back to these defaults when a key is absent.
    success: bool
    chunks_created: int = 0
    messages_processed: int = 0
    # Failure description; set when ingestion did not succeed.
    error: Optional[str] = None
|
|
237
|
+
|
|
238
|
+
|
|
239
|
+
class SessionSearchRequest(BaseModel):
    """Request to search session transcripts."""

    query: str = Field(..., description="Natural language search query")
    session_id: Optional[str] = Field(
        default=None,
        description="Filter to specific session (optional)"
    )
    limit: int = Field(default=5, ge=1, le=50, description="Maximum results")
    # Chunks scoring below this threshold are excluded from results.
    min_relevance: float = Field(
        default=0.0,
        ge=0.0,
        le=1.0,
        description="Minimum similarity score"
    )
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
class SessionChunkResponse(BaseModel):
    """A session transcript chunk result."""

    # Populated directly from SessionStore.search() result dicts via
    # SessionChunkResponse(**r) in the search route.
    chunk_id: str
    session_id: str
    instance_id: str
    content: str
    similarity_score: float
    # Time span covered by the messages that make up this chunk.
    start_time: datetime
    end_time: datetime
    # Presumably the chunk's ordinal within the session — confirm in
    # services.session_store.
    chunk_index: int
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
class SessionSearchResponse(BaseModel):
    """Response from session search."""

    results: list[SessionChunkResponse]
    # The original query is echoed back, including on error responses.
    query: str
    # Set when the search failed; results is then empty.
    error: Optional[str] = None
|
tribalmemory/server/routes.py
CHANGED
|
@@ -7,6 +7,7 @@ from fastapi import APIRouter, HTTPException, Depends
|
|
|
7
7
|
|
|
8
8
|
from ..interfaces import MemorySource, MemoryEntry
|
|
9
9
|
from ..services import TribalMemoryService
|
|
10
|
+
from ..services.session_store import SessionStore, SessionMessage
|
|
10
11
|
from .models import (
|
|
11
12
|
RememberRequest,
|
|
12
13
|
RecallRequest,
|
|
@@ -24,6 +25,11 @@ from .models import (
|
|
|
24
25
|
ExportResponse,
|
|
25
26
|
ImportRequest,
|
|
26
27
|
ImportResponse,
|
|
28
|
+
SessionIngestRequest,
|
|
29
|
+
SessionIngestResponse,
|
|
30
|
+
SessionSearchRequest,
|
|
31
|
+
SessionSearchResponse,
|
|
32
|
+
SessionChunkResponse,
|
|
27
33
|
)
|
|
28
34
|
|
|
29
35
|
router = APIRouter(prefix="/v1", tags=["memory"])
|
|
@@ -40,6 +46,17 @@ def get_memory_service() -> TribalMemoryService:
|
|
|
40
46
|
return _memory_service
|
|
41
47
|
|
|
42
48
|
|
|
49
|
+
def get_session_store() -> SessionStore:
    """FastAPI dependency resolving the shared SessionStore.

    The store is installed on the app module during startup; requests
    arriving before that receive a 503.
    """
    from .app import _session_store

    store = _session_store
    if store is None:
        raise HTTPException(status_code=503, detail="Session store not initialized")
    return store
|
|
58
|
+
|
|
59
|
+
|
|
43
60
|
def get_instance_id() -> str:
|
|
44
61
|
"""Get the current instance ID."""
|
|
45
62
|
from .app import _instance_id
|
|
@@ -376,3 +393,54 @@ async def shutdown() -> ShutdownResponse:
|
|
|
376
393
|
0.5, lambda: os.kill(os.getpid(), signal.SIGTERM)
|
|
377
394
|
)
|
|
378
395
|
return ShutdownResponse(status="shutting_down")
|
|
396
|
+
|
|
397
|
+
# =============================================================================
|
|
398
|
+
# Session Indexing Routes (Issue #38)
|
|
399
|
+
# =============================================================================
|
|
400
|
+
|
|
401
|
+
@router.post("/sessions/ingest", response_model=SessionIngestResponse)
async def ingest_session(
    request: SessionIngestRequest,
    store: SessionStore = Depends(get_session_store),
    instance_id: str = Depends(get_instance_id),
) -> SessionIngestResponse:
    """Ingest a session transcript for indexing.

    Converts the request messages into SessionMessage objects, hands
    them to the store, and reports the outcome. Failures are returned
    in the response body (success=False) rather than as HTTP errors.
    """
    try:
        transcript = [
            SessionMessage(role=m.role, content=m.content, timestamp=m.timestamp)
            for m in request.messages
        ]

        # An explicit instance_id in the request overrides the server's own.
        outcome = await store.ingest(
            session_id=request.session_id,
            messages=transcript,
            instance_id=request.instance_id or instance_id,
        )

        return SessionIngestResponse(
            success=outcome.get("success", False),
            chunks_created=outcome.get("chunks_created", 0),
            messages_processed=outcome.get("messages_processed", 0),
            error=outcome.get("error"),
        )
    except Exception as e:
        return SessionIngestResponse(success=False, error=str(e))
|
|
428
|
+
|
|
429
|
+
|
|
430
|
+
@router.get("/sessions/search", response_model=SessionSearchResponse)
async def search_sessions(
    query: str,
    session_id: Optional[str] = None,
    limit: int = 5,
    min_relevance: float = 0.0,
    store: SessionStore = Depends(get_session_store),
) -> SessionSearchResponse:
    """Search session transcripts by semantic similarity.

    Args:
        query: Natural language search query.
        session_id: Optional filter restricting results to one session.
        limit: Maximum number of results; clamped to 1..50.
        min_relevance: Minimum similarity score; clamped to 0.0..1.0.
        store: Injected session store (503 until initialized).

    Returns:
        SessionSearchResponse with matching chunks; on failure, an
        empty result list with the error message set.
    """
    # Plain query parameters bypass the ge/le bounds declared on
    # SessionSearchRequest for the POST path — enforce the same ranges
    # here so both entry points behave consistently.
    limit = max(1, min(limit, 50))
    min_relevance = max(0.0, min(min_relevance, 1.0))
    try:
        results = await store.search(query, session_id, limit, min_relevance)
        return SessionSearchResponse(
            results=[SessionChunkResponse(**r) for r in results],
            query=query,
        )
    except Exception as e:
        return SessionSearchResponse(results=[], query=query, error=str(e))
|
|
@@ -0,0 +1,255 @@
|
|
|
1
|
+
"""SQLite FTS5 full-text search store for BM25 hybrid search.
|
|
2
|
+
|
|
3
|
+
Provides keyword-based BM25 search alongside LanceDB vector search.
|
|
4
|
+
FTS5 excels at exact-token queries (error strings, config names, IDs)
|
|
5
|
+
while vector search handles semantic/fuzzy queries.
|
|
6
|
+
|
|
7
|
+
The two are combined via hybrid scoring:
|
|
8
|
+
finalScore = vectorWeight * vectorScore + textWeight * bm25Score
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import logging
|
|
12
|
+
import sqlite3
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from typing import Optional
|
|
15
|
+
|
|
16
|
+
logger = logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class FTSStore:
    """SQLite FTS5 store for keyword (BM25) search over memories.

    Maintains an FTS5 virtual table alongside the main vector store and
    supports index (upsert), search, delete, and count operations.

    Note: every method is synchronous. For the document counts handled
    here (<100k) SQLite calls are typically sub-millisecond; should slow
    storage make latency a problem, wrap calls in asyncio.to_thread().
    """

    def __init__(self, db_path: str):
        """Open the FTS store, creating the database file if missing.

        Args:
            db_path: Path to the SQLite database file. Created if missing.
        """
        self.db_path = db_path
        Path(db_path).parent.mkdir(parents=True, exist_ok=True)
        self._conn: Optional[sqlite3.Connection] = None
        self._fts_available: Optional[bool] = None
        self._ensure_initialized()

    def _get_conn(self) -> sqlite3.Connection:
        # Lazily open one shared connection with name-addressable rows.
        # NOTE(review): sqlite3 connections are bound to their creating
        # thread by default — confirm all callers stay on one thread.
        conn = self._conn
        if conn is None:
            conn = sqlite3.connect(self.db_path)
            conn.row_factory = sqlite3.Row
            self._conn = conn
        return conn

    def _ensure_initialized(self) -> None:
        """Create the FTS5 table and the id-tracking table if absent."""
        conn = self._get_conn()
        if not self.is_available():
            logger.warning("FTS5 not available in this SQLite build")
            return
        conn.execute(
            "CREATE VIRTUAL TABLE IF NOT EXISTS memories_fts "
            "USING fts5(id, content, tags, tokenize='porter')"
        )
        # Side table recording which IDs are indexed (for upsert/delete).
        conn.execute("CREATE TABLE IF NOT EXISTS fts_ids (id TEXT PRIMARY KEY)")
        conn.commit()

    def is_available(self) -> bool:
        """Check if FTS5 is available in the current SQLite build."""
        if self._fts_available is None:
            # Probe by creating (then dropping) a throwaway FTS5 table.
            try:
                conn = self._get_conn()
                conn.execute(
                    "CREATE VIRTUAL TABLE IF NOT EXISTS _fts5_test "
                    "USING fts5(test_col)"
                )
                conn.execute("DROP TABLE IF EXISTS _fts5_test")
                conn.commit()
            except sqlite3.OperationalError:
                self._fts_available = False
            else:
                self._fts_available = True
        return self._fts_available

    def index(self, memory_id: str, content: str, tags: list[str]) -> None:
        """Index a memory for full-text search (upsert on memory_id)."""
        if not self.is_available():
            return
        conn = self._get_conn()
        # Emulate upsert: drop any previously indexed row for this id
        # before inserting the new one.
        previously_indexed = conn.execute(
            "SELECT id FROM fts_ids WHERE id = ?", (memory_id,)
        ).fetchone()
        if previously_indexed:
            conn.execute("DELETE FROM memories_fts WHERE id = ?", (memory_id,))

        conn.execute(
            "INSERT INTO memories_fts (id, content, tags) VALUES (?, ?, ?)",
            (memory_id, content, " ".join(tags)),
        )
        conn.execute(
            "INSERT OR REPLACE INTO fts_ids (id) VALUES (?)", (memory_id,)
        )
        conn.commit()

    def search(self, query: str, limit: int = 10) -> list[dict]:
        """Search memories using BM25.

        Returns:
            List of {"id", "rank"} dicts ordered best-first. FTS5 BM25
            ranks are negative; more negative = better match.
        """
        if not self.is_available():
            return []
        sql = """
            SELECT id, rank
            FROM memories_fts
            WHERE memories_fts MATCH ?
            ORDER BY rank
            LIMIT ?
            """
        try:
            rows = self._get_conn().execute(sql, (query, limit)).fetchall()
        except sqlite3.OperationalError as e:
            # Malformed FTS query (unbalanced quotes, etc.)
            logger.warning(f"FTS5 search error: {e}")
            return []
        return [{"id": r["id"], "rank": r["rank"]} for r in rows]

    def delete(self, memory_id: str) -> None:
        """Remove a memory from the FTS index."""
        if not self.is_available():
            return
        conn = self._get_conn()
        conn.execute("DELETE FROM memories_fts WHERE id = ?", (memory_id,))
        conn.execute("DELETE FROM fts_ids WHERE id = ?", (memory_id,))
        conn.commit()

    def count(self) -> int:
        """Return number of indexed documents."""
        if not self.is_available():
            return 0
        (total,) = self._get_conn().execute(
            "SELECT COUNT(*) FROM fts_ids"
        ).fetchone()
        return total

    def close(self) -> None:
        """Close the database connection."""
        if self._conn:
            self._conn.close()
            self._conn = None
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def bm25_rank_to_score(rank: float) -> float:
    """Map a raw BM25 rank onto the 0..1 range.

    FTS5's bm25() emits negative numbers, with more-negative meaning a
    stronger match. The mapping used is score = 1 / (1 + |rank|), so a
    rank of 0 maps to 1.0 and large-magnitude ranks approach 0.
    """
    magnitude = abs(rank)
    return 1.0 / (1.0 + magnitude)
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def hybrid_merge(
    vector_results: list[dict],
    bm25_results: list[dict],
    vector_weight: float = 0.7,
    text_weight: float = 0.3,
) -> list[dict]:
    """Combine vector-similarity and BM25 hits into one weighted ranking.

    BM25 ranks (negative; more negative = better) are min-max normalized
    onto 0..1 so they are comparable with cosine similarities: the
    strongest BM25 hit maps to 1.0 and the weakest to 0.0 (when every
    hit shares one rank, all map to 1.0). An id present in only one
    result set contributes a zero score for the missing side.

    Args:
        vector_results: List of {"id": str, "score": float} (0..1 cosine sim)
        bm25_results: List of {"id": str, "rank": float} (negative BM25 rank)
        vector_weight: Weight for vector similarity score
        text_weight: Weight for BM25 text score

    Returns:
        Merged list sorted by final_score descending.
        Each dict has: id, vector_score, text_score, final_score.
    """
    # Re-scale the two weights so they sum to 1 (when their sum is positive).
    weight_sum = vector_weight + text_weight
    if weight_sum > 0:
        vector_weight = vector_weight / weight_sum
        text_weight = text_weight / weight_sum

    # Min-max normalize |rank| onto 0..1; an empty hit list simply means
    # no BM25 contribution at all.
    text_scores: dict[str, float] = {}
    if bm25_results:
        magnitudes = [abs(hit["rank"]) for hit in bm25_results]
        lo, hi = min(magnitudes), max(magnitudes)
        span = hi - lo
        for hit in bm25_results:
            # span == 0 means every hit shares the same rank → all 1.0.
            text_scores[hit["id"]] = (
                (abs(hit["rank"]) - lo) / span if span > 0 else 1.0
            )

    # Merge both result sets into one candidate-per-id map.
    merged: dict[str, dict] = {}
    for vr in vector_results:
        merged[vr["id"]] = {
            "id": vr["id"],
            "vector_score": vr["score"],
            "text_score": 0.0,
        }
    for mid, ts in text_scores.items():
        entry = merged.get(mid)
        if entry is not None:
            entry["text_score"] = ts
        else:
            merged[mid] = {"id": mid, "vector_score": 0.0, "text_score": ts}

    # Weighted blend of the two per-candidate scores.
    for entry in merged.values():
        entry["final_score"] = (
            vector_weight * entry["vector_score"]
            + text_weight * entry["text_score"]
        )

    return sorted(
        merged.values(), key=lambda e: e["final_score"], reverse=True
    )
|