tribalmemory-0.1.0-py3-none-any.whl → tribalmemory-0.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tribalmemory/services/session_store.py
@@ -0,0 +1,412 @@
+"""Session transcript indexing service.
+
+Indexes conversation transcripts as chunked embeddings for contextual recall.
+Supports delta-based ingestion and retention-based cleanup.
+"""
+
+import logging
+import uuid
+from dataclasses import dataclass, field
+from datetime import datetime, timedelta, timezone
+from typing import Optional
+
+from ..interfaces import IEmbeddingService, IVectorStore
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class SessionMessage:
+    """A single message in a conversation transcript.
+
+    Attributes:
+        role: Message role (user, assistant, system)
+        content: Message content
+        timestamp: When the message was sent
+    """
+    role: str
+    content: str
+    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+
+
+@dataclass
+class SessionChunk:
+    """A chunk of conversation transcript with embedding.
+
+    Attributes:
+        chunk_id: Unique identifier for this chunk
+        session_id: ID of the session this chunk belongs to
+        instance_id: Which agent instance processed this session
+        content: The actual conversation content (multiple messages)
+        embedding: Vector embedding of the content
+        start_time: Timestamp of first message in chunk
+        end_time: Timestamp of last message in chunk
+        chunk_index: Sequential index within session (0, 1, 2...)
+    """
+    chunk_id: str
+    session_id: str
+    instance_id: str
+    content: str
+    embedding: list[float]
+    start_time: datetime
+    end_time: datetime
+    chunk_index: int
+
+
+class SessionStore:
+    """Service for indexing and searching session transcripts.
+
+    Usage:
+        store = SessionStore(
+            instance_id="clawdio-1",
+            embedding_service=embedding_service,
+            vector_store=vector_store,
+        )
+
+        # Ingest a session transcript
+        messages = [
+            SessionMessage("user", "What is Docker?", datetime.now(timezone.utc)),
+            SessionMessage("assistant", "Docker is a container platform", datetime.now(timezone.utc)),
+        ]
+        await store.ingest("session-123", messages)
+
+        # Search across all sessions
+        results = await store.search("Docker setup error")
+
+        # Search within a specific session
+        results = await store.search("Docker", session_id="session-123")
+    """
+
+    # Chunking parameters
+    TARGET_CHUNK_TOKENS = 400  # Target size for each chunk
+    WORDS_PER_TOKEN = 0.75  # Approximate words per token
+    OVERLAP_TOKENS = 50  # Overlap between chunks for context
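+    # With these defaults each chunk targets int(400 * 0.75) = 300 words, and
+    # consecutive chunks overlap by up to int(50 * 0.75) = 37 words (capped at
+    # two messages; see _chunk_messages)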
+
+    def __init__(
+        self,
+        instance_id: str,
+        embedding_service: IEmbeddingService,
+        vector_store: IVectorStore,
+    ):
+        self.instance_id = instance_id
+        self.embedding_service = embedding_service
+        self.vector_store = vector_store
+
+        # Track last ingested index per session for delta ingestion
+        self._session_state: dict[str, int] = {}
+        # In-memory chunk storage for v0.2.0 (see _store_chunk)
+        self._chunks: list[dict] = []
+
+    async def ingest(
+        self,
+        session_id: str,
+        messages: list[SessionMessage],
+        instance_id: Optional[str] = None,
+    ) -> dict:
+        """Ingest session messages with delta-based processing.
+
+        Only processes new messages since the last ingestion for this session.
+
+        Args:
+            session_id: Unique identifier for the session
+            messages: List of conversation messages
+            instance_id: Override instance ID (defaults to self.instance_id)
+
+        Returns:
+            Dict with keys: success, chunks_created, messages_processed
+        """
+        if not messages:
+            return {
+                "success": True,
+                "chunks_created": 0,
+                "messages_processed": 0,
+            }
+
+        # Delta ingestion: only process new messages
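+        # (e.g. a first call with 10 messages processes all 10; a later call
+        # for the same session with 12 messages processes only messages[10:])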
+        last_index = self._session_state.get(session_id, 0)
+        new_messages = messages[last_index:]
+
+        if not new_messages:
+            return {
+                "success": True,
+                "chunks_created": 0,
+                "messages_processed": 0,
+            }
+
+        try:
+            # Create chunks from new messages
+            chunks = await self._chunk_messages(
+                new_messages,
+                session_id,
+                instance_id or self.instance_id,
+            )
+
+            # Store chunks in vector store
+            for chunk in chunks:
+                await self._store_chunk(chunk)
+
+            # Update state
+            self._session_state[session_id] = len(messages)
+
+            return {
+                "success": True,
+                "chunks_created": len(chunks),
+                "messages_processed": len(new_messages),
+            }
+
+        except Exception as e:
+            logger.exception(f"Failed to ingest session {session_id}: {e}")
+            return {
+                "success": False,
+                "error": str(e),
+            }
+
+    async def search(
+        self,
+        query: str,
+        session_id: Optional[str] = None,
+        limit: int = 5,
+        min_relevance: float = 0.0,
+    ) -> list[dict]:
+        """Search session transcripts by semantic similarity.
+
+        Args:
+            query: Natural language search query
+            session_id: Optional filter to specific session
+            limit: Maximum number of results to return
+            min_relevance: Minimum similarity score (0.0 to 1.0)
+
+        Returns:
+            List of dicts with keys: chunk_id, session_id, instance_id,
+            content, similarity_score, start_time, end_time, chunk_index
+        """
+        try:
+            # Generate query embedding
+            query_embedding = await self.embedding_service.embed(query)
+
+            # Search chunks
+            results = await self._search_chunks(
+                query_embedding,
+                session_id,
+                limit,
+                min_relevance,
+            )
+
+            return results
+
+        except Exception as e:
+            logger.exception(f"Failed to search sessions: {e}")
+            return []
+
+    async def cleanup(self, retention_days: int = 30) -> int:
+        """Delete session chunks older than the retention period.
+
+        Args:
+            retention_days: Number of days to retain chunks
+
+        Returns:
+            Number of chunks deleted
+        """
+        try:
+            cutoff_time = datetime.now(timezone.utc) - timedelta(days=retention_days)
+
+            # Find and delete expired chunks
+            deleted = await self._delete_chunks_before(cutoff_time)
+
+            return deleted
+
+        except Exception as e:
+            logger.exception(f"Failed to cleanup sessions: {e}")
+            return 0
+
+    async def get_stats(self) -> dict:
+        """Get statistics about indexed sessions.
+
+        Returns:
+            Dict with keys: total_chunks, total_sessions,
+            earliest_chunk, latest_chunk
+        """
+        try:
+            chunks = await self._get_all_chunks()
+
+            if not chunks:
+                return {
+                    "total_chunks": 0,
+                    "total_sessions": 0,
+                    "earliest_chunk": None,
+                    "latest_chunk": None,
+                }
+
+            session_ids = set()
+            timestamps = []
+
+            for chunk in chunks:
+                session_ids.add(chunk["session_id"])
+                timestamps.append(chunk["start_time"])
+
+            return {
+                "total_chunks": len(chunks),
+                "total_sessions": len(session_ids),
+                "earliest_chunk": min(timestamps) if timestamps else None,
+                "latest_chunk": max(timestamps) if timestamps else None,
+            }
+
+        except Exception as e:
+            logger.exception(f"Failed to get stats: {e}")
+            return {
+                "total_chunks": 0,
+                "total_sessions": 0,
+                "earliest_chunk": None,
+                "latest_chunk": None,
+            }
+
+    async def _chunk_messages(
+        self,
+        messages: list[SessionMessage],
+        session_id: str,
+        instance_id: str,
+    ) -> list[SessionChunk]:
+        """Chunk messages into ~400 token windows with overlap.
+
+        Uses a simple word-count approximation: words / 0.75 ≈ tokens.
+        """
+        chunks = []
+        chunk_index = 0
+
+        # Convert messages to text with timestamps
+        message_texts = []
+        for msg in messages:
+            text = f"{msg.role}: {msg.content}"
+            message_texts.append((text, msg.timestamp))
+
+        # Estimate tokens
+        target_words = int(self.TARGET_CHUNK_TOKENS * self.WORDS_PER_TOKEN)
+        overlap_words = int(self.OVERLAP_TOKENS * self.WORDS_PER_TOKEN)
+
+        i = 0
+        while i < len(message_texts):
+            chunk_messages = []
+            chunk_word_count = 0
+            start_time = message_texts[i][1]
+            end_time = start_time
+
+            # Collect messages until we reach target size
+            while i < len(message_texts) and chunk_word_count < target_words:
+                text, timestamp = message_texts[i]
+                words = len(text.split())
+                chunk_messages.append(text)
+                chunk_word_count += words
+                end_time = timestamp
+                i += 1
+
+            # Create chunk
+            if chunk_messages:
+                content = "\n".join(chunk_messages)
+                embedding = await self.embedding_service.embed(content)
+
+                chunk = SessionChunk(
+                    chunk_id=str(uuid.uuid4()),
+                    session_id=session_id,
+                    instance_id=instance_id,
+                    content=content,
+                    embedding=embedding,
+                    start_time=start_time,
+                    end_time=end_time,
+                    chunk_index=chunk_index,
+                )
+                chunks.append(chunk)
+                chunk_index += 1
+
+            # Backtrack for overlap
+            if i < len(message_texts):
+                # Calculate how many messages to backtrack
+                overlap_word_count = 0
+                backtrack = 0
+                while (backtrack < len(chunk_messages) and
+                       overlap_word_count < overlap_words):
+                    backtrack += 1
+                    overlap_word_count += len(chunk_messages[-backtrack].split())
+
+                # Backtrack at most 2 messages, and never the whole chunk, so
+                # every iteration makes forward progress (backtracking the full
+                # chunk would restart at the same message and never terminate)
+                i -= min(backtrack, 2, len(chunk_messages) - 1)
+                i = max(i, 0)
+
+        return chunks
+
+    async def _store_chunk(self, chunk: SessionChunk) -> None:
+        """Store a session chunk in memory.
+
+        Note: Currently uses in-memory list storage. This is intentional for
+        v0.2.0 to keep the initial implementation simple and testable. Data
+        does not persist across restarts. A future version will integrate
+        with LanceDB for persistent storage in a separate 'session_chunks'
+        table. See issue #38 follow-up.
+        """
+        self._chunks.append({
+            "chunk_id": chunk.chunk_id,
+            "session_id": chunk.session_id,
+            "instance_id": chunk.instance_id,
+            "content": chunk.content,
+            "embedding": chunk.embedding,
+            "start_time": chunk.start_time,
+            "end_time": chunk.end_time,
+            "chunk_index": chunk.chunk_index,
+        })
+
+    async def _search_chunks(
+        self,
+        query_embedding: list[float],
+        session_id: Optional[str],
+        limit: int,
+        min_relevance: float,
+    ) -> list[dict]:
+        """Search for chunks by similarity."""
+        # Calculate similarities
+        results = []
+        for chunk in self._chunks:
+            # Filter by session_id if provided
+            if session_id and chunk["session_id"] != session_id:
+                continue
+
+            similarity = self.embedding_service.similarity(
+                query_embedding,
+                chunk["embedding"],
+            )
+
+            if similarity >= min_relevance:
+                results.append({
+                    "chunk_id": chunk["chunk_id"],
+                    "session_id": chunk["session_id"],
+                    "instance_id": chunk["instance_id"],
+                    "content": chunk["content"],
+                    "similarity_score": similarity,
+                    "start_time": chunk["start_time"],
+                    "end_time": chunk["end_time"],
+                    "chunk_index": chunk["chunk_index"],
+                })
+
+        # Sort by similarity
+        results.sort(key=lambda x: x["similarity_score"], reverse=True)
+
+        return results[:limit]
+
+    async def _delete_chunks_before(self, cutoff_time: datetime) -> int:
+        """Delete chunks older than cutoff time."""
+        initial_count = len(self._chunks)
+        self._chunks = [
+            chunk for chunk in self._chunks
+            if chunk["end_time"] >= cutoff_time
+        ]
+
+        return initial_count - len(self._chunks)
+
+    async def _get_all_chunks(self) -> list[dict]:
+        """Get all stored chunks."""
+        return self._chunks
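
To make the new module concrete, here is a minimal usage sketch (not shipped with the package). `FakeEmbedder` is a toy stand-in for a real `IEmbeddingService` implementation, and `vector_store=None` is only viable because the v0.2.0 storage path is in-memory and never touches it:

```python
import asyncio

from tribalmemory.services.session_store import SessionMessage, SessionStore


class FakeEmbedder:
    """Toy embedder, just enough to exercise SessionStore's control flow."""

    async def embed(self, text: str) -> list[float]:
        return [float(len(text)), float(text.count(" "))]

    def similarity(self, a: list[float], b: list[float]) -> float:
        return 1.0 / (1.0 + abs(a[0] - b[0]) + abs(a[1] - b[1]))


async def main() -> None:
    store = SessionStore("demo-1", FakeEmbedder(), vector_store=None)

    msgs = [SessionMessage("user", "What is Docker?")]
    print(await store.ingest("s1", msgs))
    # {'success': True, 'chunks_created': 1, 'messages_processed': 1}

    msgs.append(SessionMessage("assistant", "Docker is a container platform"))
    print(await store.ingest("s1", msgs))  # delta: only the new message
    # {'success': True, 'chunks_created': 1, 'messages_processed': 1}

    hits = await store.search("Docker", session_id="s1", limit=3)
    print([(h["chunk_index"], round(h["similarity_score"], 3)) for h in hits])

    print(await store.cleanup(retention_days=30))  # 0: nothing has expired


asyncio.run(main())
```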
tribalmemory/services/vector_store.py
@@ -252,7 +252,63 @@ class LanceDBVectorStore(IVectorStore):
     async def count(self, filters: Optional[dict] = None) -> int:
         entries = await self.list(limit=100000, filters=filters)
         return len(entries)
-
+
+    async def get_stats(self) -> dict:
+        """Compute stats natively over LanceDB rows.
+
+        Iterates rows in pages to avoid loading all embeddings into
+        RAM. Only the metadata columns are read.
+        """
+        await self._ensure_initialized()
+
+        by_source: dict[str, int] = {}
+        by_instance: dict[str, int] = {}
+        by_tag: dict[str, int] = {}
+        total = 0
+        corrections = 0
+
+        page_size = 1000
+        offset = 0
+        while True:
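+            # Emulate offset paging with limit + slice: each page re-reads the
+            # first `offset` rows and discards them. Embeddings stay out of
+            # RAM because only the metadata columns are selected.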
+            rows = (
+                self._table.search()
+                .where("deleted = false")
+                .select(["source_type", "source_instance", "tags",
+                         "supersedes"])
+                .limit(page_size + offset)
+                .to_list()
+            )
+            page = rows[offset:]
+            if not page:
+                break
+
+            for row in page:
+                total += 1
+                src = row.get("source_type", "unknown")
+                by_source[src] = by_source.get(src, 0) + 1
+
+                inst = row.get("source_instance", "unknown")
+                by_instance[inst] = by_instance.get(inst, 0) + 1
+
+                tags = json.loads(row.get("tags", "[]"))
+                for tag in tags:
+                    by_tag[tag] = by_tag.get(tag, 0) + 1
+
+                if row.get("supersedes"):
+                    corrections += 1
+
+            if len(page) < page_size:
+                break
+            offset += page_size
+
+        return {
+            "total_memories": total,
+            "by_source_type": by_source,
+            "by_tag": by_tag,
+            "by_instance": by_instance,
+            "corrections": corrections,
+        }
+
     def _row_to_entry(self, row: dict) -> MemoryEntry:
         return MemoryEntry(
             id=row["id"],
@@ -358,3 +414,32 @@ class InMemoryVectorStore(IVectorStore):
     async def count(self, filters: Optional[dict] = None) -> int:
         entries = await self.list(limit=100000, filters=filters)
         return len(entries)
+
+    async def get_stats(self) -> dict:
+        """Compute stats in a single pass over in-memory entries."""
+        by_source: dict[str, int] = {}
+        by_instance: dict[str, int] = {}
+        by_tag: dict[str, int] = {}
+        total = 0
+        corrections = 0
+
+        for entry in self._store.values():
+            if entry.id in self._deleted:
+                continue
+            total += 1
+            src = entry.source_type.value
+            by_source[src] = by_source.get(src, 0) + 1
+            inst = entry.source_instance
+            by_instance[inst] = by_instance.get(inst, 0) + 1
+            for tag in entry.tags:
+                by_tag[tag] = by_tag.get(tag, 0) + 1
+            if entry.supersedes:
+                corrections += 1
+
+        return {
+            "total_memories": total,
+            "by_source_type": by_source,
+            "by_tag": by_tag,
+            "by_instance": by_instance,
+            "corrections": corrections,
+        }
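
Both implementations return the same shape, so callers can swap stores without changes. An illustrative result (the counts and the keys inside the nested dicts are invented for the example):

```python
{
    "total_memories": 128,
    "by_source_type": {"conversation": 97, "import": 31},
    "by_tag": {"infra": 12, "docker": 7},
    "by_instance": {"clawdio-1": 80, "codex-1": 48},
    "corrections": 3,
}
```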
tribalmemory-{0.1.0 → 0.2.0}.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tribalmemory
-Version: 0.1.0
+Version: 0.2.0
 Summary: Shared memory infrastructure for multi-instance AI agents
 Author-email: Joe <joe@example.com>
 License: Apache-2.0
@@ -48,12 +48,48 @@ Dynamic: license-file
 
 One memory store, many agents. Teach Claude Code something — Codex already knows it. That's not just persistence — it's **cross-agent intelligence**.
 
+<p align="center">
+  <img src="docs/assets/one-brain-two-agents.gif" alt="One Brain, Two Agents — Claude Code stores memories, Codex recalls them" width="700">
+  <br>
+  <em>Claude Code stores architecture decisions → Codex recalls them instantly</em>
+</p>
+
+[![asciinema demo](https://img.shields.io/badge/demo-asciinema-d40000)](https://asciinema.org/a/ZM74iIXzM07SV21P)
+[![PyPI](https://img.shields.io/pypi/v/tribalmemory)](https://pypi.org/project/tribalmemory/)
+[![License](https://img.shields.io/badge/license-Apache%202.0-blue)](LICENSE)
+
 ## Why
 
 Every AI coding assistant starts fresh. Claude Code doesn't know what you told Codex. Codex doesn't know what you told Claude. You repeat yourself constantly.
 
 Tribal Memory is a shared memory server that any AI agent can connect to. Store a memory from one agent, recall it from another. It just works.
 
+## Install
+
+**macOS:**
+```bash
+# Install uv (https://docs.astral.sh/uv/)
+curl -LsSf https://astral.sh/uv/install.sh | sh
+
+# Restart your terminal, or run:
+source ~/.zshrc
+
+# Install tribalmemory
+uv tool install tribalmemory
+```
+
+> **Why uv?** macOS blocks `pip install` into the system Python with "externally-managed-environment" errors. `uv tool install` handles isolated environments automatically.
+
+**Linux:**
+```bash
+pip install tribalmemory
+
+# Or with uv:
+# curl -LsSf https://astral.sh/uv/install.sh | sh
+# source ~/.bashrc
+# uv tool install tribalmemory
+```
+
 ## Quick Start
 
 ### Option A: Local Mode (Zero Cloud, Zero Cost)
@@ -61,8 +97,6 @@ Tribal Memory is a shared memory server that any AI agent can connect to. Store
 No API keys. No cloud. Everything runs on your machine.
 
 ```bash
-pip install tribalmemory
-
 # Set up with local Ollama embeddings
 tribalmemory init --local
 
@@ -76,8 +110,6 @@ tribalmemory serve
 ### Option B: OpenAI Embeddings
 
 ```bash
-pip install tribalmemory
-
 # Set up with OpenAI
 export OPENAI_API_KEY=sk-...
 tribalmemory init
@@ -224,19 +256,40 @@ await service.correct(
 )
 ```
 
+## Demo
+
+See cross-agent memory sharing in action:
+
+```bash
+# Start the server
+tribalmemory serve
+
+# Run the interactive demo
+./demo.sh
+```
+
+See [docs/demo-output.md](docs/demo-output.md) for example output.
+
 ## HTTP API
 
+All endpoints are under the `/v1` prefix.
+
 ```bash
 # Store a memory
-curl -X POST http://localhost:18790/memories \
+curl -X POST http://localhost:18790/v1/remember \
   -H "Content-Type: application/json" \
   -d '{"content": "The database uses Postgres 16", "tags": ["infra"]}'
 
 # Search memories
-curl "http://localhost:18790/memories/search?query=what+database&limit=5"
+curl -X POST http://localhost:18790/v1/recall \
+  -H "Content-Type: application/json" \
+  -d '{"query": "what database", "limit": 5}'
 
 # Get stats
-curl http://localhost:18790/stats
+curl http://localhost:18790/v1/stats
+
+# Health check
+curl http://localhost:18790/v1/health
 ```
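
For clients that prefer Python over curl, the same calls look like this. A sketch: it assumes the `requests` package and mirrors only the endpoints shown above; response schemas are not documented here, so the raw JSON is just printed.

```python
# Same /v1 calls as the curl examples above; assumes `pip install requests`.
import requests

BASE = "http://localhost:18790/v1"

# Store a memory
requests.post(
    f"{BASE}/remember",
    json={"content": "The database uses Postgres 16", "tags": ["infra"]},
).raise_for_status()

# Search memories
hits = requests.post(f"{BASE}/recall", json={"query": "what database", "limit": 5})
print(hits.json())

# Get stats and health
print(requests.get(f"{BASE}/stats").json())
print(requests.get(f"{BASE}/health").json())
```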
 
  ## OpenClaw Integration
tribalmemory-{0.1.0 → 0.2.0}.dist-info/RECORD
@@ -1,6 +1,6 @@
 tribalmemory/__init__.py,sha256=DNgC_ZT0lrhxsPdhXu4oeG_UdrLstYQHeKwR-U2toeY,104
-tribalmemory/cli.py,sha256=QYpHt0E-hRWK35YttlJks-xuV6kDfc6O4FrxChvZaxU,9043
-tribalmemory/interfaces.py,sha256=W2BNYyYlY-RHTbnJn8-u1wbhfcUK5egzjUzrGexxyB0,9966
+tribalmemory/cli.py,sha256=wbVsgMst4NEuWkwA2dily_qC8AP6jvAHKwHgE3B-TG8,15960
+tribalmemory/interfaces.py,sha256=d3uFTBG9mDtB69cVSMipE5Qi3JDGZGUAYmhhAagT3rM,11430
 tribalmemory/utils.py,sha256=aei7xR6OVMGkllPySA2boeHyI3D1JsHTUX1YeaZdkMY,696
 tribalmemory/a21/__init__.py,sha256=u1793uKzbWGKwiAVmCxEO9_3rdTL7QKTLhQQB8Umsl4,1181
 tribalmemory/a21/system.py,sha256=gGFVWBTckSTFv8ZciEUc05HYjwxZP22UpIqbxXD6giM,9185
@@ -19,7 +19,7 @@ tribalmemory/a21/providers/openai.py,sha256=MxFJXph8rVFAqkVMCS3vxdqwBB8_MhoqqGg6
 tribalmemory/a21/providers/timestamp.py,sha256=T1sJkaQSixRip2C0AUPnljWP0u5w4iHoIcVRmG8FgPo,2994
 tribalmemory/mcp/__init__.py,sha256=r7CzTwzAnSgox8_cBNZgtXDjMGxC7o8polptSvB-AvY,262
 tribalmemory/mcp/__main__.py,sha256=GX6mNmM_Lpq4EyuTr__awVTf1btCi7TpEkNtMTkgas0,115
-tribalmemory/mcp/server.py,sha256=otmDaFe2sXlHNyPorLgUIvkaM6F01EQ-1OdiyhqGDoU,15458
+tribalmemory/mcp/server.py,sha256=4YyY0Wl0RZRKFNkxc2kaFbkt4HWcVP6eryrABKfFKZQ,21029
 tribalmemory/performance/__init__.py,sha256=truSiOqk2WuzatOrzVQICwF8pxLxMlCD8R9O-uqiX3I,55
 tribalmemory/performance/benchmarks.py,sha256=2MVfML04Y-05YpmHCsU-SLtS05-H38oJ7a6DCk2qGIc,8985
 tribalmemory/performance/corpus_generator.py,sha256=ovln1d-7JGd5fJbdSRsdxlA0uaqLCVM3Lo_1SDGRkA0,5894
@@ -27,25 +27,28 @@ tribalmemory/portability/__init__.py,sha256=_USwXZyUKn9idsItv94AAzxKv1wqURYf68Iz
 tribalmemory/portability/embedding_metadata.py,sha256=uT_f9jc_vpemY2WxK_UhP3kOAWgZLMRShKKxh3OzzPM,10478
 tribalmemory/server/__init__.py,sha256=2YYwVr1IEr93MVa_7BKCX43bC82-ySnAfwypUw_nQJQ,238
 tribalmemory/server/__main__.py,sha256=Uk2_8MH-aQ65QWDg_XMe5i-hdaQB-yJApLT8YUaIdEs,116
-tribalmemory/server/app.py,sha256=P4QrOaw35t8NQvrJjYCXMEEFMuH2MrIibCyB2uwpvVU,4836
-tribalmemory/server/config.py,sha256=4BCRUjTnkVCEGPNUK5UiOFXyO91BxwxXsCuEG0lnVh4,3552
-tribalmemory/server/models.py,sha256=bY6IBn6_AW7PnGunQGa0W7LgKym0-KW05lGMztLbhY0,5652
-tribalmemory/server/routes.py,sha256=8-Ie0Ib7t_tUT_kZvsb1QUuNQZ2Dd6rOTlI0d2CoiAk,11403
+tribalmemory/server/app.py,sha256=Ku2UOJ2cnh-RIhtLBaKuZvOx8tjNVbOplhn1U2ePbBM,6796
+tribalmemory/server/config.py,sha256=UHiUIdonPdnsjdZh1vE0C6aOVbP4gS5Bxh7YGJgHh2U,5484
+tribalmemory/server/models.py,sha256=-Y8l5uJ7ij_oiwNtAnLRbmXE9oQlwcBsrCRgnYp14N0,7719
+tribalmemory/server/routes.py,sha256=O2qYpiFYFXY5akTAYcHYmv6AZNqyz3NwnxGPx7GPHFw,13842
 tribalmemory/services/__init__.py,sha256=htv8HuG_r_lYJwP5Q1bwO-oir436U0NfJrOxk9mB7kU,468
 tribalmemory/services/deduplication.py,sha256=E8PaIDB6g24H28tKHrB5rMBJaKGGT3pFTDepXQThvcc,3679
 tribalmemory/services/embeddings.py,sha256=0kY1uPyCg81AlRTNg5QhXbRLDv8hN9khKR4JDGF2sik,10005
+tribalmemory/services/fts_store.py,sha256=5-SBGmzDeQR0-8aDMMO-zZuo8M7VK_tlKYUVDNAitV4,8424
 tribalmemory/services/import_export.py,sha256=KfEl5EXAFcuyDmhOYJZfjiIRtJYY4qQlvxv2ePJQHaA,15095
-tribalmemory/services/memory.py,sha256=UDypOGf8fAfN4iepkncrJn9v3QWvQx5lmItSZfwLUGg,9438
-tribalmemory/services/vector_store.py,sha256=8oq1V-pedIW4A7FXvqHY91L5DK56YdGlZUC3qjDtugo,12703
+tribalmemory/services/memory.py,sha256=R7p3K73VXebNWLIMeJUQNH3SvyYisydslei4FCF_y2k,15943
+tribalmemory/services/reranker.py,sha256=0RSvQFHB609aWcxBl92fiwxIbAYILTqNL9MaAp0lQ74,8720
+tribalmemory/services/session_store.py,sha256=wkVF9pNJOqkVXIYOkyvABSaRxaLoHDldT8KTZGThDU0,13818
+tribalmemory/services/vector_store.py,sha256=fL8YgnHiCLPqxqV64pQ_rMLIzdJc6ohN2c4wgGz0buw,15364
 tribalmemory/testing/__init__.py,sha256=XVS3uy0BjABCErgZohaqtj5MF20NKmj1KmtJfRiI_XI,524
 tribalmemory/testing/embedding_utils.py,sha256=E40lSAU7cz-ow2hzKLSaZmafIlOP8d9gakP8R2DTlGM,3705
 tribalmemory/testing/fixtures.py,sha256=_zDyUVm6CQqXK1Us8CN6A95tJcmo1D7LFDktIvjOmwM,3584
 tribalmemory/testing/metrics.py,sha256=X1n84dJDNQXsfGV-i-MzhsWKnFgqHWIcIaQB-BUp0e0,8711
 tribalmemory/testing/mocks.py,sha256=sjLy-pq3D_T21rEvWWKM_bqw7xnchRPGLARZNfKkpGU,19788
 tribalmemory/testing/semantic_expansions.py,sha256=AbbJIXYN4EJT8WKJ7UmIlNRlv63VejbcnbzBy2z2Ofk,2953
-tribalmemory-0.1.0.dist-info/licenses/LICENSE,sha256=M8D9Xf3B6C6DFiCgAAhKcXeTscaC4cw1fhr3LHUrALU,10774
-tribalmemory-0.1.0.dist-info/METADATA,sha256=l-vV4rYIt3WFbe6Jbtzot4fZ3yTuzQA_yEXcmzdVJXs,7804
-tribalmemory-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-tribalmemory-0.1.0.dist-info/entry_points.txt,sha256=9Pep7JNCk9ifdFP4WbeCugDOjMrLVegGJ5iuvbcZ9e8,103
-tribalmemory-0.1.0.dist-info/top_level.txt,sha256=kX36ZpH4W7EWcInV4MrIudicusdz5hfkezKMZ3HCMQs,13
-tribalmemory-0.1.0.dist-info/RECORD,,
+tribalmemory-0.2.0.dist-info/licenses/LICENSE,sha256=M8D9Xf3B6C6DFiCgAAhKcXeTscaC4cw1fhr3LHUrALU,10774
+tribalmemory-0.2.0.dist-info/METADATA,sha256=q-7NnUqHid7QR-8QW3o_BI3OSUDzSIHXlW8APe6xc7g,9239
+tribalmemory-0.2.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+tribalmemory-0.2.0.dist-info/entry_points.txt,sha256=9Pep7JNCk9ifdFP4WbeCugDOjMrLVegGJ5iuvbcZ9e8,103
+tribalmemory-0.2.0.dist-info/top_level.txt,sha256=kX36ZpH4W7EWcInV4MrIudicusdz5hfkezKMZ3HCMQs,13
+tribalmemory-0.2.0.dist-info/RECORD,,