tribalmemory 0.1.1__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,10 @@
  """Tribal Memory Service - Main API for agents."""

+ import asyncio
+ import logging
  import os
  from datetime import datetime
+ from pathlib import Path
  from typing import Optional
  import uuid

@@ -15,6 +18,11 @@ from ..interfaces import (
  StoreResult,
  )
  from .deduplication import SemanticDeduplicationService
+ from .fts_store import FTSStore, hybrid_merge
+ from .graph_store import GraphStore, EntityExtractor
+ from .reranker import IReranker, NoopReranker, create_reranker
+
+ logger = logging.getLogger(__name__)


  class TribalMemoryService(IMemoryService):
@@ -31,6 +39,11 @@ class TribalMemoryService(IMemoryService):
  results = await service.recall("What language for Wally?")
  """

+ # Graph expansion scoring constants
+ GRAPH_1HOP_SCORE = 0.85 # Score for direct entity mentions
+ GRAPH_2HOP_SCORE = 0.70 # Score for connected entity mentions
+ GRAPH_EXPANSION_BUFFER = 2 # Multiplier for candidate pool before fetching
+
  def __init__(
  self,
  instance_id: str,
@@ -39,11 +52,30 @@ class TribalMemoryService(IMemoryService):
  dedup_exact_threshold: float = 0.98,
  dedup_near_threshold: float = 0.90,
  auto_reject_duplicates: bool = True,
+ fts_store: Optional[FTSStore] = None,
+ hybrid_search: bool = True,
+ hybrid_vector_weight: float = 0.7,
+ hybrid_text_weight: float = 0.3,
+ hybrid_candidate_multiplier: int = 4,
+ reranker: Optional[IReranker] = None,
+ rerank_pool_multiplier: int = 2,
+ graph_store: Optional[GraphStore] = None,
+ graph_enabled: bool = True,
  ):
  self.instance_id = instance_id
  self.embedding_service = embedding_service
  self.vector_store = vector_store
  self.auto_reject_duplicates = auto_reject_duplicates
+ self.fts_store = fts_store
+ self.hybrid_search = hybrid_search and fts_store is not None
+ self.hybrid_vector_weight = hybrid_vector_weight
+ self.hybrid_text_weight = hybrid_text_weight
+ self.hybrid_candidate_multiplier = hybrid_candidate_multiplier
+ self.reranker = reranker or NoopReranker()
+ self.rerank_pool_multiplier = rerank_pool_multiplier
+ self.graph_store = graph_store
+ self.graph_enabled = graph_enabled and graph_store is not None
+ self.entity_extractor = EntityExtractor() if self.graph_enabled else None

  self.dedup_service = SemanticDeduplicationService(
  vector_store=vector_store,
@@ -89,7 +121,36 @@ class TribalMemoryService(IMemoryService):
  confidence=1.0,
  )

- return await self.vector_store.store(entry)
+ result = await self.vector_store.store(entry)
+
+ # Index in FTS for hybrid search (best-effort; vector store is primary)
+ if result.success and self.fts_store:
+ try:
+ self.fts_store.index(entry.id, content, tags or [])
+ except Exception as e:
+ logger.warning("FTS indexing failed for %s: %s", entry.id, e)
+
+ # Extract and store entities for graph-enriched search
+ if result.success and self.graph_enabled and self.entity_extractor:
+ try:
+ entities, relationships = self.entity_extractor.extract_with_relationships(
+ content
+ )
+ for entity in entities:
+ self.graph_store.add_entity(entity, memory_id=entry.id)
+ for rel in relationships:
+ self.graph_store.add_relationship(rel, memory_id=entry.id)
+ if entities:
+ logger.debug(
+ "Extracted entities: %s, relationships: %s from %s",
+ [e.name for e in entities],
+ [(r.source, r.relation_type, r.target) for r in relationships],
+ entry.id
+ )
+ except Exception as e:
+ logger.warning("Graph indexing failed for %s: %s", entry.id, e)
+
+ return result

  async def recall(
  self,
@@ -97,14 +158,29 @@ class TribalMemoryService(IMemoryService):
  limit: int = 5,
  min_relevance: float = 0.7,
  tags: Optional[list[str]] = None,
+ graph_expansion: bool = True,
  ) -> list[RecallResult]:
- """Recall relevant memories.
+ """Recall relevant memories using hybrid search with optional graph expansion.
+
+ When hybrid search is enabled (FTS store available), combines
+ vector similarity with BM25 keyword matching for better results.
+ Falls back to vector-only search when FTS is unavailable.
+
+ When graph expansion is enabled, entities are extracted from the query
+ and the candidate pool is expanded via entity graph traversal.

  Args:
  query: Natural language query
  limit: Maximum results
  min_relevance: Minimum similarity score
  tags: Filter by tags (e.g., ["work", "preferences"])
+ graph_expansion: Expand candidates via entity graph (default True)
+
+ Returns:
+ List of RecallResult objects with retrieval_method indicating source:
+ - "vector": Pure vector similarity search
+ - "hybrid": Vector + BM25 merge
+ - "graph": Entity graph traversal (1-hop or 2-hop)
  """
  try:
  query_embedding = await self.embedding_service.embed(query)
@@ -112,15 +188,225 @@ class TribalMemoryService(IMemoryService):
  return []

  filters = {"tags": tags} if tags else None
+
+ if self.hybrid_search and self.fts_store:
+ results = await self._hybrid_recall(
+ query, query_embedding, limit, min_relevance, filters
+ )
+ else:
+ # Vector-only fallback
+ vector_results = await self.vector_store.recall(
+ query_embedding,
+ limit=limit,
+ min_similarity=min_relevance,
+ filters=filters,
+ )
+ # Mark as vector retrieval
+ results = [
+ RecallResult(
+ memory=r.memory,
+ similarity_score=r.similarity_score,
+ retrieval_time_ms=r.retrieval_time_ms,
+ retrieval_method="vector",
+ )
+ for r in vector_results
+ ]

- results = await self.vector_store.recall(
+ # Graph expansion: find additional memories via entity connections
+ if graph_expansion and self.graph_enabled and self.entity_extractor:
+ results = await self._expand_via_graph(
+ query, results, limit, min_relevance
+ )
+
+ return self._filter_superseded(results)
+
+ async def _hybrid_recall(
+ self,
+ query: str,
+ query_embedding: list[float],
+ limit: int,
+ min_relevance: float,
+ filters: Optional[dict],
+ ) -> list[RecallResult]:
+ """Hybrid recall: vector + BM25 combined, then reranked."""
+ candidate_limit = limit * self.hybrid_candidate_multiplier
+
+ # 1. Vector search — get wide candidate pool
+ vector_results = await self.vector_store.recall(
  query_embedding,
- limit=limit,
- min_similarity=min_relevance,
+ limit=candidate_limit,
+ min_similarity=min_relevance * 0.5, # Lower threshold for candidates
  filters=filters,
  )
+
+ # 2. BM25 search
+ bm25_results = self.fts_store.search(query, limit=candidate_limit)
+
+ # 3. Build lookup for vector results
+ vector_for_merge = [
+ {"id": r.memory.id, "score": r.similarity_score}
+ for r in vector_results
+ ]
+
+ # 4. Hybrid merge
+ merged = hybrid_merge(
+ vector_for_merge,
+ bm25_results,
+ self.hybrid_vector_weight,
+ self.hybrid_text_weight,
+ )
+
+ # 5. Build candidate results for reranking — need full MemoryEntry for each
+ # Create lookup from vector results
+ entry_map = {r.memory.id: r for r in vector_results}
+
+ # Get rerank_pool_multiplier * limit candidates before reranking
+ rerank_pool_size = min(limit * self.rerank_pool_multiplier, len(merged))

- return self._filter_superseded(results)
+ # Separate cached (vector) hits from BM25-only hits that need fetching
+ cached_hits: list[tuple[dict, RecallResult]] = []
+ bm25_only_ids: list[dict] = []
+
+ for m in merged[:rerank_pool_size]:
+ if m["id"] in entry_map:
+ cached_hits.append((m, entry_map[m["id"]]))
+ else:
+ bm25_only_ids.append(m)
+
+ # Batch-fetch BM25-only hits concurrently
+ fetched_entries = await asyncio.gather(
+ *(self.vector_store.get(m["id"]) for m in bm25_only_ids)
+ ) if bm25_only_ids else []
+
+ # Build candidate list
+ candidates: list[RecallResult] = []
+
+ # Add cached vector hits (mark as hybrid since we used BM25 merge)
+ for m, recall_result in cached_hits:
+ candidates.append(RecallResult(
+ memory=recall_result.memory,
+ similarity_score=m["final_score"],
+ retrieval_time_ms=recall_result.retrieval_time_ms,
+ retrieval_method="hybrid",
+ ))
+
+ # Add fetched BM25-only hits
+ for m, entry in zip(bm25_only_ids, fetched_entries):
+ if entry and m["final_score"] >= min_relevance * 0.5:
+ candidates.append(RecallResult(
+ memory=entry,
+ similarity_score=m["final_score"],
+ retrieval_time_ms=0,
+ retrieval_method="hybrid",
+ ))
+
+ # 6. Rerank candidates
+ reranked = self.reranker.rerank(query, candidates, top_k=limit)
+
+ return self._filter_superseded(reranked)
+
+ async def _expand_via_graph(
+ self,
+ query: str,
+ existing_results: list[RecallResult],
+ limit: int,
+ min_relevance: float,
+ ) -> list[RecallResult]:
+ """Expand recall candidates via entity graph traversal.
+
+ Extracts entities from the query, finds memories connected to those
+ entities via the graph, and merges them with existing results.
+
+ Args:
+ query: The original query string.
+ existing_results: Results from vector/hybrid search.
+ limit: Maximum total results.
+ min_relevance: Minimum relevance threshold (filters graph results too).
+
+ Returns:
+ Combined results with graph-expanded memories, sorted by score.
+ """
+ # Extract entities from query
+ query_entities = self.entity_extractor.extract(query)
+ if not query_entities:
+ return existing_results
+
+ # Collect memory IDs from existing results to avoid duplicates
+ existing_ids = {r.memory.id for r in existing_results}
+
+ # Find memories connected to query entities via graph
+ graph_memory_ids: set[str] = set()
+ entity_to_hops: dict[str, int] = {} # Track hop distance for scoring
+
+ for entity in query_entities:
+ # Direct mentions (1 hop)
+ direct_ids = self.graph_store.get_memories_for_entity(entity.name)
+ for mid in direct_ids:
+ if mid not in existing_ids:
+ graph_memory_ids.add(mid)
+ # Use setdefault to preserve shortest path (1-hop takes precedence)
+ entity_to_hops.setdefault(mid, 1)
+
+ # Connected entities (2 hops)
+ connected = self.graph_store.find_connected(entity.name, hops=1)
+ for connected_entity in connected:
+ connected_ids = self.graph_store.get_memories_for_entity(
+ connected_entity.name
+ )
+ for mid in connected_ids:
+ if mid not in existing_ids:
+ graph_memory_ids.add(mid)
+ # Use setdefault to preserve shortest path
+ entity_to_hops.setdefault(mid, 2)
+
+ if not graph_memory_ids:
+ return existing_results
+
+ # Cap graph candidates to prevent memory leak (#2)
+ max_graph_candidates = limit * self.GRAPH_EXPANSION_BUFFER
+ if len(graph_memory_ids) > max_graph_candidates:
+ # Prioritize 1-hop over 2-hop when capping
+ one_hop_ids = [mid for mid in graph_memory_ids if entity_to_hops[mid] == 1]
+ two_hop_ids = [mid for mid in graph_memory_ids if entity_to_hops[mid] == 2]
+
+ capped_ids: list[str] = []
+ capped_ids.extend(one_hop_ids[:max_graph_candidates])
+ remaining = max_graph_candidates - len(capped_ids)
+ if remaining > 0:
+ capped_ids.extend(two_hop_ids[:remaining])
+
+ graph_memory_ids = set(capped_ids)
+
+ # Batch-fetch graph-connected memories concurrently for performance
+ graph_memory_list = list(graph_memory_ids)
+ fetched_entries = await asyncio.gather(
+ *(self.vector_store.get(mid) for mid in graph_memory_list)
+ ) if graph_memory_list else []
+
+ graph_results: list[RecallResult] = []
+ for memory_id, entry in zip(graph_memory_list, fetched_entries):
+ if entry:
+ # Score based on hop distance using class constants
+ hops = entity_to_hops[memory_id] # Fail fast if logic is wrong
+ graph_score = (
+ self.GRAPH_1HOP_SCORE if hops == 1
+ else self.GRAPH_2HOP_SCORE
+ )
+
+ # Filter by min_relevance
+ if graph_score >= min_relevance:
+ graph_results.append(RecallResult(
+ memory=entry,
+ similarity_score=graph_score,
+ retrieval_time_ms=0,
+ retrieval_method="graph",
+ ))
+
+ # Combine existing + graph results (#10: single sort, no redundant pre-sort)
+ combined = existing_results + graph_results
+ combined.sort(key=lambda r: r.similarity_score, reverse=True)
+
+ return combined[:limit]

  async def correct(
  self,
@@ -157,7 +443,18 @@ class TribalMemoryService(IMemoryService):

  async def forget(self, memory_id: str) -> bool:
  """Forget (soft delete) a memory."""
- return await self.vector_store.delete(memory_id)
+ result = await self.vector_store.delete(memory_id)
+ if result and self.fts_store:
+ try:
+ self.fts_store.delete(memory_id)
+ except Exception as e:
+ logger.warning("FTS cleanup failed for %s: %s", memory_id, e)
+ if result and self.graph_store:
+ try:
+ self.graph_store.delete_memory(memory_id)
+ except Exception as e:
+ logger.warning("Graph cleanup failed for %s: %s", memory_id, e)
+ return result

  async def get(self, memory_id: str) -> Optional[MemoryEntry]:
  """Get a memory by ID with full provenance."""
@@ -165,46 +462,116 @@ class TribalMemoryService(IMemoryService):

  async def get_stats(self) -> dict:
  """Get memory statistics.
-
- Note: Stats are computed over up to 10,000 most recent memories.
- For systems with >10k memories, consider using count() with filters.
+
+ Delegates to vector_store.get_stats() which computes aggregates
+ efficiently (paginated by default, native queries for SQL-backed
+ stores).
  """
- all_memories = await self.vector_store.list(limit=10000)
-
- by_source: dict[str, int] = {}
- by_instance: dict[str, int] = {}
- by_tag: dict[str, int] = {}
-
- for m in all_memories:
- source = m.source_type.value
- by_source[source] = by_source.get(source, 0) + 1
-
- instance = m.source_instance
- by_instance[instance] = by_instance.get(instance, 0) + 1
-
- for tag in m.tags:
- by_tag[tag] = by_tag.get(tag, 0) + 1
-
- corrections = sum(1 for m in all_memories if m.supersedes)
-
- return {
- "total_memories": len(all_memories),
- "by_source_type": by_source,
- "by_tag": by_tag,
- "by_instance": by_instance,
- "corrections": corrections,
- }
+ return await self.vector_store.get_stats()

  @staticmethod
  def _filter_superseded(results: list[RecallResult]) -> list[RecallResult]:
  """Remove memories that are superseded by corrections in the result set."""
- superseded_ids = {
+ superseded_ids: set[str] = {
  r.memory.supersedes for r in results if r.memory.supersedes
  }
  if not superseded_ids:
  return results
  return [r for r in results if r.memory.id not in superseded_ids]

+ async def recall_entity(
+ self,
+ entity_name: str,
+ hops: int = 1,
+ limit: int = 10,
+ ) -> list[RecallResult]:
+ """Recall all memories associated with an entity and its connections.
+
+ This enables entity-centric queries like:
+ - "Tell me everything about auth-service"
+ - "What do we know about PostgreSQL?"
+
+ Args:
+ entity_name: Name of the entity to query
+ hops: Number of relationship hops to traverse (1 = direct only)
+ limit: Maximum results to return
+
+ Returns:
+ List of recall results for memories mentioning the entity or connected entities
+ """
+ if not self.graph_enabled:
+ logger.warning("Graph search not enabled, returning empty results")
+ return []
+
+ # Get memories directly mentioning the entity
+ direct_memories = set(self.graph_store.get_memories_for_entity(entity_name))
+
+ # Get memories for connected entities (if hops > 0)
+ if hops > 0:
+ connected = self.graph_store.find_connected(entity_name, hops=hops)
+ for entity in connected:
+ direct_memories.update(
+ self.graph_store.get_memories_for_entity(entity.name)
+ )
+
+ if not direct_memories:
+ return []
+
+ # Fetch full memory entries
+ results: list[RecallResult] = []
+ for memory_id in list(direct_memories)[:limit]:
+ entry = await self.vector_store.get(memory_id)
+ if entry:
+ results.append(RecallResult(
+ memory=entry,
+ similarity_score=1.0, # Entity match confidence (exact)
+ retrieval_time_ms=0,
+ retrieval_method="entity",
+ ))
+
+ return results
+
+ def get_entity_graph(
+ self,
+ entity_name: str,
+ hops: int = 2,
+ ) -> dict:
+ """Get the relationship graph around an entity.
+
+ Returns a dict with:
+ - entities: list of connected entities with types
+ - relationships: list of relationships
+
+ Useful for visualization and debugging.
+ """
+ if not self.graph_enabled:
+ return {"entities": [], "relationships": []}
+
+ connected = self.graph_store.find_connected(
+ entity_name, hops=hops, include_source=True
+ )
+
+ entities = [
+ {"name": e.name, "type": e.entity_type}
+ for e in connected
+ ]
+
+ # Get relationships for all entities
+ relationships = []
+ seen_rels = set()
+ for entity in connected:
+ for rel in self.graph_store.get_relationships_for_entity(entity.name):
+ rel_key = (rel.source, rel.target, rel.relation_type)
+ if rel_key not in seen_rels:
+ seen_rels.add(rel_key)
+ relationships.append({
+ "source": rel.source,
+ "target": rel.target,
+ "type": rel.relation_type,
+ })
+
+ return {"entities": entities, "relationships": relationships}
+

  def create_memory_service(
  instance_id: Optional[str] = None,
@@ -213,6 +580,14 @@ def create_memory_service(
  api_base: Optional[str] = None,
  embedding_model: Optional[str] = None,
  embedding_dimensions: Optional[int] = None,
+ hybrid_search: bool = True,
+ hybrid_vector_weight: float = 0.7,
+ hybrid_text_weight: float = 0.3,
+ hybrid_candidate_multiplier: int = 4,
+ reranking: str = "heuristic",
+ recency_decay_days: float = 30.0,
+ tag_boost_weight: float = 0.1,
+ rerank_pool_multiplier: int = 2,
  ) -> TribalMemoryService:
  """Factory function to create a memory service with sensible defaults.

@@ -225,6 +600,18 @@ def create_memory_service(
  For Ollama: "http://localhost:11434/v1"
  embedding_model: Embedding model name. Default: "text-embedding-3-small".
  embedding_dimensions: Embedding output dimensions. Default: 1536.
+ hybrid_search: Enable BM25 hybrid search (default: True).
+ hybrid_vector_weight: Weight for vector similarity (default: 0.7).
+ hybrid_text_weight: Weight for BM25 text score (default: 0.3).
+ hybrid_candidate_multiplier: Multiplier for candidate pool size
+ (default: 4). Retrieves 4× limit from each source before
+ merging.
+ reranking: Reranking mode: "auto", "cross-encoder", "heuristic", "none"
+ (default: "heuristic").
+ recency_decay_days: Half-life for recency boost (default: 30.0).
+ tag_boost_weight: Weight for tag match boost (default: 0.1).
+ rerank_pool_multiplier: How many candidates to give the reranker
+ (N × limit). Default: 2.

  Returns:
  Configured TribalMemoryService ready for use.
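The vector and text weights documented above are applied by the hybrid_merge helper imported from .fts_store, whose implementation is not part of this diff. Judging only from its call site in _hybrid_recall (per-source score dicts in, items carrying a final_score out), the combination is presumably a weighted sum along the following lines; this is an illustrative sketch under that assumption, not the package's actual code:

def hybrid_merge_sketch(vector_hits, bm25_hits, vector_weight=0.7, text_weight=0.3):
    """Illustrative weighted-sum merge; hybrid_merge itself is not shown in this diff."""
    # Assumed input shape for both lists: [{"id": ..., "score": ...}, ...]
    # Normalize BM25 scores to 0..1 so both sources share a scale.
    max_bm25 = max((h["score"] for h in bm25_hits), default=1.0) or 1.0
    scores = {}
    for h in vector_hits:
        scores.setdefault(h["id"], {})["vec"] = h["score"]
    for h in bm25_hits:
        scores.setdefault(h["id"], {})["txt"] = h["score"] / max_bm25
    merged = [
        {
            "id": mid,
            "final_score": vector_weight * s.get("vec", 0.0) + text_weight * s.get("txt", 0.0),
        }
        for mid, s in scores.items()
    ]
    merged.sort(key=lambda m: m["final_score"], reverse=True)
    return merged

Under this sketch and the default weights, a memory scoring 0.8 on vector similarity and 0.5 on normalized BM25 would merge to 0.7 * 0.8 + 0.3 * 0.5 = 0.71.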
@@ -267,9 +654,56 @@ def create_memory_service(
  vector_store = InMemoryVectorStore(embedding_service)
  else:
  vector_store = InMemoryVectorStore(embedding_service)
+
+ # Create FTS store for hybrid search (co-located with LanceDB)
+ fts_store = None
+ if hybrid_search and db_path:
+ try:
+ fts_db_path = str(Path(db_path) / "fts_index.db")
+ fts_store = FTSStore(fts_db_path)
+ if fts_store.is_available():
+ logger.info("Hybrid search enabled (SQLite FTS5)")
+ else:
+ logger.warning(
+ "FTS5 not available in SQLite build. "
+ "Hybrid search disabled, using vector-only."
+ )
+ fts_store = None
+ except Exception as e:
+ logger.warning(f"FTS store init failed: {e}. Using vector-only.")
+ fts_store = None
+
+ # Create reranker
+ from ..server.config import SearchConfig
+ search_config = SearchConfig(
+ reranking=reranking,
+ recency_decay_days=recency_decay_days,
+ tag_boost_weight=tag_boost_weight,
+ )
+ reranker = create_reranker(search_config)
+
+ # Create graph store for entity-enriched search (co-located with LanceDB)
+ graph_store = None
+ if db_path:
+ try:
+ graph_db_path = str(Path(db_path) / "graph.db")
+ graph_store = GraphStore(graph_db_path)
+ logger.info("Graph store enabled (SQLite)")
+ except Exception as e:
+ logger.warning(f"Graph store init failed: {e}. Graph search disabled.")
+ graph_store = None

  return TribalMemoryService(
  instance_id=instance_id,
  embedding_service=embedding_service,
- vector_store=vector_store
+ vector_store=vector_store,
+ fts_store=fts_store,
+ hybrid_search=hybrid_search,
+ hybrid_vector_weight=hybrid_vector_weight,
+ hybrid_text_weight=hybrid_text_weight,
+ hybrid_candidate_multiplier=hybrid_candidate_multiplier,
+ reranker=reranker,
+ rerank_pool_multiplier=rerank_pool_multiplier,
+ graph_store=graph_store,
+ graph_enabled=graph_store is not None,
  )
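Taken together, the factory changes above wire the FTS index, reranker, and graph store into the service whenever an on-disk database path is available. A minimal usage sketch of the 0.3.0 surface, using only parameters and methods visible in this diff; the top-level import path, the instance_id value, and the query strings are assumptions for illustration:

import asyncio

from tribalmemory import create_memory_service  # import path assumed for illustration


async def main() -> None:
    service = create_memory_service(
        instance_id="workstation-1",      # illustrative value
        hybrid_search=True,
        hybrid_vector_weight=0.7,
        hybrid_text_weight=0.3,
        hybrid_candidate_multiplier=4,
        reranking="heuristic",
        recency_decay_days=30.0,
        tag_boost_weight=0.1,
        rerank_pool_multiplier=2,
    )

    # Hybrid recall with graph expansion; falls back to vector-only search
    # when FTS5 or the graph store is unavailable (e.g., no database path).
    for r in await service.recall("What do we know about PostgreSQL?", limit=3):
        print(r.retrieval_method, round(r.similarity_score, 3), r.memory.id)

    # Entity-centric APIs added in this release.
    entity_hits = await service.recall_entity("PostgreSQL", hops=1, limit=10)
    graph = service.get_entity_graph("PostgreSQL", hops=2)
    print(len(entity_hits), graph["entities"], graph["relationships"])


asyncio.run(main())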