hindsight-api 0.1.11__py3-none-any.whl → 0.1.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. hindsight_api/__init__.py +2 -0
  2. hindsight_api/alembic/env.py +24 -1
  3. hindsight_api/alembic/versions/d9f6a3b4c5e2_rename_bank_to_interactions.py +14 -4
  4. hindsight_api/alembic/versions/e0a1b2c3d4e5_disposition_to_3_traits.py +54 -13
  5. hindsight_api/alembic/versions/rename_personality_to_disposition.py +18 -7
  6. hindsight_api/api/http.py +234 -228
  7. hindsight_api/api/mcp.py +14 -3
  8. hindsight_api/engine/__init__.py +12 -1
  9. hindsight_api/engine/entity_resolver.py +38 -37
  10. hindsight_api/engine/interface.py +592 -0
  11. hindsight_api/engine/llm_wrapper.py +176 -6
  12. hindsight_api/engine/memory_engine.py +993 -217
  13. hindsight_api/engine/retain/bank_utils.py +13 -12
  14. hindsight_api/engine/retain/chunk_storage.py +3 -2
  15. hindsight_api/engine/retain/fact_storage.py +10 -7
  16. hindsight_api/engine/retain/link_utils.py +17 -16
  17. hindsight_api/engine/retain/observation_regeneration.py +17 -16
  18. hindsight_api/engine/retain/orchestrator.py +2 -3
  19. hindsight_api/engine/retain/types.py +25 -8
  20. hindsight_api/engine/search/graph_retrieval.py +6 -5
  21. hindsight_api/engine/search/mpfp_retrieval.py +8 -7
  22. hindsight_api/engine/search/retrieval.py +12 -11
  23. hindsight_api/engine/search/think_utils.py +1 -1
  24. hindsight_api/engine/search/tracer.py +1 -1
  25. hindsight_api/engine/task_backend.py +32 -0
  26. hindsight_api/extensions/__init__.py +66 -0
  27. hindsight_api/extensions/base.py +81 -0
  28. hindsight_api/extensions/builtin/__init__.py +18 -0
  29. hindsight_api/extensions/builtin/tenant.py +33 -0
  30. hindsight_api/extensions/context.py +110 -0
  31. hindsight_api/extensions/http.py +89 -0
  32. hindsight_api/extensions/loader.py +125 -0
  33. hindsight_api/extensions/operation_validator.py +325 -0
  34. hindsight_api/extensions/tenant.py +63 -0
  35. hindsight_api/main.py +1 -1
  36. hindsight_api/mcp_local.py +7 -1
  37. hindsight_api/migrations.py +54 -10
  38. hindsight_api/models.py +15 -0
  39. hindsight_api/pg0.py +1 -1
  40. {hindsight_api-0.1.11.dist-info → hindsight_api-0.1.12.dist-info}/METADATA +1 -1
  41. hindsight_api-0.1.12.dist-info/RECORD +74 -0
  42. hindsight_api-0.1.11.dist-info/RECORD +0 -64
  43. {hindsight_api-0.1.11.dist-info → hindsight_api-0.1.12.dist-info}/WHEEL +0 -0
  44. {hindsight_api-0.1.11.dist-info → hindsight_api-0.1.12.dist-info}/entry_points.txt +0 -0

hindsight_api/engine/retain/bank_utils.py

@@ -10,6 +10,7 @@ from typing import TypedDict
 from pydantic import BaseModel, Field
 
 from ..db_utils import acquire_with_retry
+from ..memory_engine import fq_table
 from ..response_models import DispositionTraits
 
 logger = logging.getLogger(__name__)
@@ -51,9 +52,9 @@ async def get_bank_profile(pool, bank_id: str) -> BankProfile:
     async with acquire_with_retry(pool) as conn:
         # Try to get existing bank
         row = await conn.fetchrow(
-            """
+            f"""
             SELECT name, disposition, background
-            FROM banks WHERE bank_id = $1
+            FROM {fq_table("banks")} WHERE bank_id = $1
             """,
             bank_id,
         )
@@ -70,8 +71,8 @@ async def get_bank_profile(pool, bank_id: str) -> BankProfile:
 
         # Bank doesn't exist, create with defaults
         await conn.execute(
-            """
-            INSERT INTO banks (bank_id, name, disposition, background)
+            f"""
+            INSERT INTO {fq_table("banks")} (bank_id, name, disposition, background)
             VALUES ($1, $2, $3::jsonb, $4)
             ON CONFLICT (bank_id) DO NOTHING
             """,
@@ -98,8 +99,8 @@ async def update_bank_disposition(pool, bank_id: str, disposition: dict[str, int
 
     async with acquire_with_retry(pool) as conn:
         await conn.execute(
-            """
-            UPDATE banks
+            f"""
+            UPDATE {fq_table("banks")}
             SET disposition = $2::jsonb,
                 updated_at = NOW()
             WHERE bank_id = $1
@@ -140,8 +141,8 @@ async def merge_bank_background(pool, llm_config, bank_id: str, new_info: str, u
         if inferred_disposition:
             # Update both background and disposition
             await conn.execute(
-                """
-                UPDATE banks
+                f"""
+                UPDATE {fq_table("banks")}
                 SET background = $2,
                     disposition = $3::jsonb,
                     updated_at = NOW()
@@ -154,8 +155,8 @@ async def merge_bank_background(pool, llm_config, bank_id: str, new_info: str, u
         else:
             # Update only background
             await conn.execute(
-                """
-                UPDATE banks
+                f"""
+                UPDATE {fq_table("banks")}
                 SET background = $2,
                     updated_at = NOW()
                 WHERE bank_id = $1
@@ -361,9 +362,9 @@ async def list_banks(pool) -> list:
     """
     async with acquire_with_retry(pool) as conn:
         rows = await conn.fetch(
-            """
+            f"""
             SELECT bank_id, name, disposition, background, created_at, updated_at
-            FROM banks
+            FROM {fq_table("banks")}
             ORDER BY updated_at DESC
             """
         )
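
Every SQL statement in this release now builds its table reference through fq_table(...), imported from memory_engine. The helper itself is not part of this diff; as a rough sketch only, a schema-qualifying function of this kind usually looks something like the following (the environment variable name and default schema are assumptions, not taken from the package):

    # Hypothetical sketch of a schema-qualifying helper in the spirit of fq_table();
    # the real implementation lives in hindsight_api/engine/memory_engine.py and is not shown here.
    import os

    # Assumption: the target schema comes from configuration, defaulting to "public".
    _SCHEMA = os.environ.get("HINDSIGHT_DB_SCHEMA", "public")


    def fq_table(name: str) -> str:
        """Return a fully qualified, quoted table name, e.g. '"public"."banks"'."""
        return f'"{_SCHEMA}"."{name}"'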

hindsight_api/engine/retain/chunk_storage.py

@@ -6,6 +6,7 @@ Handles storage of document chunks in the database.
 
 import logging
 
+from ..memory_engine import fq_table
 from .types import ChunkMetadata
 
 logger = logging.getLogger(__name__)
@@ -42,8 +43,8 @@ async def store_chunks_batch(conn, bank_id: str, document_id: str, chunks: list[
 
     # Batch insert all chunks
     await conn.execute(
-        """
-        INSERT INTO chunks (chunk_id, document_id, bank_id, chunk_text, chunk_index)
+        f"""
+        INSERT INTO {fq_table("chunks")} (chunk_id, document_id, bank_id, chunk_text, chunk_index)
         SELECT * FROM unnest($1::text[], $2::text[], $3::text[], $4::text[], $5::integer[])
         """,
         chunk_ids,

hindsight_api/engine/retain/fact_storage.py

@@ -7,6 +7,7 @@ Handles insertion of facts into the database.
 import json
 import logging
 
+from ..memory_engine import fq_table
 from .types import ProcessedFact
 
 logger = logging.getLogger(__name__)
@@ -67,8 +68,8 @@ async def insert_facts_batch(
 
     # Batch insert all facts
     results = await conn.fetch(
-        """
-        INSERT INTO memory_units (bank_id, text, embedding, event_date, occurred_start, occurred_end, mentioned_at,
+        f"""
+        INSERT INTO {fq_table("memory_units")} (bank_id, text, embedding, event_date, occurred_start, occurred_end, mentioned_at,
                                   context, fact_type, confidence_score, access_count, metadata, chunk_id, document_id)
         SELECT $1, * FROM unnest(
             $2::text[], $3::vector[], $4::timestamptz[], $5::timestamptz[], $6::timestamptz[], $7::timestamptz[],
@@ -107,8 +108,8 @@ async def ensure_bank_exists(conn, bank_id: str) -> None:
         bank_id: Bank identifier
     """
     await conn.execute(
-        """
-        INSERT INTO banks (bank_id, disposition, background)
+        f"""
+        INSERT INTO {fq_table("banks")} (bank_id, disposition, background)
         VALUES ($1, $2::jsonb, $3)
         ON CONFLICT (bank_id) DO UPDATE
         SET updated_at = NOW()
@@ -141,12 +142,14 @@ async def handle_document_tracking(
     # Always delete old document first if it exists (cascades to units and links)
     # Only delete on the first batch to avoid deleting data we just inserted
     if is_first_batch:
-        await conn.fetchval("DELETE FROM documents WHERE id = $1 AND bank_id = $2 RETURNING id", document_id, bank_id)
+        await conn.fetchval(
+            f"DELETE FROM {fq_table('documents')} WHERE id = $1 AND bank_id = $2 RETURNING id", document_id, bank_id
+        )
 
     # Insert document (or update if exists from concurrent operations)
     await conn.execute(
-        """
-        INSERT INTO documents (id, bank_id, original_text, content_hash, metadata, retain_params)
+        f"""
+        INSERT INTO {fq_table("documents")} (id, bank_id, original_text, content_hash, metadata, retain_params)
         VALUES ($1, $2, $3, $4, $5, $6)
         ON CONFLICT (id, bank_id) DO UPDATE
         SET original_text = EXCLUDED.original_text,
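
Both chunk_storage.py and fact_storage.py keep their unnest-based batch inserts: parallel arrays are bound once and PostgreSQL expands them element-wise into rows. A minimal, self-contained illustration of that pattern with asyncpg (the table and columns here are invented for the example, not taken from this package):

    # Illustrative only: the unnest() parallel-array batch insert used by the queries above.
    import asyncio

    import asyncpg


    async def insert_rows(dsn: str, rows: list[tuple[str, int]]) -> None:
        conn = await asyncpg.connect(dsn)
        try:
            ids = [r[0] for r in rows]     # bound as $1::text[]
            counts = [r[1] for r in rows]  # bound as $2::integer[]
            # unnest() zips the arrays, yielding one inserted row per (id, count) pair.
            await conn.execute(
                """
                INSERT INTO example_items (id, count)
                SELECT * FROM unnest($1::text[], $2::integer[])
                """,
                ids,
                counts,
            )
        finally:
            await conn.close()


    # asyncio.run(insert_rows("postgresql://localhost/demo", [("a", 1), ("b", 2)]))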

hindsight_api/engine/retain/link_utils.py

@@ -7,6 +7,7 @@ import time
 from datetime import UTC, datetime, timedelta
 from uuid import UUID
 
+from ..memory_engine import fq_table
 from .types import EntityLink
 
 logger = logging.getLogger(__name__)
@@ -290,9 +291,9 @@ async def extract_entities_batch_optimized(
 
     entity_id_list = [uuid.UUID(eid) if isinstance(eid, str) else eid for eid in all_entity_ids]
     rows = await conn.fetch(
-        """
+        f"""
         SELECT entity_id, unit_id
-        FROM unit_entities
+        FROM {fq_table("unit_entities")}
         WHERE entity_id = ANY($1::uuid[])
         """,
         entity_id_list,
@@ -413,9 +414,9 @@ async def create_temporal_links_batch_per_fact(
     # Get the event_date for each new unit
     fetch_dates_start = time_mod.time()
     rows = await conn.fetch(
-        """
+        f"""
         SELECT id, event_date
-        FROM memory_units
+        FROM {fq_table("memory_units")}
         WHERE id::text = ANY($1)
         """,
         unit_ids,
@@ -432,9 +433,9 @@ async def create_temporal_links_batch_per_fact(
 
     fetch_neighbors_start = time_mod.time()
     all_candidates = await conn.fetch(
-        """
+        f"""
         SELECT id, event_date
-        FROM memory_units
+        FROM {fq_table("memory_units")}
         WHERE bank_id = $1
           AND event_date BETWEEN $2 AND $3
           AND id::text != ALL($4)
@@ -479,8 +480,8 @@ async def create_temporal_links_batch_per_fact(
     if links:
         insert_start = time_mod.time()
         await conn.executemany(
-            """
-            INSERT INTO memory_links (from_unit_id, to_unit_id, link_type, weight, entity_id)
+            f"""
+            INSERT INTO {fq_table("memory_links")} (from_unit_id, to_unit_id, link_type, weight, entity_id)
             VALUES ($1, $2, $3, $4, $5)
             ON CONFLICT (from_unit_id, to_unit_id, link_type, COALESCE(entity_id, '00000000-0000-0000-0000-000000000000'::uuid)) DO NOTHING
             """,
@@ -535,9 +536,9 @@ async def create_semantic_links_batch(
     # Fetch ALL existing units with embeddings in ONE query
    fetch_start = time_mod.time()
     all_existing = await conn.fetch(
-        """
+        f"""
         SELECT id, embedding
-        FROM memory_units
+        FROM {fq_table("memory_units")}
         WHERE bank_id = $1
           AND embedding IS NOT NULL
           AND id::text != ALL($2)
@@ -644,8 +645,8 @@ async def create_semantic_links_batch(
     if all_links:
         insert_start = time_mod.time()
         await conn.executemany(
-            """
-            INSERT INTO memory_links (from_unit_id, to_unit_id, link_type, weight, entity_id)
+            f"""
+            INSERT INTO {fq_table("memory_links")} (from_unit_id, to_unit_id, link_type, weight, entity_id)
             VALUES ($1, $2, $3, $4, $5)
             ON CONFLICT (from_unit_id, to_unit_id, link_type, COALESCE(entity_id, '00000000-0000-0000-0000-000000000000'::uuid)) DO NOTHING
             """,
@@ -721,8 +722,8 @@ async def insert_entity_links_batch(conn, links: list[EntityLink], chunk_size: i
 
     # Insert from temp table with ON CONFLICT (single query for all rows)
     insert_start = time_mod.time()
-    await conn.execute("""
-        INSERT INTO memory_links (from_unit_id, to_unit_id, link_type, weight, entity_id)
+    await conn.execute(f"""
+        INSERT INTO {fq_table("memory_links")} (from_unit_id, to_unit_id, link_type, weight, entity_id)
         SELECT from_unit_id, to_unit_id, link_type, weight, entity_id
         FROM _temp_entity_links
        ON CONFLICT (from_unit_id, to_unit_id, link_type, COALESCE(entity_id, '00000000-0000-0000-0000-000000000000'::uuid)) DO NOTHING
@@ -808,8 +809,8 @@ async def create_causal_links_batch(
     insert_start = time_mod.time()
     try:
         await conn.executemany(
-            """
-            INSERT INTO memory_links (from_unit_id, to_unit_id, link_type, weight, entity_id)
+            f"""
+            INSERT INTO {fq_table("memory_links")} (from_unit_id, to_unit_id, link_type, weight, entity_id)
             VALUES ($1, $2, $3, $4, $5)
             ON CONFLICT (from_unit_id, to_unit_id, link_type, COALESCE(entity_id, '00000000-0000-0000-0000-000000000000'::uuid)) DO NOTHING
             """,

hindsight_api/engine/retain/observation_regeneration.py

@@ -9,6 +9,7 @@ import time
 import uuid
 from datetime import UTC, datetime
 
+from ..memory_engine import fq_table
 from ..search import observation_utils
 from . import embedding_utils
 from .types import EntityLink
@@ -75,8 +76,8 @@ async def regenerate_observations_batch(
 
     # Batch query for entity names
     entity_rows = await conn.fetch(
-        """
-        SELECT id, canonical_name FROM entities
+        f"""
+        SELECT id, canonical_name FROM {fq_table("entities")}
         WHERE id = ANY($1) AND bank_id = $2
         """,
         entity_uuids,
@@ -86,10 +87,10 @@ async def regenerate_observations_batch(
 
     # Batch query for fact counts
     fact_counts = await conn.fetch(
-        """
+        f"""
         SELECT ue.entity_id, COUNT(*) as cnt
-        FROM unit_entities ue
-        JOIN memory_units mu ON ue.unit_id = mu.id
+        FROM {fq_table("unit_entities")} ue
+        JOIN {fq_table("memory_units")} mu ON ue.unit_id = mu.id
         WHERE ue.entity_id = ANY($1) AND mu.bank_id = $2
         GROUP BY ue.entity_id
         """,
@@ -154,10 +155,10 @@ async def _regenerate_entity_observations(
 
     # Get all facts mentioning this entity (exclude observations themselves)
     rows = await conn.fetch(
-        """
+        f"""
         SELECT mu.id, mu.text, mu.context, mu.occurred_start, mu.fact_type
-        FROM memory_units mu
-        JOIN unit_entities ue ON mu.id = ue.unit_id
+        FROM {fq_table("memory_units")} mu
+        JOIN {fq_table("unit_entities")} ue ON mu.id = ue.unit_id
         WHERE mu.bank_id = $1
           AND ue.entity_id = $2
           AND mu.fact_type IN ('world', 'experience')
@@ -193,12 +194,12 @@ async def _regenerate_entity_observations(
 
     # Delete old observations for this entity
     await conn.execute(
-        """
-        DELETE FROM memory_units
+        f"""
+        DELETE FROM {fq_table("memory_units")}
         WHERE id IN (
             SELECT mu.id
-            FROM memory_units mu
-            JOIN unit_entities ue ON mu.id = ue.unit_id
+            FROM {fq_table("memory_units")} mu
+            JOIN {fq_table("unit_entities")} ue ON mu.id = ue.unit_id
             WHERE mu.bank_id = $1
               AND mu.fact_type = 'observation'
              AND ue.entity_id = $2
@@ -217,8 +218,8 @@ async def _regenerate_entity_observations(
 
     for obs_text, embedding in zip(observations, embeddings):
         result = await conn.fetchrow(
-            """
-            INSERT INTO memory_units (
+            f"""
+            INSERT INTO {fq_table("memory_units")} (
                 bank_id, text, embedding, context, event_date,
                 occurred_start, occurred_end, mentioned_at,
                 fact_type, access_count
@@ -240,8 +241,8 @@ async def _regenerate_entity_observations(
 
         # Link observation to entity
         await conn.execute(
-            """
-            INSERT INTO unit_entities (unit_id, entity_id)
+            f"""
+            INSERT INTO {fq_table("unit_entities")} (unit_id, entity_id)
             VALUES ($1, $2)
             """,
            uuid.UUID(obs_id),

hindsight_api/engine/retain/orchestrator.py

@@ -8,7 +8,6 @@ import logging
 import time
 import uuid
 from datetime import UTC, datetime
-from typing import Any
 
 from ..db_utils import acquire_with_retry
 from . import bank_utils
@@ -29,7 +28,7 @@ from . import (
     link_creation,
     observation_regeneration,
 )
-from .types import ExtractedFact, ProcessedFact, RetainContent
+from .types import ExtractedFact, ProcessedFact, RetainContent, RetainContentDict
 
 logger = logging.getLogger(__name__)
 
@@ -43,7 +42,7 @@ async def retain_batch(
     format_date_fn,
     duplicate_checker_fn,
     bank_id: str,
-    contents_dicts: list[dict[str, Any]],
+    contents_dicts: list[RetainContentDict],
     document_id: str | None = None,
     is_first_batch: bool = True,
     fact_type_override: str | None = None,

hindsight_api/engine/retain/types.py

@@ -7,9 +7,33 @@ from content input to fact storage.
 
 from dataclasses import dataclass, field
 from datetime import UTC, datetime
+from typing import TypedDict
 from uuid import UUID
 
 
+class RetainContentDict(TypedDict, total=False):
+    """Type definition for content items in retain_batch_async.
+
+    Fields:
+        content: Text content to store (required)
+        context: Context about the content (optional)
+        event_date: When the content occurred (optional, defaults to now)
+        metadata: Custom key-value metadata (optional)
+        document_id: Document ID for this content item (optional)
+    """
+
+    content: str  # Required
+    context: str
+    event_date: datetime
+    metadata: dict[str, str]
+    document_id: str
+
+
+def _now_utc() -> datetime:
+    """Factory function for default event_date."""
+    return datetime.now(UTC)
+
+
 @dataclass
 class RetainContent:
     """
@@ -20,16 +44,9 @@ class RetainContent:
 
     content: str
     context: str = ""
-    event_date: datetime | None = None
+    event_date: datetime = field(default_factory=_now_utc)
     metadata: dict[str, str] = field(default_factory=dict)
 
-    def __post_init__(self):
-        """Ensure event_date is set."""
-        if self.event_date is None:
-            from datetime import datetime
-
-            self.event_date = datetime.now(UTC)
-
 
 @dataclass
 class ChunkMetadata:
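
The event_date default on RetainContent moves from a __post_init__ fallback to field(default_factory=_now_utc), so each instance gets its own timestamp at construction time and the attribute is always a datetime rather than datetime | None, while RetainContentDict documents the dict shape retain_batch() now accepts. A short usage sketch, purely illustrative:

    # Illustrative usage of the updated types; the values are made up.
    from hindsight_api.engine.retain.types import RetainContent, RetainContentDict

    item = RetainContent(content="User prefers dark mode", context="settings chat")
    # Filled by the default_factory: an aware UTC datetime, never None.
    assert item.event_date.tzinfo is not None

    # Only "content" is required because the TypedDict is declared with total=False.
    payload: RetainContentDict = {"content": "User renewed subscription"}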

hindsight_api/engine/search/graph_retrieval.py

@@ -10,6 +10,7 @@ import logging
 from abc import ABC, abstractmethod
 
 from ..db_utils import acquire_with_retry
+from ..memory_engine import fq_table
 from .types import RetrievalResult
 
 logger = logging.getLogger(__name__)
@@ -139,11 +140,11 @@ class BFSGraphRetriever(GraphRetriever):
 
         # Step 1: Find entry points
         entry_points = await conn.fetch(
-            """
+            f"""
             SELECT id, text, context, event_date, occurred_start, occurred_end,
                    mentioned_at, access_count, embedding, fact_type, document_id, chunk_id,
                    1 - (embedding <=> $1::vector) AS similarity
-            FROM memory_units
+            FROM {fq_table("memory_units")}
             WHERE bank_id = $2
               AND embedding IS NOT NULL
               AND fact_type = $3
@@ -188,13 +189,13 @@ class BFSGraphRetriever(GraphRetriever):
             if batch_nodes and budget_remaining > 0:
                 max_neighbors = len(batch_nodes) * 20
                 neighbors = await conn.fetch(
-                    """
+                    f"""
                     SELECT mu.id, mu.text, mu.context, mu.occurred_start, mu.occurred_end,
                            mu.mentioned_at, mu.access_count, mu.embedding, mu.fact_type,
                            mu.document_id, mu.chunk_id,
                            ml.weight, ml.link_type, ml.from_unit_id
-                    FROM memory_links ml
-                    JOIN memory_units mu ON ml.to_unit_id = mu.id
+                    FROM {fq_table("memory_links")} ml
+                    JOIN {fq_table("memory_units")} mu ON ml.to_unit_id = mu.id
                     WHERE ml.from_unit_id = ANY($1::uuid[])
                       AND ml.weight >= $2
                       AND mu.fact_type = $3

hindsight_api/engine/search/mpfp_retrieval.py

@@ -20,6 +20,7 @@ from collections import defaultdict
 from dataclasses import dataclass, field
 
 from ..db_utils import acquire_with_retry
+from ..memory_engine import fq_table
 from .graph_retrieval import GraphRetriever
 from .types import RetrievalResult
 
@@ -217,10 +218,10 @@ async def load_typed_adjacency(pool, bank_id: str) -> TypedAdjacency:
     """
     async with acquire_with_retry(pool) as conn:
         rows = await conn.fetch(
-            """
+            f"""
             SELECT ml.from_unit_id, ml.to_unit_id, ml.link_type, ml.weight
-            FROM memory_links ml
-            JOIN memory_units mu ON ml.from_unit_id = mu.id
+            FROM {fq_table("memory_links")} ml
+            JOIN {fq_table("memory_units")} mu ON ml.from_unit_id = mu.id
             WHERE mu.bank_id = $1
               AND ml.weight >= 0.1
             ORDER BY ml.from_unit_id, ml.weight DESC
@@ -252,10 +253,10 @@ async def fetch_memory_units_by_ids(
 
     async with acquire_with_retry(pool) as conn:
         rows = await conn.fetch(
-            """
+            f"""
             SELECT id, text, context, event_date, occurred_start, occurred_end,
                    mentioned_at, access_count, embedding, fact_type, document_id, chunk_id
-            FROM memory_units
+            FROM {fq_table("memory_units")}
             WHERE id = ANY($1::uuid[])
               AND fact_type = $2
             """,
@@ -418,9 +419,9 @@ class MPFPGraphRetriever(GraphRetriever):
         """Fallback: find semantic seeds via embedding search."""
         async with acquire_with_retry(pool) as conn:
             rows = await conn.fetch(
-                """
+                f"""
                 SELECT id, 1 - (embedding <=> $1::vector) AS similarity
-                FROM memory_units
+                FROM {fq_table("memory_units")}
                 WHERE bank_id = $2
                   AND embedding IS NOT NULL
                   AND fact_type = $3

hindsight_api/engine/search/retrieval.py

@@ -16,6 +16,7 @@ from typing import Optional
 
 from ...config import get_config
 from ..db_utils import acquire_with_retry
+from ..memory_engine import fq_table
 from .graph_retrieval import BFSGraphRetriever, GraphRetriever
 from .mpfp_retrieval import MPFPGraphRetriever
 from .types import RetrievalResult
@@ -80,10 +81,10 @@ async def retrieve_semantic(
         List of RetrievalResult objects
     """
     results = await conn.fetch(
-        """
+        f"""
         SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id,
                1 - (embedding <=> $1::vector) AS similarity
-        FROM memory_units
+        FROM {fq_table("memory_units")}
         WHERE bank_id = $2
           AND embedding IS NOT NULL
           AND fact_type = $3
@@ -131,10 +132,10 @@ async def retrieve_bm25(conn, query_text: str, bank_id: str, fact_type: str, lim
     query_tsquery = " | ".join(tokens)
 
     results = await conn.fetch(
-        """
+        f"""
         SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id,
                ts_rank_cd(search_vector, to_tsquery('english', $1)) AS bm25_score
-        FROM memory_units
+        FROM {fq_table("memory_units")}
         WHERE bank_id = $2
           AND fact_type = $3
           AND search_vector @@ to_tsquery('english', $1)
@@ -188,10 +189,10 @@ async def retrieve_temporal(
         end_date = end_date.replace(tzinfo=UTC)
 
     entry_points = await conn.fetch(
-        """
+        f"""
         SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id,
                1 - (embedding <=> $1::vector) AS similarity
-        FROM memory_units
+        FROM {fq_table("memory_units")}
         WHERE bank_id = $2
           AND fact_type = $3
           AND embedding IS NOT NULL
@@ -272,12 +273,12 @@ async def retrieve_temporal(
         # Get neighbors via temporal and causal links
         if budget_remaining > 0:
             neighbors = await conn.fetch(
-                """
+                f"""
                 SELECT mu.id, mu.text, mu.context, mu.event_date, mu.occurred_start, mu.occurred_end, mu.mentioned_at, mu.access_count, mu.embedding, mu.fact_type, mu.document_id, mu.chunk_id,
                        ml.weight, ml.link_type,
                        1 - (mu.embedding <=> $1::vector) AS similarity
-                FROM memory_links ml
-                JOIN memory_units mu ON ml.to_unit_id = mu.id
+                FROM {fq_table("memory_links")} ml
+                JOIN {fq_table("memory_units")} mu ON ml.to_unit_id = mu.id
                 WHERE ml.from_unit_id = $2
                   AND ml.link_type IN ('temporal', 'causes', 'caused_by', 'enables', 'prevents')
                   AND ml.weight >= 0.1
@@ -546,11 +547,11 @@ async def _get_temporal_entry_points(
         end_date = end_date.replace(tzinfo=UTC)
 
     rows = await conn.fetch(
-        """
+        f"""
         SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at,
                access_count, embedding, fact_type, document_id, chunk_id,
                1 - (embedding <=> $1::vector) AS similarity
-        FROM memory_units
+        FROM {fq_table("memory_units")}
         WHERE bank_id = $2
           AND fact_type = $3
           AND embedding IS NOT NULL
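
All of these retrieval queries rank rows by 1 - (embedding <=> $1::vector), i.e. they turn pgvector's cosine-distance operator into a similarity score. A small standalone sketch of the same idea, assuming the pgvector asyncpg codec is registered (this diff does not show how the package binds its embedding parameters):

    # Illustrative only: cosine distance (<=>) vs. the similarity the queries above compute.
    import asyncpg
    from pgvector.asyncpg import register_vector  # assumption: pgvector's Python helper is used


    async def top_k(conn: asyncpg.Connection, query_embedding, bank_id: str, k: int = 5):
        await register_vector(conn)  # lets asyncpg encode the embedding parameter
        return await conn.fetch(
            """
            SELECT id, 1 - (embedding <=> $1::vector) AS similarity
            FROM memory_units
            WHERE bank_id = $2
            ORDER BY embedding <=> $1::vector
            LIMIT $3
            """,
            query_embedding,
            bank_id,
            k,
        )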

hindsight_api/engine/search/think_utils.py

@@ -101,7 +101,7 @@ def build_think_prompt(
     name: str,
     disposition: DispositionTraits,
     background: str,
-    context: str = None,
+    context: str | None = None,
 ) -> str:
     """Build the think prompt for the LLM."""
     disposition_desc = build_disposition_description(disposition)

hindsight_api/engine/search/tracer.py

@@ -115,7 +115,7 @@ class SearchTracer:
         node_id: str,
         text: str,
         context: str,
-        event_date: datetime,
+        event_date: datetime | None,
         access_count: int,
         is_entry_point: bool,
         parent_node_id: str | None,

hindsight_api/engine/task_backend.py

@@ -89,6 +89,38 @@ class TaskBackend(ABC):
             traceback.print_exc()
 
 
+class SyncTaskBackend(TaskBackend):
+    """
+    Synchronous task backend that executes tasks immediately.
+
+    This is useful for embedded/CLI usage where we don't want background
+    workers that prevent clean exit. Tasks are executed inline rather than
+    being queued.
+    """
+
+    async def initialize(self):
+        """No-op for sync backend."""
+        self._initialized = True
+        logger.debug("SyncTaskBackend initialized")
+
+    async def submit_task(self, task_dict: dict[str, Any]):
+        """
+        Execute the task immediately (synchronously).
+
+        Args:
+            task_dict: Task dictionary to execute
+        """
+        if not self._initialized:
+            await self.initialize()
+
+        await self._execute_task(task_dict)
+
+    async def shutdown(self):
+        """No-op for sync backend."""
+        self._initialized = False
+        logger.debug("SyncTaskBackend shutdown")
+
+
 class AsyncIOQueueBackend(TaskBackend):
     """
     Task backend implementation using asyncio queues.
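
SyncTaskBackend executes each submitted task inline through the base class's _execute_task, so embedded and CLI callers get deterministic completion and a clean exit with no background worker. A hedged usage sketch; the task_dict shape and the surrounding engine wiring are assumptions, since neither is shown in this diff:

    # Illustrative only: driving the synchronous backend directly.
    import asyncio

    from hindsight_api.engine.task_backend import SyncTaskBackend


    async def main() -> None:
        backend = SyncTaskBackend()
        await backend.initialize()
        # Unlike AsyncIOQueueBackend, submit_task() only returns after the task has run.
        await backend.submit_task({"type": "retain", "payload": {"content": "example"}})  # assumed task_dict shape
        await backend.shutdown()


    # asyncio.run(main())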