hindsight-api 0.3.0__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (75)
  1. hindsight_api/__init__.py +1 -1
  2. hindsight_api/admin/cli.py +59 -0
  3. hindsight_api/alembic/versions/h3c4d5e6f7g8_mental_models_v4.py +112 -0
  4. hindsight_api/alembic/versions/i4d5e6f7g8h9_delete_opinions.py +41 -0
  5. hindsight_api/alembic/versions/j5e6f7g8h9i0_mental_model_versions.py +95 -0
  6. hindsight_api/alembic/versions/k6f7g8h9i0j1_add_directive_subtype.py +58 -0
  7. hindsight_api/alembic/versions/l7g8h9i0j1k2_add_worker_columns.py +109 -0
  8. hindsight_api/alembic/versions/m8h9i0j1k2l3_mental_model_id_to_text.py +41 -0
  9. hindsight_api/alembic/versions/n9i0j1k2l3m4_learnings_and_pinned_reflections.py +134 -0
  10. hindsight_api/alembic/versions/o0j1k2l3m4n5_migrate_mental_models_data.py +113 -0
  11. hindsight_api/alembic/versions/p1k2l3m4n5o6_new_knowledge_architecture.py +194 -0
  12. hindsight_api/alembic/versions/q2l3m4n5o6p7_fix_mental_model_fact_type.py +50 -0
  13. hindsight_api/alembic/versions/r3m4n5o6p7q8_add_reflect_response_to_reflections.py +47 -0
  14. hindsight_api/alembic/versions/s4n5o6p7q8r9_add_consolidated_at_to_memory_units.py +53 -0
  15. hindsight_api/alembic/versions/t5o6p7q8r9s0_rename_mental_models_to_observations.py +134 -0
  16. hindsight_api/alembic/versions/u6p7q8r9s0t1_mental_models_text_id.py +41 -0
  17. hindsight_api/alembic/versions/v7q8r9s0t1u2_add_max_tokens_to_mental_models.py +50 -0
  18. hindsight_api/api/http.py +1120 -93
  19. hindsight_api/api/mcp.py +11 -191
  20. hindsight_api/config.py +174 -46
  21. hindsight_api/engine/consolidation/__init__.py +5 -0
  22. hindsight_api/engine/consolidation/consolidator.py +926 -0
  23. hindsight_api/engine/consolidation/prompts.py +77 -0
  24. hindsight_api/engine/cross_encoder.py +153 -22
  25. hindsight_api/engine/directives/__init__.py +5 -0
  26. hindsight_api/engine/directives/models.py +37 -0
  27. hindsight_api/engine/embeddings.py +136 -13
  28. hindsight_api/engine/interface.py +32 -13
  29. hindsight_api/engine/llm_wrapper.py +505 -43
  30. hindsight_api/engine/memory_engine.py +2101 -1094
  31. hindsight_api/engine/mental_models/__init__.py +14 -0
  32. hindsight_api/engine/mental_models/models.py +53 -0
  33. hindsight_api/engine/reflect/__init__.py +18 -0
  34. hindsight_api/engine/reflect/agent.py +933 -0
  35. hindsight_api/engine/reflect/models.py +109 -0
  36. hindsight_api/engine/reflect/observations.py +186 -0
  37. hindsight_api/engine/reflect/prompts.py +483 -0
  38. hindsight_api/engine/reflect/tools.py +437 -0
  39. hindsight_api/engine/reflect/tools_schema.py +250 -0
  40. hindsight_api/engine/response_models.py +130 -4
  41. hindsight_api/engine/retain/bank_utils.py +79 -201
  42. hindsight_api/engine/retain/fact_extraction.py +81 -48
  43. hindsight_api/engine/retain/fact_storage.py +5 -8
  44. hindsight_api/engine/retain/link_utils.py +5 -8
  45. hindsight_api/engine/retain/orchestrator.py +1 -55
  46. hindsight_api/engine/retain/types.py +2 -2
  47. hindsight_api/engine/search/graph_retrieval.py +2 -2
  48. hindsight_api/engine/search/link_expansion_retrieval.py +164 -29
  49. hindsight_api/engine/search/mpfp_retrieval.py +1 -1
  50. hindsight_api/engine/search/retrieval.py +14 -14
  51. hindsight_api/engine/search/think_utils.py +41 -140
  52. hindsight_api/engine/search/trace.py +0 -1
  53. hindsight_api/engine/search/tracer.py +2 -5
  54. hindsight_api/engine/search/types.py +0 -3
  55. hindsight_api/engine/task_backend.py +112 -196
  56. hindsight_api/engine/utils.py +0 -151
  57. hindsight_api/extensions/__init__.py +10 -1
  58. hindsight_api/extensions/builtin/tenant.py +11 -4
  59. hindsight_api/extensions/operation_validator.py +81 -4
  60. hindsight_api/extensions/tenant.py +26 -0
  61. hindsight_api/main.py +28 -5
  62. hindsight_api/mcp_local.py +12 -53
  63. hindsight_api/mcp_tools.py +494 -0
  64. hindsight_api/models.py +0 -2
  65. hindsight_api/worker/__init__.py +11 -0
  66. hindsight_api/worker/main.py +296 -0
  67. hindsight_api/worker/poller.py +486 -0
  68. {hindsight_api-0.3.0.dist-info → hindsight_api-0.4.1.dist-info}/METADATA +12 -6
  69. hindsight_api-0.4.1.dist-info/RECORD +112 -0
  70. {hindsight_api-0.3.0.dist-info → hindsight_api-0.4.1.dist-info}/entry_points.txt +1 -0
  71. hindsight_api/engine/retain/observation_regeneration.py +0 -254
  72. hindsight_api/engine/search/observation_utils.py +0 -125
  73. hindsight_api/engine/search/scoring.py +0 -159
  74. hindsight_api-0.3.0.dist-info/RECORD +0 -82
  75. {hindsight_api-0.3.0.dist-info → hindsight_api-0.4.1.dist-info}/WHEEL +0 -0
hindsight_api/engine/task_backend.py
@@ -1,31 +1,40 @@
 """
-Abstract task backend for running async tasks.
+Task backend for distributed task processing.
 
-This provides an abstraction that can be adapted to different execution models:
-- AsyncIO queue (default implementation)
-- Pub/Sub architectures (future)
-- Message brokers (future)
+This provides an abstraction for task storage and execution:
+- BrokerTaskBackend: Uses PostgreSQL as broker (production)
+- SyncTaskBackend: Executes tasks immediately (testing/embedded)
 """
 
-import asyncio
+import json
 import logging
 from abc import ABC, abstractmethod
 from collections.abc import Awaitable, Callable
-from typing import Any
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+    import asyncpg
 
 logger = logging.getLogger(__name__)
 
 
+def fq_table(table: str, schema: str | None = None) -> str:
+    """Get fully-qualified table name with optional schema prefix."""
+    if schema:
+        return f'"{schema}".{table}'
+    return table
+
+
 class TaskBackend(ABC):
     """
     Abstract base class for task execution backends.
 
     Implementations must:
     1. Store/publish task events (as serializable dicts)
-    2. Execute tasks through a provided executor callback
+    2. Execute tasks through a provided executor callback (optional)
 
     The backend treats tasks as pure dictionaries that can be serialized
-    and sent over the network. The executor (typically MemoryEngine.execute_task)
+    and stored in the database. The executor (typically MemoryEngine.execute_task)
     receives the dict and routes it to the appropriate handler.
     """
 
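The new fq_table helper is self-contained, so a quick sketch of its behavior may be useful (the schema name is an invented example, not a value from the package):

```python
from hindsight_api.engine.task_backend import fq_table

# No schema: the table name passes through unchanged.
assert fq_table("async_operations") == "async_operations"

# With a schema: the schema is double-quoted and prefixed, Postgres-style.
assert fq_table("async_operations", "tenant_a") == '"tenant_a".async_operations'
```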
@@ -46,7 +55,7 @@ class TaskBackend(ABC):
     @abstractmethod
     async def initialize(self):
         """
-        Initialize the backend (e.g., start workers, connect to broker).
+        Initialize the backend (e.g., connect to database).
         """
         pass
 
@@ -63,7 +72,7 @@ class TaskBackend(ABC):
     @abstractmethod
     async def shutdown(self):
         """
-        Shutdown the backend gracefully (e.g., stop workers, close connections).
+        Shutdown the backend gracefully.
         """
         pass
 
@@ -93,9 +102,8 @@ class SyncTaskBackend(TaskBackend):
     """
     Synchronous task backend that executes tasks immediately.
 
-    This is useful for embedded/CLI usage where we don't want background
-    workers that prevent clean exit. Tasks are executed inline rather than
-    being queued.
+    This is useful for tests and embedded/CLI usage where we don't want
+    background workers. Tasks are executed inline rather than being queued.
     """
 
     async def initialize(self):
@@ -121,221 +129,129 @@ class SyncTaskBackend(TaskBackend):
         logger.debug("SyncTaskBackend shutdown")
 
 
-class NoopTaskBackend(TaskBackend):
+class BrokerTaskBackend(TaskBackend):
     """
-    No-op task backend that discards all tasks.
+    Task backend using PostgreSQL as broker.
 
-    This is useful for tests where background task execution is not needed
-    and would only slow down the test suite.
-    """
+    submit_task() stores task_payload in async_operations table.
+    Actual polling and execution is handled separately by WorkerPoller.
 
-    async def initialize(self):
-        """No-op."""
-        self._initialized = True
-        logger.debug("NoopTaskBackend initialized")
-
-    async def submit_task(self, task_dict: dict[str, Any]):
-        """Discard the task (do nothing)."""
-        pass
-
-    async def shutdown(self):
-        """No-op."""
-        self._initialized = False
-        logger.debug("NoopTaskBackend shutdown")
-
-
-class AsyncIOQueueBackend(TaskBackend):
-    """
-    Task backend implementation using asyncio queues.
-
-    This is the default implementation that uses in-process asyncio queues
-    and a periodic consumer worker.
+    This backend is used by the API to store tasks. Workers poll
+    the database separately to claim and execute tasks.
     """
 
-    def __init__(self, batch_size: int = 10, batch_interval: float = 1.0):
+    def __init__(
+        self,
+        pool_getter: Callable[[], "asyncpg.Pool"],
+        schema: str | None = None,
+        schema_getter: Callable[[], str | None] | None = None,
+    ):
         """
-        Initialize AsyncIO queue backend.
+        Initialize the broker task backend.
 
         Args:
-            batch_size: Maximum number of tasks to process in one batch
-            batch_interval: Maximum time (seconds) to wait before processing batch
+            pool_getter: Callable that returns the asyncpg connection pool
+            schema: Database schema for multi-tenant support (optional, static)
+            schema_getter: Callable that returns current schema dynamically (optional).
+                If set, takes precedence over static schema for submit_task.
         """
         super().__init__()
-        self._queue: asyncio.Queue | None = None
-        self._worker_task: asyncio.Task | None = None
-        self._shutdown_event: asyncio.Event | None = None
-        self._batch_size = batch_size
-        self._batch_interval = batch_interval
-        self._in_flight_count = 0
-        self._in_flight_lock = asyncio.Lock()
+        self._pool_getter = pool_getter
+        self._schema = schema
+        self._schema_getter = schema_getter
 
     async def initialize(self):
-        """Initialize the queue and start the worker."""
-        if self._initialized:
-            return
-
-        self._queue = asyncio.Queue()
-        self._shutdown_event = asyncio.Event()
-        self._worker_task = asyncio.create_task(self._worker())
+        """Initialize the backend."""
         self._initialized = True
-        logger.info("AsyncIOQueueBackend initialized")
+        logger.info("BrokerTaskBackend initialized")
 
     async def submit_task(self, task_dict: dict[str, Any]):
         """
-        Submit a task by putting it in the queue.
+        Store task payload in async_operations table.
+
+        The task_dict should contain an 'operation_id' if updating an existing
+        operation record, otherwise a new operation will be created.
 
         Args:
-            task_dict: Task dictionary to execute
+            task_dict: Task dictionary to store (must be JSON serializable)
         """
         if not self._initialized:
            await self.initialize()
 
-        await self._queue.put(task_dict)
+        pool = self._pool_getter()
+        operation_id = task_dict.get("operation_id")
+        task_type = task_dict.get("type", "unknown")
+        bank_id = task_dict.get("bank_id")
+        payload_json = json.dumps(task_dict)
+
+        schema = self._schema_getter() if self._schema_getter else self._schema
+        table = fq_table("async_operations", schema)
+
+        if operation_id:
+            # Update existing operation with task payload
+            await pool.execute(
+                f"""
+                UPDATE {table}
+                SET task_payload = $1::jsonb, updated_at = now()
+                WHERE operation_id = $2
+                """,
+                payload_json,
+                operation_id,
+            )
+            logger.debug(f"Updated task payload for operation {operation_id}")
+        else:
+            # Insert new operation (for tasks without pre-created records)
+            # e.g., access_count_update tasks
+            import uuid
+
+            new_id = uuid.uuid4()
+            await pool.execute(
+                f"""
+                INSERT INTO {table} (operation_id, bank_id, operation_type, status, task_payload)
+                VALUES ($1, $2, $3, 'pending', $4::jsonb)
+                """,
+                new_id,
+                bank_id,
+                task_type,
+                payload_json,
+            )
+            logger.debug(f"Created new operation {new_id} for task type {task_type}")
+
+    async def shutdown(self):
+        """Shutdown the backend."""
+        self._initialized = False
+        logger.info("BrokerTaskBackend shutdown")
 
     async def wait_for_pending_tasks(self, timeout: float = 120.0):
         """
-        Wait for all pending tasks in the queue and in-flight tasks to complete.
+        Wait for pending tasks to be processed.
 
-        This is useful in tests to ensure background tasks complete before assertions.
+        In the broker model, this polls the database to check if tasks
+        for this process have been completed. This is useful in tests
+        when worker_enabled=True (API processes its own tasks).
 
         Args:
-            timeout: Maximum time to wait in seconds (default 120s for long-running tasks)
+            timeout: Maximum time to wait in seconds
         """
-        if not self._initialized or self._queue is None:
-            return
+        import asyncio
+
+        pool = self._pool_getter()
+        schema = self._schema_getter() if self._schema_getter else self._schema
+        table = fq_table("async_operations", schema)
 
-        # Wait for queue to be empty AND no in-flight tasks
         start_time = asyncio.get_event_loop().time()
         while asyncio.get_event_loop().time() - start_time < timeout:
-            async with self._in_flight_lock:
-                in_flight = self._in_flight_count
-
-            if self._queue.empty() and in_flight == 0:
-                # Queue is empty and no tasks in flight, we're done
+            # Check if there are any pending tasks with payloads
+            count = await pool.fetchval(
+                f"""
+                SELECT COUNT(*) FROM {table}
+                WHERE status = 'pending' AND task_payload IS NOT NULL
+                """
+            )
+
+            if count == 0:
                 return
 
-            # Wait a bit before checking again
             await asyncio.sleep(0.5)
 
-    async def shutdown(self):
-        """Shutdown the worker and drain the queue."""
-        if not self._initialized:
-            return
-
-        logger.info("Shutting down AsyncIOQueueBackend...")
-
-        # Signal shutdown
-        self._shutdown_event.set()
-
-        # Cancel worker
-        if self._worker_task is not None:
-            self._worker_task.cancel()
-            try:
-                await self._worker_task
-            except asyncio.CancelledError:
-                pass  # Worker cancelled successfully
-
-        self._initialized = False
-        logger.info("AsyncIOQueueBackend shutdown complete")
-
-    async def _execute_task_with_tracking(self, task_dict: dict[str, Any]):
-        """Execute a task and track its in-flight status."""
-        async with self._in_flight_lock:
-            self._in_flight_count += 1
-        try:
-            await self._execute_task(task_dict)
-        finally:
-            async with self._in_flight_lock:
-                self._in_flight_count -= 1
-
-    async def _execute_task_no_tracking(self, task_dict: dict[str, Any]):
-        """Execute a task without in-flight tracking (tracking done at batch level)."""
-        await self._execute_task(task_dict)
-
-    def _get_queue_stats(self) -> tuple[int, dict[str, int]]:
-        """Get current queue size and bank_id distribution."""
-        queue_size = self._queue.qsize() if self._queue else 0
-        bank_distribution: dict[str, int] = {}
-
-        if queue_size > 0 and self._queue:
-            # Peek at queue items without removing them
-            # Note: This is a snapshot and may not be perfectly accurate due to concurrency
-            try:
-                # Access internal deque for logging purposes only
-                items = list(self._queue._queue)  # type: ignore[attr-defined]
-                for item in items:
-                    bank_id = item.get("bank_id", "unknown")
-                    bank_distribution[bank_id] = bank_distribution.get(bank_id, 0) + 1
-            except Exception:
-                pass  # Queue access failed, return empty distribution
-
-        return queue_size, bank_distribution
-
-    async def _worker(self):
-        """
-        Background worker that processes tasks in batches.
-
-        Collects tasks for up to batch_interval seconds or batch_size items,
-        then processes them.
-        """
-        while not self._shutdown_event.is_set():
-            try:
-                # Collect tasks for batching
-                tasks = []
-                deadline = asyncio.get_event_loop().time() + self._batch_interval
-
-                while len(tasks) < self._batch_size and asyncio.get_event_loop().time() < deadline:
-                    try:
-                        remaining_time = max(0.1, deadline - asyncio.get_event_loop().time())
-                        task_dict = await asyncio.wait_for(self._queue.get(), timeout=remaining_time)
-                        # Track task as in-flight immediately when picked up from queue
-                        # This prevents wait_for_pending_tasks from returning too early
-                        async with self._in_flight_lock:
-                            self._in_flight_count += 1
-                        tasks.append(task_dict)
-                    except TimeoutError:
-                        break
-
-                # Process batch
-                if tasks:
-                    # Log batch start with queue stats
-                    queue_size, bank_distribution = self._get_queue_stats()
-
-                    # Summarize batch by task type and bank
-                    batch_summary: dict[str, dict[str, int]] = {}
-                    for task_dict in tasks:
-                        task_type = task_dict.get("type", "unknown")
-                        bank_id = task_dict.get("bank_id", "unknown")
-                        if task_type not in batch_summary:
-                            batch_summary[task_type] = {}
-                        batch_summary[task_type][bank_id] = batch_summary[task_type].get(bank_id, 0) + 1
-
-                    # Build log message
-                    batch_parts = []
-                    for task_type, banks in sorted(batch_summary.items()):
-                        bank_str = ", ".join(f"{b}:{c}" for b, c in sorted(banks.items()))
-                        batch_parts.append(f"{task_type}[{bank_str}]")
-                    batch_str = ", ".join(batch_parts)
-
-                    if queue_size > 0:
-                        pending_str = ", ".join(f"{k}:{v}" for k, v in sorted(bank_distribution.items()))
-                        logger.info(
-                            f"Processing {len(tasks)} tasks: {batch_str} (pending={queue_size} [{pending_str}])"
-                        )
-                    else:
-                        logger.info(f"Processing {len(tasks)} tasks: {batch_str}")
-
-                    # Execute tasks concurrently (in_flight already tracked when picked up)
-                    await asyncio.gather(
-                        *[self._execute_task_no_tracking(task_dict) for task_dict in tasks], return_exceptions=True
-                    )
-
-                    # Decrement in_flight count after all tasks complete
-                    async with self._in_flight_lock:
-                        self._in_flight_count -= len(tasks)
-
-            except asyncio.CancelledError:
-                break
-            except Exception as e:
-                logger.error(f"Worker error: {e}")
-                await asyncio.sleep(1)  # Backoff on error
+        logger.warning(f"Timeout waiting for pending tasks after {timeout}s")
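Note that submit_task() only writes rows; the claim/execute side lives in the new hindsight_api/worker/poller.py (+486 lines, not shown in this diff). Below is a rough sketch of how a poller could consume these rows. It assumes a created_at column and 'running'/'completed' statuses (only 'pending' and task_payload are visible above) and is not the package's actual WorkerPoller:

```python
import json
from collections.abc import Awaitable, Callable
from typing import Any

import asyncpg


async def claim_and_run_one(
    pool: asyncpg.Pool,
    executor: Callable[[dict[str, Any]], Awaitable[None]],
) -> bool:
    """Claim one pending task and execute it; return True if one ran."""
    async with pool.acquire() as conn:
        async with conn.transaction():
            # FOR UPDATE SKIP LOCKED lets many workers poll the same table
            # without claiming the same row twice.
            row = await conn.fetchrow(
                """
                SELECT operation_id, task_payload FROM async_operations
                WHERE status = 'pending' AND task_payload IS NOT NULL
                ORDER BY created_at
                LIMIT 1
                FOR UPDATE SKIP LOCKED
                """
            )
            if row is None:
                return False
            await conn.execute(
                "UPDATE async_operations SET status = 'running', updated_at = now() WHERE operation_id = $1",
                row["operation_id"],
            )
    # Outside the claim transaction: run the task, then mark it done.
    await executor(json.loads(row["task_payload"]))  # e.g., MemoryEngine.execute_task
    await pool.execute(
        "UPDATE async_operations SET status = 'completed', updated_at = now() WHERE operation_id = $1",
        row["operation_id"],
    )
    return True
```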
hindsight_api/engine/utils.py
@@ -65,154 +65,3 @@ async def extract_facts(
         return [], chunks
 
     return facts, chunks
-
-
-def cosine_similarity(vec1: list[float], vec2: list[float]) -> float:
-    """
-    Calculate cosine similarity between two vectors.
-
-    Args:
-        vec1: First vector
-        vec2: Second vector
-
-    Returns:
-        Similarity score between 0 and 1
-    """
-    if len(vec1) != len(vec2):
-        raise ValueError("Vectors must have same dimension")
-
-    dot_product = sum(a * b for a, b in zip(vec1, vec2))
-    magnitude1 = sum(a * a for a in vec1) ** 0.5
-    magnitude2 = sum(b * b for b in vec2) ** 0.5
-
-    if magnitude1 == 0 or magnitude2 == 0:
-        return 0.0
-
-    return dot_product / (magnitude1 * magnitude2)
-
-
-def calculate_recency_weight(days_since: float, half_life_days: float = 365.0) -> float:
-    """
-    Calculate recency weight using logarithmic decay.
-
-    This provides much better differentiation over long time periods compared to
-    exponential decay. Uses a log-based decay where the half-life parameter controls
-    when memories reach 50% weight.
-
-    Examples:
-        - Today (0 days): 1.0
-        - 1 year (365 days): ~0.5 (with default half_life=365)
-        - 2 years (730 days): ~0.33
-        - 5 years (1825 days): ~0.17
-        - 10 years (3650 days): ~0.09
-
-    This ensures that 2-year-old and 5-year-old memories have meaningfully
-    different weights, unlike exponential decay which makes them both ~0.
-
-    Args:
-        days_since: Number of days since the memory was created
-        half_life_days: Number of days for weight to reach 0.5 (default: 1 year)
-
-    Returns:
-        Weight between 0 and 1
-    """
-    import math
-
-    # Logarithmic decay: 1 / (1 + log(1 + days_since/half_life))
-    # This decays much slower than exponential, giving better long-term differentiation
-    normalized_age = days_since / half_life_days
-    return 1.0 / (1.0 + math.log1p(normalized_age))
-
-
-def calculate_frequency_weight(access_count: int, max_boost: float = 2.0) -> float:
-    """
-    Calculate frequency weight based on access count.
-
-    Frequently accessed memories are weighted higher.
-    Uses logarithmic scaling to avoid over-weighting.
-
-    Args:
-        access_count: Number of times the memory was accessed
-        max_boost: Maximum multiplier for frequently accessed memories
-
-    Returns:
-        Weight between 1.0 and max_boost
-    """
-    import math
-
-    if access_count <= 0:
-        return 1.0
-
-    # Logarithmic scaling: log(access_count + 1) / log(10)
-    # This gives: 0 accesses = 1.0, 9 accesses ~= 1.5, 99 accesses ~= 2.0
-    normalized = math.log(access_count + 1) / math.log(10)
-    return 1.0 + min(normalized, max_boost - 1.0)
-
-
-def calculate_temporal_anchor(occurred_start: datetime, occurred_end: datetime) -> datetime:
-    """
-    Calculate a single temporal anchor point from a temporal range.
-
-    Used for spreading activation - we need a single representative date
-    to calculate temporal proximity between facts. This simplifies the
-    range-to-range distance problem.
-
-    Strategy: Use midpoint of the range for balanced representation.
-
-    Args:
-        occurred_start: Start of temporal range
-        occurred_end: End of temporal range
-
-    Returns:
-        Single datetime representing the temporal anchor (midpoint)
-
-    Examples:
-        - Point event (July 14): start=July 14, end=July 14 → anchor=July 14
-        - Month range (February): start=Feb 1, end=Feb 28 → anchor=Feb 14
-        - Year range (2023): start=Jan 1, end=Dec 31 → anchor=July 1
-    """
-    # Calculate midpoint
-    time_delta = occurred_end - occurred_start
-    midpoint = occurred_start + (time_delta / 2)
-    return midpoint
-
-
-def calculate_temporal_proximity(anchor_a: datetime, anchor_b: datetime, half_life_days: float = 30.0) -> float:
-    """
-    Calculate temporal proximity between two temporal anchors.
-
-    Used for spreading activation to determine how "close" two facts are
-    in time. Uses logarithmic decay so that temporal similarity doesn't
-    drop off too quickly.
-
-    Args:
-        anchor_a: Temporal anchor of first fact
-        anchor_b: Temporal anchor of second fact
-        half_life_days: Number of days for proximity to reach 0.5
-            (default: 30 days = 1 month)
-
-    Returns:
-        Proximity score in [0, 1] where:
-        - 1.0 = same day
-        - 0.5 = ~half_life days apart
-        - 0.0 = very distant in time
-
-    Examples:
-        - Same day: 1.0
-        - 1 week apart (half_life=30): ~0.7
-        - 1 month apart (half_life=30): ~0.5
-        - 1 year apart (half_life=30): ~0.2
-    """
-    import math
-
-    days_apart = abs((anchor_a - anchor_b).days)
-
-    if days_apart == 0:
-        return 1.0
-
-    # Logarithmic decay: 1 / (1 + log(1 + days_apart/half_life))
-    # Similar to calculate_recency_weight but for proximity between events
-    normalized_distance = days_apart / half_life_days
-    proximity = 1.0 / (1.0 + math.log1p(normalized_distance))
-
-    return proximity
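These scoring helpers are deleted wholesale in 0.4.1 (engine/utils.py drops 151 lines). For reference, here is a standalone re-computation of the removed recency formula, not code from the package; note that the actual curve sits somewhat above the docstring's quoted examples (0.59 rather than ~0.5 at one year):

```python
import math


def recency_weight(days_since: float, half_life_days: float = 365.0) -> float:
    # Same formula as the removed calculate_recency_weight:
    # 1 / (1 + log(1 + days_since / half_life_days))
    return 1.0 / (1.0 + math.log1p(days_since / half_life_days))


for days in (0, 365, 730, 1825, 3650):
    print(f"{days:>4} days -> {recency_weight(days):.2f}")
# Prints: 0 -> 1.00, 365 -> 0.59, 730 -> 0.48, 1825 -> 0.36, 3650 -> 0.29
```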
hindsight_api/extensions/__init__.py
@@ -21,6 +21,10 @@ from hindsight_api.extensions.context import DefaultExtensionContext, ExtensionC
 from hindsight_api.extensions.http import HttpExtension
 from hindsight_api.extensions.loader import load_extension
 from hindsight_api.extensions.operation_validator import (
+    # Consolidation operation
+    ConsolidateContext,
+    ConsolidateResult,
+    # Core operations
     OperationValidationError,
     OperationValidatorExtension,
     RecallContext,
@@ -33,6 +37,7 @@ from hindsight_api.extensions.operation_validator import (
 )
 from hindsight_api.extensions.tenant import (
     AuthenticationError,
+    Tenant,
     TenantContext,
     TenantExtension,
 )
@@ -47,7 +52,7 @@ __all__ = [
     "DefaultExtensionContext",
     # HTTP Extension
     "HttpExtension",
-    # Operation Validator
+    # Operation Validator - Core
     "OperationValidationError",
     "OperationValidatorExtension",
     "RecallContext",
@@ -57,10 +62,14 @@ __all__ = [
     "RetainContext",
     "RetainResult",
     "ValidationResult",
+    # Operation Validator - Consolidation
+    "ConsolidateContext",
+    "ConsolidateResult",
     # Tenant/Auth
     "ApiKeyTenantExtension",
     "AuthenticationError",
     "RequestContext",
+    "Tenant",
     "TenantContext",
     "TenantExtension",
 ]
hindsight_api/extensions/builtin/tenant.py
@@ -1,6 +1,7 @@
 """Built-in tenant extension implementations."""
 
-from hindsight_api.extensions.tenant import AuthenticationError, TenantContext, TenantExtension
+from hindsight_api.config import get_config
+from hindsight_api.extensions.tenant import AuthenticationError, Tenant, TenantContext, TenantExtension
 from hindsight_api.models import RequestContext
 
 
@@ -10,11 +11,13 @@ class ApiKeyTenantExtension(TenantExtension):
 
     This is a simple implementation that:
     1. Validates the API key matches HINDSIGHT_API_TENANT_API_KEY
-    2. Returns 'public' as the schema for all authenticated requests
+    2. Returns the configured schema (HINDSIGHT_API_DATABASE_SCHEMA, default 'public')
+       for all authenticated requests
 
     Configuration:
         HINDSIGHT_API_TENANT_EXTENSION=hindsight_api.extensions.builtin.tenant:ApiKeyTenantExtension
         HINDSIGHT_API_TENANT_API_KEY=your-secret-key
+        HINDSIGHT_API_DATABASE_SCHEMA=your-schema (optional, defaults to 'public')
 
     For multi-tenant setups with separate schemas per tenant, implement a custom
     TenantExtension that looks up the schema based on the API key or token claims.
@@ -27,7 +30,11 @@ class ApiKeyTenantExtension(TenantExtension):
             raise ValueError("HINDSIGHT_API_TENANT_API_KEY is required when using ApiKeyTenantExtension")
 
     async def authenticate(self, context: RequestContext) -> TenantContext:
-        """Validate API key and return public schema context."""
+        """Validate API key and return configured schema context."""
         if context.api_key != self.expected_api_key:
             raise AuthenticationError("Invalid API key")
-        return TenantContext(schema_name="public")
+        return TenantContext(schema_name=get_config().database_schema)
+
+    async def list_tenants(self) -> list[Tenant]:
+        """Return configured schema for single-tenant setup."""
+        return [Tenant(schema=get_config().database_schema)]
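The docstring's closing note ("implement a custom TenantExtension that looks up the schema based on the API key or token claims") is easy to picture given the pieces this hunk touches. A hypothetical sketch, reusing only names visible in the diff; the key-to-schema mapping is invented, and TenantExtension may require more than these two methods:

```python
from hindsight_api.extensions.tenant import (
    AuthenticationError,
    Tenant,
    TenantContext,
    TenantExtension,
)
from hindsight_api.models import RequestContext

# Invented mapping; a real extension might query a tenants table or
# derive the schema from token claims instead.
API_KEY_TO_SCHEMA = {
    "key-for-acme": "tenant_acme",
    "key-for-globex": "tenant_globex",
}


class StaticMultiTenantExtension(TenantExtension):
    """Illustrative only: one database schema per API key."""

    async def authenticate(self, context: RequestContext) -> TenantContext:
        schema = API_KEY_TO_SCHEMA.get(context.api_key)
        if schema is None:
            raise AuthenticationError("Invalid API key")
        return TenantContext(schema_name=schema)

    async def list_tenants(self) -> list[Tenant]:
        return [Tenant(schema=s) for s in API_KEY_TO_SCHEMA.values()]
```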