hindsight-api 0.4.1__py3-none-any.whl → 0.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hindsight_api/worker/poller.py
@@ -57,10 +57,11 @@ class WorkerPoller:
  worker_id: str,
  executor: Callable[[dict[str, Any]], Awaitable[None]],
  poll_interval_ms: int = 500,
- batch_size: int = 10,
  max_retries: int = 3,
  schema: str | None = None,
  tenant_extension: "TenantExtension | None" = None,
+ max_slots: int = 10,
+ consolidation_max_slots: int = 2,
  ):
  """
  Initialize the worker poller.
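
The batch_size knob is replaced by two concurrency caps: max_slots bounds the total number of tasks a worker runs at once, and consolidation_max_slots bounds how many of those may be consolidations. A minimal construction sketch, assuming an asyncpg-style pool and a MemoryEngine-style executor (the pool and engine objects here are hypothetical stand-ins):

    # Hypothetical wiring; `pool` and `engine` stand in for the asyncpg
    # pool and MemoryEngine instance this class is normally given.
    poller = WorkerPoller(
        pool=pool,
        worker_id="worker-1",
        executor=engine.execute_task,   # async callable taking a task dict
        poll_interval_ms=500,
        max_slots=10,                   # total concurrent tasks (replaces batch_size)
        consolidation_max_slots=2,      # at most 2 of those may be consolidations
    )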
@@ -70,28 +71,32 @@ class WorkerPoller:
  worker_id: Unique identifier for this worker
  executor: Async function to execute tasks (typically MemoryEngine.execute_task)
  poll_interval_ms: Interval between polls when no tasks found (milliseconds)
- batch_size: Maximum number of tasks to claim per poll cycle
  max_retries: Maximum retry attempts before marking task as failed
  schema: Database schema for single-tenant support (ignored if tenant_extension is set)
  tenant_extension: Extension for dynamic multi-tenant discovery. If set, list_tenants()
  is called on each poll cycle to discover schemas dynamically.
+ max_slots: Maximum concurrent tasks per worker
+ consolidation_max_slots: Maximum concurrent consolidation tasks per worker
  """
  self._pool = pool
  self._worker_id = worker_id
  self._executor = executor
  self._poll_interval_ms = poll_interval_ms
- self._batch_size = batch_size
  self._max_retries = max_retries
  self._schema = schema
  self._tenant_extension = tenant_extension
+ self._max_slots = max_slots
+ self._consolidation_max_slots = consolidation_max_slots
  self._shutdown = asyncio.Event()
  self._current_tasks: set[asyncio.Task] = set()
  self._in_flight_count = 0
  self._in_flight_lock = asyncio.Lock()
  self._last_progress_log = 0.0
  self._tasks_completed_since_log = 0
- # Track active tasks locally: operation_id -> (op_type, bank_id, schema)
- self._active_tasks: dict[str, tuple[str, str, str | None]] = {}
+ # Track active tasks locally: operation_id -> (op_type, bank_id, schema, asyncio.Task)
+ self._active_tasks: dict[str, tuple[str, str, str | None, asyncio.Task]] = {}
+ # Track in-flight tasks by operation type
+ self._in_flight_by_type: dict[str, int] = {}

  async def _get_schemas(self) -> list[str | None]:
  """Get list of schemas to poll. Returns [None] for public schema."""
@@ -102,59 +107,114 @@ class WorkerPoller:
  # Single schema mode
  return [self._schema]

- async def claim_batch(self) -> list[ClaimedTask]:
+ async def _get_available_slots(self) -> tuple[int, int]:
  """
- Claim up to batch_size pending tasks atomically across all tenant schemas.
+ Calculate available slots for claiming tasks.

- Uses FOR UPDATE SKIP LOCKED to ensure no conflicts with other workers.
+ Returns:
+ (total_available, consolidation_available) tuple
+ """
+ async with self._in_flight_lock:
+ total_in_flight = self._in_flight_count
+ consolidation_in_flight = self._in_flight_by_type.get("consolidation", 0)

- For consolidation tasks specifically, skips pending tasks if there's already
- a processing consolidation for the same bank (to avoid duplicate work).
+ total_available = max(0, self._max_slots - total_in_flight)
+ consolidation_available = max(0, self._consolidation_max_slots - consolidation_in_flight)

- If tenant_extension is configured, dynamically discovers schemas on each call.
+ return total_available, consolidation_available
+
+ async def wait_for_active_tasks(self, timeout: float = 10.0) -> bool:
+ """
+ Wait for all active background tasks to complete (test helper).
+
+ This is a test-only utility that allows tests to synchronize with
+ fire-and-forget background tasks without using sleep().
+
+ Args:
+ timeout: Maximum time to wait in seconds
+
+ Returns:
+ True if all tasks completed, False if timeout was reached
+ """
+ start_time = asyncio.get_event_loop().time()
+ while True:
+ async with self._in_flight_lock:
+ if self._in_flight_count == 0:
+ return True
+
+ elapsed = asyncio.get_event_loop().time() - start_time
+ if elapsed >= timeout:
+ return False
+
+ # Short sleep to avoid busy-waiting
+ await asyncio.sleep(0.01)
+
+ async def claim_batch(self) -> list[ClaimedTask]:
+ """
+ Claim pending tasks atomically across all tenant schemas,
+ respecting slot limits (total and consolidation).
+
+ Uses FOR UPDATE SKIP LOCKED to ensure no conflicts with other workers.

  Returns:
  List of ClaimedTask objects containing operation_id, task_dict, and schema
  """
+ # Calculate available slots
+ total_available, consolidation_available = await self._get_available_slots()
+
+ if total_available <= 0:
+ return []
+
  schemas = await self._get_schemas()
  all_tasks: list[ClaimedTask] = []
- remaining_batch = self._batch_size
+ remaining_total = total_available
+ remaining_consolidation = consolidation_available

  for schema in schemas:
- if remaining_batch <= 0:
+ if remaining_total <= 0:
  break

- tasks = await self._claim_batch_for_schema(schema, remaining_batch)
+ tasks = await self._claim_batch_for_schema(schema, remaining_total, remaining_consolidation)
+
+ # Update remaining slots based on what was claimed
+ for task in tasks:
+ op_type = task.task_dict.get("operation_type", "unknown")
+ if op_type == "consolidation":
+ remaining_consolidation -= 1
+
  all_tasks.extend(tasks)
- remaining_batch -= len(tasks)
+ remaining_total -= len(tasks)

  return all_tasks

- async def _claim_batch_for_schema(self, schema: str | None, limit: int) -> list[ClaimedTask]:
- """Claim tasks from a specific schema."""
+ async def _claim_batch_for_schema(
+ self, schema: str | None, limit: int, consolidation_limit: int
+ ) -> list[ClaimedTask]:
+ """Claim tasks from a specific schema respecting slot limits."""
+ try:
+ return await self._claim_batch_for_schema_inner(schema, limit, consolidation_limit)
+ except Exception as e:
+ logger.warning(f"Worker {self._worker_id} failed to claim tasks for schema {schema or 'public'}: {e}")
+ return []
+
+ async def _claim_batch_for_schema_inner(
+ self, schema: str | None, limit: int, consolidation_limit: int
+ ) -> list[ClaimedTask]:
+ """Inner implementation for claiming tasks from a specific schema with slot limits."""
  table = fq_table("async_operations", schema)

  async with self._pool.acquire() as conn:
  async with conn.transaction():
- # Select and lock pending tasks
- # For consolidation: skip if same bank already has one processing
- rows = await conn.fetch(
+ # Strategy: Claim non-consolidation tasks first, then consolidation up to limit
+
+ # 1. Claim non-consolidation tasks (up to limit)
+ non_consolidation_rows = await conn.fetch(
  f"""
  SELECT operation_id, task_payload
- FROM {table} AS pending
- WHERE status = 'pending' AND task_payload IS NOT NULL
- AND (
- -- Non-consolidation tasks: always claimable
- operation_type != 'consolidation'
- OR
- -- Consolidation: only if no other consolidation processing for same bank
- NOT EXISTS (
- SELECT 1 FROM {table} AS processing
- WHERE processing.bank_id = pending.bank_id
- AND processing.operation_type = 'consolidation'
- AND processing.status = 'processing'
- )
- )
+ FROM {table}
+ WHERE status = 'pending'
+ AND task_payload IS NOT NULL
+ AND operation_type != 'consolidation'
  ORDER BY created_at
  LIMIT $1
  FOR UPDATE SKIP LOCKED
@@ -162,11 +222,39 @@ class WorkerPoller:
  limit,
  )

- if not rows:
+ claimed_count = len(non_consolidation_rows)
+ remaining_limit = limit - claimed_count
+
+ # 2. Claim consolidation tasks (up to consolidation_limit and remaining_limit)
+ consolidation_rows = []
+ if consolidation_limit > 0 and remaining_limit > 0:
+ consolidation_rows = await conn.fetch(
+ f"""
+ SELECT operation_id, task_payload
+ FROM {table} AS pending
+ WHERE status = 'pending'
+ AND task_payload IS NOT NULL
+ AND operation_type = 'consolidation'
+ AND NOT EXISTS (
+ SELECT 1 FROM {table} AS processing
+ WHERE processing.bank_id = pending.bank_id
+ AND processing.operation_type = 'consolidation'
+ AND processing.status = 'processing'
+ )
+ ORDER BY created_at
+ LIMIT $1
+ FOR UPDATE SKIP LOCKED
+ """,
+ min(consolidation_limit, remaining_limit),
+ )
+
+ all_rows = non_consolidation_rows + consolidation_rows
+
+ if not all_rows:
  return []

  # Claim the tasks by updating status and worker_id
- operation_ids = [row["operation_id"] for row in rows]
+ operation_ids = [row["operation_id"] for row in all_rows]
  await conn.execute(
  f"""
  UPDATE {table}
@@ -184,7 +272,7 @@ class WorkerPoller:
  task_dict=json.loads(row["task_payload"]),
  schema=schema,
  )
- for row in rows
+ for row in all_rows
  ]

  async def _mark_completed(self, operation_id: str, schema: str | None):
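
The single combined query from 0.4.1 is split into two claims inside one transaction: regular tasks fill up to limit first, then consolidation tasks fill up to min(consolidation_limit, remaining_limit), still skipping banks that already have a consolidation processing. A standalone sketch of the same two-phase FOR UPDATE SKIP LOCKED pattern against a simplified table (conn is assumed to be an open asyncpg connection inside a transaction):

    # Sketch of the two-phase claim; simplified columns, asyncpg-style API.
    async def claim(conn, limit: int, consolidation_limit: int) -> list:
        regular = await conn.fetch(
            """
            SELECT operation_id FROM async_operations
            WHERE status = 'pending' AND operation_type != 'consolidation'
            ORDER BY created_at LIMIT $1 FOR UPDATE SKIP LOCKED
            """,
            limit,
        )
        remaining = limit - len(regular)
        consolidation = []
        if consolidation_limit > 0 and remaining > 0:
            consolidation = await conn.fetch(
                """
                SELECT operation_id FROM async_operations
                WHERE status = 'pending' AND operation_type = 'consolidation'
                ORDER BY created_at LIMIT $1 FOR UPDATE SKIP LOCKED
                """,
                min(consolidation_limit, remaining),
            )
        # Rows stay locked until the surrounding transaction commits the
        # status update, so concurrent workers skip them via SKIP LOCKED.
        return list(regular) + list(consolidation)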
@@ -250,18 +338,43 @@ class WorkerPoller:
  logger.warning(f"Task {operation_id} failed, will retry (attempt {retry_count + 1}/{self._max_retries})")

  async def execute_task(self, task: ClaimedTask):
- """Execute a single task and update its status."""
+ """Execute a single task as a background job (fire-and-forget)."""
  task_type = task.task_dict.get("type", "unknown")
+ operation_type = task.task_dict.get("operation_type", "unknown")
  bank_id = task.task_dict.get("bank_id", "unknown")

+ # Create background task
+ bg_task = asyncio.create_task(self._execute_task_inner(task))
+
  # Track this task as active
  async with self._in_flight_lock:
- self._active_tasks[task.operation_id] = (task_type, bank_id, task.schema)
+ self._active_tasks[task.operation_id] = (task_type, bank_id, task.schema, bg_task)
+ self._in_flight_count += 1
+ self._in_flight_by_type[operation_type] = self._in_flight_by_type.get(operation_type, 0) + 1
+
+ # Add cleanup callback
+ bg_task.add_done_callback(lambda _: asyncio.create_task(self._cleanup_task(task.operation_id, operation_type)))
+
+ async def _cleanup_task(self, operation_id: str, operation_type: str):
+ """Remove task from tracking after completion."""
+ async with self._in_flight_lock:
+ if operation_id in self._active_tasks:
+ self._active_tasks.pop(operation_id, None)
+ self._in_flight_count -= 1
+ count = self._in_flight_by_type.get(operation_type, 0)
+ if count > 0:
+ self._in_flight_by_type[operation_type] = count - 1
+ if self._in_flight_by_type[operation_type] == 0:
+ del self._in_flight_by_type[operation_type]
+
+ async def _execute_task_inner(self, task: ClaimedTask):
+ """Inner task execution with error handling."""
+ task_type = task.task_dict.get("type", "unknown")
+ bank_id = task.task_dict.get("bank_id", "unknown")

  try:
  schema_info = f", schema={task.schema}" if task.schema else ""
  logger.debug(f"Executing task {task.operation_id} (type={task_type}, bank={bank_id}{schema_info})")
- # Pass schema to executor so it can set the correct context
  if task.schema:
  task.task_dict["_schema"] = task.schema
  await self._executor(task.task_dict)
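
execute_task no longer awaits the work: it spawns _execute_task_inner with asyncio.create_task and returns once the task is registered. Because add_done_callback only accepts a synchronous callable, the async cleanup coroutine is itself scheduled with asyncio.create_task from inside the callback. The same pattern in isolation (the work coroutine is a placeholder):

    import asyncio

    async def work() -> None:
        await asyncio.sleep(0.1)  # placeholder for the real executor call

    async def cleanup(task: asyncio.Task, tracked: set[asyncio.Task]) -> None:
        tracked.discard(task)     # release accounting once the task is done

    def spawn(tracked: set[asyncio.Task]) -> asyncio.Task:
        task = asyncio.create_task(work())
        tracked.add(task)
        # Done callbacks run synchronously in the event loop, so async
        # cleanup has to be scheduled as its own task from the callback.
        task.add_done_callback(lambda t: asyncio.create_task(cleanup(t, tracked)))
        return task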
@@ -271,10 +384,6 @@ class WorkerPoller:
  error_msg = f"{type(e).__name__}: {e}\n{traceback.format_exc()}"
  logger.error(f"Task {task.operation_id} failed: {e}")
  await self._retry_or_fail(task.operation_id, error_msg, task.schema)
- finally:
- # Remove from active tasks
- async with self._in_flight_lock:
- self._active_tasks.pop(task.operation_id, None)

  async def recover_own_tasks(self) -> int:
  """
@@ -293,20 +402,23 @@ class WorkerPoller:
  total_count = 0

  for schema in schemas:
- table = fq_table("async_operations", schema)
+ try:
+ table = fq_table("async_operations", schema)

- result = await self._pool.execute(
- f"""
- UPDATE {table}
- SET status = 'pending', worker_id = NULL, claimed_at = NULL, updated_at = now()
- WHERE status = 'processing' AND worker_id = $1
- """,
- self._worker_id,
- )
+ result = await self._pool.execute(
+ f"""
+ UPDATE {table}
+ SET status = 'pending', worker_id = NULL, claimed_at = NULL, updated_at = now()
+ WHERE status = 'processing' AND worker_id = $1
+ """,
+ self._worker_id,
+ )

- # Parse "UPDATE N" to get count
- count = int(result.split()[-1]) if result else 0
- total_count += count
+ # Parse "UPDATE N" to get count
+ count = int(result.split()[-1]) if result else 0
+ total_count += count
+ except Exception as e:
+ logger.warning(f"Worker {self._worker_id} failed to recover tasks for schema {schema or 'public'}: {e}")

  if total_count > 0:
  logger.info(f"Worker {self._worker_id} recovered {total_count} stale tasks from previous run")
@@ -314,59 +426,59 @@ class WorkerPoller:

  async def run(self):
  """
- Main polling loop.
-
- Continuously polls for pending tasks, claims them, and executes them
- until shutdown is signaled.
+ Main polling loop with fire-and-forget task execution.

- If tenant_extension is configured, dynamically discovers schemas on each poll.
+ Continuously polls for pending tasks, spawns them as background tasks,
+ and immediately continues polling (up to slot limits).
  """
- # Recover any tasks from a previous crash before starting
  await self.recover_own_tasks()

- logger.info(f"Worker {self._worker_id} starting polling loop")
+ logger.info(
+ f"Worker {self._worker_id} starting polling loop "
+ f"(max_slots={self._max_slots}, consolidation_max_slots={self._consolidation_max_slots})"
+ )

  while not self._shutdown.is_set():
  try:
- # Claim a batch of tasks (across all tenant schemas if configured)
+ # Claim a batch of tasks (respecting slot limits)
  tasks = await self.claim_batch()

  if tasks:
  # Log batch info
  task_types: dict[str, int] = {}
  schemas_seen: set[str | None] = set()
+ consolidation_count = 0
  for task in tasks:
  t = task.task_dict.get("type", "unknown")
+ op_type = task.task_dict.get("operation_type", "unknown")
  task_types[t] = task_types.get(t, 0) + 1
  schemas_seen.add(task.schema)
+ if op_type == "consolidation":
+ consolidation_count += 1
+
  types_str = ", ".join(f"{k}:{v}" for k, v in task_types.items())
  schemas_str = ", ".join(s or "public" for s in schemas_seen)
  logger.info(
- f"Worker {self._worker_id} claimed {len(tasks)} tasks: {types_str} (schemas: {schemas_str})"
+ f"Worker {self._worker_id} claimed {len(tasks)} tasks "
+ f"({consolidation_count} consolidation): {types_str} (schemas: {schemas_str})"
  )

- # Track in-flight tasks
- async with self._in_flight_lock:
- self._in_flight_count += len(tasks)
-
- # Execute tasks concurrently
- try:
- await asyncio.gather(
- *[self.execute_task(task) for task in tasks],
- return_exceptions=True,
- )
- finally:
- async with self._in_flight_lock:
- self._in_flight_count -= len(tasks)
- else:
- # No tasks found, wait before polling again
- try:
- await asyncio.wait_for(
- self._shutdown.wait(),
- timeout=self._poll_interval_ms / 1000,
- )
- except asyncio.TimeoutError:
- pass # Normal timeout, continue polling
+ # Spawn tasks as background jobs (fire-and-forget)
+ for task in tasks:
+ await self.execute_task(task)
+
+ # Continue immediately to claim more tasks (if slots available)
+ continue
+
+ # No tasks claimed (either no pending tasks or slots full)
+ # Wait before polling again
+ try:
+ await asyncio.wait_for(
+ self._shutdown.wait(),
+ timeout=self._poll_interval_ms / 1000,
+ )
+ except asyncio.TimeoutError:
+ pass # Normal timeout, continue polling

  # Log progress stats periodically
  await self._log_progress_if_due()
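
Since run() now fires tasks off and keeps polling, callers can no longer infer completion from the claim loop itself; that is what the wait_for_active_tasks helper added above is for. A sketch of how a test might synchronize with the background work (poller construction elided; method names follow this diff):

    async def test_tasks_drain(poller) -> None:
        claimed = await poller.claim_batch()
        for task in claimed:
            await poller.execute_task(task)  # returns as soon as the task is spawned
        # Block until all fire-and-forget tasks finish, or fail after 10s.
        assert await poller.wait_for_active_tasks(timeout=10.0)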
@@ -397,15 +509,27 @@ class WorkerPoller:
  while asyncio.get_event_loop().time() - start_time < timeout:
  async with self._in_flight_lock:
  in_flight = self._in_flight_count
+ active_task_objects = [task_info[3] for task_info in self._active_tasks.values()]

  if in_flight == 0:
  logger.info(f"Worker {self._worker_id} graceful shutdown complete")
  return

  logger.info(f"Worker {self._worker_id} waiting for {in_flight} in-flight tasks")
- await asyncio.sleep(0.5)

- logger.warning(f"Worker {self._worker_id} shutdown timeout after {timeout}s")
+ # Wait for at least one task to complete
+ if active_task_objects:
+ done, _ = await asyncio.wait(active_task_objects, timeout=0.5, return_when=asyncio.FIRST_COMPLETED)
+ else:
+ await asyncio.sleep(0.5)
+
+ logger.warning(f"Worker {self._worker_id} shutdown timeout after {timeout}s, cancelling remaining tasks")
+
+ # Cancel remaining tasks
+ async with self._in_flight_lock:
+ for operation_id, (_, _, _, bg_task) in list(self._active_tasks.items()):
+ if not bg_task.done():
+ bg_task.cancel()

  async def _log_progress_if_due(self):
  """Log progress stats every PROGRESS_LOG_INTERVAL seconds."""
@@ -416,14 +540,19 @@ class WorkerPoller:
  self._last_progress_log = now

  try:
- # Get local active tasks (this worker only)
+ # Get local active tasks
  async with self._in_flight_lock:
  in_flight = self._in_flight_count
- active_tasks = dict(self._active_tasks) # Copy to avoid holding lock
+ in_flight_by_type = dict(self._in_flight_by_type)
+ active_tasks = dict(self._active_tasks)
+
+ consolidation_count = in_flight_by_type.get("consolidation", 0)
+ available_slots = self._max_slots - in_flight
+ available_consolidation_slots = self._consolidation_max_slots - consolidation_count

- # Build local processing breakdown grouped by (op_type, bank_id)
+ # Build local processing breakdown
  task_groups: dict[tuple[str, str], int] = {}
- for op_type, bank_id, _ in active_tasks.values():
+ for op_type, bank_id, _, _ in active_tasks.values():
  key = (op_type, bank_id)
  task_groups[key] = task_groups.get(key, 0) + 1
@@ -432,7 +561,7 @@ class WorkerPoller:
  if len(processing_info) > 10:
  processing_str += f" +{len(processing_info) - 10} more"

- # Get global stats from DB across all schemas
+ # Get global stats from DB
  schemas = await self._get_schemas()
  global_pending = 0
  all_worker_counts: dict[str, int] = {}
@@ -444,7 +573,6 @@ class WorkerPoller:
  row = await conn.fetchrow(f"SELECT COUNT(*) as count FROM {table} WHERE status = 'pending'")
  global_pending += row["count"] if row else 0

- # Get processing breakdown by worker
  worker_rows = await conn.fetch(
  f"""
  SELECT worker_id, COUNT(*) as count
@@ -457,7 +585,6 @@ class WorkerPoller:
  wid = wr["worker_id"] or "unknown"
  all_worker_counts[wid] = all_worker_counts.get(wid, 0) + wr["count"]

- # Format other workers' processing counts
  other_workers = []
  for wid, cnt in all_worker_counts.items():
  if wid != self._worker_id:
@@ -466,7 +593,9 @@ class WorkerPoller:

  schemas_str = ", ".join(s or "public" for s in schemas)
  logger.info(
- f"[WORKER_STATS] worker={self._worker_id} in_flight={in_flight} | "
+ f"[WORKER_STATS] worker={self._worker_id} "
+ f"slots={in_flight}/{self._max_slots} (consolidation={consolidation_count}/{self._consolidation_max_slots}) | "
+ f"available={available_slots} (consolidation={available_consolidation_slots}) | "
  f"global: pending={global_pending} (schemas: {schemas_str}) | "
  f"others: {others_str} | "
  f"my_active: {processing_str}"
hindsight_api-0.4.1.dist-info/METADATA → hindsight_api-0.4.3.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: hindsight-api
- Version: 0.4.1
+ Version: 0.4.3
  Summary: Hindsight: Agent Memory That Works Like Human Memory
  Requires-Python: >=3.11
  Requires-Dist: aiohttp>=3.13.3
@@ -14,6 +14,7 @@ Requires-Dist: fastapi[standard]>=0.120.3
  Requires-Dist: fastmcp>=2.14.0
  Requires-Dist: filelock>=3.20.1
  Requires-Dist: flashrank>=0.2.0
+ Requires-Dist: google-auth>=2.0.0
  Requires-Dist: google-genai>=1.0.0
  Requires-Dist: greenlet>=3.2.4
  Requires-Dist: httpx>=0.27.0
hindsight_api-0.4.1.dist-info/RECORD → hindsight_api-0.4.3.dist-info/RECORD
@@ -1,14 +1,14 @@
- hindsight_api/__init__.py,sha256=y8um49GprBg-BgGNjmSIRwE5PFOQWNmyio0m4xAiSHo,1197
+ hindsight_api/__init__.py,sha256=arVpYrig3T18LCK9dh5WpEAYzY1OumvsrD2S2ZuQsmg,1197
  hindsight_api/banner.py,sha256=BXn-jhkXe4xi-YV4JeuaVvjYhTMs96O43XoOMv4Cd28,4591
- hindsight_api/config.py,sha256=PTnOLxdq7S4xBBuUA1ADLMKXylhphUQf7-DjSwZg7l0,26497
- hindsight_api/daemon.py,sha256=3CKcO_ENQ57dIWrTsmYUj-V4zvoAB1toNtVh3EVkg-c,5982
- hindsight_api/main.py,sha256=7poaTkS1U4E0SEKMqJ1m-L_IQKIikb-bIcay-btqXU8,14178
+ hindsight_api/config.py,sha256=mLUMuQrhZCbUd___6Wqdc-OcMXtZ_CZZmxM4b7vO7CE,32506
+ hindsight_api/daemon.py,sha256=yqMtalX0tlNz7KUpMjsRpC0r8T0WwHGvVKPxuO_Ye40,6111
+ hindsight_api/main.py,sha256=-nVh2KrW-Y9y7Punv-1qUz7q4M4IY2vDHBiyAC-Igpk,15243
  hindsight_api/mcp_local.py,sha256=fJnCxMBc79GlBZrma94Ux6g-GVuh-W66194cqQdkKJQ,5613
- hindsight_api/mcp_tools.py,sha256=KGzgDeRoChwgt3HB-OoUHcWgHz6ELequLIkw6u7kkyo,19669
- hindsight_api/metrics.py,sha256=go3X7wyFAPkc55HFvu7esiaJXDrUsrSrC8Pq5NjcqU0,20692
+ hindsight_api/mcp_tools.py,sha256=cLQ9Bdu8FoL2DscO_Z1pAGpNiCeFY2PHBvIPLZXkwE8,20493
+ hindsight_api/metrics.py,sha256=zgOh_UFTT8ZtqnLaZuyErRtoPZ9SGP3mbmiHT3wX3v4,20677
  hindsight_api/migrations.py,sha256=V4QL_N1cMe6kNF1ejJ3lPIPFXKU2Pzbaiviws7AyMIY,14624
  hindsight_api/models.py,sha256=SzJ8uM2nGr3D6X-UEfE8VIT-PbS9J4DmRT_4lv5n9T8,12831
- hindsight_api/pg0.py,sha256=XORoiemECidQgBP53EBSCF3i0PJegLRRWKl2hU5UPhE,6390
+ hindsight_api/pg0.py,sha256=Ntj3FYPLfmQTskG4gHoz_NTlQ4A3DqCm2PbbXm-ivGQ,6337
  hindsight_api/server.py,sha256=MU2ZvKe3KWfxKYZq8EEJPgKMmq5diPkRqfQBaz-yOQI,2483
  hindsight_api/admin/__init__.py,sha256=RvaczuwTxg6ajc_Jlk0EhVz5JqlNB3T8su060gRQwfs,26
  hindsight_api/admin/cli.py,sha256=A1qkZ_9GWjz1qOIQYnmj-qUN005cIIlpFsvYH7tZdyc,11607
@@ -39,23 +39,23 @@ hindsight_api/alembic/versions/t5o6p7q8r9s0_rename_mental_models_to_observations
  hindsight_api/alembic/versions/u6p7q8r9s0t1_mental_models_text_id.py,sha256=uvil81f-4ag2dIxBXUGKZ5vxkqdNQRpxCWj_iVih09w,1355
  hindsight_api/alembic/versions/v7q8r9s0t1u2_add_max_tokens_to_mental_models.py,sha256=Mw68uW8PK-SaHcYcqb41vWI0R22t70SSasNS2Myeoec,1656
  hindsight_api/api/__init__.py,sha256=npF0AAy8WJhHF5a9ehkNn9_iYLk7RQOk2gdkdFb49Hk,3840
- hindsight_api/api/http.py,sha256=5YFLGWDILApMf4lLxpc72S9EtnOIOvuu8c3K124THPc,133363
- hindsight_api/api/mcp.py,sha256=zV0TmkxKEqwhLIfNAdezYgsZ1PF9Lo8j5_lD73ULpKU,6707
+ hindsight_api/api/http.py,sha256=uWQ2P5GiC8tBK1-lbwYxfZgon9tpVgIvLJyp9SeK2Y4,132812
+ hindsight_api/api/mcp.py,sha256=4ZxeEa_LHcyFRf1jK60hr4JDLXObOQdnpc3bTLJCdVI,8647
  hindsight_api/engine/__init__.py,sha256=-BwaSwG9fTT_BBO0c_2MBkxG6-tGdclSzIqsgHw4cnw,1633
- hindsight_api/engine/cross_encoder.py,sha256=rA-iFXO-hXOx6BPCUi_Q6p-wm93eE8Bd6cuivFkeKBY,36029
+ hindsight_api/engine/cross_encoder.py,sha256=Q1s-C-JOOJ246Twl1FyYbeXAJnfdXnfhcDpntScYFvQ,32301
  hindsight_api/engine/db_budget.py,sha256=1OmZiuszpuEaYz355QlOqwaupXPd9FrnbyENsFboBkg,8642
  hindsight_api/engine/db_utils.py,sha256=Fq1pXETt8ZPhkWYjrcGbgL6glrwmCGWh3_lYJgHqQPo,3067
- hindsight_api/engine/embeddings.py,sha256=FAFf7mb7Woz0BoJmBF_m1y3FZt8Ty0yw3ZYaYSHrtMg,30736
+ hindsight_api/engine/embeddings.py,sha256=KvK65y89E4Hxz8gvQR4G6qSGNlGoai6NBIaWdMzoV_A,27054
  hindsight_api/engine/entity_resolver.py,sha256=qVvWJHnbGEfh0iUFtc1dbM3IUNwPMsQsmg2rMgiX2DY,23794
- hindsight_api/engine/interface.py,sha256=rldxkBmp_bqEeTBD713uZeXvrqJB9Ix1L62gazlNEi0,16899
- hindsight_api/engine/llm_wrapper.py,sha256=Mh38zSlNGhsbN0f2VA1JGZ52HRab_ndcKqvEhyajgK0,68084
- hindsight_api/engine/memory_engine.py,sha256=lYWrQYzHib0UPiGNFxe_5QXxkJv14DjiG93SZ1sHo7c,231906
+ hindsight_api/engine/interface.py,sha256=wpJUIN-64RFJ_iYNtYWlyR4L-mDO2xijXUFqLD4tkeg,15821
+ hindsight_api/engine/llm_wrapper.py,sha256=q0d01bdYCVcW9lwJVNxhw3hAp1Qr4YSwU3vXqM3K80Y,71399
+ hindsight_api/engine/memory_engine.py,sha256=NpMG8XwTODJogpumWg1RxajUOFoXv7LJkTcjvP9NNV0,227563
  hindsight_api/engine/query_analyzer.py,sha256=7APe0MjBcUxjivcMlM03PmMk_w5FjWvlEe20yAJlHlc,19741
- hindsight_api/engine/response_models.py,sha256=1fNAFPztlmYfOaoRfwYyrhzdPBO9UL8QHFNXW6Lmjgg,16322
+ hindsight_api/engine/response_models.py,sha256=ZPP80NmEP205erz5qEE8IJ9-c622UHqYo17e5UOiXAE,15578
  hindsight_api/engine/task_backend.py,sha256=zDH24tTwIH_59eFpQzepv0KkZXOIVMpmDkrg1Y5khDA,8172
- hindsight_api/engine/utils.py,sha256=OtEFDViKcCpFmKN3Qir8YV4zp0kv7iaREcgDXCkwShw,2089
+ hindsight_api/engine/utils.py,sha256=k6RcLtwe9XAKDTQRIfgR2zKEdjDLlf72vVeepyK20us,1898
  hindsight_api/engine/consolidation/__init__.py,sha256=qEUPy0R7akNoAooQL1TAt2rVasjvnXTcNzh2zpN0flc,160
- hindsight_api/engine/consolidation/consolidator.py,sha256=E2wEsSnHVFEFEahq51QCkp4zGZW-LZxMUxgZh49cEt8,33037
+ hindsight_api/engine/consolidation/consolidator.py,sha256=bP-lPLD1WAo0fgzsRTHhKhJJDXFkRg4vWcVFL_vr-ao,34828
  hindsight_api/engine/consolidation/prompts.py,sha256=UgJJvXeG7bH0h-N0AWlUsmWoYxfJY2gIP_3f9xjCvSc,3422
  hindsight_api/engine/directives/__init__.py,sha256=5ZxaRqZVyJckbGElaI2DMRMBtnj-qYkxRKdnOHBwovA,118
  hindsight_api/engine/directives/models.py,sha256=PKxvmhW1-fjBITAOBu7RKX5Lj61c2jdsTaX8ADelKag,1523
@@ -75,11 +75,11 @@ hindsight_api/engine/retain/deduplication.py,sha256=kqs7I7eIc_ppvgAF9GlzL6fSGuEE
  hindsight_api/engine/retain/embedding_processing.py,sha256=R35oyKYIKjuqC-yZl5Ru56F8xRe0N6KW_9p5PZ9CBi0,1649
  hindsight_api/engine/retain/embedding_utils.py,sha256=uulXIBiA7XNsj16K1VGawR3s5jV-hsAmvmoCi-IodpU,1565
  hindsight_api/engine/retain/entity_processing.py,sha256=0x5b48Im7pWjeqg3xTMIRVhrzd4otc4rSkFBjxgOL9Y,3632
- hindsight_api/engine/retain/fact_extraction.py,sha256=LdrXyoDERRWJhofHHCVlLrTi880RRIIeAk1AgZiDBAw,63187
+ hindsight_api/engine/retain/fact_extraction.py,sha256=Im6UAFH5X6DBZqWT68Uf41psOmNP602kPdvBatPIdzI,62763
  hindsight_api/engine/retain/fact_storage.py,sha256=PUdMfNWaGuDA-DodeT3hs8ft81ldzXZedCMXys-sFf4,6690
  hindsight_api/engine/retain/link_creation.py,sha256=KP2kGU2VCymJptgw0hjaSdsjvncBgNp3P_A4OB_qx-w,3082
  hindsight_api/engine/retain/link_utils.py,sha256=eKa9Ecf7Mpqjl4laAEtRilQgu4fbsGWAjg98kdMDsDc,33078
- hindsight_api/engine/retain/orchestrator.py,sha256=URQm9oXFWhLTmQjHlolnyWjcFDusEitn5UVbIvVdcXQ,20480
+ hindsight_api/engine/retain/orchestrator.py,sha256=QmuTGj3pLmQe4IROZFbxcHih40oft0u8xLInAeUGI0g,20393
  hindsight_api/engine/retain/types.py,sha256=zNkjqUA6oUAFe9a5SEbZfQC5PSmpYqTyBfgdmyqPpnw,7722
  hindsight_api/engine/search/__init__.py,sha256=YPz_4g7IOabx078Xwg3RBfbOpJ649NRwNfe0gTI9P1U,802
  hindsight_api/engine/search/fusion.py,sha256=cY81BH9U5RyWrPXbQnrDBghtelDMckZWCke9aqMyNnQ,4220
@@ -104,9 +104,9 @@ hindsight_api/extensions/tenant.py,sha256=0LraksQ1gzsOYLEGrx2q2F0or596Ywfo_MqD1F
  hindsight_api/extensions/builtin/__init__.py,sha256=hLx2oFYZ1JtZhTWfab6AYcR02SWP2gIdbEqnZezT8ek,526
  hindsight_api/extensions/builtin/tenant.py,sha256=R7jfNR41deGWqQB5P8Qk5njy1bZgvemcTpkXDRiAZBA,1835
  hindsight_api/worker/__init__.py,sha256=hzpMLvOfgL2KKrrik_9ouvEzCdvJSrH-pj5UdFK63J0,256
- hindsight_api/worker/main.py,sha256=1OrQdHL-6u-311W0XMAoLHOXCu8MOETiQkR0TQ23qh8,9547
- hindsight_api/worker/poller.py,sha256=l-y8xpekKZ7zcGo83osOsbFd_tBi49LqrAJsN-mxiMY,19306
- hindsight_api-0.4.1.dist-info/METADATA,sha256=7qQlHBih3InJcpEZv3UAWzBkhhgQ0DgLKayw-hmp9VI,5760
- hindsight_api-0.4.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- hindsight_api-0.4.1.dist-info/entry_points.txt,sha256=1-mxPbRGL_Byf9ZrHYkPW-TEgLYFcwCiSFCxOgI_3vM,206
- hindsight_api-0.4.1.dist-info/RECORD,,
+ hindsight_api/worker/main.py,sha256=eE6AmFErNJKGBrLivwceAMo5n73y_e6EUwjQoGo-lhE,9757
+ hindsight_api/worker/poller.py,sha256=2jyl5bJPEwgr6T6xgEvdHPGqtXvOIs28v4v38a26sRo,25122
+ hindsight_api-0.4.3.dist-info/METADATA,sha256=aCH_ERkOa46kNuq9x4tgAeQ6kqixoLu0OUBFWBsG74U,5794
+ hindsight_api-0.4.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ hindsight_api-0.4.3.dist-info/entry_points.txt,sha256=1-mxPbRGL_Byf9ZrHYkPW-TEgLYFcwCiSFCxOgI_3vM,206
+ hindsight_api-0.4.3.dist-info/RECORD,,