hindsight-api 0.0.18__py3-none-any.whl → 0.0.21__py3-none-any.whl

@@ -50,7 +50,7 @@ class Fact(BaseModel):
     """
     # Required fields
     fact: str = Field(description="Combined fact text: what | when | where | who | why")
-    fact_type: Literal["world", "bank", "opinion"] = Field(description="Perspective: world/bank/opinion")
+    fact_type: Literal["world", "experience", "opinion"] = Field(description="Perspective: world/experience/opinion")
 
     # Optional temporal fields
     occurred_start: Optional[str] = None
@@ -164,7 +164,7 @@ class ExtractedFact(BaseModel):
     # Classification (CRITICAL - required)
     # Note: LLM uses "assistant" but we convert to "bank" for storage
     fact_type: Literal["world", "assistant"] = Field(
-        description="'world' = about the user/others (background, experiences). 'assistant' = interactions with the assistant."
+        description="'world' = about the user/others (background, experiences). 'assistant' = experience with the assistant."
    )
 
     # Entities - extracted from 'who' field
@@ -581,20 +581,20 @@ Text:
             continue
 
         # Critical field: fact_type
-        # LLM uses "assistant" but we convert to "bank" for storage
+        # LLM uses "assistant" but we convert to "experience" for storage
         fact_type = llm_fact.get('fact_type')
 
-        # Convert "assistant" → "bank" for storage
+        # Convert "assistant" → "experience" for storage
         if fact_type == 'assistant':
-            fact_type = 'bank'
+            fact_type = 'experience'
 
         # Validate fact_type (after conversion)
-        if fact_type not in ['world', 'bank', 'opinion']:
+        if fact_type not in ['world', 'experience', 'opinion']:
             # Try to fix common mistakes - check if they swapped fact_type and fact_kind
             fact_kind = llm_fact.get('fact_kind')
             if fact_kind == 'assistant':
-                fact_type = 'bank'
-            elif fact_kind in ['world', 'bank', 'opinion']:
+                fact_type = 'experience'
+            elif fact_kind in ['world', 'experience', 'opinion']:
                 fact_type = fact_kind
             else:
                 # Default to 'world' if we can't determine
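
Note: the hunk above renames the storage label for assistant-derived facts from "bank" to "experience". A condensed sketch of the normalization rule it implements, for readers skimming the diff (the helper name is illustrative, not part of the package):

    def normalize_fact_type(llm_fact: dict) -> str:
        """Distilled from the hunk above: map raw LLM output to a stored fact_type."""
        fact_type = llm_fact.get('fact_type')
        if fact_type == 'assistant':                      # LLM-facing label
            return 'experience'                           # storage label (was 'bank' in 0.0.18)
        if fact_type in ('world', 'experience', 'opinion'):
            return fact_type
        # The LLM sometimes swaps fact_type and fact_kind; try to recover
        fact_kind = llm_fact.get('fact_kind')
        if fact_kind == 'assistant':
            return 'experience'
        if fact_kind in ('world', 'experience', 'opinion'):
            return fact_kind
        return 'world'                                    # default when undeterminable
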
@@ -112,7 +112,7 @@ async def ensure_bank_exists(conn, bank_id: str) -> None:
     """
     await conn.execute(
         """
-        INSERT INTO banks (bank_id, personality, background)
+        INSERT INTO banks (bank_id, disposition, background)
         VALUES ($1, $2::jsonb, $3)
         ON CONFLICT (bank_id) DO UPDATE
         SET updated_at = NOW()
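
Note: the INSERT now targets a disposition column, so existing databases need the banks.personality column renamed. The package's migrations are not visible in this diff; a hypothetical Alembic-style sketch of the required step:

    from alembic import op

    def upgrade() -> None:
        # Hypothetical migration (not shown in this diff): rename the JSONB
        # column so the new INSERT INTO banks (..., disposition, ...) succeeds.
        op.alter_column('banks', 'personality', new_column_name='disposition')
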
@@ -107,7 +107,18 @@ def compute_temporal_query_bounds(
 
 
 def _log(log_buffer, message, level='info'):
-    """Helper to log to buffer if available, otherwise use logger."""
+    """Helper to log to buffer if available, otherwise use logger.
+
+    Args:
+        log_buffer: Buffer to append messages to (for main output)
+        message: The log message
+        level: 'info', 'debug', 'warning', or 'error'. Debug messages are not added to buffer.
+    """
+    if level == 'debug':
+        # Debug messages only go to logger, not to buffer
+        logger.debug(message)
+        return
+
     if log_buffer is not None:
         log_buffer.append(message)
     else:
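
Note: with the new level parameter, call sites can route verbose timing lines to the logger only, keeping the user-facing buffer clean. A small usage sketch, assuming the _log from the hunk above is in scope:

    log_buffer: list = []
    _log(log_buffer, "Stored 12 facts")                   # appended to buffer
    _log(log_buffer, "[6.1] took 0.042s", level='debug')  # goes to logger.debug only
    assert log_buffer == ["Stored 12 facts"]
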
@@ -165,7 +176,7 @@ async def extract_entities_batch_optimized(
         all_entities.append(formatted_entities)
 
     total_entities = sum(len(ents) for ents in all_entities)
-    _log(log_buffer, f"  [6.1] Process LLM entities: {total_entities} entities from {len(sentences)} facts in {time.time() - substep_start:.3f}s")
+    _log(log_buffer, f"  [6.1] Process LLM entities: {total_entities} entities from {len(sentences)} facts in {time.time() - substep_start:.3f}s", level='debug')
 
     # Step 2: Resolve entities in BATCH (much faster!)
     substep_start = time.time()
@@ -187,7 +198,7 @@ async def extract_entities_batch_optimized(
                 'nearby_entities': entities,
             })
             entity_to_unit.append((unit_id, local_idx, fact_date))
-    _log(log_buffer, f"  [6.2.1] Prepare entities: {len(all_entities_flat)} entities in {time.time() - substep_6_2_1_start:.3f}s")
+    _log(log_buffer, f"  [6.2.1] Prepare entities: {len(all_entities_flat)} entities in {time.time() - substep_6_2_1_start:.3f}s", level='debug')
 
     # Resolve ALL entities in one batch call
     if all_entities_flat:
@@ -202,47 +213,36 @@ async def extract_entities_batch_optimized(
                 entities_by_date[date_key] = []
             entities_by_date[date_key].append((idx, all_entities_flat[idx]))
 
-        _log(log_buffer, f"  [6.2.2] Grouped into {len(entities_by_date)} date buckets, resolving in parallel...")
+        _log(log_buffer, f"  [6.2.2] Grouped into {len(entities_by_date)} date buckets, resolving sequentially...", level='debug')
 
-        # Resolve all date groups in PARALLEL using asyncio.gather
+        # Resolve all date groups SEQUENTIALLY using main transaction connection
+        # This prevents race conditions where parallel tasks create duplicate entities
         resolved_entity_ids = [None] * len(all_entities_flat)
 
-        # Prepare all resolution tasks
-        async def resolve_date_bucket(date_idx, date_key, entities_group):
+        for date_idx, (date_key, entities_group) in enumerate(entities_by_date.items(), 1):
             date_bucket_start = time.time()
             indices = [idx for idx, _ in entities_group]
             entities_data = [entity_data for _, entity_data in entities_group]
             # Use the first fact's date for this bucket (all should be in same hour)
             fact_date = entity_to_unit[indices[0]][2]
 
-            # Pass conn=None to let each parallel task acquire its own connection
+            # Use main transaction connection to ensure consistency
             batch_resolved = await entity_resolver.resolve_entities_batch(
                 bank_id=bank_id,
                 entities_data=entities_data,
                 context=context,
                 unit_event_date=fact_date,
-                conn=None  # Each task gets its own connection from pool
+                conn=conn  # Use main transaction connection
             )
 
             if len(entities_by_date) <= 10:  # Only log individual buckets if there aren't too many
-                _log(log_buffer, f"  [6.2.2.{date_idx}] Resolved {len(entities_data)} entities in {time.time() - date_bucket_start:.3f}s")
-
-            return indices, batch_resolved
-
-        # Execute all resolution tasks in parallel
-        import asyncio
-        tasks = [
-            resolve_date_bucket(date_idx, date_key, entities_group)
-            for date_idx, (date_key, entities_group) in enumerate(entities_by_date.items(), 1)
-        ]
-        results = await asyncio.gather(*tasks)
+                _log(log_buffer, f"  [6.2.2.{date_idx}] Resolved {len(entities_data)} entities in {time.time() - date_bucket_start:.3f}s", level='debug')
 
-        # Map results back to resolved_entity_ids
-        for indices, batch_resolved in results:
+            # Map results back to resolved_entity_ids
             for idx, entity_id in zip(indices, batch_resolved):
                 resolved_entity_ids[idx] = entity_id
 
-        _log(log_buffer, f"  [6.2.2] Resolve entities: {len(all_entities_flat)} entities across {len(entities_by_date)} buckets in {time.time() - substep_6_2_2_start:.3f}s")
+        _log(log_buffer, f"  [6.2.2] Resolve entities: {len(all_entities_flat)} entities across {len(entities_by_date)} buckets in {time.time() - substep_6_2_2_start:.3f}s", level='debug')
 
         # [6.2.3] Create unit-entity links in BATCH
         substep_6_2_3_start = time.time()
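
Note: the hunk above trades parallelism for correctness. When each date bucket resolved on its own pool connection, two buckets could look up the same entity concurrently, both miss, and both insert it. A minimal, self-contained illustration of that check-then-insert race (assumed to mirror the failure mode; the real code raced across asyncpg pool connections):

    import asyncio

    rows = []  # stands in for the entities table

    async def resolve(name: str) -> None:
        if name not in rows:        # check
            await asyncio.sleep(0)  # suspension point, like a DB round-trip
            rows.append(name)       # insert

    async def main() -> None:
        await asyncio.gather(resolve("Acme Corp"), resolve("Acme Corp"))
        print(rows)                 # ['Acme Corp', 'Acme Corp'] -- duplicate entity
        rows.clear()
        for _ in range(2):          # sequential, as in the new code
            await resolve("Acme Corp")
        print(rows)                 # ['Acme Corp']

    asyncio.run(main())
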
@@ -259,12 +259,12 @@ async def extract_entities_batch_optimized(
 
         # Batch insert all unit-entity links (MUCH faster!)
         await entity_resolver.link_units_to_entities_batch(unit_entity_pairs, conn=conn)
-        _log(log_buffer, f"  [6.2.3] Create unit-entity links (batched): {len(unit_entity_pairs)} links in {time.time() - substep_6_2_3_start:.3f}s")
+        _log(log_buffer, f"  [6.2.3] Create unit-entity links (batched): {len(unit_entity_pairs)} links in {time.time() - substep_6_2_3_start:.3f}s", level='debug')
 
-        _log(log_buffer, f"  [6.2] Entity resolution (batched): {len(all_entities_flat)} entities resolved in {time.time() - step_6_2_start:.3f}s")
+        _log(log_buffer, f"  [6.2] Entity resolution (batched): {len(all_entities_flat)} entities resolved in {time.time() - step_6_2_start:.3f}s", level='debug')
     else:
         unit_to_entity_ids = {}
-        _log(log_buffer, f"  [6.2] Entity resolution (batched): 0 entities in {time.time() - step_6_2_start:.3f}s")
+        _log(log_buffer, f"  [6.2] Entity resolution (batched): 0 entities in {time.time() - step_6_2_start:.3f}s", level='debug')
 
     # Step 3: Create entity links between units that share entities
     substep_start = time.time()
@@ -273,7 +273,7 @@ async def extract_entities_batch_optimized(
     for entity_ids in unit_to_entity_ids.values():
         all_entity_ids.update(entity_ids)
 
-    _log(log_buffer, f"  [6.3] Creating entity links for {len(all_entity_ids)} unique entities...")
+    _log(log_buffer, f"  [6.3] Creating entity links for {len(all_entity_ids)} unique entities...", level='debug')
 
     # Find all units that reference these entities (ONE batched query)
     entity_to_units = {}
@@ -289,7 +289,7 @@ async def extract_entities_batch_optimized(
         """,
         entity_id_list
     )
-    _log(log_buffer, f"  [6.3.1] Query unit_entities: {len(rows)} rows in {time.time() - query_start:.3f}s")
+    _log(log_buffer, f"  [6.3.1] Query unit_entities: {len(rows)} rows in {time.time() - query_start:.3f}s", level='debug')
 
     # Group by entity_id
     group_start = time.time()
@@ -298,21 +298,38 @@ async def extract_entities_batch_optimized(
         if entity_id not in entity_to_units:
             entity_to_units[entity_id] = []
         entity_to_units[entity_id].append(row['unit_id'])
-    _log(log_buffer, f"  [6.3.2] Group by entity_id: {time.time() - group_start:.3f}s")
+    _log(log_buffer, f"  [6.3.2] Group by entity_id: {time.time() - group_start:.3f}s", level='debug')
 
     # Create bidirectional links between units that share entities
+    # OPTIMIZATION: Limit links per entity to avoid N² explosion
+    # Only link each new unit to the most recent MAX_LINKS_PER_ENTITY units
+    MAX_LINKS_PER_ENTITY = 50  # Limit to prevent explosion when entity appears in many facts
     link_gen_start = time.time()
     links = []
+    new_unit_set = set(unit_ids)  # Units from this batch
+
     for entity_id, units_with_entity in entity_to_units.items():
-        # For each pair of units with this entity, create bidirectional links
-        for i, unit_id_1 in enumerate(units_with_entity):
-            for unit_id_2 in units_with_entity[i+1:]:
-                # Bidirectional links
+        # Separate new units (from this batch) and existing units
+        new_units = [u for u in units_with_entity if str(u) in new_unit_set or u in new_unit_set]
+        existing_units = [u for u in units_with_entity if str(u) not in new_unit_set and u not in new_unit_set]
+
+        # Link new units to each other (within batch) - also limited
+        # For very common entities, limit within-batch links too
+        new_units_to_link = new_units[-MAX_LINKS_PER_ENTITY:] if len(new_units) > MAX_LINKS_PER_ENTITY else new_units
+        for i, unit_id_1 in enumerate(new_units_to_link):
+            for unit_id_2 in new_units_to_link[i+1:]:
                 links.append((unit_id_1, unit_id_2, 'entity', 1.0, entity_id))
                 links.append((unit_id_2, unit_id_1, 'entity', 1.0, entity_id))
 
-    _log(log_buffer, f"  [6.3.3] Generate {len(links)} links: {time.time() - link_gen_start:.3f}s")
-    _log(log_buffer, f"  [6.3] Entity link creation: {len(links)} links for {len(all_entity_ids)} unique entities in {time.time() - substep_start:.3f}s")
+        # Link new units to LIMITED existing units (most recent)
+        existing_to_link = existing_units[-MAX_LINKS_PER_ENTITY:]  # Take most recent
+        for new_unit in new_units:
+            for existing_unit in existing_to_link:
+                links.append((new_unit, existing_unit, 'entity', 1.0, entity_id))
+                links.append((existing_unit, new_unit, 'entity', 1.0, entity_id))
+
+    _log(log_buffer, f"  [6.3.3] Generate {len(links)} links: {time.time() - link_gen_start:.3f}s", level='debug')
+    _log(log_buffer, f"  [6.3] Entity link creation: {len(links)} links for {len(all_entity_ids)} unique entities in {time.time() - substep_start:.3f}s", level='debug')
 
     return links
 
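Note: without the cap, an entity appearing in n units generates n·(n-1) directed links. A back-of-envelope sketch of the savings (counts derived from the loops above; batch sizes are illustrative):

    def uncapped(n: int) -> int:
        return n * (n - 1)  # every ordered pair of units sharing the entity

    def capped(new: int, existing: int, cap: int = 50) -> int:
        k = min(new, cap)
        within = k * (k - 1)                  # new units linked pairwise, both directions
        cross = 2 * new * min(existing, cap)  # new <-> most recent existing units
        return within + cross

    print(uncapped(1000))   # 999000 links for one very common entity
    print(capped(20, 980))  # 2380 links for the same entity after the cap
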
@@ -529,25 +546,77 @@ async def create_semantic_links_batch(
         raise
 
 
-async def insert_entity_links_batch(conn, links: List[tuple]):
+async def insert_entity_links_batch(conn, links: List[tuple], chunk_size: int = 50000):
     """
-    Insert all entity links in a single batch.
+    Insert all entity links using COPY to temp table + INSERT for maximum speed.
+
+    Uses PostgreSQL COPY (via copy_records_to_table) for bulk loading,
+    then INSERT ... ON CONFLICT from temp table. This is the fastest
+    method for bulk inserts with conflict handling.
 
     Args:
         conn: Database connection
         links: List of tuples (from_unit_id, to_unit_id, link_type, weight, entity_id)
+        chunk_size: Number of rows per batch (default 50000)
     """
     if not links:
         return
 
-    await conn.executemany(
-        """
+    import uuid as uuid_mod
+    import time as time_mod
+
+    total_start = time_mod.time()
+
+    # Create temp table for bulk loading
+    create_start = time_mod.time()
+    await conn.execute("""
+        CREATE TEMP TABLE IF NOT EXISTS _temp_entity_links (
+            from_unit_id uuid,
+            to_unit_id uuid,
+            link_type text,
+            weight float,
+            entity_id uuid
+        ) ON COMMIT DROP
+    """)
+    logger.debug(f"  [9.1] Create temp table: {time_mod.time() - create_start:.3f}s")
+
+    # Clear any existing data in temp table
+    truncate_start = time_mod.time()
+    await conn.execute("TRUNCATE _temp_entity_links")
+    logger.debug(f"  [9.2] Truncate temp table: {time_mod.time() - truncate_start:.3f}s")
+
+    # Convert links to proper format for COPY
+    convert_start = time_mod.time()
+    records = []
+    for from_id, to_id, link_type, weight, entity_id in links:
+        records.append((
+            uuid_mod.UUID(from_id) if isinstance(from_id, str) else from_id,
+            uuid_mod.UUID(to_id) if isinstance(to_id, str) else to_id,
+            link_type,
+            weight,
+            uuid_mod.UUID(str(entity_id)) if entity_id and not isinstance(entity_id, uuid_mod.UUID) else entity_id
+        ))
+    logger.debug(f"  [9.3] Convert {len(records)} records: {time_mod.time() - convert_start:.3f}s")
+
+    # Bulk load using COPY (fastest method)
+    copy_start = time_mod.time()
+    await conn.copy_records_to_table(
+        '_temp_entity_links',
+        records=records,
+        columns=['from_unit_id', 'to_unit_id', 'link_type', 'weight', 'entity_id']
+    )
+    logger.debug(f"  [9.4] COPY {len(records)} records to temp table: {time_mod.time() - copy_start:.3f}s")
+
+    # Insert from temp table with ON CONFLICT (single query for all rows)
+    insert_start = time_mod.time()
+    await conn.execute("""
         INSERT INTO memory_links (from_unit_id, to_unit_id, link_type, weight, entity_id)
-        VALUES ($1, $2, $3, $4, $5)
+        SELECT from_unit_id, to_unit_id, link_type, weight, entity_id
+        FROM _temp_entity_links
         ON CONFLICT (from_unit_id, to_unit_id, link_type, COALESCE(entity_id, '00000000-0000-0000-0000-000000000000'::uuid)) DO NOTHING
-    """,
-    links
-    )
+    """)
+    logger.debug(f"  [9.5] INSERT from temp table: {time_mod.time() - insert_start:.3f}s")
+    logger.debug(f"  [9.TOTAL] Entity links batch insert: {time_mod.time() - total_start:.3f}s")
 
 
 async def create_causal_links_batch(
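
Note: ON COMMIT DROP only removes the temp table when the caller runs inside a transaction, which is why the function also TRUNCATEs before reuse. A minimal usage sketch (the DSN and pool wiring are assumptions, not from the package):

    import asyncio
    import asyncpg

    async def main() -> None:
        pool = await asyncpg.create_pool(dsn="postgresql://localhost/hindsight")
        links = [
            ("00000000-0000-0000-0000-000000000001",
             "00000000-0000-0000-0000-000000000002", "entity", 1.0, None),
        ]
        async with pool.acquire() as conn:
            async with conn.transaction():  # temp table is dropped at COMMIT
                await insert_entity_links_batch(conn, links)
        await pool.close()

    asyncio.run(main())
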
@@ -75,7 +75,7 @@ class ExtractedFact:
     This is the raw output from fact extraction before processing.
     """
     fact_text: str
-    fact_type: str  # "world", "bank", "opinion", "observation"
+    fact_type: str  # "world", "experience", "opinion", "observation"
     entities: List[str] = field(default_factory=list)
     occurred_start: Optional[datetime] = None
     occurred_end: Optional[datetime] = None
@@ -9,7 +9,7 @@ from datetime import datetime, timezone
 from typing import Dict, List, Any
 from pydantic import BaseModel, Field
 
-from ..response_models import ReflectResult, MemoryFact, PersonalityTraits
+from ..response_models import ReflectResult, MemoryFact, DispositionTraits
 
 logger = logging.getLogger(__name__)
 
@@ -42,16 +42,16 @@ def describe_trait(name: str, value: float) -> str:
         return f"very low {name}"
 
 
-def build_personality_description(personality: PersonalityTraits) -> str:
-    """Build a personality description string from personality traits."""
-    return f"""Your personality traits:
-- {describe_trait('openness to new ideas', personality.openness)}
-- {describe_trait('conscientiousness and organization', personality.conscientiousness)}
-- {describe_trait('extraversion and sociability', personality.extraversion)}
-- {describe_trait('agreeableness and cooperation', personality.agreeableness)}
-- {describe_trait('emotional sensitivity', personality.neuroticism)}
+def build_disposition_description(disposition: DispositionTraits) -> str:
+    """Build a disposition description string from disposition traits."""
+    return f"""Your disposition traits:
+- {describe_trait('openness to new ideas', disposition.openness)}
+- {describe_trait('conscientiousness and organization', disposition.conscientiousness)}
+- {describe_trait('extraversion and sociability', disposition.extraversion)}
+- {describe_trait('agreeableness and cooperation', disposition.agreeableness)}
+- {describe_trait('emotional sensitivity', disposition.neuroticism)}
 
-Personality influence strength: {int(personality.bias_strength * 100)}% (how much your personality shapes your opinions)"""
+Disposition influence strength: {int(disposition.bias_strength * 100)}% (how much your disposition shapes your opinions)"""
 
 
 def format_facts_for_prompt(facts: List[MemoryFact]) -> str:
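
Note: the prompt builder reads six trait fields from DispositionTraits. The model itself is not shown in this diff; an assumed sketch of its shape, inferred from the attribute accesses above:

    from pydantic import BaseModel, Field

    class DispositionTraits(BaseModel):
        # Big Five-style traits plus a bias knob, all assumed to be 0..1 floats
        openness: float = Field(0.5, ge=0.0, le=1.0)
        conscientiousness: float = Field(0.5, ge=0.0, le=1.0)
        extraversion: float = Field(0.5, ge=0.0, le=1.0)
        agreeableness: float = Field(0.5, ge=0.0, le=1.0)
        neuroticism: float = Field(0.5, ge=0.0, le=1.0)
        bias_strength: float = Field(0.5, ge=0.0, le=1.0)  # scales prompt influence
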
@@ -93,12 +93,12 @@ def build_think_prompt(
     opinion_facts_text: str,
     query: str,
     name: str,
-    personality: PersonalityTraits,
+    disposition: DispositionTraits,
     background: str,
     context: str = None,
 ) -> str:
     """Build the think prompt for the LLM."""
-    personality_desc = build_personality_description(personality)
+    disposition_desc = build_disposition_description(disposition)
 
     name_section = f"""
 
@@ -132,24 +132,24 @@ WHAT I KNOW ABOUT THE WORLD:
 MY EXISTING OPINIONS & BELIEFS:
 {opinion_facts_text}
 
-{context_section}{name_section}{personality_desc}{background_section}
+{context_section}{name_section}{disposition_desc}{background_section}
 
 QUESTION: {query}
 
-Based on everything I know, believe, and who I am (including my name, personality and background), here's what I genuinely think about this question. I'll draw on my experiences, knowledge, opinions, and personal traits to give you my honest perspective."""
+Based on everything I know, believe, and who I am (including my name, disposition and background), here's what I genuinely think about this question. I'll draw on my experiences, knowledge, opinions, and personal traits to give you my honest perspective."""
 
 
-def get_system_message(personality: PersonalityTraits) -> str:
+def get_system_message(disposition: DispositionTraits) -> str:
     """Get the system message for the think LLM call."""
-    bias_strength = personality.bias_strength
+    bias_strength = disposition.bias_strength
     if bias_strength >= 0.7:
-        personality_instruction = "Your personality strongly influences your thinking. Let your traits guide how you interpret facts and form opinions. Don't be afraid to be biased based on your personality."
+        disposition_instruction = "Your disposition strongly influences your thinking. Let your traits guide how you interpret facts and form opinions. Don't be afraid to be biased based on your disposition."
     elif bias_strength >= 0.4:
-        personality_instruction = "Your personality moderately influences your thinking. Balance your personal traits with objective analysis."
+        disposition_instruction = "Your disposition moderately influences your thinking. Balance your personal traits with objective analysis."
     else:
-        personality_instruction = "Your personality has minimal influence on your thinking. Focus primarily on facts while keeping your traits in mind."
+        disposition_instruction = "Your disposition has minimal influence on your thinking. Focus primarily on facts while keeping your traits in mind."
 
-    return f"You are a person with your own thoughts, experiences, opinions, and personality. When asked a question, respond naturally like a human would - share what you genuinely think based on what you know and have experienced. {personality_instruction} Be direct, express your views confidently, and use 'I think', 'I believe', 'in my view', etc. Respond in plain text without markdown formatting."
+    return f"You are a person with your own thoughts, experiences, opinions, and disposition. When asked a question, respond naturally like a human would - share what you genuinely think based on what you know and have experienced. {disposition_instruction} Be direct, express your views confidently, and use 'I think', 'I believe', 'in my view', etc. Respond in plain text without markdown formatting."
 
 
 async def extract_opinions_from_text(
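
Note: get_system_message buckets bias_strength at 0.7 and 0.4. The mapping, condensed for quick reference (helper name is illustrative):

    def disposition_influence(bias_strength: float) -> str:
        """Condensed from get_system_message above."""
        if bias_strength >= 0.7:
            return "strong"    # traits may bias interpretation freely
        if bias_strength >= 0.4:
            return "moderate"  # balance traits with objective analysis
        return "minimal"       # facts first, traits in the background
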
@@ -100,7 +100,7 @@ class RetrievalResult(BaseModel):
     text: str = Field(description="Memory unit text content")
     context: str = Field(default="", description="Memory unit context")
     event_date: Optional[datetime] = Field(default=None, description="When the memory occurred")
-    fact_type: Optional[str] = Field(default=None, description="Fact type (world, bank, opinion)")
+    fact_type: Optional[str] = Field(default=None, description="Fact type (world, experience, opinion)")
     score: float = Field(description="Score from this retrieval method")
     score_name: str = Field(description="Name of the score (e.g., 'similarity', 'bm25_score', 'activation')")
 
hindsight_api/models.py CHANGED
@@ -104,7 +104,7 @@ class MemoryUnit(Base):
             name="memory_units_document_fkey",
             ondelete="CASCADE",
         ),
-        CheckConstraint("fact_type IN ('world', 'bank', 'opinion', 'observation')"),
+        CheckConstraint("fact_type IN ('world', 'experience', 'opinion', 'observation')"),
         CheckConstraint("confidence_score IS NULL OR (confidence_score >= 0.0 AND confidence_score <= 1.0)"),
         CheckConstraint(
             "(fact_type = 'opinion' AND confidence_score IS NOT NULL) OR "
@@ -284,11 +284,11 @@ class MemoryLink(Base):
 
 
 class Bank(Base):
-    """Memory bank profiles with personality traits and background."""
+    """Memory bank profiles with disposition traits and background."""
     __tablename__ = "banks"
 
     bank_id: Mapped[str] = mapped_column(Text, primary_key=True)
-    personality: Mapped[dict] = mapped_column(
+    disposition: Mapped[dict] = mapped_column(
         JSONB,
         nullable=False,
         server_default=sql_text(
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hindsight-api
-Version: 0.0.18
+Version: 0.0.21
 Summary: Temporal + Semantic + Entity Memory System for AI agents using PostgreSQL
 Requires-Python: >=3.11
 Requires-Dist: alembic>=1.17.1
@@ -8,6 +8,7 @@ Requires-Dist: asyncpg>=0.29.0
 Requires-Dist: dateparser>=1.2.2
 Requires-Dist: fastapi[standard]>=0.120.3
 Requires-Dist: fastmcp>=2.0.0
+Requires-Dist: google-genai>=1.0.0
 Requires-Dist: greenlet>=3.2.4
 Requires-Dist: httpx>=0.27.0
 Requires-Dist: langchain-text-splitters>=0.3.0
@@ -2,35 +2,35 @@ hindsight_api/__init__.py,sha256=yQWYWUWEhvs1OY1coENhZV_CuOAWmN_YKZXQMIvGN94,851
 hindsight_api/cli.py,sha256=-dxAHsET_pHd6NlA3ufI4KEKQA3fL3YapCvDB_x2ax8,3303
 hindsight_api/metrics.py,sha256=j4-eeqVjjcGQxAxS_GgEaBNm10KdUxrGS_I2d1IM1hY,7255
 hindsight_api/migrations.py,sha256=VY-ILJLWEY1IaeJgQ2jlAVUtPLzq_41Dytg_DjuF0GA,6402
-hindsight_api/models.py,sha256=xNPyEq5xKuPjp83yPEM3_SrwUODRFQr8LRcyOmSjxqA,12604
+hindsight_api/models.py,sha256=euUSdFEVgpH4aXumGtydjfXpY_YPlkeV3i9_A2thIuk,12610
 hindsight_api/pg0.py,sha256=scFcYngOwbZ2oOQb7TysnUHgNgPyiN30pjPcIqMDmao,14158
-hindsight_api/api/__init__.py,sha256=lXxJythXFV1DXIQ--4QfIo5pHYmDYJnd41dfAssNTTA,3017
-hindsight_api/api/http.py,sha256=anjh8axWcWF1dyqW3CnE9TUObLKxryjeQxT_keQEMak,71551
-hindsight_api/api/mcp.py,sha256=1fqeKBh3K0lJ5jodYysOTnDOWNjSzA08g8v2_k8HOlU,7734
+hindsight_api/api/__init__.py,sha256=Ih1BKXK5MW75lyyFAyU1JKU7eZYj4kp5UG175pGVYCM,3017
+hindsight_api/api/http.py,sha256=wl9O6TCHe6tgx48MZD9BfJmIuthWvTj1-UECs0OIz8M,71628
+hindsight_api/api/mcp.py,sha256=lxgPEvTsfsftd2f3qhFvfk4iuDa_sIxmerjtYv6nxJI,7740
 hindsight_api/engine/__init__.py,sha256=5DU5DvnJdzkrgNgKchpzkiJr-37I-kE1tegJg2LF04k,1214
 hindsight_api/engine/cross_encoder.py,sha256=kfwLiqlQUfvOgLyrkRReO1wWlO020lGbLXY8U0jKiPA,2875
 hindsight_api/engine/db_utils.py,sha256=p1Ne70wPP327xdPI_XjMfnagilY8sknbkhEIZuED6DU,2724
 hindsight_api/engine/embeddings.py,sha256=a0wox2SCIE7ezgy-B5_23Cp1_icYiUR3g06hPpzi_ck,3586
 hindsight_api/engine/entity_resolver.py,sha256=y_KWDkWaJwKluhGgJYAr_Amg4GTzyJAnrmRKnsyevsk,21737
-hindsight_api/engine/llm_wrapper.py,sha256=HeAJDwNZjDkWR-6SVIrRJ7XmP4V-euciMrSLfjOLRYg,11378
-hindsight_api/engine/memory_engine.py,sha256=3vUcZRSUIOYCMf_R_tqDjT1Q3FpAoVSrhG026weJEYQ,127985
+hindsight_api/engine/llm_wrapper.py,sha256=NkoE3ZkW9yPExj-8o9YjUyp93Ko74PEi7qtglIBupGs,17103
+hindsight_api/engine/memory_engine.py,sha256=ke8sdyoVjlN0_xF_b0Myzwwd4D0isHDyS043rdGL__w,128225
 hindsight_api/engine/query_analyzer.py,sha256=K0QCg7tsbqtwC7TR5wt3FPoP8QDuZsX9r0Zljc8nnYo,19733
-hindsight_api/engine/response_models.py,sha256=eRafLz6JTRUsTZadfZxffMYNEz6kho3cF31m8xc4t4c,8783
+hindsight_api/engine/response_models.py,sha256=6Qm3-kCtaFC_YlCtZSj46mWqTBbNM1l8nOxXO1al9q8,8799
 hindsight_api/engine/task_backend.py,sha256=ojxMC9PeHdnkWVs2ozeqycjI_1mmpkDa0_Qfej9AHrg,7287
 hindsight_api/engine/utils.py,sha256=VAjpZSbdiwhlE6cDlYfTt_-5hIJ--0xtfixETK0LPSk,6910
 hindsight_api/engine/retain/__init__.py,sha256=L_QuR1YLHsJ7OCmVFNsZe8WDjbsTTHL-wCiUXtw1aUE,1230
-hindsight_api/engine/retain/bank_utils.py,sha256=fvUR7bAlStLJ1z_kbs3AuF2XzPrMtXgHCA1u8uoU5nI,14482
+hindsight_api/engine/retain/bank_utils.py,sha256=anEF5I6rX_jQRe58EiSyHgRMzSRYNodGp-a2lvivei8,14482
 hindsight_api/engine/retain/chunk_storage.py,sha256=rjmfnllS185tmjJGkMjWZ9q_6hJO4N6Ll9jgPx6f5xo,2081
 hindsight_api/engine/retain/deduplication.py,sha256=9YXgVI_m1Mtz5Cv46ZceCEs0GwpLqTPHrZ-vlWlXk6I,3313
 hindsight_api/engine/retain/embedding_processing.py,sha256=cHTt3rPvDCWBWVPfSeg6bwH8HoXYGmP4bvS21boNONI,1734
 hindsight_api/engine/retain/embedding_utils.py,sha256=Q24h_iw6pRAW2vDWPvauWY1o3bXLzW3eWvSxDALDiE0,1588
 hindsight_api/engine/retain/entity_processing.py,sha256=meHOjsFzdvh1tbe6YlTofhcUs2Y6TcAN3S-0EKOvFP0,2705
-hindsight_api/engine/retain/fact_extraction.py,sha256=vOIlag9rJ8_8Q-TfOhMY88PeJpUyFIp0i7vdEyzbJLY,46125
-hindsight_api/engine/retain/fact_storage.py,sha256=gRRQf_FCLsj5lUvdlOaxJsS5JosM6IhO_pik8Ur8VFg,5717
+hindsight_api/engine/retain/fact_extraction.py,sha256=vMDDtGCoT9-nfAvLJXy0VIB_-EVlMv6hy_pXMIkCXcY,46171
+hindsight_api/engine/retain/fact_storage.py,sha256=07YoLYoH2QsoqrPUhMMFF3ThrLRnJKOkDoWTgrIJmK4,5717
 hindsight_api/engine/retain/link_creation.py,sha256=XJx7U3HboJLHtGgt_tHGsCa58lGo2ZyywzMNosrY9Xc,3154
-hindsight_api/engine/retain/link_utils.py,sha256=PAXalIhAPZGcJv8EugcpwNgoWZ2D_ciVU3brHL-m090,26226
+hindsight_api/engine/retain/link_utils.py,sha256=KkPE0TixurCacfpeAAm_HC8Pva4ZsQBkPPSz90uaq8Y,29924
 hindsight_api/engine/retain/orchestrator.py,sha256=I-EVH2REQLE3CypvWjcB9iZoJcl6dhXo3QPJMeWUz_4,17524
-hindsight_api/engine/retain/types.py,sha256=AJPYxMy0Fh7zje2TPKXjPnr1QxaU7aVBCvjfCaPqvt8,6218
+hindsight_api/engine/retain/types.py,sha256=zUEQqZty1UQH76shmkaG9kjr4rlrmwKc65E2vmoOZAw,6224
 hindsight_api/engine/search/__init__.py,sha256=7X6U10bVw0JRWxQdE5RCfVpawDlSUldi1mPoCzY0V0A,363
 hindsight_api/engine/search/fusion.py,sha256=so6LU7kWRR-VJd1Pxlu8idRJ7P2WLCoDwXUnb8jQifo,4309
 hindsight_api/engine/search/observation_utils.py,sha256=SPrDx6M0daJ_zLLkk78GlQIG3EL7DqMKSu_etKerUfU,4331
@@ -38,13 +38,13 @@ hindsight_api/engine/search/reranking.py,sha256=Bk5i5kal5yy4CM8m2uSxAumLPgLeHdnc
 hindsight_api/engine/search/retrieval.py,sha256=kfQTU34LPLgB1QVcCAv7v2IPhOB2ag68xJ8RzvdSP10,19661
 hindsight_api/engine/search/scoring.py,sha256=feFPalpbIMndp8j2Ab0zvu7fRq3c43Wmzrjw3piQ0eM,5167
 hindsight_api/engine/search/temporal_extraction.py,sha256=5klrZdza3mkgk5A15_m_j4IIfOHMc6fUR9UJuzLa790,1812
-hindsight_api/engine/search/think_utils.py,sha256=OOrMKEpkHvMD0-vmLZPk18s2AISLqPCJO68CyRccQDI,9629
-hindsight_api/engine/search/trace.py,sha256=GT86_LVKMyG2mw6EJzPjafvbqaot6XVy5fZ033pMXG8,11036
+hindsight_api/engine/search/think_utils.py,sha256=KgWt8aasC-oQfKXnm5kZamIk5zrGUYH_2-3ExAmw4vU,9629
+hindsight_api/engine/search/trace.py,sha256=Hx-siW9yAfqZoK9LG6esbed0vQuHMNsGxSvCg4FK6-4,11042
 hindsight_api/engine/search/tracer.py,sha256=mcM9qZpj3YFudrBCESwc6YKNAiWIMx1lScXWn5ru-ok,15017
 hindsight_api/engine/search/types.py,sha256=qIeHW_gT7f291vteTZXygAM8oAaPp2dq6uEdvOyOwzs,5488
 hindsight_api/web/__init__.py,sha256=WABqyqiAVFJJWOhKCytkj5Vcb61eAsRib3Ek7IMX6_U,378
 hindsight_api/web/server.py,sha256=l-Tw8G9IRdcSay-KWiUT4VlIJBzxbe-TV0rjX0fwLMc,4464
-hindsight_api-0.0.18.dist-info/METADATA,sha256=Z4PYfTvCX6-Y3uACuKioCa9Y4Vj6koiPMWKTMfXbcfY,1496
-hindsight_api-0.0.18.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-hindsight_api-0.0.18.dist-info/entry_points.txt,sha256=53Fn-VxtkqreZhOPTJB_FupH7e5GyiMY3gzEp22d8xs,57
-hindsight_api-0.0.18.dist-info/RECORD,,
+hindsight_api-0.0.21.dist-info/METADATA,sha256=cbvXe_7S66Lcw8krbS7Vh7rfKV1naljnMKVj9W8Nao0,1531
+hindsight_api-0.0.21.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+hindsight_api-0.0.21.dist-info/entry_points.txt,sha256=53Fn-VxtkqreZhOPTJB_FupH7e5GyiMY3gzEp22d8xs,57
+hindsight_api-0.0.21.dist-info/RECORD,,