hindsight-api 0.3.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. hindsight_api/admin/cli.py +59 -0
  2. hindsight_api/alembic/versions/h3c4d5e6f7g8_mental_models_v4.py +112 -0
  3. hindsight_api/alembic/versions/i4d5e6f7g8h9_delete_opinions.py +41 -0
  4. hindsight_api/alembic/versions/j5e6f7g8h9i0_mental_model_versions.py +95 -0
  5. hindsight_api/alembic/versions/k6f7g8h9i0j1_add_directive_subtype.py +58 -0
  6. hindsight_api/alembic/versions/l7g8h9i0j1k2_add_worker_columns.py +109 -0
  7. hindsight_api/alembic/versions/m8h9i0j1k2l3_mental_model_id_to_text.py +41 -0
  8. hindsight_api/alembic/versions/n9i0j1k2l3m4_learnings_and_pinned_reflections.py +134 -0
  9. hindsight_api/alembic/versions/o0j1k2l3m4n5_migrate_mental_models_data.py +113 -0
  10. hindsight_api/alembic/versions/p1k2l3m4n5o6_new_knowledge_architecture.py +194 -0
  11. hindsight_api/alembic/versions/q2l3m4n5o6p7_fix_mental_model_fact_type.py +50 -0
  12. hindsight_api/alembic/versions/r3m4n5o6p7q8_add_reflect_response_to_reflections.py +47 -0
  13. hindsight_api/alembic/versions/s4n5o6p7q8r9_add_consolidated_at_to_memory_units.py +53 -0
  14. hindsight_api/alembic/versions/t5o6p7q8r9s0_rename_mental_models_to_observations.py +134 -0
  15. hindsight_api/alembic/versions/u6p7q8r9s0t1_mental_models_text_id.py +41 -0
  16. hindsight_api/alembic/versions/v7q8r9s0t1u2_add_max_tokens_to_mental_models.py +50 -0
  17. hindsight_api/api/http.py +1119 -93
  18. hindsight_api/api/mcp.py +11 -191
  19. hindsight_api/config.py +145 -45
  20. hindsight_api/engine/consolidation/__init__.py +5 -0
  21. hindsight_api/engine/consolidation/consolidator.py +859 -0
  22. hindsight_api/engine/consolidation/prompts.py +69 -0
  23. hindsight_api/engine/cross_encoder.py +114 -9
  24. hindsight_api/engine/directives/__init__.py +5 -0
  25. hindsight_api/engine/directives/models.py +37 -0
  26. hindsight_api/engine/embeddings.py +102 -5
  27. hindsight_api/engine/interface.py +32 -13
  28. hindsight_api/engine/llm_wrapper.py +505 -43
  29. hindsight_api/engine/memory_engine.py +2090 -1089
  30. hindsight_api/engine/mental_models/__init__.py +14 -0
  31. hindsight_api/engine/mental_models/models.py +53 -0
  32. hindsight_api/engine/reflect/__init__.py +18 -0
  33. hindsight_api/engine/reflect/agent.py +933 -0
  34. hindsight_api/engine/reflect/models.py +109 -0
  35. hindsight_api/engine/reflect/observations.py +186 -0
  36. hindsight_api/engine/reflect/prompts.py +483 -0
  37. hindsight_api/engine/reflect/tools.py +437 -0
  38. hindsight_api/engine/reflect/tools_schema.py +250 -0
  39. hindsight_api/engine/response_models.py +130 -4
  40. hindsight_api/engine/retain/bank_utils.py +79 -201
  41. hindsight_api/engine/retain/fact_extraction.py +81 -48
  42. hindsight_api/engine/retain/fact_storage.py +5 -8
  43. hindsight_api/engine/retain/link_utils.py +5 -8
  44. hindsight_api/engine/retain/orchestrator.py +1 -55
  45. hindsight_api/engine/retain/types.py +2 -2
  46. hindsight_api/engine/search/graph_retrieval.py +2 -2
  47. hindsight_api/engine/search/link_expansion_retrieval.py +164 -29
  48. hindsight_api/engine/search/mpfp_retrieval.py +1 -1
  49. hindsight_api/engine/search/retrieval.py +14 -14
  50. hindsight_api/engine/search/think_utils.py +41 -140
  51. hindsight_api/engine/search/trace.py +0 -1
  52. hindsight_api/engine/search/tracer.py +2 -5
  53. hindsight_api/engine/search/types.py +0 -3
  54. hindsight_api/engine/task_backend.py +112 -196
  55. hindsight_api/engine/utils.py +0 -151
  56. hindsight_api/extensions/__init__.py +10 -1
  57. hindsight_api/extensions/builtin/tenant.py +5 -1
  58. hindsight_api/extensions/operation_validator.py +81 -4
  59. hindsight_api/extensions/tenant.py +26 -0
  60. hindsight_api/main.py +16 -5
  61. hindsight_api/mcp_local.py +12 -53
  62. hindsight_api/mcp_tools.py +494 -0
  63. hindsight_api/models.py +0 -2
  64. hindsight_api/worker/__init__.py +11 -0
  65. hindsight_api/worker/main.py +296 -0
  66. hindsight_api/worker/poller.py +486 -0
  67. {hindsight_api-0.3.0.dist-info → hindsight_api-0.4.0.dist-info}/METADATA +12 -6
  68. hindsight_api-0.4.0.dist-info/RECORD +112 -0
  69. {hindsight_api-0.3.0.dist-info → hindsight_api-0.4.0.dist-info}/entry_points.txt +1 -0
  70. hindsight_api/engine/retain/observation_regeneration.py +0 -254
  71. hindsight_api/engine/search/observation_utils.py +0 -125
  72. hindsight_api/engine/search/scoring.py +0 -159
  73. hindsight_api-0.3.0.dist-info/RECORD +0 -82
  74. {hindsight_api-0.3.0.dist-info → hindsight_api-0.4.0.dist-info}/WHEEL +0 -0
@@ -449,7 +449,7 @@ async def fetch_memory_units_by_ids(
449
449
  rows = await conn.fetch(
450
450
  f"""
451
451
  SELECT id, text, context, event_date, occurred_start, occurred_end,
452
- mentioned_at, access_count, embedding, fact_type, document_id, chunk_id, tags
452
+ mentioned_at, embedding, fact_type, document_id, chunk_id, tags
453
453
  FROM {fq_table("memory_units")}
454
454
  WHERE id = ANY($1::uuid[])
455
455
  AND fact_type = $2
@@ -116,7 +116,7 @@ async def retrieve_semantic(
116
116
 
117
117
  results = await conn.fetch(
118
118
  f"""
119
- SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id, tags,
119
+ SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, embedding, fact_type, document_id, chunk_id, tags,
120
120
  1 - (embedding <=> $1::vector) AS similarity
121
121
  FROM {fq_table("memory_units")}
122
122
  WHERE bank_id = $2
@@ -180,7 +180,7 @@ async def retrieve_bm25(
180
180
 
181
181
  results = await conn.fetch(
182
182
  f"""
183
- SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id, tags,
183
+ SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, embedding, fact_type, document_id, chunk_id, tags,
184
184
  ts_rank_cd(search_vector, to_tsquery('english', $1)) AS bm25_score
185
185
  FROM {fq_table("memory_units")}
186
186
  WHERE bank_id = $2
@@ -237,7 +237,7 @@ async def retrieve_semantic_bm25_combined(
237
237
  results = await conn.fetch(
238
238
  f"""
239
239
  WITH semantic_ranked AS (
240
- SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id, tags,
240
+ SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, embedding, fact_type, document_id, chunk_id, tags,
241
241
  1 - (embedding <=> $1::vector) AS similarity,
242
242
  NULL::float AS bm25_score,
243
243
  'semantic' AS source,
@@ -249,7 +249,7 @@ async def retrieve_semantic_bm25_combined(
249
249
  AND (1 - (embedding <=> $1::vector)) >= 0.3
250
250
  {tags_clause}
251
251
  )
252
- SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id, tags,
252
+ SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, embedding, fact_type, document_id, chunk_id, tags,
253
253
  similarity, bm25_score, source
254
254
  FROM semantic_ranked
255
255
  WHERE rn <= $4
@@ -281,7 +281,7 @@ async def retrieve_semantic_bm25_combined(
281
281
  results = await conn.fetch(
282
282
  f"""
283
283
  WITH semantic_ranked AS (
284
- SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id, tags,
284
+ SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, embedding, fact_type, document_id, chunk_id, tags,
285
285
  1 - (embedding <=> $1::vector) AS similarity,
286
286
  NULL::float AS bm25_score,
287
287
  'semantic' AS source,
@@ -294,7 +294,7 @@ async def retrieve_semantic_bm25_combined(
294
294
  {tags_clause}
295
295
  ),
296
296
  bm25_ranked AS (
297
- SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id, tags,
297
+ SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, embedding, fact_type, document_id, chunk_id, tags,
298
298
  NULL::float AS similarity,
299
299
  ts_rank_cd(search_vector, to_tsquery('english', $5)) AS bm25_score,
300
300
  'bm25' AS source,
@@ -306,12 +306,12 @@ async def retrieve_semantic_bm25_combined(
306
306
  {tags_clause}
307
307
  ),
308
308
  semantic AS (
309
- SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id, tags,
309
+ SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, embedding, fact_type, document_id, chunk_id, tags,
310
310
  similarity, bm25_score, source
311
311
  FROM semantic_ranked WHERE rn <= $4
312
312
  ),
313
313
  bm25 AS (
314
- SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id, tags,
314
+ SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, embedding, fact_type, document_id, chunk_id, tags,
315
315
  similarity, bm25_score, source
316
316
  FROM bm25_ranked WHERE rn <= $4
317
317
  )
@@ -386,7 +386,7 @@ async def retrieve_temporal_combined(
386
386
  entry_points = await conn.fetch(
387
387
  f"""
388
388
  WITH ranked_entries AS (
389
- SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id, tags,
389
+ SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, embedding, fact_type, document_id, chunk_id, tags,
390
390
  1 - (embedding <=> $1::vector) AS similarity,
391
391
  ROW_NUMBER() OVER (PARTITION BY fact_type ORDER BY COALESCE(occurred_start, mentioned_at, occurred_end) DESC, embedding <=> $1::vector) AS rn
392
392
  FROM {fq_table("memory_units")}
@@ -406,7 +406,7 @@ async def retrieve_temporal_combined(
406
406
  AND (1 - (embedding <=> $1::vector)) >= $6
407
407
  {tags_clause}
408
408
  )
409
- SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id, tags, similarity
409
+ SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, embedding, fact_type, document_id, chunk_id, tags, similarity
410
410
  FROM ranked_entries
411
411
  WHERE rn <= 10
412
412
  """,
@@ -486,7 +486,7 @@ async def retrieve_temporal_combined(
486
486
 
487
487
  neighbors = await conn.fetch(
488
488
  f"""
489
- SELECT mu.id, mu.text, mu.context, mu.event_date, mu.occurred_start, mu.occurred_end, mu.mentioned_at, mu.access_count, mu.embedding, mu.fact_type, mu.document_id, mu.chunk_id, mu.tags,
489
+ SELECT mu.id, mu.text, mu.context, mu.event_date, mu.occurred_start, mu.occurred_end, mu.mentioned_at, mu.embedding, mu.fact_type, mu.document_id, mu.chunk_id, mu.tags,
490
490
  ml.weight, ml.link_type, ml.from_unit_id,
491
491
  1 - (mu.embedding <=> $1::vector) AS similarity
492
492
  FROM {fq_table("memory_links")} ml
@@ -610,7 +610,7 @@ async def retrieve_temporal(
610
610
 
611
611
  entry_points = await conn.fetch(
612
612
  f"""
613
- SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, access_count, embedding, fact_type, document_id, chunk_id, tags,
613
+ SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at, embedding, fact_type, document_id, chunk_id, tags,
614
614
  1 - (embedding <=> $1::vector) AS similarity
615
615
  FROM {fq_table("memory_units")}
616
616
  WHERE bank_id = $2
@@ -691,7 +691,7 @@ async def retrieve_temporal(
691
691
  # Batch fetch all neighbors for this batch of nodes
692
692
  neighbors = await conn.fetch(
693
693
  f"""
694
- SELECT mu.id, mu.text, mu.context, mu.event_date, mu.occurred_start, mu.occurred_end, mu.mentioned_at, mu.access_count, mu.embedding, mu.fact_type, mu.document_id, mu.chunk_id,
694
+ SELECT mu.id, mu.text, mu.context, mu.event_date, mu.occurred_start, mu.occurred_end, mu.mentioned_at, mu.embedding, mu.fact_type, mu.document_id, mu.chunk_id,
695
695
  ml.weight, ml.link_type, ml.from_unit_id,
696
696
  1 - (mu.embedding <=> $1::vector) AS similarity
697
697
  FROM {fq_table("memory_links")} ml
@@ -1023,7 +1023,7 @@ async def _get_temporal_entry_points(
1023
1023
  rows = await conn.fetch(
1024
1024
  f"""
1025
1025
  SELECT id, text, context, event_date, occurred_start, occurred_end, mentioned_at,
1026
- access_count, embedding, fact_type, document_id, chunk_id,
1026
+ embedding, fact_type, document_id, chunk_id,
1027
1027
  1 - (embedding <=> $1::vector) AS similarity
1028
1028
  FROM {fq_table("memory_units")}
1029
1029
  WHERE bank_id = $2
@@ -3,31 +3,13 @@ Think operation utilities for formulating answers based on agent and world facts
3
3
  """
4
4
 
5
5
  import logging
6
- import re
7
6
  from datetime import datetime
8
7
 
9
- from pydantic import BaseModel, Field
10
-
11
8
  from ..response_models import DispositionTraits, MemoryFact
12
9
 
13
10
  logger = logging.getLogger(__name__)
14
11
 
15
12
 
16
- class Opinion(BaseModel):
17
- """An opinion formed by the bank."""
18
-
19
- opinion: str = Field(description="The opinion or perspective with reasoning included")
20
- confidence: float = Field(description="Confidence score for this opinion (0.0 to 1.0, where 1.0 is very confident)")
21
-
22
-
23
- class OpinionExtractionResponse(BaseModel):
24
- """Response containing extracted opinions."""
25
-
26
- opinions: list[Opinion] = Field(
27
- default_factory=list, description="List of opinions formed with their supporting reasons and confidence scores"
28
- )
29
-
30
-
31
13
  def describe_trait_level(value: int) -> str:
32
14
  """Convert trait value (1-5) to descriptive text."""
33
15
  levels = {1: "very low", 2: "low", 3: "moderate", 4: "high", 5: "very high"}
@@ -93,17 +75,46 @@ def format_facts_for_prompt(facts: list[MemoryFact]) -> str:
93
75
  return json.dumps(formatted, indent=2)
94
76
 
95
77
 
78
+ def format_entity_summaries_for_prompt(entities: dict) -> str:
79
+ """Format entity summaries for inclusion in the reflect prompt.
80
+
81
+ Args:
82
+ entities: Dict mapping entity name to EntityState objects
83
+
84
+ Returns:
85
+ Formatted string with entity summaries, or empty string if no summaries
86
+ """
87
+ if not entities:
88
+ return ""
89
+
90
+ summaries = []
91
+ for name, state in entities.items():
92
+ # Get summary from observations (summary is stored as single observation)
93
+ if state.observations:
94
+ summary_text = state.observations[0].text
95
+ summaries.append(f"## {name}\n{summary_text}")
96
+
97
+ if not summaries:
98
+ return ""
99
+
100
+ return "\n\n".join(summaries)
101
+
102
+
96
103
  def build_think_prompt(
97
104
  agent_facts_text: str,
98
105
  world_facts_text: str,
99
- opinion_facts_text: str,
100
106
  query: str,
101
107
  name: str,
102
108
  disposition: DispositionTraits,
103
109
  background: str,
104
110
  context: str | None = None,
111
+ entity_summaries_text: str | None = None,
105
112
  ) -> str:
106
- """Build the think prompt for the LLM."""
113
+ """Build the think prompt for the LLM.
114
+
115
+ Note: opinion_facts_text parameter removed - opinions are now stored as mental models
116
+ and included via entity_summaries_text.
117
+ """
107
118
  disposition_desc = build_disposition_description(disposition)
108
119
 
109
120
  name_section = f"""
@@ -125,6 +136,14 @@ Your background:
125
136
  ADDITIONAL CONTEXT:
126
137
  {context}
127
138
 
139
+ """
140
+
141
+ entity_section = ""
142
+ if entity_summaries_text:
143
+ entity_section = f"""
144
+ KEY PEOPLE, PLACES & THINGS I KNOW ABOUT:
145
+ {entity_summaries_text}
146
+
128
147
  """
129
148
 
130
149
  return f"""Here's what I know and have experienced:
@@ -135,14 +154,11 @@ MY IDENTITY & EXPERIENCES:
135
154
  WHAT I KNOW ABOUT THE WORLD:
136
155
  {world_facts_text}
137
156
 
138
- MY EXISTING OPINIONS & BELIEFS:
139
- {opinion_facts_text}
140
-
141
- {context_section}{name_section}{disposition_desc}{background_section}
157
+ {entity_section}{context_section}{name_section}{disposition_desc}{background_section}
142
158
 
143
159
  QUESTION: {query}
144
160
 
145
- Based on everything I know, believe, and who I am (including my name, disposition and background), here's what I genuinely think about this question. I'll draw on my experiences, knowledge, opinions, and personal traits to give you my honest perspective."""
161
+ Based on everything I know, believe, and who I am (including my name, disposition and background), here's what I genuinely think about this question. I'll draw on my experiences, knowledge, and personal traits to give you my honest perspective."""
146
162
 
147
163
 
148
164
  def get_system_message(disposition: DispositionTraits) -> str:
@@ -175,122 +191,11 @@ def get_system_message(disposition: DispositionTraits) -> str:
175
191
  return f"You are a person with your own thoughts, experiences, opinions, and disposition. When asked a question, respond naturally like a human would - share what you genuinely think based on what you know and have experienced. {disposition_instruction} Be direct, express your views confidently, and use 'I think', 'I believe', 'in my view', etc. Respond in plain text without markdown formatting. IMPORTANT: Detect the language of the question and respond in the SAME language. Do not translate to English if the question is in another language."
176
192
 
177
193
 
178
- async def extract_opinions_from_text(llm_config, text: str, query: str) -> list[Opinion]:
179
- """
180
- Extract opinions with reasons and confidence from text using LLM.
181
-
182
- Args:
183
- llm_config: LLM configuration to use
184
- text: Text to extract opinions from
185
- query: The original query that prompted this response
186
-
187
- Returns:
188
- List of Opinion objects with text and confidence
189
- """
190
- extraction_prompt = f"""Extract any NEW opinions or perspectives from the answer below and rewrite them in FIRST-PERSON as if YOU are stating the opinion directly.
191
-
192
- ORIGINAL QUESTION:
193
- {query}
194
-
195
- ANSWER PROVIDED:
196
- {text}
197
-
198
- Your task: Find opinions in the answer and rewrite them AS IF YOU ARE THE ONE SAYING THEM.
199
-
200
- An opinion is a judgment, viewpoint, or conclusion that goes beyond just stating facts.
201
-
202
- IMPORTANT: Do NOT extract statements like:
203
- - "I don't have enough information"
204
- - "The facts don't contain information about X"
205
- - "I cannot answer because..."
206
-
207
- ONLY extract actual opinions about substantive topics.
208
-
209
- CRITICAL FORMAT REQUIREMENTS:
210
- 1. **ALWAYS start with first-person phrases**: "I think...", "I believe...", "In my view...", "I've come to believe...", "Previously I thought... but now..."
211
- 2. **NEVER use third-person**: Do NOT say "The speaker thinks..." or "They believe..." - always use "I"
212
- 3. Include the reasoning naturally within the statement
213
- 4. Provide a confidence score (0.0 to 1.0)
214
-
215
- CORRECT Examples (✓ FIRST-PERSON):
216
- - "I think Alice is more reliable because she consistently delivers on time and writes clean code"
217
- - "Previously I thought all engineers were equal, but now I feel that experience and track record really matter"
218
- - "I believe reliability is best measured by consistent output over time"
219
- - "I've come to believe that track records are more important than potential"
220
-
221
- WRONG Examples (✗ THIRD-PERSON - DO NOT USE):
222
- - "The speaker thinks Alice is more reliable"
223
- - "They believe reliability matters"
224
- - "It is believed that Alice is better"
225
-
226
- If no genuine opinions are expressed (e.g., the response just says "I don't know"), return an empty list."""
227
-
228
- try:
229
- result = await llm_config.call(
230
- messages=[
231
- {
232
- "role": "system",
233
- "content": "You are converting opinions from text into first-person statements. Always use 'I think', 'I believe', 'I feel', etc. NEVER use third-person like 'The speaker' or 'They'.",
234
- },
235
- {"role": "user", "content": extraction_prompt},
236
- ],
237
- response_format=OpinionExtractionResponse,
238
- scope="memory_extract_opinion",
239
- )
240
-
241
- # Format opinions with confidence score and convert to first-person
242
- formatted_opinions = []
243
- for op in result.opinions:
244
- # Convert third-person to first-person if needed
245
- opinion_text = op.opinion
246
-
247
- # Replace common third-person patterns with first-person
248
- def singularize_verb(verb):
249
- if verb.endswith("es"):
250
- return verb[:-1] # believes -> believe
251
- elif verb.endswith("s"):
252
- return verb[:-1] # thinks -> think
253
- return verb
254
-
255
- # Pattern: "The speaker/user [verb]..." -> "I [verb]..."
256
- match = re.match(
257
- r"^(The speaker|The user|They|It is believed) (believes?|thinks?|feels?|says|asserts?|considers?)(\s+that)?(.*)$",
258
- opinion_text,
259
- re.IGNORECASE,
260
- )
261
- if match:
262
- verb = singularize_verb(match.group(2))
263
- that_part = match.group(3) or "" # Keep " that" if present
264
- rest = match.group(4)
265
- opinion_text = f"I {verb}{that_part}{rest}"
266
-
267
- # If still doesn't start with first-person, prepend "I believe that "
268
- first_person_starters = [
269
- "I think",
270
- "I believe",
271
- "I feel",
272
- "In my view",
273
- "I've come to believe",
274
- "Previously I",
275
- ]
276
- if not any(opinion_text.startswith(starter) for starter in first_person_starters):
277
- opinion_text = "I believe that " + opinion_text[0].lower() + opinion_text[1:]
278
-
279
- formatted_opinions.append(Opinion(opinion=opinion_text, confidence=op.confidence))
280
-
281
- return formatted_opinions
282
-
283
- except Exception as e:
284
- logger.warning(f"Failed to extract opinions: {str(e)}")
285
- return []
286
-
287
-
288
194
  async def reflect(
289
195
  llm_config,
290
196
  query: str,
291
197
  experience_facts: list[str] = None,
292
198
  world_facts: list[str] = None,
293
- opinion_facts: list[str] = None,
294
199
  name: str = "Assistant",
295
200
  disposition: DispositionTraits = None,
296
201
  background: str = "",
@@ -307,7 +212,6 @@ async def reflect(
307
212
  query: Question to answer
308
213
  experience_facts: List of experience/agent fact strings
309
214
  world_facts: List of world fact strings
310
- opinion_facts: List of opinion fact strings
311
215
  name: Name of the agent/persona
312
216
  disposition: Disposition traits (defaults to neutral)
313
217
  background: Background information
@@ -328,18 +232,15 @@ async def reflect(
328
232
 
329
233
  agent_results = to_memory_facts(experience_facts or [], "experience")
330
234
  world_results = to_memory_facts(world_facts or [], "world")
331
- opinion_results = to_memory_facts(opinion_facts or [], "opinion")
332
235
 
333
236
  # Format facts for prompt
334
237
  agent_facts_text = format_facts_for_prompt(agent_results)
335
238
  world_facts_text = format_facts_for_prompt(world_results)
336
- opinion_facts_text = format_facts_for_prompt(opinion_results)
337
239
 
338
240
  # Build prompt
339
241
  prompt = build_think_prompt(
340
242
  agent_facts_text=agent_facts_text,
341
243
  world_facts_text=world_facts_text,
342
- opinion_facts_text=opinion_facts_text,
343
244
  query=query,
344
245
  name=name,
345
246
  disposition=disposition,
@@ -85,7 +85,6 @@ class NodeVisit(BaseModel):
85
85
  text: str = Field(description="Memory unit text content")
86
86
  context: str = Field(description="Memory unit context")
87
87
  event_date: datetime | None = Field(default=None, description="When the memory occurred")
88
- access_count: int = Field(description="Number of times accessed before this search")
89
88
 
90
89
  # How this node was reached
91
90
  is_entry_point: bool = Field(description="Whether this is an entry point")
@@ -136,7 +136,6 @@ class SearchTracer:
136
136
  text: str,
137
137
  context: str,
138
138
  event_date: datetime | None,
139
- access_count: int,
140
139
  is_entry_point: bool,
141
140
  parent_node_id: str | None,
142
141
  link_type: Literal["temporal", "semantic", "entity"] | None,
@@ -155,7 +154,6 @@ class SearchTracer:
155
154
  text: Memory unit text
156
155
  context: Memory unit context
157
156
  event_date: When the memory occurred
158
- access_count: Access count before this search
159
157
  is_entry_point: Whether this is an entry point
160
158
  parent_node_id: Node that led here (None for entry points)
161
159
  link_type: Type of link from parent
@@ -194,7 +192,6 @@ class SearchTracer:
194
192
  text=text,
195
193
  context=context,
196
194
  event_date=event_date,
197
- access_count=access_count,
198
195
  is_entry_point=is_entry_point,
199
196
  parent_node_id=parent_node_id,
200
197
  link_type=link_type,
@@ -333,8 +330,8 @@ class SearchTracer:
333
330
  RetrievalResult(
334
331
  rank=rank,
335
332
  node_id=doc_id,
336
- text=data.get("text", ""),
337
- context=data.get("context", ""),
333
+ text=data.get("text") or "",
334
+ context=data.get("context") or "",
338
335
  event_date=data.get("event_date"),
339
336
  fact_type=data.get("fact_type") or fact_type,
340
337
  score=score,
@@ -46,7 +46,6 @@ class RetrievalResult:
46
46
  mentioned_at: datetime | None = None
47
47
  document_id: str | None = None
48
48
  chunk_id: str | None = None
49
- access_count: int = 0
50
49
  embedding: list[float] | None = None
51
50
  tags: list[str] | None = None # Visibility scope tags
52
51
 
@@ -71,7 +70,6 @@ class RetrievalResult:
71
70
  mentioned_at=row.get("mentioned_at"),
72
71
  document_id=row.get("document_id"),
73
72
  chunk_id=row.get("chunk_id"),
74
- access_count=row.get("access_count", 0),
75
73
  embedding=row.get("embedding"),
76
74
  tags=row.get("tags"),
77
75
  similarity=row.get("similarity"),
@@ -156,7 +154,6 @@ class ScoredResult:
156
154
  "mentioned_at": self.retrieval.mentioned_at,
157
155
  "document_id": self.retrieval.document_id,
158
156
  "chunk_id": self.retrieval.chunk_id,
159
- "access_count": self.retrieval.access_count,
160
157
  "embedding": self.retrieval.embedding,
161
158
  "tags": self.retrieval.tags,
162
159
  "semantic_similarity": self.retrieval.similarity,