hindsight-api 0.2.1__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hindsight_api/admin/__init__.py +1 -0
- hindsight_api/admin/cli.py +311 -0
- hindsight_api/alembic/versions/f1a2b3c4d5e6_add_memory_links_composite_index.py +44 -0
- hindsight_api/alembic/versions/g2a3b4c5d6e7_add_tags_column.py +48 -0
- hindsight_api/alembic/versions/h3c4d5e6f7g8_mental_models_v4.py +112 -0
- hindsight_api/alembic/versions/i4d5e6f7g8h9_delete_opinions.py +41 -0
- hindsight_api/alembic/versions/j5e6f7g8h9i0_mental_model_versions.py +95 -0
- hindsight_api/alembic/versions/k6f7g8h9i0j1_add_directive_subtype.py +58 -0
- hindsight_api/alembic/versions/l7g8h9i0j1k2_add_worker_columns.py +109 -0
- hindsight_api/alembic/versions/m8h9i0j1k2l3_mental_model_id_to_text.py +41 -0
- hindsight_api/alembic/versions/n9i0j1k2l3m4_learnings_and_pinned_reflections.py +134 -0
- hindsight_api/alembic/versions/o0j1k2l3m4n5_migrate_mental_models_data.py +113 -0
- hindsight_api/alembic/versions/p1k2l3m4n5o6_new_knowledge_architecture.py +194 -0
- hindsight_api/alembic/versions/q2l3m4n5o6p7_fix_mental_model_fact_type.py +50 -0
- hindsight_api/alembic/versions/r3m4n5o6p7q8_add_reflect_response_to_reflections.py +47 -0
- hindsight_api/alembic/versions/s4n5o6p7q8r9_add_consolidated_at_to_memory_units.py +53 -0
- hindsight_api/alembic/versions/t5o6p7q8r9s0_rename_mental_models_to_observations.py +134 -0
- hindsight_api/alembic/versions/u6p7q8r9s0t1_mental_models_text_id.py +41 -0
- hindsight_api/alembic/versions/v7q8r9s0t1u2_add_max_tokens_to_mental_models.py +50 -0
- hindsight_api/api/http.py +1406 -118
- hindsight_api/api/mcp.py +11 -196
- hindsight_api/config.py +359 -27
- hindsight_api/engine/consolidation/__init__.py +5 -0
- hindsight_api/engine/consolidation/consolidator.py +859 -0
- hindsight_api/engine/consolidation/prompts.py +69 -0
- hindsight_api/engine/cross_encoder.py +706 -88
- hindsight_api/engine/db_budget.py +284 -0
- hindsight_api/engine/db_utils.py +11 -0
- hindsight_api/engine/directives/__init__.py +5 -0
- hindsight_api/engine/directives/models.py +37 -0
- hindsight_api/engine/embeddings.py +553 -29
- hindsight_api/engine/entity_resolver.py +8 -5
- hindsight_api/engine/interface.py +40 -17
- hindsight_api/engine/llm_wrapper.py +744 -68
- hindsight_api/engine/memory_engine.py +2505 -1017
- hindsight_api/engine/mental_models/__init__.py +14 -0
- hindsight_api/engine/mental_models/models.py +53 -0
- hindsight_api/engine/query_analyzer.py +4 -3
- hindsight_api/engine/reflect/__init__.py +18 -0
- hindsight_api/engine/reflect/agent.py +933 -0
- hindsight_api/engine/reflect/models.py +109 -0
- hindsight_api/engine/reflect/observations.py +186 -0
- hindsight_api/engine/reflect/prompts.py +483 -0
- hindsight_api/engine/reflect/tools.py +437 -0
- hindsight_api/engine/reflect/tools_schema.py +250 -0
- hindsight_api/engine/response_models.py +168 -4
- hindsight_api/engine/retain/bank_utils.py +79 -201
- hindsight_api/engine/retain/fact_extraction.py +424 -195
- hindsight_api/engine/retain/fact_storage.py +35 -12
- hindsight_api/engine/retain/link_utils.py +29 -24
- hindsight_api/engine/retain/orchestrator.py +24 -43
- hindsight_api/engine/retain/types.py +11 -2
- hindsight_api/engine/search/graph_retrieval.py +43 -14
- hindsight_api/engine/search/link_expansion_retrieval.py +391 -0
- hindsight_api/engine/search/mpfp_retrieval.py +362 -117
- hindsight_api/engine/search/reranking.py +2 -2
- hindsight_api/engine/search/retrieval.py +848 -201
- hindsight_api/engine/search/tags.py +172 -0
- hindsight_api/engine/search/think_utils.py +42 -141
- hindsight_api/engine/search/trace.py +12 -1
- hindsight_api/engine/search/tracer.py +26 -6
- hindsight_api/engine/search/types.py +21 -3
- hindsight_api/engine/task_backend.py +113 -106
- hindsight_api/engine/utils.py +1 -152
- hindsight_api/extensions/__init__.py +10 -1
- hindsight_api/extensions/builtin/tenant.py +5 -1
- hindsight_api/extensions/context.py +10 -1
- hindsight_api/extensions/operation_validator.py +81 -4
- hindsight_api/extensions/tenant.py +26 -0
- hindsight_api/main.py +69 -6
- hindsight_api/mcp_local.py +12 -53
- hindsight_api/mcp_tools.py +494 -0
- hindsight_api/metrics.py +433 -48
- hindsight_api/migrations.py +141 -1
- hindsight_api/models.py +3 -3
- hindsight_api/pg0.py +53 -0
- hindsight_api/server.py +39 -2
- hindsight_api/worker/__init__.py +11 -0
- hindsight_api/worker/main.py +296 -0
- hindsight_api/worker/poller.py +486 -0
- {hindsight_api-0.2.1.dist-info → hindsight_api-0.4.0.dist-info}/METADATA +16 -6
- hindsight_api-0.4.0.dist-info/RECORD +112 -0
- {hindsight_api-0.2.1.dist-info → hindsight_api-0.4.0.dist-info}/entry_points.txt +2 -0
- hindsight_api/engine/retain/observation_regeneration.py +0 -254
- hindsight_api/engine/search/observation_utils.py +0 -125
- hindsight_api/engine/search/scoring.py +0 -159
- hindsight_api-0.2.1.dist-info/RECORD +0 -75
- {hindsight_api-0.2.1.dist-info → hindsight_api-0.4.0.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,391 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Link Expansion graph retrieval.
|
|
3
|
+
|
|
4
|
+
A simple, fast graph retrieval that expands from seeds via:
|
|
5
|
+
1. Entity links: Find facts sharing entities with seeds (filtered by entity frequency)
|
|
6
|
+
2. Causal links: Find facts causally linked to seeds (top-k by weight)
|
|
7
|
+
|
|
8
|
+
Characteristics:
|
|
9
|
+
- 2-3 DB queries (seed finding + parallel entity/causal expansion)
|
|
10
|
+
- Sublinear: only touches connected facts via indexes
|
|
11
|
+
- No iteration, no propagation, no normalization
|
|
12
|
+
- Target: <100ms
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import logging
|
|
16
|
+
import time
|
|
17
|
+
|
|
18
|
+
from ..db_utils import acquire_with_retry
|
|
19
|
+
from ..memory_engine import fq_table
|
|
20
|
+
from .graph_retrieval import GraphRetriever
|
|
21
|
+
from .tags import TagsMatch, filter_results_by_tags
|
|
22
|
+
from .types import MPFPTimings, RetrievalResult
|
|
23
|
+
|
|
24
|
+
logger = logging.getLogger(__name__)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
async def _find_semantic_seeds(
    conn,
    query_embedding_str: str,
    bank_id: str,
    fact_type: str,
    limit: int = 20,
    threshold: float = 0.3,
    tags: list[str] | None = None,
    tags_match: TagsMatch = "any",
) -> list[RetrievalResult]:
    """Locate seed facts for graph expansion via cosine-similarity search.

    Runs a single pgvector query against ``memory_units``, keeping only rows of
    the requested ``fact_type`` whose similarity to the query embedding meets
    ``threshold``, optionally restricted by a tags visibility clause.

    Args:
        conn: Open asyncpg connection.
        query_embedding_str: Query embedding serialized for ``::vector`` casting.
        bank_id: Memory bank to search within.
        fact_type: Fact type filter applied server-side.
        limit: Maximum number of seeds returned.
        threshold: Minimum cosine similarity (1 - distance) to qualify.
        tags: Optional tag list for visibility filtering.
        tags_match: Tag matching mode ("any" by default).

    Returns:
        Seed rows converted to ``RetrievalResult`` objects, nearest first.
    """
    from .tags import build_tags_where_clause_simple

    # The tags parameter, when present, binds as $6 — right after the five
    # positional arguments assembled below.
    tags_clause = build_tags_where_clause_simple(tags, 6, match=tags_match)

    bind_values: list = [query_embedding_str, bank_id, fact_type, threshold, limit]
    if tags:
        bind_values.append(tags)

    sql = f"""
        SELECT id, text, context, event_date, occurred_start, occurred_end,
               mentioned_at, embedding, fact_type, document_id, chunk_id, tags,
               1 - (embedding <=> $1::vector) AS similarity
        FROM {fq_table("memory_units")}
        WHERE bank_id = $2
          AND embedding IS NOT NULL
          AND fact_type = $3
          AND (1 - (embedding <=> $1::vector)) >= $4
          {tags_clause}
        ORDER BY embedding <=> $1::vector
        LIMIT $5
        """

    records = await conn.fetch(sql, *bind_values)
    return [RetrievalResult.from_db_row(dict(record)) for record in records]
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
class LinkExpansionRetriever(GraphRetriever):
    """
    Graph retrieval via direct link expansion from seeds.

    Expands through entity co-occurrence and causal links in a single query.
    Fast and simple alternative to MPFP.

    Expansion sources, in priority order (merge takes max score per fact):
      1. Entity links — facts sharing entities with seeds (via ``unit_entities``),
         with high-frequency entities excluded to avoid hub blow-up.
      2. Causal links — ``memory_links`` rows of causal types above a weight floor.
      3. Fallback links — semantic/temporal/entity rows in ``memory_links``,
         followed in both directions and down-weighted by 0.5x.
    """

    # max_entity_frequency: entities mentioned in >= this many facts are skipped
    # causal_weight_threshold: minimum link weight for causal AND fallback queries
    # causal_limit_per_seed: NOTE(review): stored but never read in retrieve();
    #   the SQL limits use `budget` instead — confirm whether this is dead config.
    def __init__(
        self,
        max_entity_frequency: int = 500,
        causal_weight_threshold: float = 0.3,
        causal_limit_per_seed: int = 10,
    ):
        """
        Initialize link expansion retriever.

        Args:
            max_entity_frequency: Skip entities appearing in more than this many facts
            causal_weight_threshold: Minimum weight for causal links
            causal_limit_per_seed: Max causal links to follow per seed
        """
        self.max_entity_frequency = max_entity_frequency
        self.causal_weight_threshold = causal_weight_threshold
        self.causal_limit_per_seed = causal_limit_per_seed

    @property
    def name(self) -> str:
        # Strategy identifier used by the retrieval dispatcher / tracing.
        return "link_expansion"

    async def retrieve(
        self,
        pool,
        query_embedding_str: str,
        bank_id: str,
        fact_type: str,
        budget: int,
        query_text: str | None = None,
        semantic_seeds: list[RetrievalResult] | None = None,
        temporal_seeds: list[RetrievalResult] | None = None,
        adjacency=None,
        tags: list[str] | None = None,
        tags_match: TagsMatch = "any",
    ) -> tuple[list[RetrievalResult], MPFPTimings | None]:
        """
        Retrieve facts by expanding links from seeds.

        Args:
            pool: Database connection pool
            query_embedding_str: Query embedding; used to find seeds when
                ``semantic_seeds`` is not supplied, otherwise unused
            bank_id: Memory bank ID
            fact_type: Fact type to filter
            budget: Maximum results to return
            query_text: Original query text (unused)
            semantic_seeds: Pre-computed semantic entry points
            temporal_seeds: Pre-computed temporal entry points
            adjacency: Unused, kept for interface compatibility
            tags: Optional list of tags for visibility filtering (OR matching)

        Returns:
            Tuple of (results, timings)
        """
        start_time = time.time()
        timings = MPFPTimings(fact_type=fact_type)

        # Use single connection for all queries to reduce pool pressure
        # (queries are fast ~50ms each, connection acquisition is the bottleneck)
        async with acquire_with_retry(pool) as conn:
            # Find seeds if not provided
            if semantic_seeds:
                all_seeds = list(semantic_seeds)
            else:
                seeds_start = time.time()
                all_seeds = await _find_semantic_seeds(
                    conn,
                    query_embedding_str,
                    bank_id,
                    fact_type,
                    limit=20,
                    threshold=0.3,
                    tags=tags,
                    tags_match=tags_match,
                )
                timings.seeds_time = time.time() - seeds_start
                logger.debug(
                    f"[LinkExpansion] Found {len(all_seeds)} semantic seeds for fact_type={fact_type} "
                    f"(tags={tags}, tags_match={tags_match})"
                )

            # Add temporal seeds if provided
            if temporal_seeds:
                all_seeds.extend(temporal_seeds)

            if not all_seeds:
                return [], timings

            # Deduplicate seed IDs (semantic and temporal seeds may overlap).
            seed_ids = list({s.id for s in all_seeds})
            timings.pattern_count = len(seed_ids)

            # Run entity and causal expansion sequentially on same connection
            query_start = time.time()

            # For observations, traverse through source_memory_ids to find entity connections.
            # Observations don't have direct unit_entities - they inherit entities via their
            # source world/experience facts.
            #
            # Path: observation → source_memory_ids → world fact → entities →
            # ALL world facts with those entities → their observations (excluding seeds)
            if fact_type == "observation":
                # Debug: Check what source_memory_ids exist on seed observations
                # NOTE(review): this extra fetch only feeds the debug log below,
                # and makes the observation path 4 DB queries while
                # timings.db_queries is hard-coded to 3 — confirm intent.
                debug_sources = await conn.fetch(
                    f"""
                    SELECT id, source_memory_ids
                    FROM {fq_table("memory_units")}
                    WHERE id = ANY($1::uuid[])
                    """,
                    seed_ids,
                )
                source_ids_found = []
                for row in debug_sources:
                    if row["source_memory_ids"]:
                        source_ids_found.extend(row["source_memory_ids"])
                logger.debug(
                    f"[LinkExpansion] observation graph: {len(seed_ids)} seeds, "
                    f"{len(source_ids_found)} source_memory_ids found"
                )

                entity_rows = await conn.fetch(
                    f"""
                    WITH seed_sources AS (
                        -- Get source memory IDs from seed observations
                        SELECT DISTINCT unnest(source_memory_ids) AS source_id
                        FROM {fq_table("memory_units")}
                        WHERE id = ANY($1::uuid[])
                          AND source_memory_ids IS NOT NULL
                    ),
                    source_entities AS (
                        -- Get entities from those source memories (filtered by frequency)
                        SELECT DISTINCT ue.entity_id
                        FROM seed_sources ss
                        JOIN {fq_table("unit_entities")} ue ON ss.source_id = ue.unit_id
                        JOIN {fq_table("entities")} e ON ue.entity_id = e.id
                        WHERE e.mention_count < $2
                    ),
                    all_connected_sources AS (
                        -- Find ALL world facts sharing those entities (don't exclude seed sources)
                        -- The exclusion happens at the observation level, not the source level
                        SELECT DISTINCT other_ue.unit_id AS source_id
                        FROM source_entities se
                        JOIN {fq_table("unit_entities")} other_ue ON se.entity_id = other_ue.entity_id
                    )
                    -- Find observations derived from connected source memories
                    -- Only exclude the actual seed observations
                    SELECT
                        mu.id, mu.text, mu.context, mu.event_date, mu.occurred_start,
                        mu.occurred_end, mu.mentioned_at, mu.embedding,
                        mu.fact_type, mu.document_id, mu.chunk_id, mu.tags,
                        COUNT(DISTINCT cs.source_id)::float AS score
                    FROM all_connected_sources cs
                    JOIN {fq_table("memory_units")} mu
                        ON mu.source_memory_ids @> ARRAY[cs.source_id]
                    WHERE mu.fact_type = 'observation'
                      AND mu.id != ALL($1::uuid[])
                    GROUP BY mu.id
                    ORDER BY score DESC
                    LIMIT $3
                    """,
                    seed_ids,
                    self.max_entity_frequency,
                    budget,
                )
                logger.debug(f"[LinkExpansion] observation graph: found {len(entity_rows)} connected observations")
            else:
                # For world/experience facts, use direct entity lookup
                # Score = number of entity co-occurrences with any seed.
                entity_rows = await conn.fetch(
                    f"""
                    SELECT
                        mu.id, mu.text, mu.context, mu.event_date, mu.occurred_start,
                        mu.occurred_end, mu.mentioned_at, mu.embedding,
                        mu.fact_type, mu.document_id, mu.chunk_id, mu.tags,
                        COUNT(*)::float AS score
                    FROM {fq_table("unit_entities")} seed_ue
                    JOIN {fq_table("entities")} e ON seed_ue.entity_id = e.id
                    JOIN {fq_table("unit_entities")} other_ue ON seed_ue.entity_id = other_ue.entity_id
                    JOIN {fq_table("memory_units")} mu ON other_ue.unit_id = mu.id
                    WHERE seed_ue.unit_id = ANY($1::uuid[])
                      AND e.mention_count < $2
                      AND mu.id != ALL($1::uuid[])
                      AND mu.fact_type = $3
                    GROUP BY mu.id
                    ORDER BY score DESC
                    LIMIT $4
                    """,
                    seed_ids,
                    self.max_entity_frequency,
                    fact_type,
                    budget,
                )

            # Causal expansion: outgoing causal links from seeds; the +1.0
            # bias scores causal hits above fallback-link hits (whose max is
            # weight * 0.5).
            causal_rows = await conn.fetch(
                f"""
                SELECT DISTINCT ON (mu.id)
                    mu.id, mu.text, mu.context, mu.event_date, mu.occurred_start,
                    mu.occurred_end, mu.mentioned_at, mu.embedding,
                    mu.fact_type, mu.document_id, mu.chunk_id, mu.tags,
                    ml.weight + 1.0 AS score
                FROM {fq_table("memory_links")} ml
                JOIN {fq_table("memory_units")} mu ON ml.to_unit_id = mu.id
                WHERE ml.from_unit_id = ANY($1::uuid[])
                  AND ml.link_type IN ('causes', 'caused_by', 'enables', 'prevents')
                  AND ml.weight >= $2
                  AND mu.fact_type = $3
                ORDER BY mu.id, ml.weight DESC
                LIMIT $4
                """,
                seed_ids,
                self.causal_weight_threshold,
                fact_type,
                budget,
            )

            # Fallback: semantic/temporal/entity links from memory_links table
            # These are secondary to entity links (via unit_entities) and causal links
            # Weight is halved (0.5x) to prioritize primary link types
            # Check both directions: seeds -> others AND others -> seeds
            fallback_rows = await conn.fetch(
                f"""
                WITH outgoing AS (
                    -- Links FROM seeds TO other facts
                    SELECT mu.id, mu.text, mu.context, mu.event_date, mu.occurred_start,
                           mu.occurred_end, mu.mentioned_at, mu.embedding,
                           mu.fact_type, mu.document_id, mu.chunk_id, mu.tags,
                           ml.weight
                    FROM {fq_table("memory_links")} ml
                    JOIN {fq_table("memory_units")} mu ON ml.to_unit_id = mu.id
                    WHERE ml.from_unit_id = ANY($1::uuid[])
                      AND ml.link_type IN ('semantic', 'temporal', 'entity')
                      AND ml.weight >= $2
                      AND mu.fact_type = $3
                      AND mu.id != ALL($1::uuid[])
                ),
                incoming AS (
                    -- Links FROM other facts TO seeds (reverse direction)
                    SELECT mu.id, mu.text, mu.context, mu.event_date, mu.occurred_start,
                           mu.occurred_end, mu.mentioned_at, mu.embedding,
                           mu.fact_type, mu.document_id, mu.chunk_id, mu.tags,
                           ml.weight
                    FROM {fq_table("memory_links")} ml
                    JOIN {fq_table("memory_units")} mu ON ml.from_unit_id = mu.id
                    WHERE ml.to_unit_id = ANY($1::uuid[])
                      AND ml.link_type IN ('semantic', 'temporal', 'entity')
                      AND ml.weight >= $2
                      AND mu.fact_type = $3
                      AND mu.id != ALL($1::uuid[])
                ),
                combined AS (
                    SELECT * FROM outgoing
                    UNION ALL
                    SELECT * FROM incoming
                )
                SELECT DISTINCT ON (id)
                    id, text, context, event_date, occurred_start,
                    occurred_end, mentioned_at, embedding,
                    fact_type, document_id, chunk_id, tags,
                    (MAX(weight) * 0.5) AS score
                FROM combined
                GROUP BY id, text, context, event_date, occurred_start,
                         occurred_end, mentioned_at, embedding,
                         fact_type, document_id, chunk_id, tags
                ORDER BY id, score DESC
                LIMIT $4
                """,
                seed_ids,
                self.causal_weight_threshold,
                fact_type,
                budget,
            )

            timings.edge_load_time = time.time() - query_start
            # NOTE(review): the observation branch above issues 4 fetches
            # (including the debug query); 3 is accurate only for the
            # world/experience path.
            timings.db_queries = 3
            timings.edge_count = len(entity_rows) + len(causal_rows) + len(fallback_rows)

            # Merge results, taking max score per fact
            # Priority: entity links (unit_entities) > causal links > fallback links
            score_map: dict[str, float] = {}
            row_map: dict[str, dict] = {}

            for row in entity_rows:
                fact_id = str(row["id"])
                score_map[fact_id] = max(score_map.get(fact_id, 0), row["score"])
                row_map[fact_id] = dict(row)

            for row in causal_rows:
                fact_id = str(row["id"])
                score_map[fact_id] = max(score_map.get(fact_id, 0), row["score"])
                if fact_id not in row_map:
                    row_map[fact_id] = dict(row)

            for row in fallback_rows:
                fact_id = str(row["id"])
                score_map[fact_id] = max(score_map.get(fact_id, 0), row["score"])
                if fact_id not in row_map:
                    row_map[fact_id] = dict(row)

            # Sort by score and limit
            sorted_ids = sorted(score_map.keys(), key=lambda x: score_map[x], reverse=True)[:budget]
            rows = [row_map[fact_id] for fact_id in sorted_ids]

            # Convert to results
            # NOTE(review): activation is taken from the kept row's own score,
            # which may be lower than the merged max in score_map used for
            # ordering — confirm whether activation should use the max instead.
            results = []
            for row in rows:
                result = RetrievalResult.from_db_row(dict(row))
                result.activation = row["score"]
                results.append(result)

            # Apply tags filtering (graph expansion may reach untagged memories)
            if tags:
                results = filter_results_by_tags(results, tags, match=tags_match)

            timings.result_count = len(results)
            timings.traverse = time.time() - start_time

            logger.debug(
                f"LinkExpansion: {len(results)} results from {len(seed_ids)} seeds "
                f"in {timings.traverse * 1000:.1f}ms (query: {timings.edge_load_time * 1000:.1f}ms)"
            )

            return results, timings
|