mcal-ai 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcal/__init__.py +165 -0
- mcal/backends/__init__.py +42 -0
- mcal/backends/base.py +383 -0
- mcal/baselines/__init__.py +1 -0
- mcal/core/__init__.py +101 -0
- mcal/core/embeddings.py +266 -0
- mcal/core/extraction_cache.py +398 -0
- mcal/core/goal_retriever.py +539 -0
- mcal/core/intent_tracker.py +734 -0
- mcal/core/models.py +445 -0
- mcal/core/rate_limiter.py +372 -0
- mcal/core/reasoning_store.py +1061 -0
- mcal/core/retry.py +188 -0
- mcal/core/storage.py +456 -0
- mcal/core/streaming.py +254 -0
- mcal/core/unified_extractor.py +1466 -0
- mcal/core/vector_index.py +206 -0
- mcal/evaluation/__init__.py +1 -0
- mcal/integrations/__init__.py +88 -0
- mcal/integrations/autogen.py +95 -0
- mcal/integrations/crewai.py +92 -0
- mcal/integrations/langchain.py +112 -0
- mcal/integrations/langgraph.py +50 -0
- mcal/mcal.py +1697 -0
- mcal/providers/bedrock.py +217 -0
- mcal/storage/__init__.py +1 -0
- mcal_ai-0.1.0.dist-info/METADATA +319 -0
- mcal_ai-0.1.0.dist-info/RECORD +32 -0
- mcal_ai-0.1.0.dist-info/WHEEL +5 -0
- mcal_ai-0.1.0.dist-info/entry_points.txt +2 -0
- mcal_ai-0.1.0.dist-info/licenses/LICENSE +21 -0
- mcal_ai-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,539 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Goal-Aware Retriever
|
|
3
|
+
|
|
4
|
+
Retrieves memories based on goal relevance, not just semantic similarity.
|
|
5
|
+
This is Pillar 3 of MCAL: Predictive/Goal-Aware Context Retrieval.
|
|
6
|
+
|
|
7
|
+
Key capabilities:
|
|
8
|
+
- Multi-factor relevance scoring (semantic + goal + decision + recency)
|
|
9
|
+
- Goal alignment scoring for memories
|
|
10
|
+
- Decision impact tracking
|
|
11
|
+
- Adaptive retrieval based on current objectives
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
import logging
|
|
17
|
+
from datetime import datetime, timedelta, timezone
|
|
18
|
+
from typing import Optional, Protocol
|
|
19
|
+
|
|
20
|
+
import numpy as np
|
|
21
|
+
|
|
22
|
+
from .models import (
|
|
23
|
+
DecisionTrail,
|
|
24
|
+
IntentNode,
|
|
25
|
+
Memory,
|
|
26
|
+
MemoryType,
|
|
27
|
+
RetrievalConfig,
|
|
28
|
+
RetrievalResult,
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def _utc_now() -> datetime:
|
|
33
|
+
"""Return current UTC time (timezone-aware)."""
|
|
34
|
+
return datetime.now(timezone.utc)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
logger = logging.getLogger(__name__)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
# =============================================================================
|
|
41
|
+
# Embedding Client Protocol
|
|
42
|
+
# =============================================================================
|
|
43
|
+
|
|
44
|
+
class EmbeddingClient(Protocol):
    """Structural (duck-typed) interface for embedding providers.

    Any object exposing matching async ``embed`` / ``embed_batch`` methods
    satisfies this protocol — no inheritance required.
    """

    async def embed(self, text: str) -> list[float]:
        """Generate embedding for text.

        Args:
            text: Input string to embed.

        Returns:
            The embedding as a list of floats.
        """
        ...

    async def embed_batch(self, texts: list[str]) -> list[list[float]]:
        """Generate embeddings for multiple texts.

        Args:
            texts: Input strings to embed.

        Returns:
            One embedding per input, in the same order.
        """
        ...
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class LLMClient(Protocol):
    """Structural (duck-typed) interface for chat/completion providers."""

    async def complete(self, prompt: str, system: Optional[str] = None) -> str:
        """Generate a completion for the given prompt.

        Args:
            prompt: The user prompt.
            system: Optional system instruction prepended to the request.

        Returns:
            The model's raw text response.
        """
        ...
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
# =============================================================================
|
|
65
|
+
# Prompts
|
|
66
|
+
# =============================================================================
|
|
67
|
+
|
|
68
|
+
# System prompt instructing the LLM how to judge memory-to-goal relevance.
# Used together with GOAL_ALIGNMENT_PROMPT by GoalRetriever._compute_goal_alignment.
GOAL_ALIGNMENT_SYSTEM = """You are an expert at assessing relevance between memories and goals.

Given a memory and a goal, rate how relevant the memory is to achieving that goal.
Consider:
- Direct relevance: Does the memory directly help with the goal?
- Indirect relevance: Does the memory provide context needed for the goal?
- Dependency: Is this memory required before the goal can be achieved?

Output a score from 0.0 to 1.0 and brief explanation."""

# User prompt template; filled via .format(memory=..., goal=...).
# Doubled braces {{ }} escape the literal JSON braces for str.format.
GOAL_ALIGNMENT_PROMPT = """Rate the relevance of this memory to this goal.

MEMORY: {memory}

GOAL: {goal}

Output as JSON:
{{
    "score": 0.0-1.0,
    "reason": "brief explanation"
}}

Output ONLY valid JSON."""
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
# =============================================================================
|
|
94
|
+
# Goal-Aware Retriever
|
|
95
|
+
# =============================================================================
|
|
96
|
+
|
|
97
|
+
class GoalRetriever:
    """
    Retrieves memories based on goal relevance, not just semantic similarity.

    The key insight: traditional RAG asks "what's similar to this query?"
    Goal-aware retrieval asks "what's needed to achieve this objective?"

    Scoring formula:
        Score = α × semantic_similarity
              + β × goal_alignment
              + γ × recency_decay
              + δ × reference_frequency
              + ε × decision_impact
              + ζ × user_importance

    NOTE(review): when an LLM client is configured, goal alignment is scored
    with one LLM call per candidate memory per retrieval — expensive for
    large stores; consider batching or caching upstream.

    Usage:
        retriever = GoalRetriever(embedding_client, llm_client)

        # Add memories
        await retriever.add_memory(memory)

        # Retrieve with goal awareness
        results = await retriever.retrieve(
            query="What should we focus on next?",
            active_goals=current_goals,
            decision_trail=recent_decisions,
        )
    """

    def __init__(
        self,
        embedding_client: EmbeddingClient,
        llm_client: Optional[LLMClient] = None,
        config: Optional[RetrievalConfig] = None
    ):
        """
        Initialize the retriever.

        Args:
            embedding_client: Client for generating embeddings
            llm_client: Optional LLM client for goal alignment scoring
            config: Retrieval configuration (defaults to RetrievalConfig())
        """
        self.embedding_client = embedding_client
        self.llm_client = llm_client
        self.config = config or RetrievalConfig()

        # Primary store plus a parallel cache of embeddings as numpy arrays
        # so cosine similarity never re-converts per query.
        self.memories: dict[str, Memory] = {}
        self._embeddings: dict[str, np.ndarray] = {}

        # Reverse indices for O(1) lookup of memories by linked entity.
        self._decision_memories: dict[str, list[str]] = {}  # decision_id -> memory_ids
        self._goal_memories: dict[str, list[str]] = {}  # goal_id -> memory_ids

    async def add_memory(self, memory: Memory) -> str:
        """
        Add a memory to the store, embedding it if necessary.

        Args:
            memory: Memory to add (mutated in place if it lacks an embedding)

        Returns:
            Memory ID
        """
        # Lazily generate the embedding only when the caller didn't supply one.
        if memory.embedding is None:
            memory.embedding = await self.embedding_client.embed(memory.content)

        self.memories[memory.id] = memory
        self._embeddings[memory.id] = np.array(memory.embedding)

        # Maintain reverse indices; setdefault avoids the explicit
        # "create empty list if missing" branch.
        if memory.decision_trail_id:
            self._decision_memories.setdefault(memory.decision_trail_id, []).append(memory.id)
        if memory.intent_node_id:
            self._goal_memories.setdefault(memory.intent_node_id, []).append(memory.id)

        return memory.id

    async def retrieve(
        self,
        query: str,
        active_goals: Optional[list[IntentNode]] = None,
        decision_trail: Optional[list[DecisionTrail]] = None,
        config: Optional[RetrievalConfig] = None
    ) -> list[RetrievalResult]:
        """
        Retrieve memories using multi-factor relevance scoring.

        Args:
            query: Query string
            active_goals: Currently active goals for goal alignment
            decision_trail: Recent decisions for decision impact scoring
            config: Override default retrieval config

        Returns:
            List of RetrievalResult sorted by relevance (highest first),
            truncated to config.max_results
        """
        config = config or self.config

        if not self.memories:
            return []

        query_embedding = np.array(await self.embedding_client.embed(query))

        # Score every candidate memory.
        results: list[RetrievalResult] = []
        for memory in self.memories.values():
            # Honor the optional memory-type filter.
            if config.memory_types and memory.type not in config.memory_types:
                continue

            scores = await self._calculate_scores(
                memory=memory,
                query=query,
                query_embedding=query_embedding,
                active_goals=active_goals,
                decision_trail=decision_trail,
                config=config
            )

            # Weighted combination of the component scores.
            total_score = (
                config.semantic_weight * scores["semantic"] +
                config.goal_alignment_weight * scores["goal_alignment"] +
                config.recency_weight * scores["recency"] +
                config.reference_weight * scores["reference"] +
                config.decision_impact_weight * scores["decision_impact"]
            )

            # ζ: flat bonus for user-marked items.
            if memory.user_marked:
                total_score += 0.1  # Bonus for user-marked items

            if total_score >= config.min_score:
                results.append(RetrievalResult(
                    memory=memory,
                    score=total_score,
                    score_breakdown=scores
                ))

        # Sort by score and limit.
        results.sort(key=lambda r: r.score, reverse=True)
        results = results[:config.max_results]

        # BUGFIX: only memories actually returned are marked as accessed.
        # Previously every scanned memory was touched on each retrieval,
        # which refreshed recency and inflated reference counts across the
        # whole store — neutralizing the recency and reference factors.
        for result in results:
            result.memory.access()

        # Lazy %-formatting: the string is only built if the level is enabled.
        logger.info("Retrieved %d memories for query", len(results))

        return results

    async def retrieve_for_goal(
        self,
        goal: IntentNode,
        config: Optional[RetrievalConfig] = None
    ) -> list[RetrievalResult]:
        """
        Retrieve all memories relevant to a specific goal.

        Convenience wrapper: uses the goal's own content as the query and
        the goal itself as the single active goal.

        Args:
            goal: Goal to retrieve memories for
            config: Retrieval configuration

        Returns:
            Memories relevant to the goal
        """
        return await self.retrieve(
            query=goal.content,
            active_goals=[goal],
            config=config
        )

    async def _calculate_scores(
        self,
        memory: Memory,
        query: str,
        query_embedding: np.ndarray,
        active_goals: Optional[list[IntentNode]],
        decision_trail: Optional[list[DecisionTrail]],
        config: RetrievalConfig
    ) -> dict[str, float]:
        """Calculate all component scores for a memory.

        Returns a dict with keys: semantic, goal_alignment, recency,
        reference, decision_impact (each in [0, 1]).
        """
        scores: dict[str, float] = {}

        # 1. Semantic similarity (cosine similarity against the query).
        memory_embedding = self._embeddings.get(memory.id)
        if memory_embedding is not None:
            scores["semantic"] = self._cosine_similarity(query_embedding, memory_embedding)
        else:
            scores["semantic"] = 0.0

        # 2. Goal alignment: LLM-scored when possible, heuristic otherwise.
        if active_goals and self.llm_client:
            scores["goal_alignment"] = await self._compute_goal_alignment(
                memory, active_goals
            )
        else:
            # Fast heuristic: a memory explicitly linked to an active goal
            # is strongly aligned; otherwise fall back to scaled semantics.
            goal_ids = [g.id for g in (active_goals or [])]
            if memory.intent_node_id and memory.intent_node_id in goal_ids:
                scores["goal_alignment"] = 0.8
            else:
                scores["goal_alignment"] = scores["semantic"] * 0.5  # Fallback

        # 3. Recency decay.
        scores["recency"] = self._compute_recency_score(memory)

        # 4. Reference frequency.
        scores["reference"] = self._compute_reference_score(memory)

        # 5. Decision impact (only when a decision trail is provided).
        if decision_trail:
            scores["decision_impact"] = self._compute_decision_impact(
                memory, decision_trail
            )
        else:
            scores["decision_impact"] = 0.0

        return scores

    async def _compute_goal_alignment(
        self,
        memory: Memory,
        goals: list[IntentNode]
    ) -> float:
        """Compute goal alignment score using the LLM.

        Returns 0.0 when no LLM client / goals are available or when the
        LLM response cannot be parsed (best-effort, never raises).
        """
        if not self.llm_client or not goals:
            return 0.0

        # For efficiency, only compute for the top goal (could be extended).
        top_goal = goals[0]

        prompt = GOAL_ALIGNMENT_PROMPT.format(
            memory=memory.content,
            goal=top_goal.content
        )

        try:
            response = await self.llm_client.complete(
                prompt, system=GOAL_ALIGNMENT_SYSTEM
            )

            import json
            response = response.strip()
            # Strip a markdown code fence (``` or ```json) if present.
            if response.startswith("```"):
                response = response.split("```")[1]
                if response.startswith("json"):
                    response = response[4:]

            data = json.loads(response)
            return float(data.get("score", 0.0))

        except Exception as e:
            # Deliberate best-effort: alignment scoring must never break
            # retrieval, so any failure degrades to a neutral 0.0.
            logger.warning("Failed to compute goal alignment: %s", e)
            return 0.0

    def _compute_recency_score(self, memory: Memory) -> float:
        """
        Compute recency score with decay.

        Uses exponential decay: score = e^(-λt)
        where t is days since last access and λ is the decay rate.
        """
        days_old = (_utc_now() - memory.last_accessed).total_seconds() / 86400
        decay_rate = 0.1  # Configurable
        return np.exp(-decay_rate * days_old)

    def _compute_reference_score(self, memory: Memory) -> float:
        """
        Compute score based on reference frequency.

        Uses log scaling (log1p / 5, capped at 1.0) to prevent
        high-frequency items from dominating.
        """
        if memory.reference_count == 0:
            return 0.0
        return min(1.0, np.log1p(memory.reference_count) / 5.0)

    def _compute_decision_impact(
        self,
        memory: Memory,
        decisions: list[DecisionTrail]
    ) -> float:
        """
        Compute how much this memory impacted decisions.

        A memory that was cited as evidence in decisions is more valuable.
        Returns the fraction of decisions citing this memory, capped at 1.0.
        """
        impact_count = 0
        for decision in decisions:
            # A memory "impacted" a decision if any evidence item shares
            # its turn_id; count each decision at most once.
            for evidence in decision.evidence:
                if evidence.turn_id and memory.turn_id == evidence.turn_id:
                    impact_count += 1
                    break

        if impact_count == 0:
            return 0.0

        return min(1.0, impact_count / len(decisions))

    def _cosine_similarity(self, a: np.ndarray, b: np.ndarray) -> float:
        """Compute cosine similarity between two vectors.

        Returns 0.0 for zero-norm inputs instead of dividing by zero.
        """
        norm_a = np.linalg.norm(a)
        norm_b = np.linalg.norm(b)

        if norm_a == 0 or norm_b == 0:
            return 0.0

        return float(np.dot(a, b) / (norm_a * norm_b))

    def get_memories_for_decision(self, decision_id: str) -> list[Memory]:
        """Get all memories linked to a decision (stale IDs are skipped)."""
        memory_ids = self._decision_memories.get(decision_id, [])
        return [self.memories[mid] for mid in memory_ids if mid in self.memories]

    def get_memories_for_goal(self, goal_id: str) -> list[Memory]:
        """Get all memories linked to a goal (stale IDs are skipped)."""
        memory_ids = self._goal_memories.get(goal_id, [])
        return [self.memories[mid] for mid in memory_ids if mid in self.memories]
|
|
428
|
+
|
|
429
|
+
|
|
430
|
+
# =============================================================================
|
|
431
|
+
# Context Assembler
|
|
432
|
+
# =============================================================================
|
|
433
|
+
|
|
434
|
+
class ContextAssembler:
    """
    Assembles context from retrieved memories for LLM input.

    Produces a plain-text context string with headed sections for active
    goals, key decisions, and retrieved memories.

    NOTE(review): ``max_tokens`` is stored but not currently enforced —
    the assembled context is not truncated to the budget.
    """

    def __init__(self, max_tokens: int = 4000):
        """
        Initialize the context assembler.

        Args:
            max_tokens: Maximum tokens for assembled context
        """
        self.max_tokens = max_tokens

    def assemble(
        self,
        retrieved: list[RetrievalResult],
        active_goals: Optional[list[IntentNode]] = None,
        decisions: Optional[list[DecisionTrail]] = None,
        include_goals: bool = True,
        include_decisions: bool = True
    ) -> str:
        """
        Assemble context from retrieved memories and structured data.

        Args:
            retrieved: Retrieved memory results
            active_goals: Active goals to include
            decisions: Relevant decisions to include
            include_goals: Whether to include goal summary
            include_decisions: Whether to include decision summary

        Returns:
            Assembled context string (empty when there is nothing to show)
        """
        # Collect (header, body) pairs in display order:
        # goals first, then decisions, then raw memories.
        sections: list[tuple[str, str]] = []
        if include_goals and active_goals:
            sections.append(("ACTIVE GOALS", self._format_goals(active_goals)))
        if include_decisions and decisions:
            sections.append(("KEY DECISIONS", self._format_decisions(decisions)))
        if retrieved:
            sections.append(("RELEVANT CONTEXT", self._format_memories(retrieved)))

        # Render each section as header, body, blank separator line.
        pieces: list[str] = []
        for header, body in sections:
            pieces.extend((f"### {header} ###", body, ""))
        return "\n".join(pieces)

    def _format_goals(self, goals: list[IntentNode]) -> str:
        """Render goals as one bullet per goal with a status emoji."""
        markers = {
            "active": "🔵",
            "completed": "✅",
            "pending": "⏳"
        }
        return "\n".join(
            f"- {markers.get(goal.status.value, '')} [{goal.type.value}] {goal.content}"
            for goal in goals
        )

    def _format_decisions(self, decisions: list[DecisionTrail]) -> str:
        """Render up to five decisions with rationale and alternatives."""
        rendered: list[str] = []
        for decision in decisions[:5]:  # Limit to top 5
            rendered.append(f"DECISION: {decision.decision}")
            rendered.append(f" Rationale: {decision.rationale}")
            if decision.alternatives:
                options = [alt.option for alt in decision.alternatives[:3]]
                rendered.append(f" Alternatives considered: {', '.join(options)}")
            rendered.append("")
        return "\n".join(rendered)

    def _format_memories(self, results: list[RetrievalResult]) -> str:
        """Render each memory with its type tag and relevance score."""
        rendered: list[str] = []
        for result in results:
            rendered.append(
                f"[{result.memory.type.value}] (relevance: {result.score:.2f})"
            )
            rendered.append(f" {result.memory.content}")
            rendered.append("")
        return "\n".join(rendered)
|