mcal-ai 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcal/__init__.py +165 -0
- mcal/backends/__init__.py +42 -0
- mcal/backends/base.py +383 -0
- mcal/baselines/__init__.py +1 -0
- mcal/core/__init__.py +101 -0
- mcal/core/embeddings.py +266 -0
- mcal/core/extraction_cache.py +398 -0
- mcal/core/goal_retriever.py +539 -0
- mcal/core/intent_tracker.py +734 -0
- mcal/core/models.py +445 -0
- mcal/core/rate_limiter.py +372 -0
- mcal/core/reasoning_store.py +1061 -0
- mcal/core/retry.py +188 -0
- mcal/core/storage.py +456 -0
- mcal/core/streaming.py +254 -0
- mcal/core/unified_extractor.py +1466 -0
- mcal/core/vector_index.py +206 -0
- mcal/evaluation/__init__.py +1 -0
- mcal/integrations/__init__.py +88 -0
- mcal/integrations/autogen.py +95 -0
- mcal/integrations/crewai.py +92 -0
- mcal/integrations/langchain.py +112 -0
- mcal/integrations/langgraph.py +50 -0
- mcal/mcal.py +1697 -0
- mcal/providers/bedrock.py +217 -0
- mcal/storage/__init__.py +1 -0
- mcal_ai-0.1.0.dist-info/METADATA +319 -0
- mcal_ai-0.1.0.dist-info/RECORD +32 -0
- mcal_ai-0.1.0.dist-info/WHEEL +5 -0
- mcal_ai-0.1.0.dist-info/entry_points.txt +2 -0
- mcal_ai-0.1.0.dist-info/licenses/LICENSE +21 -0
- mcal_ai-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1061 @@
"""
Reasoning Store

Extracts and persists decision trails with full reasoning context.
This is Pillar 2 of MCAL: Reasoning Chain Preservation.

Key capabilities:
- Extract decisions with rationale from conversations
- Store alternatives considered and why they were rejected
- Track evidence supporting decisions
- Link decisions to goals and other decisions
"""

from __future__ import annotations

import json
import logging
from typing import Optional, Protocol

from .models import (
    Alternative,
    DecisionTrail,
    DecisionReconciliation,
    Evidence,
    EvidenceSource,
    InvalidatedDecision,
    ModifiedDecision,
    NewDecision,
    TradeOff,
    Turn,
    VerifiedDecision,
)

logger = logging.getLogger(__name__)


# =============================================================================
# LLM Client Protocol
# =============================================================================

class LLMClient(Protocol):
    """Protocol for LLM client implementations."""

    async def complete(self, prompt: str, system: Optional[str] = None) -> str:
        """Generate a completion for the given prompt."""
        ...


# =============================================================================
# Prompts
# =============================================================================

DECISION_EXTRACTION_SYSTEM = """You are an expert at analyzing conversations to extract decision-making patterns.

Your task is to identify decisions that were made and capture the FULL reasoning context:
- What was the decision?
- What alternatives were considered?
- Why was this option chosen over alternatives?
- What evidence supported the decision?
- What trade-offs were acknowledged?

This is crucial for preserving the "WHY" behind decisions, not just the "WHAT".

Output your analysis as valid JSON."""

DECISION_EXTRACTION_PROMPT = """Analyze this conversation and extract all decisions with their reasoning context.

CONVERSATION:
{conversation}

For each decision, extract:
1. The decision itself (what was chosen)
2. The context (what situation prompted this decision)
3. Alternatives that were considered (even if briefly)
4. The rationale (WHY this was chosen)
5. Evidence supporting the decision
6. Trade-offs acknowledged

Output as JSON:
{{
  "decisions": [
    {{
      "decision": "The choice that was made",
      "context": "The situation/problem being addressed",
      "alternatives": [
        {{
          "option": "Alternative option",
          "pros": ["advantage 1"],
          "cons": ["disadvantage 1"],
          "rejection_reason": "Why this wasn't chosen"
        }}
      ],
      "rationale": "Why the chosen option was selected",
      "evidence": [
        {{
          "claim": "Supporting fact or argument",
          "source": "user_stated|inferred|external",
          "turn_reference": "turn_N or null"
        }}
      ],
      "trade_offs": [
        {{
          "gained": "What was gained",
          "sacrificed": "What was given up",
          "justification": "Why this trade-off is acceptable"
        }}
      ],
      "confidence": 0.0-1.0,
      "related_goal": "Goal this decision serves (if identifiable)"
    }}
  ]
}}

Be thorough - capture implicit decisions too, not just explicit ones.
Output ONLY valid JSON, no explanation."""

DECISION_EXTRACTION_WITH_FULL_CONTEXT_PROMPT = """Analyze this conversation and extract all NEW decisions with their reasoning context.

=== USER'S ACTIVE GOALS ===
{active_goals}

=== PREVIOUS DECISIONS (from earlier sessions) ===
{previous_decisions}

=== CURRENT SESSION CONVERSATION ===
{conversation}

IMPORTANT CONTEXT:
- This is a CONTINUATION of an ongoing multi-session conversation
- The user has established goals and made prior decisions (listed above)
- Look for NEW decisions in this session that:
  * Advance toward the active goals
  * Update, refine, or reverse previous decisions
  * Represent choices between alternatives
  * Include implicit decisions (tool/approach/timing choices)

For each NEW decision found in this session, extract:
1. The decision itself (what was chosen)
2. The context (what prompted this decision)
3. Alternatives considered (even briefly mentioned)
4. The rationale (WHY this was chosen)
5. Evidence supporting the decision
6. Trade-offs acknowledged
7. Which goal this serves
8. Related previous decision (if any)

Output as JSON:
{{
  "decisions": [
    {{
      "decision": "The choice that was made",
      "context": "The situation/problem being addressed",
      "alternatives": [
        {{
          "option": "Alternative option",
          "pros": ["advantage 1"],
          "cons": ["disadvantage 1"],
          "rejection_reason": "Why this wasn't chosen"
        }}
      ],
      "rationale": "Why the chosen option was selected",
      "evidence": [
        {{
          "claim": "Supporting fact or argument",
          "source": "user_stated|inferred|external",
          "turn_reference": "turn_N or null"
        }}
      ],
      "trade_offs": [
        {{
          "gained": "What was gained",
          "sacrificed": "What was given up",
          "justification": "Why this trade-off is acceptable"
        }}
      ],
      "confidence": 0.0-1.0,
      "related_goal": "Goal this decision serves (if identifiable)",
      "related_previous_decision": "ID or summary of related previous decision if any"
    }}
  ]
}}

Only extract NEW decisions from the CURRENT SESSION - do not repeat previous decisions.
If no new decisions are found, return: {{"decisions": []}}
Output ONLY valid JSON, no explanation."""


# =============================================================================
# Decision Carry-Forward Prompt (Issue #1 Fix)
# =============================================================================

DECISION_CARRY_FORWARD_SYSTEM = """You are an expert at analyzing conversations and tracking decision evolution.

Your task is to RECONCILE prior decisions with new conversation context:
- Verify which prior decisions are still valid
- Identify any modifications or updates to prior decisions
- Extract genuinely new decisions
- Flag any decisions that were reversed or abandoned

This is crucial for maintaining accurate decision state across multiple conversation sessions.

Output your analysis as valid JSON."""

DECISION_CARRY_FORWARD_PROMPT = """You are analyzing a CONTINUATION of an ongoing conversation.

=== PRIOR DECISIONS (from earlier sessions) ===
{prior_decisions}

=== ACTIVE GOALS ===
{active_goals}

=== CURRENT SESSION MESSAGES ===
{conversation}

Your task is to RECONCILE the decision state by analyzing how this session affects prior decisions:

1. **VERIFIED**: Which prior decisions are confirmed/still valid?
   - If a prior decision is referenced positively or built upon, it's verified
   - If a prior decision is simply not mentioned, mark it verified with lower confidence (0.5)

2. **MODIFIED**: Which prior decisions were updated or refined?
   - Include decisions where the approach changed but the goal remains
   - Include decisions that were expanded or narrowed in scope

3. **NEW**: What genuinely NEW decisions were made in THIS session?
   - Only include choices that weren't captured in prior decisions
   - Include both explicit and implicit decisions

4. **INVALIDATED**: Which prior decisions were reversed or abandoned?
   - Include decisions the user explicitly changed their mind about
   - Include decisions that contradict new information

Output as JSON:
{{
  "verified": [
    {{
      "decision_id": "ID from prior decisions",
      "still_valid": true,
      "confidence": 0.0-1.0,
      "supporting_evidence": "Quote or reference from this session, or null if not mentioned"
    }}
  ],
  "modified": [
    {{
      "original_decision_id": "ID being modified",
      "original_summary": "Brief summary of original decision",
      "new_decision": "The updated decision",
      "modification_type": "refined|updated|partially_changed",
      "reason": "Why it changed",
      "confidence": 0.0-1.0
    }}
  ],
  "new": [
    {{
      "decision": "The new choice made",
      "context": "Situation prompting this",
      "rationale": "Why this was chosen",
      "confidence": 0.0-1.0,
      "related_goal": "Goal this serves, or null",
      "alternatives": [
        {{
          "option": "Alternative considered",
          "rejection_reason": "Why not chosen"
        }}
      ],
      "evidence": [
        {{
          "claim": "Supporting fact",
          "source": "user_stated|inferred|external"
        }}
      ],
      "trade_offs": [
        {{
          "gained": "Benefit",
          "sacrificed": "Cost"
        }}
      ]
    }}
  ],
  "invalidated": [
    {{
      "decision_id": "ID being invalidated",
      "reason": "Why it's no longer valid"
    }}
  ]
}}

IMPORTANT:
- EVERY prior decision must appear in exactly ONE category: verified, modified, or invalidated
- Use empty arrays [] for categories with no items
- Be conservative with "invalidated" - only use when clearly contradicted
- "new" should only contain genuinely NEW decisions not captured before

Output ONLY valid JSON, no explanation."""


# =============================================================================
# Reasoning Store
# =============================================================================

class ReasoningStore:
    """
    Extracts and stores decision trails with full reasoning context.

    Unlike simple fact storage (e.g., "User chose PostgreSQL"), this captures:
    - What alternatives were considered
    - Why the chosen option was selected
    - What evidence supported the decision
    - What trade-offs were made

    Usage:
        store = ReasoningStore(llm_client)

        # Extract decisions from conversation
        decisions = await store.extract_decisions(turns)

        # Store a decision
        store.add_decision(decision_trail)

        # Query decisions related to a goal
        related = store.get_decisions_for_goal(goal_id)
    """

    def __init__(self, llm_client: LLMClient):
        """
        Initialize the reasoning store.

        Args:
            llm_client: LLM client for extraction
        """
        self.llm = llm_client
        self.decisions: dict[str, DecisionTrail] = {}
        self._goal_index: dict[str, list[str]] = {}  # goal_id -> [decision_ids]

    async def extract_decisions(
        self,
        turns: list[Turn],
        goal_ids: Optional[list[str]] = None,
        existing_decisions: Optional[list[DecisionTrail]] = None,
        active_goals_context: Optional[str] = None
    ) -> list[DecisionTrail]:
        """
        Extract decision trails from conversation.

        For continuation sessions with existing decisions, uses the
        Decision Carry-Forward approach to reconcile prior decisions
        with new session context.

        Args:
            turns: List of conversation turns
            goal_ids: Optional list of goal IDs to link decisions to
            existing_decisions: Optional list of existing decisions for context
            active_goals_context: Optional string describing active goals for context

        Returns:
            List of extracted DecisionTrail objects
        """
        # Use carry-forward for continuation sessions
        if existing_decisions and len(existing_decisions) > 0:
            logger.info(f"Using Decision Carry-Forward with {len(existing_decisions)} prior decisions")
            return await self._reconcile_and_extract(
                turns=turns,
                existing_decisions=existing_decisions,
                active_goals_context=active_goals_context,
                goal_ids=goal_ids
            )

        # First session - use smart formatting for long conversations
        conversation = await self._format_conversation_smart(turns)

        if active_goals_context:
            # Have goals but no prior decisions (first session with goals)
            prompt = DECISION_EXTRACTION_WITH_FULL_CONTEXT_PROMPT.format(
                active_goals=active_goals_context,
                previous_decisions="None (this is the first session)",
                conversation=conversation
            )
            logger.debug(f"Using goal-aware extraction (first session)")
        else:
            prompt = DECISION_EXTRACTION_PROMPT.format(conversation=conversation)

        # Call LLM for extraction
        response = await self.llm.complete(prompt, system=DECISION_EXTRACTION_SYSTEM)

        # Parse response
        try:
            data = json.loads(self._clean_json_response(response))
        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse decision extraction response: {e}")
            logger.debug(f"Response was: {response}")
            return []

        # Build decision trails
        decisions = []
        for decision_data in data.get("decisions", []):
            decision = self._build_decision_trail(decision_data, turns)
            decisions.append(decision)
            self.add_decision(decision)

        logger.info(f"Extracted {len(decisions)} decisions from conversation")

        return decisions

    async def _reconcile_and_extract(
        self,
        turns: list[Turn],
        existing_decisions: list[DecisionTrail],
        active_goals_context: Optional[str] = None,
        goal_ids: Optional[list[str]] = None
    ) -> list[DecisionTrail]:
        """
        Use Decision Carry-Forward to reconcile prior decisions with new session.

        This is the core fix for Issue #1: instead of asking for "NEW decisions only"
        (which returns empty), we ask the LLM to verify, modify, and extend.

        Args:
            turns: Current session's conversation turns
            existing_decisions: Prior decisions to reconcile
            active_goals_context: String describing active goals
            goal_ids: Goal IDs to link new decisions to

        Returns:
            List of all valid decisions after reconciliation
        """
        conversation = await self._format_conversation_smart(turns)
        prior_decisions = self._format_decisions_for_carry_forward(existing_decisions)
        goals_context = active_goals_context or "None specified"

        prompt = DECISION_CARRY_FORWARD_PROMPT.format(
            prior_decisions=prior_decisions,
            active_goals=goals_context,
            conversation=conversation
        )

        # Call LLM for reconciliation
        response = await self.llm.complete(prompt, system=DECISION_CARRY_FORWARD_SYSTEM)

        # Parse reconciliation result
        try:
            data = json.loads(self._clean_json_response(response))
        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse carry-forward response: {e}")
            logger.debug(f"Response was: {response}")
            # Fallback: return existing decisions unchanged
            return existing_decisions

        # Build reconciliation result
        reconciliation = self._parse_reconciliation(data, existing_decisions)

        # Apply reconciliation to get final decision list
        final_decisions = self._apply_reconciliation(
            existing_decisions=existing_decisions,
            reconciliation=reconciliation,
            turns=turns,
            goal_ids=goal_ids
        )

        logger.info(
            f"Carry-Forward: {len(reconciliation.verified)} verified, "
            f"{len(reconciliation.modified)} modified, "
            f"{len(reconciliation.new)} new, "
            f"{len(reconciliation.invalidated)} invalidated"
        )

        return final_decisions

    def _format_decisions_for_carry_forward(self, decisions: list[DecisionTrail]) -> str:
        """
        Format decisions with IDs for carry-forward prompt.

        Args:
            decisions: List of prior decisions

        Returns:
            Formatted string with decision IDs for reference
        """
        if not decisions:
            return "None"

        lines = []
        for decision in decisions:
            lines.append(f"[{decision.id}] Decision: {decision.decision}")
            lines.append(f"    Context: {decision.context}")
            lines.append(f"    Rationale: {decision.rationale}")
            if decision.related_goals:
                lines.append(f"    Goals: {', '.join(decision.related_goals)}")
            if decision.alternatives:
                alts = ", ".join(a.option for a in decision.alternatives[:3])
                lines.append(f"    Alternatives considered: {alts}")
            lines.append("")

        return "\n".join(lines)

    def _parse_reconciliation(
        self,
        data: dict,
        existing_decisions: list[DecisionTrail]
    ) -> DecisionReconciliation:
        """
        Parse LLM response into DecisionReconciliation object.

        Args:
            data: Parsed JSON from LLM
            existing_decisions: Prior decisions for ID lookup

        Returns:
            DecisionReconciliation object
        """
        # Build lookup for existing decision IDs
        existing_ids = {d.id for d in existing_decisions}

        # Parse verified
        verified = []
        for v in data.get("verified", []):
            decision_id = v.get("decision_id", "")
            if decision_id in existing_ids:
                # Handle LLM returning float/int instead of bool for still_valid
                still_valid_raw = v.get("still_valid", True)
                if isinstance(still_valid_raw, (int, float)):
                    still_valid = still_valid_raw >= 0.5
                else:
                    still_valid = bool(still_valid_raw)

                verified.append(VerifiedDecision(
                    decision_id=decision_id,
                    still_valid=still_valid,
                    confidence=float(v.get("confidence", 0.8)),
                    supporting_evidence=v.get("supporting_evidence")
                ))

        # Parse modified
        modified = []
        for m in data.get("modified", []):
            original_id = m.get("original_decision_id", "")
            if original_id in existing_ids:
                modified.append(ModifiedDecision(
                    original_decision_id=original_id,
                    original_summary=m.get("original_summary", ""),
                    new_decision=m.get("new_decision", ""),
                    modification_type=m.get("modification_type", "refined"),
                    reason=m.get("reason", ""),
                    confidence=m.get("confidence", 0.8)
                ))

        # Parse new
        new = []
        for n in data.get("new", []):
            new.append(NewDecision(
                decision=n.get("decision", ""),
                context=n.get("context", ""),
                rationale=n.get("rationale", ""),
                confidence=n.get("confidence", 0.8),
                related_goal=n.get("related_goal"),
                alternatives=n.get("alternatives", []),
                evidence=n.get("evidence", []),
                trade_offs=n.get("trade_offs", [])
            ))

        # Parse invalidated
        invalidated = []
        for i in data.get("invalidated", []):
            decision_id = i.get("decision_id", "")
            if decision_id in existing_ids:
                invalidated.append(InvalidatedDecision(
                    decision_id=decision_id,
                    reason=i.get("reason", "")
                ))

        return DecisionReconciliation(
            verified=verified,
            modified=modified,
            new=new,
            invalidated=invalidated
        )

    def _apply_reconciliation(
        self,
        existing_decisions: list[DecisionTrail],
        reconciliation: DecisionReconciliation,
        turns: list[Turn],
        goal_ids: Optional[list[str]] = None
    ) -> list[DecisionTrail]:
        """
        Apply reconciliation to build final decision list.

        Args:
            existing_decisions: Prior decisions
            reconciliation: Parsed reconciliation result
            turns: Current session turns
            goal_ids: Goal IDs for new decisions

        Returns:
            Final list of valid decisions
        """
        # Build lookup
        existing_by_id = {d.id: d for d in existing_decisions}

        # Track which decisions to keep
        verified_ids = {v.decision_id for v in reconciliation.verified if v.still_valid}
        modified_ids = {m.original_decision_id for m in reconciliation.modified}
        invalidated_ids = {i.decision_id for i in reconciliation.invalidated}

        final_decisions = []

        # Add verified decisions (with updated confidence)
        for verified in reconciliation.verified:
            if verified.still_valid and verified.decision_id in existing_by_id:
                decision = existing_by_id[verified.decision_id]
                # Update confidence if we have supporting evidence
                if verified.supporting_evidence:
                    decision.confidence = max(decision.confidence, verified.confidence)
                final_decisions.append(decision)

        # Add modified decisions (create new decision, invalidate old)
        for modified in reconciliation.modified:
            if modified.original_decision_id in existing_by_id:
                # Create new decision from modified
                new_decision = DecisionTrail(
                    decision=modified.new_decision,
                    context=f"Modified from: {modified.original_summary}",
                    rationale=modified.reason,
                    confidence=modified.confidence,
                    dependencies=[modified.original_decision_id],
                    related_goals=existing_by_id[modified.original_decision_id].related_goals
                )
                final_decisions.append(new_decision)
                self.add_decision(new_decision)

                # Mark original as invalidated
                self.invalidate_decision(modified.original_decision_id, new_decision.id)

        # Add new decisions
        for new in reconciliation.new:
            decision_data = {
                "decision": new.decision,
                "context": new.context,
                "rationale": new.rationale,
                "confidence": new.confidence,
                "related_goal": new.related_goal,
                "alternatives": new.alternatives,
                "evidence": new.evidence,
                "trade_offs": new.trade_offs
            }
            decision = self._build_decision_trail(decision_data, turns)
            if goal_ids:
                decision.related_goals = goal_ids
            final_decisions.append(decision)
            self.add_decision(decision)

        # Decisions in invalidated list are NOT added to final
        # (they're already excluded by not being in verified/modified)

        # For any existing decisions not categorized, assume still valid (conservative)
        categorized_ids = verified_ids | modified_ids | invalidated_ids
        for decision in existing_decisions:
            if decision.id not in categorized_ids:
                logger.warning(f"Decision {decision.id} not categorized, assuming still valid")
                final_decisions.append(decision)

        return final_decisions

    def add_decision(self, decision: DecisionTrail) -> str:
        """
        Add a decision to the store.

        Args:
            decision: DecisionTrail to store

        Returns:
            Decision ID
        """
        self.decisions[decision.id] = decision

        # Update goal index
        for goal_id in decision.related_goals:
            if goal_id not in self._goal_index:
                self._goal_index[goal_id] = []
            self._goal_index[goal_id].append(decision.id)

        return decision.id

    def get_decision(self, decision_id: str) -> Optional[DecisionTrail]:
        """Get a decision by ID."""
        return self.decisions.get(decision_id)

    def get_decisions_for_goal(
        self,
        goal_id: str,
        include_invalidated: bool = False
    ) -> list[DecisionTrail]:
        """
        Get all decisions related to a goal.

        Args:
            goal_id: Goal ID to filter by
            include_invalidated: Whether to include superseded decisions

        Returns:
            List of related decisions
        """
        decision_ids = self._goal_index.get(goal_id, [])
        decisions = [self.decisions[did] for did in decision_ids if did in self.decisions]

        if not include_invalidated:
            decisions = [d for d in decisions if d.is_valid]

        return decisions

    def get_valid_decisions(self) -> list[DecisionTrail]:
        """Get all currently valid (not superseded) decisions."""
        return [d for d in self.decisions.values() if d.is_valid]

    def get_decisions_with_alternative(
        self,
        alternative_keyword: str
    ) -> list[DecisionTrail]:
        """
        Find decisions where a specific alternative was considered.

        Useful for answering questions like "Why didn't we use MongoDB?"

        Args:
            alternative_keyword: Keyword to search in alternatives

        Returns:
            Decisions where this alternative was considered
        """
        results = []
        keyword_lower = alternative_keyword.lower()

        for decision in self.decisions.values():
            for alt in decision.alternatives:
                if keyword_lower in alt.option.lower():
                    results.append(decision)
                    break

        return results

    def invalidate_decision(
        self,
        decision_id: str,
        replacement_id: str
    ) -> None:
        """
        Mark a decision as superseded by a new decision.

        Args:
            decision_id: ID of decision to invalidate
            replacement_id: ID of decision that replaces it
        """
        if decision_id in self.decisions:
            self.decisions[decision_id].invalidate(replacement_id)

    def get_decision_chain(self, decision_id: str) -> list[DecisionTrail]:
        """
        Get the chain of decisions leading to this one.

        Follows the dependencies to build the reasoning path.

        Args:
            decision_id: Starting decision ID

        Returns:
            List of decisions in dependency order
        """
        chain = []
        visited = set()

        def traverse(did: str):
            if did in visited or did not in self.decisions:
                return
            visited.add(did)

            decision = self.decisions[did]
            for dep_id in decision.dependencies:
                traverse(dep_id)

            chain.append(decision)

        traverse(decision_id)
        return chain

    def _build_decision_trail(
        self,
        data: dict,
        turns: list[Turn]
    ) -> DecisionTrail:
        """Build a DecisionTrail from extracted data."""
        # Build alternatives
        alternatives = []
        for alt_data in data.get("alternatives", []):
            alternatives.append(Alternative(
                option=alt_data.get("option", ""),
                pros=alt_data.get("pros", []),
                cons=alt_data.get("cons", []),
                rejection_reason=alt_data.get("rejection_reason")
            ))

        # Build evidence
        evidence = []
        for ev_data in data.get("evidence", []):
            source_str = ev_data.get("source", "inferred")
            try:
                source = EvidenceSource(source_str)
            except ValueError:
                source = EvidenceSource.INFERRED

            evidence.append(Evidence(
                claim=ev_data.get("claim", ""),
                source=source,
                turn_id=ev_data.get("turn_reference")
            ))

        # Build trade-offs
        trade_offs = []
        for to_data in data.get("trade_offs", []):
            trade_offs.append(TradeOff(
                gained=to_data.get("gained", ""),
                sacrificed=to_data.get("sacrificed", ""),
                justification=to_data.get("justification")
            ))

        # Build related goals
        related_goals = []
        if data.get("related_goal"):
            related_goals.append(data["related_goal"])

        return DecisionTrail(
            decision=data.get("decision", ""),
            context=data.get("context", ""),
            alternatives=alternatives,
            rationale=data.get("rationale", ""),
            evidence=evidence,
            trade_offs=trade_offs,
            confidence=data.get("confidence", 0.8),
            related_goals=related_goals
        )

    def _format_conversation(self, turns: list[Turn]) -> str:
        """Format turns for prompt."""
        lines = []
        for i, turn in enumerate(turns):
            lines.append(f"[Turn {i+1}] [{turn.role}]: {turn.content}")
        return "\n\n".join(lines)

    def _estimate_tokens(self, text: str) -> int:
        """
        Estimate token count for text.

        Uses rough approximation of ~4 characters per token for English text.
        This is conservative to avoid context overflow.

        Args:
            text: Text to estimate tokens for

        Returns:
            Estimated token count
        """
        return len(text) // 4

    async def _summarize_turns(self, turns: list[Turn]) -> str:
        """
        Summarize a batch of conversation turns.

        Uses LLM to create a concise summary preserving key decisions,
        topics, and context without all the verbose back-and-forth.

        Args:
            turns: List of turns to summarize

        Returns:
            Condensed summary string
        """
        if not turns:
            return ""

        # Format turns for summarization
        turn_text = []
        for i, turn in enumerate(turns):
            turn_text.append(f"[{turn.role}]: {turn.content[:500]}")  # Truncate very long turns

        summarization_prompt = f"""Summarize the following conversation segment concisely.
Focus on:
1. Key decisions made or discussed
2. Important technical choices and their rationale
3. Context that would be needed to understand later conversation
4. Any problems identified and solutions proposed

Keep the summary under 500 words. Be factual and specific.

CONVERSATION SEGMENT:
{chr(10).join(turn_text)}

SUMMARY:"""

        try:
            summary = await self.llm.complete(summarization_prompt)
            return f"[SUMMARY OF MIDDLE TURNS]: {summary.strip()}"
        except Exception as e:
            logger.warning(f"Failed to summarize turns: {e}")
            # Fallback: just note what was skipped
            return f"[SUMMARY: {len(turns)} turns omitted from middle of conversation]"

    async def _format_conversation_smart(
        self,
        turns: list[Turn],
        max_tokens: int = 15000,
        first_n: int = 10,
        last_n: int = 30
    ) -> str:
        """
        Format conversation with smart chunking for long conversations.

        Strategy: Sliding Window with Summary
        - Keep first N turns (establishes context, goals, initial decisions)
        - Summarize middle turns (preserve key information compactly)
        - Keep last N turns (recent state, latest decisions)

        This handles conversations of any length while preserving
        the most important context for decision extraction.

        Args:
            turns: All conversation turns
            max_tokens: Maximum token budget for conversation text
            first_n: Number of initial turns to keep verbatim
            last_n: Number of recent turns to keep verbatim

        Returns:
            Formatted conversation string within token budget
        """
        if not turns:
            return ""

        total_turns = len(turns)

        # For short conversations, use simple formatting
        if total_turns <= (first_n + last_n):
            return self._format_conversation(turns)

        # Check if simple formatting fits within budget
        simple_format = self._format_conversation(turns)
        if self._estimate_tokens(simple_format) <= max_tokens:
            return simple_format

        logger.info(f"Long conversation detected ({total_turns} turns), applying sliding window")

        # Split into three segments
        first_turns = turns[:first_n]
        middle_turns = turns[first_n:-last_n] if last_n > 0 else turns[first_n:]
        last_turns = turns[-last_n:] if last_n > 0 else []

        # Format first and last turns verbatim
        first_formatted = []
        for i, turn in enumerate(first_turns):
            first_formatted.append(f"[Turn {i + 1}] [{turn.role}]: {turn.content}")

        last_formatted = []
        start_idx = len(turns) - len(last_turns)
        for i, turn in enumerate(last_turns):
            last_formatted.append(f"[Turn {start_idx + i + 1}] [{turn.role}]: {turn.content}")

        # Summarize middle section
        middle_summary = await self._summarize_turns(middle_turns)

        # Combine all sections
        sections = [
            "=== CONVERSATION START ===",
            "\n\n".join(first_formatted),
            "",
            "=== MIDDLE SECTION (SUMMARIZED) ===",
            middle_summary,
            "",
            "=== RECENT CONVERSATION ===",
            "\n\n".join(last_formatted)
        ]

        result = "\n\n".join(sections)

        # Log token savings
        original_tokens = self._estimate_tokens(simple_format)
        final_tokens = self._estimate_tokens(result)
        logger.info(f"Conversation chunking: {original_tokens} → {final_tokens} tokens "
                    f"(saved {original_tokens - final_tokens} tokens)")

        return result

    def _format_decisions_for_context(self, decisions: list[DecisionTrail]) -> str:
        """
        Format existing decisions for context-aware extraction prompt.

        Args:
            decisions: List of existing decisions

        Returns:
            Formatted string summary of decisions
        """
        if not decisions:
            return "None"

        lines = []
        for i, decision in enumerate(decisions, 1):
            lines.append(f"{i}. [{decision.id}] {decision.decision}")
            lines.append(f"   Context: {decision.context}")
            lines.append(f"   Rationale: {decision.rationale}")
            if decision.related_goals:
                lines.append(f"   Related goals: {', '.join(decision.related_goals)}")
            lines.append("")

        return "\n".join(lines)

    def _clean_json_response(self, response: str) -> str:
        """Clean LLM response to extract JSON."""
        response = response.strip()
        if response.startswith("```json"):
            response = response[7:]
        elif response.startswith("```"):
            response = response[3:]
        if response.endswith("```"):
            response = response[:-3]
        return response.strip()

    def format_decision_for_context(
        self,
        decision: DecisionTrail,
        include_alternatives: bool = True,
        include_evidence: bool = True
    ) -> str:
        """
        Format a decision trail for inclusion in LLM context.

        Args:
            decision: Decision to format
            include_alternatives: Whether to include rejected alternatives
            include_evidence: Whether to include supporting evidence

        Returns:
            Formatted string representation
        """
        lines = [
            f"DECISION: {decision.decision}",
            f"Context: {decision.context}",
            f"Rationale: {decision.rationale}"
        ]

        if include_alternatives and decision.alternatives:
            lines.append("Alternatives considered:")
            for alt in decision.alternatives:
                lines.append(f"  - {alt.option}: Rejected because {alt.rejection_reason}")

        if include_evidence and decision.evidence:
            lines.append("Supporting evidence:")
            for ev in decision.evidence:
                lines.append(f"  - {ev.claim} (source: {ev.source.value})")

        if decision.trade_offs:
            lines.append("Trade-offs:")
            for to in decision.trade_offs:
                lines.append(f"  - Gained {to.gained}, sacrificed {to.sacrificed}")

        return "\n".join(lines)
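
Below is a minimal usage sketch of the store defined in this hunk. It is illustrative only and not part of the packaged source: the `CannedLLMClient` class and the sample decision payload are made up for the example, and the `Turn(role=..., content=...)` keyword constructor is an assumption about `mcal/core/models.py`, which is outside this diff. A real integration would plug in one of the provider clients shipped with the package instead of the canned stub.

# usage_sketch.py - illustrative only, not part of mcal_ai 0.1.0.
# Assumes Turn(role=..., content=...) matches the model defined in mcal/core/models.py.
import asyncio
import json
from typing import Optional

from mcal.core.models import Turn
from mcal.core.reasoning_store import ReasoningStore


class CannedLLMClient:
    """Satisfies the LLMClient protocol by returning a fixed extraction payload."""

    async def complete(self, prompt: str, system: Optional[str] = None) -> str:
        return json.dumps({
            "decisions": [{
                "decision": "Use PostgreSQL as the primary datastore",
                "context": "Choosing a database for the new service",
                "alternatives": [{
                    "option": "MongoDB",
                    "pros": ["flexible schema"],
                    "cons": ["weaker relational queries"],
                    "rejection_reason": "The data model is highly relational",
                }],
                "rationale": "Relational integrity and mature tooling",
                "evidence": [{
                    "claim": "The schema is relational",
                    "source": "user_stated",
                    "turn_reference": "turn_1",
                }],
                "trade_offs": [{
                    "gained": "strong consistency",
                    "sacrificed": "schema flexibility",
                    "justification": "Correctness matters more here",
                }],
                "confidence": 0.9,
                "related_goal": "ship-mvp",
            }]
        })


async def main() -> None:
    store = ReasoningStore(CannedLLMClient())
    turns = [
        Turn(role="user", content="We need to pick a database; the data is relational."),
        Turn(role="assistant", content="PostgreSQL fits better than MongoDB here."),
    ]

    # First-session path: no existing decisions, so the plain extraction prompt is used.
    decisions = await store.extract_decisions(turns)
    for decision in decisions:
        print(store.format_decision_for_context(decision))

    # "Why didn't we use MongoDB?" style lookup over the stored alternatives.
    print([d.decision for d in store.get_decisions_with_alternative("mongodb")])


asyncio.run(main())

On a second session, the same sketch would pass the previously extracted decisions back in via `existing_decisions=`, which routes extraction through the Decision Carry-Forward reconciliation path shown above.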