ripperdoc 0.2.6__py3-none-any.whl → 0.2.8__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registry.
- ripperdoc/__init__.py +1 -1
- ripperdoc/cli/cli.py +5 -0
- ripperdoc/cli/commands/__init__.py +71 -6
- ripperdoc/cli/commands/clear_cmd.py +1 -0
- ripperdoc/cli/commands/exit_cmd.py +1 -1
- ripperdoc/cli/commands/help_cmd.py +11 -1
- ripperdoc/cli/commands/hooks_cmd.py +636 -0
- ripperdoc/cli/commands/permissions_cmd.py +36 -34
- ripperdoc/cli/commands/resume_cmd.py +71 -37
- ripperdoc/cli/ui/file_mention_completer.py +276 -0
- ripperdoc/cli/ui/helpers.py +100 -3
- ripperdoc/cli/ui/interrupt_handler.py +175 -0
- ripperdoc/cli/ui/message_display.py +249 -0
- ripperdoc/cli/ui/panels.py +63 -0
- ripperdoc/cli/ui/rich_ui.py +233 -648
- ripperdoc/cli/ui/tool_renderers.py +2 -2
- ripperdoc/core/agents.py +4 -4
- ripperdoc/core/custom_commands.py +411 -0
- ripperdoc/core/hooks/__init__.py +99 -0
- ripperdoc/core/hooks/config.py +303 -0
- ripperdoc/core/hooks/events.py +540 -0
- ripperdoc/core/hooks/executor.py +498 -0
- ripperdoc/core/hooks/integration.py +353 -0
- ripperdoc/core/hooks/manager.py +720 -0
- ripperdoc/core/providers/anthropic.py +476 -69
- ripperdoc/core/query.py +61 -4
- ripperdoc/core/query_utils.py +1 -1
- ripperdoc/core/tool.py +1 -1
- ripperdoc/tools/bash_tool.py +5 -5
- ripperdoc/tools/file_edit_tool.py +2 -2
- ripperdoc/tools/file_read_tool.py +2 -2
- ripperdoc/tools/multi_edit_tool.py +1 -1
- ripperdoc/utils/conversation_compaction.py +476 -0
- ripperdoc/utils/message_compaction.py +109 -154
- ripperdoc/utils/message_formatting.py +216 -0
- ripperdoc/utils/messages.py +31 -9
- ripperdoc/utils/path_ignore.py +3 -4
- ripperdoc/utils/session_history.py +19 -7
- {ripperdoc-0.2.6.dist-info → ripperdoc-0.2.8.dist-info}/METADATA +24 -3
- {ripperdoc-0.2.6.dist-info → ripperdoc-0.2.8.dist-info}/RECORD +44 -30
- {ripperdoc-0.2.6.dist-info → ripperdoc-0.2.8.dist-info}/WHEEL +0 -0
- {ripperdoc-0.2.6.dist-info → ripperdoc-0.2.8.dist-info}/entry_points.txt +0 -0
- {ripperdoc-0.2.6.dist-info → ripperdoc-0.2.8.dist-info}/licenses/LICENSE +0 -0
- {ripperdoc-0.2.6.dist-info → ripperdoc-0.2.8.dist-info}/top_level.txt +0 -0
--- /dev/null
+++ b/ripperdoc/utils/conversation_compaction.py
@@ -0,0 +1,476 @@
```python
"""Conversation compaction (auto and manual)"""

from __future__ import annotations

import re
from dataclasses import dataclass
from typing import Callable, List, Optional, Set, Tuple, Union

from ripperdoc.core.query import query_llm
from ripperdoc.utils.log import get_logger
from ripperdoc.utils.message_compaction import (
    estimate_conversation_tokens,
    micro_compact_messages,
)
from ripperdoc.utils.message_formatting import (
    render_transcript,
    extract_assistant_text,
)
from ripperdoc.utils.messages import (
    AssistantMessage,
    ProgressMessage,
    UserMessage,
    create_user_message,
)

logger = get_logger()

ConversationMessage = Union[UserMessage, AssistantMessage, ProgressMessage]

RECENT_MESSAGES_AFTER_COMPACT = 8


# ─────────────────────────────────────────────────────────────────────────────
# Summary Prompt Generation
# ─────────────────────────────────────────────────────────────────────────────

def generate_summary_prompt(additional_instructions: Optional[str] = None) -> str:
    """Generate the system prompt for conversation summarization.

    This prompt guides the model to create a detailed, structured summary
    that preserves technical details essential for continuing development.

    Args:
        additional_instructions: Optional custom instructions to append.

    Returns:
        The complete summary prompt string.
    """
    base_prompt = """Your task is to create a detailed summary of the conversation so far, paying close attention to the user's explicit requests and your previous actions.
This summary should be thorough in capturing technical details, code patterns, and architectural decisions that would be essential for continuing development work without losing context.

Before providing your final summary, wrap your analysis in <analysis> tags to organize your thoughts and ensure you've covered all necessary points. In your analysis process:

1. Chronologically analyze each message and section of the conversation. For each section thoroughly identify:
   - The user's explicit requests and intents
   - Your approach to addressing the user's requests
   - Key decisions, technical concepts and code patterns
   - Specific details like:
     - file names
     - full code snippets
     - function signatures
     - file edits
   - Errors that you ran into and how you fixed them
   - Pay special attention to specific user feedback that you received, especially if the user told you to do something differently.
2. Double-check for technical accuracy and completeness, addressing each required element thoroughly.

Your summary should include the following sections:

1. Primary Request and Intent: Capture all of the user's explicit requests and intents in detail
2. Key Technical Concepts: List all important technical concepts, technologies, and frameworks discussed.
3. Files and Code Sections: Enumerate specific files and code sections examined, modified, or created. Pay special attention to the most recent messages and include full code snippets where applicable and include a summary of why this file read or edit is important.
4. Errors and fixes: List all errors that you ran into, and how you fixed them. Pay special attention to specific user feedback that you received, especially if the user told you to do something differently.
5. Problem Solving: Document problems solved and any ongoing troubleshooting efforts.
6. All user messages: List ALL user messages that are not tool results. These are critical for understanding the users' feedback and changing intent.
7. Pending Tasks: Outline any pending tasks that you have explicitly been asked to work on.
8. Current Work: Describe in detail precisely what was being worked on immediately before this summary request, paying special attention to the most recent messages from both user and assistant. Include file names and code snippets where applicable.
9. Optional Next Step: List the next step that you will take that is related to the most recent work you were doing. IMPORTANT: ensure that this step is DIRECTLY in line with the user's explicit requests, and the task you were working on immediately before this summary request. If your last task was concluded, then only list next steps if they are explicitly in line with the users request. Do not start on tangential requests without confirming with the user first.
If there is a next step, include direct quotes from the most recent conversation showing exactly what task you were working on and where you left off. This should be verbatim to ensure there's no drift in task interpretation.

Here's an example of how your output should be structured:

<example>
<analysis>
[Your thought process, ensuring all points are covered thoroughly and accurately]
</analysis>

<summary>
1. Primary Request and Intent:
   [Detailed description]

2. Key Technical Concepts:
   - [Concept 1]
   - [Concept 2]
   - [...]

3. Files and Code Sections:
   - [File Name 1]
     - [Summary of why this file is important]
     - [Summary of the changes made to this file, if any]
     - [Important Code Snippet]
   - [File Name 2]
     - [Important Code Snippet]
   - [...]

4. Errors and fixes:
   - [Detailed description of error 1]:
     - [How you fixed the error]
     - [User feedback on the error if any]
   - [...]

5. Problem Solving:
   [Description of solved problems and ongoing troubleshooting]

6. All user messages:
   - [Detailed non tool use user message]
   - [...]

7. Pending Tasks:
   - [Task 1]
   - [Task 2]
   - [...]

8. Current Work:
   [Precise description of current work]

9. Optional Next Step:
   [Optional Next step to take]

</summary>
</example>

Please provide your summary based on the conversation so far, following this structure and ensuring precision and thoroughness in your response."""

    if additional_instructions and additional_instructions.strip():
        return f"{base_prompt}\n\nAdditional Instructions:\n{additional_instructions.strip()}"

    return base_prompt


def format_summary_response(raw_summary_text: str) -> str:
    """Format the summary response by extracting content from XML tags.

    Converts <analysis>...</analysis> and <summary>...</summary> tags
    to readable section headers.

    Args:
        raw_summary_text: The raw response from the model.

    Returns:
        Formatted summary text with clean section headers.
    """
    formatted_text = raw_summary_text

    # Extract and format analysis section
    analysis_match = re.search(r"<analysis>([\s\S]*?)</analysis>", formatted_text)
    if analysis_match:
        extracted_content = analysis_match.group(1) or ""
        formatted_text = re.sub(
            r"<analysis>[\s\S]*?</analysis>",
            f"Analysis:\n{extracted_content.strip()}",
            formatted_text,
        )

    # Extract and format summary section
    summary_match = re.search(r"<summary>([\s\S]*?)</summary>", formatted_text)
    if summary_match:
        summary_content = summary_match.group(1) or ""
        formatted_text = re.sub(
            r"<summary>[\s\S]*?</summary>",
            f"Summary:\n{summary_content.strip()}",
            formatted_text,
        )

    # Clean up excessive newlines
    formatted_text = re.sub(r"\n\n+", "\n\n", formatted_text)

    return formatted_text.strip()


def build_continuation_prompt(summary_text: str, should_continue: bool = False) -> str:
    """Build the continuation prompt for a compacted conversation.

    Args:
        summary_text: The formatted summary text.
        should_continue: If True, instructs the model to continue without asking.

    Returns:
        The continuation prompt to start the compacted conversation.
    """
    formatted_summary = format_summary_response(summary_text)
    prompt = f"""This session is being continued from a previous conversation that ran out of context. The conversation is summarized below:
{formatted_summary}"""

    if should_continue:
        return f"""{prompt}

Please continue the conversation from where we left it off without asking the user any further questions. Continue with the last task that you were asked to work on."""

    return prompt


# ─────────────────────────────────────────────────────────────────────────────
# Data Classes
# ─────────────────────────────────────────────────────────────────────────────

@dataclass
class CompactionResult:
    """Result of a conversation compaction operation."""
    messages: List[ConversationMessage]
    summary_text: str
    continuation_prompt: str
    tokens_before: int
    tokens_after: int
    tokens_saved: int
    micro_tokens_saved: int
    was_compacted: bool


@dataclass
class CompactionError:
    """Error during compaction."""
    error_type: str  # "not_enough_messages", "empty_summary", "exception"
    message: str
    exception: Optional[Exception] = None


def extract_tool_ids_from_message(msg: ConversationMessage) -> Tuple[Set[str], Set[str]]:
    """Extract tool_use IDs and tool_result IDs from a message."""
    tool_use_ids: Set[str] = set()
    tool_result_ids: Set[str] = set()

    content = getattr(getattr(msg, "message", None), "content", None)
    if not isinstance(content, list):
        return tool_use_ids, tool_result_ids

    for block in content:
        block_type = getattr(block, "type", None)
        if block_type == "tool_use":
            tool_id = getattr(block, "id", None) or getattr(block, "tool_use_id", None)
            if tool_id:
                tool_use_ids.add(tool_id)
        elif block_type == "tool_result":
            tool_id = getattr(block, "tool_use_id", None) or getattr(block, "id", None)
            if tool_id:
                tool_result_ids.add(tool_id)

    return tool_use_ids, tool_result_ids


def get_complete_tool_pairs_tail(
    messages: List[ConversationMessage], target_count: int
) -> List[ConversationMessage]:
    """Return the last N messages, expanding to keep tool_use/tool_result pairs together."""
    if target_count <= 0 or not messages:
        return []

    tail_start = max(0, len(messages) - target_count)
    tail = messages[tail_start:]

    result_ids_in_tail: Set[str] = set()
    for msg in tail:
        _, result_ids = extract_tool_ids_from_message(msg)
        result_ids_in_tail.update(result_ids)

    use_ids_in_tail: Set[str] = set()
    for msg in tail:
        use_ids, _ = extract_tool_ids_from_message(msg)
        use_ids_in_tail.update(use_ids)

    orphan_result_ids = result_ids_in_tail - use_ids_in_tail
    if not orphan_result_ids:
        return tail

    for i in range(tail_start - 1, -1, -1):
        msg = messages[i]
        use_ids, _ = extract_tool_ids_from_message(msg)
        matched = use_ids & orphan_result_ids
        if matched:
            tail_start = i
            orphan_result_ids -= matched
            use_ids_in_tail.update(use_ids)

            _, new_result_ids = extract_tool_ids_from_message(msg)
            new_orphans = new_result_ids - use_ids_in_tail
            orphan_result_ids.update(new_orphans)

        if not orphan_result_ids:
            break

    return messages[tail_start:]


async def summarize_conversation(
    messages: List[ConversationMessage],
    custom_instructions: str = "",
) -> str:
    """Summarize the given conversation using the configured model.

    Uses a detailed prompt structure to capture technical details, code patterns,
    and architectural decisions essential for continuing development work.

    Args:
        messages: The conversation messages to summarize (uses last 60).
        custom_instructions: Optional additional instructions for summarization.

    Returns:
        The summary text, or empty string if summarization fails.
    """
    recent_messages = messages[-60:]
    transcript = render_transcript(recent_messages)

    logger.debug(
        "[compaction] summarize_conversation: %d messages, transcript length=%d",
        len(recent_messages),
        len(transcript) if transcript else 0,
    )

    if not transcript.strip():
        logger.warning("[compaction] transcript is empty, cannot summarize")
        return ""

    # Use the detailed summary prompt from generate_summary_prompt
    system_prompt = "You are a helpful AI assistant tasked with summarizing conversations."
    user_prompt = generate_summary_prompt(custom_instructions)
    user_content = f"{user_prompt}\n\nHere is the conversation to summarize:\n\n{transcript}"

    assistant_response = await query_llm(
        messages=[create_user_message(user_content)],
        system_prompt=system_prompt,
        tools=[],
        max_thinking_tokens=0,
        model="main",
    )

    result = extract_assistant_text(assistant_response)
    logger.debug(
        "[compaction] summarize_conversation returned: length=%d",
        len(result) if result else 0,
    )
    return result


async def compact_conversation(
    messages: List[ConversationMessage],
    custom_instructions: str = "",
    protocol: str = "anthropic",
    tail_count: int = RECENT_MESSAGES_AFTER_COMPACT,
    attachment_provider: Optional[Callable[[], List[ConversationMessage]]] = None,
) -> Union["CompactionResult", "CompactionError"]:
    """Compact a conversation by summarizing and rebuilding.

    This is a pure logic function with no UI dependencies.

    Args:
        messages: The conversation messages to compact.
        custom_instructions: Optional instructions for the summarizer.
        protocol: The API protocol ("anthropic" or "openai").
        tail_count: Number of recent messages to preserve after compaction.
        attachment_provider: Optional callable to provide attachment messages.

    Returns:
        CompactionResult on success, CompactionError on failure.
    """
    if len(messages) < 2:
        return CompactionError(
            error_type="not_enough_messages",
            message="Not enough conversation history to compact.",
        )

    tokens_before = estimate_conversation_tokens(messages, protocol=protocol)

    micro = micro_compact_messages(messages, protocol=protocol)
    messages_for_summary = micro.messages

    # Summarize the conversation

    non_progress_messages = [
        m for m in messages_for_summary if getattr(m, "type", "") != "progress"
    ]
    try:
        summary_text = await summarize_conversation(
            non_progress_messages, custom_instructions
        )
    except Exception as exc:
        import traceback
        logger.warning(
            "[compaction] Error during compaction: %s: %s\n%s",
            type(exc).__name__,
            exc,
            traceback.format_exc(),
        )
        return CompactionError(
            error_type="exception",
            message=f"Error during compaction: {exc}",
            exception=exc,
        )

    if not summary_text.strip():
        return CompactionError(
            error_type="empty_summary",
            message="Failed to summarize conversation for compaction.",
        )

    # Build continuation prompt using the new structured format
    continuation_prompt = build_continuation_prompt(summary_text, should_continue=False)

    recent_tail = get_complete_tool_pairs_tail(non_progress_messages, tail_count)

    attachments: List[ConversationMessage] = []
    if callable(attachment_provider):
        try:
            attachments = attachment_provider() or []
        except Exception as exc:
            logger.warning(
                "[compaction] attachment_provider failed: %s: %s",
                type(exc).__name__,
                exc,
            )

    compacted_messages: List[ConversationMessage] = [create_user_message(continuation_prompt)]
    compacted_messages.extend(attachments)
    compacted_messages.extend(recent_tail)

    tokens_after = estimate_conversation_tokens(compacted_messages, protocol=protocol)
    tokens_saved = max(0, tokens_before - tokens_after)

    return CompactionResult(
        messages=compacted_messages,
        summary_text=summary_text,
        continuation_prompt=continuation_prompt,
        tokens_before=tokens_before,
        tokens_after=tokens_after,
        tokens_saved=tokens_saved,
        micro_tokens_saved=micro.tokens_saved,
        was_compacted=True,
    )


# Legacy class for backward compatibility
class ConversationCompactor:
    """Legacy wrapper for conversation compaction.

    Deprecated: Use compact_conversation() function directly instead.
    This class is kept for backward compatibility.
    """
    # Keep CompactionResult as a nested class for backward compatibility
    CompactionResult = CompactionResult

    def __init__(
        self,
        console: Optional[object] = None,
        render_transcript_fn: Optional[Callable] = None,
        extract_assistant_text_fn: Optional[Callable] = None,
        attachment_provider: Optional[Callable[[], List[ConversationMessage]]] = None,
    ):
        self._attachment_provider = attachment_provider
        # console and render functions are ignored - kept for API compatibility

    async def compact(
        self,
        messages: List[ConversationMessage],
        custom_instructions: str,
        protocol: str = "anthropic",
        tail_count: int = RECENT_MESSAGES_AFTER_COMPACT,
    ) -> Optional["CompactionResult"]:  # type: ignore[valid-type]
        """Compact the conversation. Returns None on error."""
        result = await compact_conversation(
            messages=messages,
            custom_instructions=custom_instructions,
            protocol=protocol,
            tail_count=tail_count,
            attachment_provider=self._attachment_provider,
        )
        if isinstance(result, CompactionError):
            return None
        return result
```