@nfreeness/clawdbot 0.124.21802 → 0.124.22101
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
|
@@ -1,4 +1,5 @@
|
|
|
1
|
-
import { estimateTokens, generateSummary } from "@mariozechner/pi-coding-agent";
|
|
1
|
+
import { convertToLlm, estimateTokens, generateSummary, serializeConversation } from "@mariozechner/pi-coding-agent";
|
|
2
|
+
import { completeSimple } from "@mariozechner/pi-ai";
|
|
2
3
|
import { DEFAULT_CONTEXT_TOKENS } from "./defaults.js";
|
|
3
4
|
// Chunk-sizing ratios, expressed as fractions of the model's context window,
// used when splitting conversation history into summarizable chunks.
// NOTE(review): exact semantics inferred from names and the chunkMessagesByMaxTokens
// call site further down — confirm against the chunk-size computation.
export const BASE_CHUNK_RATIO = 0.4;
export const MIN_CHUNK_RATIO = 0.15;
|
|
@@ -7,6 +8,122 @@ const DEFAULT_SUMMARY_FALLBACK = "No prior history.";
|
|
|
7
8
|
// Number of parts used when work must be divided (presumably chunk splitting —
// TODO confirm at the usage site, which is outside this view).
const DEFAULT_PARTS = 2;

// Instruction used when several partial summaries must be folded into one.
const MERGE_SUMMARIES_INSTRUCTIONS = "Merge these partial summaries into a single cohesive summary. Preserve decisions," +
    " TODOs, open questions, and any constraints.";

// ============================================================================
// Local summarization prompts (mirrored from upstream for i18n override support)
// ============================================================================

// System prompt: frames the model as a summarizer and forbids it from
// continuing or answering the conversation it is shown.
const SUMMARIZATION_SYSTEM_PROMPT = `You are a context summarization assistant. Your task is to read a conversation between a user and an AI coding assistant, then produce a structured summary following the exact format specified.

Do NOT continue the conversation. Do NOT respond to any questions in the conversation. ONLY output the structured summary.`;

// User-turn prompt for producing a fresh checkpoint summary; defines the exact
// markdown section format the downstream consumer expects.
const SUMMARIZATION_PROMPT = `The messages above are a conversation to summarize. Create a structured context checkpoint summary that another LLM will use to continue the work.

Use this EXACT format:

## Goal
[What is the user trying to accomplish? Can be multiple items if the session covers different tasks.]

## Constraints & Preferences
- [Any constraints, preferences, or requirements mentioned by user]
- [Or "(none)" if none were mentioned]

## Progress
### Done
- [x] [Completed tasks/changes]

### In Progress
- [ ] [Current work]

### Blocked
- [Issues preventing progress, if any]

## Key Decisions
- **[Decision]**: [Brief rationale]

## Next Steps
1. [Ordered list of what should happen next]

## Critical Context
- [Any data, examples, or references needed to continue]
- [Or "(none)" if not applicable]

Keep each section concise. Preserve exact file paths, function names, and error messages.`;

// User-turn prompt for the incremental path: merges NEW messages into an
// existing summary supplied inside <previous-summary> tags. Same section
// format as SUMMARIZATION_PROMPT so consumers can parse either output.
const UPDATE_SUMMARIZATION_PROMPT = `The messages above are NEW conversation messages to incorporate into the existing summary provided in <previous-summary> tags.

Update the existing structured summary with new information. RULES:
- PRESERVE all existing information from the previous summary
- ADD new progress, decisions, and context from the new messages
- UPDATE the Progress section: move items from "In Progress" to "Done" when completed
- UPDATE "Next Steps" based on what was accomplished
- PRESERVE exact file paths, function names, and error messages
- If something is no longer relevant, you may remove it

Use this EXACT format:

## Goal
[Preserve existing goals, add new ones if the task expanded]

## Constraints & Preferences
- [Preserve existing, add new ones discovered]

## Progress
### Done
- [x] [Include previously done items AND newly completed items]

### In Progress
- [ ] [Current work - update based on progress]

### Blocked
- [Current blockers - remove if resolved]

## Key Decisions
- **[Decision]**: [Brief rationale] (preserve all previous, add new)

## Next Steps
1. [Update based on current state]

## Critical Context
- [Preserve important context, add new if needed]

Keep each section concise. Preserve exact file paths, function names, and error messages.`;
|
|
87
|
+
/**
 * Local override of generateSummary that injects customInstructions into the
 * system prompt (rather than appending as a weak "Additional focus:" hint),
 * so that language/style directives from customInstructions take effect even
 * when the model is presented with long English conversation text.
 *
 * Falls back to the upstream generateSummary when customInstructions is absent.
 *
 * @param {Array} currentMessages - conversation messages to summarize.
 * @param {*} model - model descriptor passed through to completeSimple.
 * @param {number} reserveTokens - token budget reserved for the summary.
 * @param {string|undefined} apiKey - provider API key, forwarded as-is.
 * @param {AbortSignal|undefined} signal - cancellation signal for the request.
 * @param {string|undefined} customInstructions - extra directives (e.g. i18n).
 * @param {string|undefined} previousSummary - prior summary for incremental mode.
 * @returns {Promise<string>} the generated summary text.
 * @throws {Error} when the completion stops with an error.
 */
async function generateSummaryWithI18n(currentMessages, model, reserveTokens, apiKey, signal, customInstructions, previousSummary) {
    // Nothing to inject — defer to the upstream implementation unchanged.
    if (!customInstructions) {
        return generateSummary(currentMessages, model, reserveTokens, apiKey, signal, customInstructions, previousSummary);
    }
    // Cap the summary output at 80% of the reserved token budget.
    const outputTokenBudget = Math.floor(0.8 * reserveTokens);
    // Custom instructions become part of the system prompt so the model treats
    // them as a hard constraint rather than an optional hint.
    const augmentedSystemPrompt = `${SUMMARIZATION_SYSTEM_PROMPT}\n\n${customInstructions}`;
    // Flatten the conversation to plain text; presenting it as data inside
    // <conversation> tags keeps the model from trying to continue it.
    const serialized = serializeConversation(convertToLlm(currentMessages));
    const promptParts = [`<conversation>\n${serialized}\n</conversation>\n\n`];
    if (previousSummary) {
        promptParts.push(`<previous-summary>\n${previousSummary}\n</previous-summary>\n\n`);
    }
    // Incremental mode gets the update prompt; otherwise the fresh-summary prompt.
    promptParts.push(previousSummary ? UPDATE_SUMMARIZATION_PROMPT : SUMMARIZATION_PROMPT);
    const userMessage = {
        role: "user",
        content: [{ type: "text", text: promptParts.join("") }],
        timestamp: Date.now(),
    };
    const response = await completeSimple(
        model,
        { systemPrompt: augmentedSystemPrompt, messages: [userMessage] },
        { maxTokens: outputTokenBudget, signal, apiKey, reasoning: "high" },
    );
    if (response.stopReason === "error") {
        throw new Error(`Summarization failed: ${response.errorMessage ?? "Unknown error"}`);
    }
    // Join all text parts of the model response into the final summary string.
    return response.content
        .filter((part) => part.type === "text")
        .map((part) => part.text)
        .join("\n");
}
|
|
10
127
|
/**
 * Total estimated token count across a list of messages.
 *
 * @param {Array} messages - messages accepted by the upstream estimateTokens.
 * @returns {number} sum of the per-message token estimates (0 for an empty list).
 */
export function estimateMessagesTokens(messages) {
    let total = 0;
    for (const message of messages) {
        total += estimateTokens(message);
    }
    return total;
}
|
|
@@ -104,7 +221,7 @@ async function summarizeChunks(params) {
|
|
|
104
221
|
const chunks = chunkMessagesByMaxTokens(params.messages, params.maxChunkTokens);
|
|
105
222
|
let summary = params.previousSummary;
|
|
106
223
|
for (const chunk of chunks) {
|
|
107
|
-
summary = await
|
|
224
|
+
summary = await generateSummaryWithI18n(chunk, params.model, params.reserveTokens, params.apiKey, params.signal, params.customInstructions, summary);
|
|
108
225
|
}
|
|
109
226
|
return summary ?? DEFAULT_SUMMARY_FALLBACK;
|
|
110
227
|
}
|
|
@@ -183,7 +183,9 @@ export default function compactionSafeguardExtension(api) {
|
|
|
183
183
|
reserveTokens,
|
|
184
184
|
maxChunkTokens,
|
|
185
185
|
contextWindow: contextWindowTokens,
|
|
186
|
-
customInstructions:
|
|
186
|
+
customInstructions: customInstructions
|
|
187
|
+
? `${TURN_PREFIX_INSTRUCTIONS}\n\n${customInstructions}`
|
|
188
|
+
: TURN_PREFIX_INSTRUCTIONS,
|
|
187
189
|
previousSummary: undefined,
|
|
188
190
|
});
|
|
189
191
|
summary = `${historySummary}\n\n---\n\n**Turn Context (split turn):**\n\n${prefixSummary}`;
|
package/dist/build-info.json
CHANGED