@ricky-stevens/context-guardian 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +29 -0
- package/.claude-plugin/plugin.json +63 -0
- package/.github/workflows/ci.yml +66 -0
- package/CLAUDE.md +132 -0
- package/LICENSE +21 -0
- package/README.md +362 -0
- package/biome.json +34 -0
- package/bun.lock +31 -0
- package/hooks/precompact.mjs +73 -0
- package/hooks/session-start.mjs +133 -0
- package/hooks/stop.mjs +172 -0
- package/hooks/submit.mjs +133 -0
- package/lib/checkpoint.mjs +258 -0
- package/lib/compact-cli.mjs +124 -0
- package/lib/compact-output.mjs +350 -0
- package/lib/config.mjs +40 -0
- package/lib/content.mjs +33 -0
- package/lib/diagnostics.mjs +221 -0
- package/lib/estimate.mjs +254 -0
- package/lib/extract-helpers.mjs +869 -0
- package/lib/handoff.mjs +329 -0
- package/lib/logger.mjs +34 -0
- package/lib/mcp-tools.mjs +200 -0
- package/lib/paths.mjs +90 -0
- package/lib/stats.mjs +81 -0
- package/lib/statusline.mjs +123 -0
- package/lib/synthetic-session.mjs +273 -0
- package/lib/tokens.mjs +170 -0
- package/lib/tool-summary.mjs +399 -0
- package/lib/transcript.mjs +939 -0
- package/lib/trim.mjs +158 -0
- package/package.json +22 -0
- package/skills/compact/SKILL.md +20 -0
- package/skills/config/SKILL.md +70 -0
- package/skills/handoff/SKILL.md +26 -0
- package/skills/prune/SKILL.md +20 -0
- package/skills/stats/SKILL.md +100 -0
- package/sonar-project.properties +12 -0
- package/test/checkpoint.test.mjs +171 -0
- package/test/compact-cli.test.mjs +230 -0
- package/test/compact-output.test.mjs +284 -0
- package/test/compaction-e2e.test.mjs +809 -0
- package/test/content.test.mjs +86 -0
- package/test/diagnostics.test.mjs +188 -0
- package/test/edge-cases.test.mjs +543 -0
- package/test/estimate.test.mjs +262 -0
- package/test/extract-helpers-coverage.test.mjs +333 -0
- package/test/extract-helpers.test.mjs +234 -0
- package/test/handoff.test.mjs +738 -0
- package/test/integration.test.mjs +582 -0
- package/test/logger.test.mjs +70 -0
- package/test/manual-compaction-test.md +426 -0
- package/test/mcp-tools.test.mjs +443 -0
- package/test/paths.test.mjs +250 -0
- package/test/quick-compaction-test.md +191 -0
- package/test/stats.test.mjs +88 -0
- package/test/statusline.test.mjs +222 -0
- package/test/submit.test.mjs +232 -0
- package/test/synthetic-session.test.mjs +600 -0
- package/test/tokens.test.mjs +293 -0
- package/test/tool-summary.test.mjs +771 -0
- package/test/transcript-coverage.test.mjs +369 -0
- package/test/transcript.test.mjs +596 -0
- package/test/trim.test.mjs +356 -0
|
@@ -0,0 +1,258 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Checkpoint creation and validation utilities.
|
|
3
|
+
*
|
|
4
|
+
* Provides the shared compaction pipeline: extract → cap → validate → save
|
|
5
|
+
* checkpoint file → compute stats. Used by the compact and prune skills
|
|
6
|
+
* (/cg:compact, /cg:prune) via compact-cli.mjs.
|
|
7
|
+
*
|
|
8
|
+
* @module checkpoint
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import fs from "node:fs";
|
|
12
|
+
import path from "node:path";
|
|
13
|
+
import { loadConfig, resolveMaxTokens } from "./config.mjs";
|
|
14
|
+
import { log } from "./logger.mjs";
|
|
15
|
+
import {
|
|
16
|
+
CHECKPOINTS_DIR,
|
|
17
|
+
ensureDataDir,
|
|
18
|
+
rotateCheckpoints,
|
|
19
|
+
stateFile,
|
|
20
|
+
} from "./paths.mjs";
|
|
21
|
+
import { formatCompactionStats } from "./stats.mjs";
|
|
22
|
+
import { estimateOverhead, estimateTokens, getTokenUsage } from "./tokens.mjs";
|
|
23
|
+
import { extractConversation, extractRecent } from "./transcript.mjs";
|
|
24
|
+
|
|
25
|
+
// ---------------------------------------------------------------------------
|
|
26
|
+
// Content validation
|
|
27
|
+
// ---------------------------------------------------------------------------
|
|
28
|
+
|
|
29
|
+
/**
 * Check if extracted content has actual conversation data, not just
 * headers, empty strings, or placeholder text.
 *
 * @param {string} text - Extracted checkpoint content
 * @returns {boolean} True if the content contains real conversation
 */
export function hasExtractedContent(text) {
  // Boolean() guard: without it a falsy `text` (e.g. "" or null) would be
  // returned as-is, contradicting the documented boolean return type.
  return Boolean(
    text &&
      text !== "(no transcript available)" &&
      text.length > 50 &&
      (text.includes("User:") || text.includes("Assistant:")),
  );
}
|
|
44
|
+
|
|
45
|
+
// ---------------------------------------------------------------------------
|
|
46
|
+
// Checkpoint size cap
|
|
47
|
+
// ---------------------------------------------------------------------------
|
|
48
|
+
|
|
49
|
+
/**
 * Cap checkpoint content to prevent oversized additionalContext injections.
 * Uses start+end trim: keeps the first half and last half, trims the middle.
 *
 * @param {string} content - The checkpoint text
 * @param {number} maxTokens - The model's max token limit
 * @returns {string} The original or trimmed content ("" for empty/absent input)
 */
export function capCheckpointContent(content, maxTokens) {
  // Guard: extraction may yield null/undefined; return "" instead of throwing
  // on `.length` so callers can reject it via hasExtractedContent().
  if (!content) return "";
  // ~3.5 chars per token for English. Using 3x as conservative multiplier.
  const maxChars = Math.max(50000, (maxTokens || 200000) * 3);
  if (content.length <= maxChars) return content;
  const half = Math.floor(maxChars / 2);
  const trimmed = content.length - maxChars;
  log(
    `checkpoint-trimmed original=${content.length} kept=${maxChars} trimmed=${trimmed}`,
  );
  // Keep the first and last halves; insert a visible marker for the gap.
  return (
    content.slice(0, half) +
    `\n\n> [${trimmed} chars trimmed from middle to fit context window]\n\n` +
    content.slice(-half)
  );
}
|
|
72
|
+
|
|
73
|
+
// ---------------------------------------------------------------------------
|
|
74
|
+
// State file writer
|
|
75
|
+
// ---------------------------------------------------------------------------
|
|
76
|
+
|
|
77
|
+
/**
 * Write a state file with post-compaction token estimates so
 * /cg:stats works immediately after compaction.
 *
 * Best-effort: any failure is logged and swallowed so compaction
 * itself never fails because of state bookkeeping.
 *
 * @param {string} sessionId - Current session ID
 * @param {string} transcriptPath - Path to the transcript
 * @param {number} tokens - Estimated token count
 * @param {number} max - Max tokens for the model
 * @param {string} rec - Recommendation text
 * @param {object} [extra]
 * @param {number} [extra.payloadBytes=0] - Transcript payload size in bytes
 */
export function writeCompactionState(
  sessionId,
  transcriptPath,
  tokens,
  max,
  rec,
  { payloadBytes = 0 } = {},
) {
  try {
    const config = loadConfig();
    const threshold = config.threshold ?? 0.35;
    const pct = tokens / max;
    const pctDisplay = (pct * 100).toFixed(1);
    const thresholdDisplay = Math.round(threshold * 100);

    // Carry forward baseline_overhead from any existing state file;
    // missing or unreadable state simply means a zero baseline.
    let baselineOverhead = 0;
    try {
      const previousStatePath = stateFile(sessionId);
      if (fs.existsSync(previousStatePath)) {
        const previousState = JSON.parse(
          fs.readFileSync(previousStatePath, "utf8"),
        );
        baselineOverhead = previousState.baseline_overhead ?? 0;
      }
    } catch {}

    const state = {
      current_tokens: tokens,
      max_tokens: max,
      pct,
      pct_display: pctDisplay,
      threshold,
      threshold_display: thresholdDisplay,
      remaining_to_alert: Math.max(
        0,
        Math.round(thresholdDisplay - Number.parseFloat(pctDisplay)),
      ),
      headroom: Math.max(0, Math.round(max * threshold - tokens)),
      recommendation: rec,
      source: "estimated",
      model: "unknown",
      smart_estimate_pct: 0,
      recent_estimate_pct: 0,
      baseline_overhead: baselineOverhead,
      payload_bytes: payloadBytes,
      session_id: sessionId,
      transcript_path: transcriptPath,
      ts: Date.now(),
    };

    ensureDataDir();
    fs.writeFileSync(stateFile(sessionId), JSON.stringify(state));
  } catch (e) {
    log(`writeCompactionState-error: ${e.message}`);
  }
}
|
|
143
|
+
|
|
144
|
+
// ---------------------------------------------------------------------------
|
|
145
|
+
// Shared compaction pipeline
|
|
146
|
+
// ---------------------------------------------------------------------------
|
|
147
|
+
|
|
148
|
+
/**
 * Read baseline_overhead carried in the session state file, if present.
 * Best-effort: returns 0 when the state file is missing or unreadable.
 *
 * @param {string} sessionId - Current session ID
 * @returns {number} Carried-forward baseline overhead (tokens)
 */
function readBaselineOverhead(sessionId) {
  try {
    const sf = stateFile(sessionId);
    if (fs.existsSync(sf)) {
      const prev = JSON.parse(fs.readFileSync(sf, "utf8"));
      return prev.baseline_overhead ?? 0;
    }
  } catch {}
  return 0;
}

/**
 * Copy the checkpoint into ./.context-guardian for user visibility and
 * rotate so only the 5 newest copies remain (sorted filenames embed the
 * timestamp, so lexical sort is chronological). Best-effort: failures are
 * logged, never thrown.
 *
 * @param {string} stamp - Timestamp fragment used in the filename
 * @param {string} sessionId - Current session ID
 * @param {string} fullCheckpoint - Complete checkpoint markdown
 */
function copyCheckpointToProjectDir(stamp, sessionId, fullCheckpoint) {
  try {
    const cgDir = path.join(process.cwd(), ".context-guardian");
    fs.mkdirSync(cgDir, { recursive: true });
    const cgCheckpointPath = path.join(
      cgDir,
      `cg-checkpoint-${stamp}-${sessionId.slice(0, 8)}.md`,
    );
    fs.writeFileSync(cgCheckpointPath, fullCheckpoint);
    // Rotate — keep last 5 checkpoint copies
    const cpFiles = fs
      .readdirSync(cgDir)
      .filter((f) => f.startsWith("cg-checkpoint-") && f.endsWith(".md"))
      .sort()
      .reverse();
    for (const f of cpFiles.slice(5)) {
      try {
        fs.unlinkSync(path.join(cgDir, f));
      } catch {}
    }
  } catch (e) {
    log(`checkpoint-copy-error: ${e.message}`);
  }
}

/**
 * Perform the full compaction pipeline: extract → cap → validate → save
 * checkpoint → compute stats.
 *
 * Returns the stats block for display, or null if extraction produced
 * no meaningful content (caller should handle the empty case).
 *
 * @param {object} opts
 * @param {string} opts.mode - "smart" or "recent"
 * @param {string} opts.transcriptPath - Path to the JSONL transcript
 * @param {string} opts.sessionId - Current session ID
 * @param {object} [opts.preStats] - Pre-compaction token counts { currentTokens, maxTokens }
 * @returns {{ statsBlock: string, stats: object, checkpointPath: string } | null}
 */
export function performCompaction(opts) {
  const { mode, transcriptPath, sessionId, preStats } = opts;
  const label = mode === "smart" ? "Smart Compact" : "Keep Recent 10";

  ensureDataDir();
  fs.mkdirSync(CHECKPOINTS_DIR, { recursive: true });

  // Generate checkpoint filename, e.g. session-2024-01-01T00-00-00-abcd1234.md
  const stamp = new Date().toISOString().replaceAll(/[:.]/g, "-").slice(0, 19);
  const checkpointPath = path.join(
    CHECKPOINTS_DIR,
    `session-${stamp}-${sessionId.slice(0, 8)}.md`,
  );

  // Extract and cap content against the model's context limit
  const usage = getTokenUsage(transcriptPath);
  const capMax = usage?.max_tokens || resolveMaxTokens() || 200000;
  let content =
    mode === "smart"
      ? extractConversation(transcriptPath)
      : extractRecent(transcriptPath, 10);
  content = capCheckpointContent(content, capMax);

  // Bail out when extraction produced only headers/placeholder text
  if (!hasExtractedContent(content)) return null;

  // Save checkpoint file
  const fullCheckpoint = `# Context Checkpoint (${label})\n> Created: ${new Date().toISOString()}\n\n${content}`;
  fs.writeFileSync(checkpointPath, fullCheckpoint);

  // Also copy to .context-guardian/ for user visibility
  copyCheckpointToProjectDir(stamp, sessionId, fullCheckpoint);

  // Compute stats: prefer caller-supplied pre-compaction counts, then
  // transcript usage, then a raw estimate from the transcript file.
  const preTokens =
    preStats?.currentTokens ||
    usage?.current_tokens ||
    estimateTokens(transcriptPath);
  const preMax = preStats?.maxTokens || usage?.max_tokens || resolveMaxTokens();

  const overhead = estimateOverhead(
    preTokens,
    transcriptPath,
    readBaselineOverhead(sessionId),
  );

  // Measure transcript file size for payload reporting (best-effort)
  let prePayloadBytes = 0;
  try {
    prePayloadBytes = fs.statSync(transcriptPath).size;
  } catch {}

  const { stats, block: statsBlock } = formatCompactionStats(
    preTokens,
    preMax,
    fullCheckpoint,
    { overhead, prePayloadBytes },
  );

  rotateCheckpoints();

  log(
    `checkpoint-saved mode=${mode} file=${checkpointPath} pre=${preTokens} post=${stats.postTokens} saved=${stats.saved}`,
  );

  return { statsBlock, stats, checkpointPath };
}
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* CLI entry point for manual compaction via skills.
|
|
4
|
+
* Skills don't fire UserPromptSubmit, so this provides a direct path.
|
|
5
|
+
*
|
|
6
|
+
* Usage: node compact-cli.mjs <smart|recent|handoff> <session_id> <data_dir> [label]
|
|
7
|
+
* Output: single JSON line { success, statsBlock?, error? }
|
|
8
|
+
*/
|
|
9
|
+
|
|
10
|
+
// Set CLAUDE_PLUGIN_DATA before any module reads it (paths.mjs uses it at import time)
const [mode, sessionId, dataDir, ...labelParts] = process.argv.slice(2);
const label = labelParts.join(" ").trim() || "";
if (dataDir) process.env.CLAUDE_PLUGIN_DATA = dataDir;

// Dynamic imports on purpose: they run only after CLAUDE_PLUGIN_DATA is set
// above, since paths.mjs resolves its data directory at module-evaluation time.
const crypto = await import("node:crypto");
const fs = await import("node:fs");
const { performCompaction } = await import("./checkpoint.mjs");
const { log } = await import("./logger.mjs");
const { stateFile } = await import("./paths.mjs");
|
|
20
|
+
|
|
21
|
+
/**
 * Emit the CLI result as a single JSON payload on stdout.
 *
 * @param {object} obj - Result payload ({ success, statsBlock?, error?, ... })
 */
function out(obj) {
  const serialized = JSON.stringify(obj);
  process.stdout.write(serialized);
}
|
|
24
|
+
|
|
25
|
+
// Validate the mode up front; unknown modes get a friendly error. Note the
// exit code stays 0 even on failure — callers read the JSON payload, not the
// process exit status.
if (mode !== "smart" && mode !== "recent" && mode !== "handoff") {
  out({
    success: false,
    error: "Invalid mode. Use smart, recent, or handoff.",
  });
  process.exit(0);
}

// The transcript path is looked up from the session state file; if that file
// is missing/unparseable, no message has been sent this session yet.
let transcriptPath;
try {
  transcriptPath = JSON.parse(
    fs.readFileSync(stateFile(sessionId), "utf8"),
  ).transcript_path;
} catch {
  out({ success: false, error: "No session data yet. Send a message first." });
  process.exit(0);
}

// State may exist while the transcript file itself has been removed or moved.
if (!transcriptPath || !fs.existsSync(transcriptPath)) {
  out({ success: false, error: "Transcript not found." });
  process.exit(0);
}

log(`compact-cli mode=${mode} session=${sessionId}`);
|
|
49
|
+
|
|
50
|
+
// ---------------------------------------------------------------------------
// Handoff mode — extract conversation and write to project dir for cross-
// session continuity. Does not need /clear — handoff files persist across sessions.
// ---------------------------------------------------------------------------
if (mode === "handoff") {
  const { performHandoff } = await import("./handoff.mjs");
  const result = performHandoff({ transcriptPath, sessionId, label });
  if (!result) {
    out({
      success: false,
      error: "No extractable content. Try sending a few messages first.",
    });
    process.exit(0);
  }
  // Write synthetic JSONL for /resume cg:{label}
  // Best-effort: on failure the handoff still succeeds, but handoffLabel stays
  // undefined so resumeTitle below falls back to "cg:handoff".
  let handoffLabel;
  try {
    const { writeSyntheticSession } = await import("./synthetic-session.mjs");
    handoffLabel =
      label || new Date().toISOString().replaceAll(/[:.]/g, "-").slice(0, 19);
    const { sessionUuid } = writeSyntheticSession({
      checkpointContent: fs.readFileSync(result.handoffPath, "utf8"),
      title: `cg:${handoffLabel}`,
      type: "handoff",
      projectCwd: process.cwd(),
    });
    log(`synthetic-session handoff uuid=${sessionUuid} label=${handoffLabel}`);
  } catch (e) {
    log(`synthetic-session-error: ${e.message}`);
  }
  const resumeTitle = `cg:${handoffLabel || "handoff"}`;
  out({
    success: true,
    statsBlock: result.statsBlock,
    resumeInstruction: `**To restore in a future session, type \`/resume ${resumeTitle}\`, or \`/resume\` to browse all sessions.**`,
  });
  process.exit(0);
}
|
|
88
|
+
|
|
89
|
+
// smart/recent modes: run the shared compaction pipeline from checkpoint.mjs.
const result = performCompaction({
  mode,
  transcriptPath,
  sessionId,
});

if (!result) {
  // Extraction produced nothing useful; suggest the other compaction skill.
  const alt = mode === "smart" ? "/cg:prune" : "/cg:compact";
  out({ success: false, error: `No extractable content. Try ${alt} instead.` });
  process.exit(0);
}

// Write synthetic JSONL for /resume cg:{hash}
// Best-effort: on failure the compaction still succeeds and resumeTitle falls
// back to plain "cg".
let resumeTitle = "cg";
try {
  const { writeSyntheticSession } = await import("./synthetic-session.mjs");
  const shortHash = crypto.randomUUID().replaceAll("-", "").slice(0, 4);
  resumeTitle = `cg:${shortHash}`;
  const { sessionUuid } = writeSyntheticSession({
    checkpointContent: fs.readFileSync(result.checkpointPath, "utf8"),
    title: resumeTitle,
    type: "compact",
    projectCwd: process.cwd(),
  });
  log(`synthetic-session compact uuid=${sessionUuid}`);
} catch (e) {
  log(`synthetic-session-error: ${e.message}`);
}

// The resume instruction is a separate pre-formatted field so the SKILL.md
// can display it in bold after the box — no template interpolation by Claude.
out({
  success: true,
  statsBlock: result.statsBlock,
  resumeInstruction: `**Type \`/resume ${resumeTitle}\` to restore the compacted session.**`,
});
|