@ricky-stevens/context-guardian 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +29 -0
- package/.claude-plugin/plugin.json +63 -0
- package/.github/workflows/ci.yml +66 -0
- package/CLAUDE.md +132 -0
- package/LICENSE +21 -0
- package/README.md +362 -0
- package/biome.json +34 -0
- package/bun.lock +31 -0
- package/hooks/precompact.mjs +73 -0
- package/hooks/session-start.mjs +133 -0
- package/hooks/stop.mjs +172 -0
- package/hooks/submit.mjs +133 -0
- package/lib/checkpoint.mjs +258 -0
- package/lib/compact-cli.mjs +124 -0
- package/lib/compact-output.mjs +350 -0
- package/lib/config.mjs +40 -0
- package/lib/content.mjs +33 -0
- package/lib/diagnostics.mjs +221 -0
- package/lib/estimate.mjs +254 -0
- package/lib/extract-helpers.mjs +869 -0
- package/lib/handoff.mjs +329 -0
- package/lib/logger.mjs +34 -0
- package/lib/mcp-tools.mjs +200 -0
- package/lib/paths.mjs +90 -0
- package/lib/stats.mjs +81 -0
- package/lib/statusline.mjs +123 -0
- package/lib/synthetic-session.mjs +273 -0
- package/lib/tokens.mjs +170 -0
- package/lib/tool-summary.mjs +399 -0
- package/lib/transcript.mjs +939 -0
- package/lib/trim.mjs +158 -0
- package/package.json +22 -0
- package/skills/compact/SKILL.md +20 -0
- package/skills/config/SKILL.md +70 -0
- package/skills/handoff/SKILL.md +26 -0
- package/skills/prune/SKILL.md +20 -0
- package/skills/stats/SKILL.md +100 -0
- package/sonar-project.properties +12 -0
- package/test/checkpoint.test.mjs +171 -0
- package/test/compact-cli.test.mjs +230 -0
- package/test/compact-output.test.mjs +284 -0
- package/test/compaction-e2e.test.mjs +809 -0
- package/test/content.test.mjs +86 -0
- package/test/diagnostics.test.mjs +188 -0
- package/test/edge-cases.test.mjs +543 -0
- package/test/estimate.test.mjs +262 -0
- package/test/extract-helpers-coverage.test.mjs +333 -0
- package/test/extract-helpers.test.mjs +234 -0
- package/test/handoff.test.mjs +738 -0
- package/test/integration.test.mjs +582 -0
- package/test/logger.test.mjs +70 -0
- package/test/manual-compaction-test.md +426 -0
- package/test/mcp-tools.test.mjs +443 -0
- package/test/paths.test.mjs +250 -0
- package/test/quick-compaction-test.md +191 -0
- package/test/stats.test.mjs +88 -0
- package/test/statusline.test.mjs +222 -0
- package/test/submit.test.mjs +232 -0
- package/test/synthetic-session.test.mjs +600 -0
- package/test/tokens.test.mjs +293 -0
- package/test/tool-summary.test.mjs +771 -0
- package/test/transcript-coverage.test.mjs +369 -0
- package/test/transcript.test.mjs +596 -0
- package/test/trim.test.mjs +356 -0
|
@@ -0,0 +1,293 @@
|
|
|
1
|
+
import assert from "node:assert/strict";
|
|
2
|
+
import fs from "node:fs";
|
|
3
|
+
import os from "node:os";
|
|
4
|
+
import path from "node:path";
|
|
5
|
+
import { afterEach, beforeEach, describe, it } from "node:test";
|
|
6
|
+
import {
|
|
7
|
+
estimateOverhead,
|
|
8
|
+
estimateTokens,
|
|
9
|
+
getTokenUsage,
|
|
10
|
+
} from "../lib/tokens.mjs";
|
|
11
|
+
|
|
12
|
+
// Shared per-test state: a fresh temp directory and the path of the JSONL
// transcript file inside it. Assigned in beforeEach, removed in afterEach.
let tmpDir;
let transcriptPath;
|
|
14
|
+
|
|
15
|
+
/**
 * Append one object to the transcript as a single JSONL line.
 * @param {object} obj - Entry to serialize and append.
 */
function writeLine(obj) {
  const serialized = JSON.stringify(obj);
  fs.appendFileSync(transcriptPath, `${serialized}\n`);
}
|
|
18
|
+
|
|
19
|
+
/**
 * Build a transcript entry for a user turn with plain string content.
 * @param {string} text - The user's message text.
 * @returns {{type: string, message: {role: string, content: string}}}
 */
function makeUserMessage(text) {
  const message = { role: "user", content: text };
  return { type: "user", message };
}
|
|
25
|
+
|
|
26
|
+
/**
 * Build a transcript entry for an assistant turn.
 * @param {string} text - Assistant reply text.
 * @param {object} [usage] - Anthropic-style usage record; falsy becomes undefined.
 * @param {string} [model] - Model id; any falsy value falls back to the Sonnet default.
 * @returns {{type: string, message: object}}
 */
function makeAssistantMessage(text, usage, model) {
  // NOTE: `||` (not `??`) is intentional here so "" also falls back, matching
  // how callers treat any falsy model/usage.
  const resolvedModel = model || "claude-sonnet-4-20250514";
  const content = [{ type: "text", text }];
  return {
    type: "assistant",
    message: {
      role: "assistant",
      model: resolvedModel,
      content,
      usage: usage || undefined,
    },
  };
}
|
|
37
|
+
|
|
38
|
+
beforeEach(() => {
  // Fresh, uniquely named workspace per test so tests can't see each
  // other's transcript contents.
  const prefix = path.join(os.tmpdir(), "cg-test-");
  tmpDir = fs.mkdtempSync(prefix);
  transcriptPath = path.join(tmpDir, "transcript.jsonl");
});
|
|
42
|
+
|
|
43
|
+
afterEach(() => {
  // force:true makes removal best-effort (no throw if already gone).
  const removal = { recursive: true, force: true };
  fs.rmSync(tmpDir, removal);
});
|
|
46
|
+
|
|
47
|
+
describe("getTokenUsage", () => {
  // Shorthand builder for an Anthropic-style usage record.
  const usage = (input, cacheCreate, cacheRead, output) => ({
    input_tokens: input,
    cache_creation_input_tokens: cacheCreate,
    cache_read_input_tokens: cacheRead,
    output_tokens: output,
  });

  it("returns null for missing transcript", () => {
    for (const badPath of ["/nonexistent/path", null, undefined]) {
      assert.equal(getTokenUsage(badPath), null);
    }
  });

  it("returns null for transcript with no usage data", () => {
    writeLine(makeUserMessage("hello"));
    // Assistant entry deliberately missing the `usage` field.
    writeLine({
      type: "assistant",
      message: { role: "assistant", content: [{ type: "text", text: "hi" }] },
    });
    assert.equal(getTokenUsage(transcriptPath), null);
  });

  it("extracts token counts from usage data", () => {
    writeLine(makeUserMessage("hello"));
    writeLine(makeAssistantMessage("hi", usage(100, 50, 30, 20)));

    const result = getTokenUsage(transcriptPath);
    // current = input + cache_creation + cache_read = 100 + 50 + 30
    assert.equal(result.current_tokens, 180);
    assert.equal(result.output_tokens, 20);
  });

  it("returns the most recent usage (reads backwards)", () => {
    writeLine(makeUserMessage("first"));
    writeLine(makeAssistantMessage("old", usage(50, 0, 0, 10)));
    writeLine(makeUserMessage("second"));
    writeLine(makeAssistantMessage("new", usage(200, 100, 50, 30)));

    const result = getTokenUsage(transcriptPath);
    // Only the last assistant entry counts: 200 + 100 + 50
    assert.equal(result.current_tokens, 350);
  });

  it("detects Opus 4.6+ as 1M context", () => {
    writeLine(makeUserMessage("hello"));
    writeLine(
      makeAssistantMessage("hi", usage(100, 0, 0, 10), "claude-opus-4-6-20260101"),
    );

    const result = getTokenUsage(transcriptPath);
    assert.equal(result.max_tokens, 1000000);
    assert.equal(result.model, "claude-opus-4-6-20260101");
  });

  it("detects Sonnet as 200K context", () => {
    writeLine(makeUserMessage("hello"));
    writeLine(
      makeAssistantMessage("hi", usage(100, 0, 0, 10), "claude-sonnet-4-20250514"),
    );

    const result = getTokenUsage(transcriptPath);
    assert.equal(result.max_tokens, 200000);
  });

  it("detects future Opus 5.x as 1M context", () => {
    writeLine(makeUserMessage("hello"));
    writeLine(makeAssistantMessage("hi", usage(100, 0, 0, 10), "claude-opus-5-0"));

    const result = getTokenUsage(transcriptPath);
    assert.equal(result.max_tokens, 1000000);
  });

  it("handles zero usage values", () => {
    writeLine(makeUserMessage("hello"));
    writeLine(makeAssistantMessage("hi", usage(0, 0, 0, 0)));

    const result = getTokenUsage(transcriptPath);
    assert.equal(result.current_tokens, 0);
  });

  it("uses tiered read — finds usage in small transcripts", () => {
    // A small transcript should be found in the first 32KB tier
    writeLine(makeUserMessage("hello"));
    writeLine(makeAssistantMessage("hi", usage(100, 0, 0, 10)));

    const result = getTokenUsage(transcriptPath);
    assert.ok(result);
    assert.equal(result.current_tokens, 100);
  });
});
|
|
193
|
+
|
|
194
|
+
describe("estimateTokens", () => {
  it("returns 0 for missing transcript", () => {
    for (const badPath of ["/nonexistent/path", null, undefined]) {
      assert.equal(estimateTokens(badPath), 0);
    }
  });

  it("estimates tokens from content bytes / 4", () => {
    // "hello world" in a user message content
    writeLine(makeUserMessage("hello world")); // 11 bytes text
    writeLine(makeAssistantMessage("response text here")); // 18 bytes text

    assert.ok(estimateTokens(transcriptPath) > 0);
  });

  it("counts from compact marker forward", () => {
    // Pre-marker content should be excluded
    writeLine(makeUserMessage("old message before compact"));
    writeLine({
      type: "user",
      message: {
        role: "user",
        content:
          "[SMART COMPACT — restored checkpoint]\n\nSome checkpoint content",
      },
    });
    writeLine(makeUserMessage("new message after compact"));

    const estimate = estimateTokens(transcriptPath);
    // Baseline: what the estimate would be if every byte were counted.
    // The marker-aware estimate must come in below it because the
    // pre-marker "old message" is excluded.
    const everything = fs.readFileSync(transcriptPath, "utf8");
    const fullEstimate = Math.round(Buffer.byteLength(everything, "utf8") / 4);
    assert.ok(estimate < fullEstimate);
  });

  it("recognizes # Context Checkpoint as marker", () => {
    writeLine(makeUserMessage("old"));
    writeLine({
      type: "user",
      message: {
        role: "user",
        content:
          "# Context Checkpoint (Smart Compact)\n> Created: 2026-01-01\n\nUser: stuff",
      },
    });
    writeLine(makeUserMessage("new"));

    assert.ok(estimateTokens(transcriptPath) > 0);
  });
});
|
|
252
|
+
|
|
253
|
+
describe("estimateOverhead", () => {
  // Floor the implementation falls back to when overhead can't be derived.
  const MIN_OVERHEAD = 15000;

  it("returns baselineOverhead when > 0", () => {
    assert.equal(estimateOverhead(50000, transcriptPath, 25000), 25000);
  });

  it("returns MIN_OVERHEAD when baselineOverhead is 0 and no transcriptPath", () => {
    for (const noPath of [null, undefined, ""]) {
      assert.equal(estimateOverhead(50000, noPath, 0), MIN_OVERHEAD);
    }
  });

  it("returns MIN_OVERHEAD when baselineOverhead is 0 and currentTokens is 0", () => {
    assert.equal(estimateOverhead(0, transcriptPath, 0), MIN_OVERHEAD);
  });

  it("returns MIN_OVERHEAD when baselineOverhead is undefined (default)", () => {
    // No transcriptPath → hits the early return before file access
    assert.equal(estimateOverhead(50000, null), MIN_OVERHEAD);
    // No currentTokens → same
    assert.equal(estimateOverhead(0, transcriptPath), MIN_OVERHEAD);
  });

  it("returns max(MIN_OVERHEAD, computed) for file-size-based calculation", () => {
    // Write enough content so file size is meaningful
    // 40000 bytes → ~10000 estimated tokens
    fs.writeFileSync(transcriptPath, "x".repeat(40000));

    // currentTokens=50000, conversationTokens=40000/4=10000 → overhead=40000
    assert.equal(estimateOverhead(50000, transcriptPath, 0), 40000);

    // currentTokens=12000, conversationTokens=10000 → overhead=2000, clamped to MIN
    assert.equal(estimateOverhead(12000, transcriptPath, 0), MIN_OVERHEAD);
  });

  it("returns MIN_OVERHEAD when file doesn't exist", () => {
    const missing = path.join(tmpDir, "nonexistent.jsonl");
    assert.equal(estimateOverhead(50000, missing, 0), MIN_OVERHEAD);
  });
});
|