@sting8k/pi-vcc 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +149 -0
- package/index.ts +10 -0
- package/package.json +36 -0
- package/scripts/audit-sessions.ts +88 -0
- package/scripts/benchmark-real-sessions.ts +25 -0
- package/scripts/compare-before-after.ts +36 -0
- package/src/commands/pi-vcc.ts +11 -0
- package/src/core/build-sections.ts +119 -0
- package/src/core/content.ts +20 -0
- package/src/core/filter-noise.ts +42 -0
- package/src/core/format-recall.ts +23 -0
- package/src/core/format.ts +34 -0
- package/src/core/normalize.ts +62 -0
- package/src/core/redact.ts +8 -0
- package/src/core/render-entries.ts +48 -0
- package/src/core/report.ts +225 -0
- package/src/core/sanitize.ts +5 -0
- package/src/core/search-entries.ts +14 -0
- package/src/core/summarize.ts +81 -0
- package/src/core/tool-args.ts +14 -0
- package/src/details.ts +7 -0
- package/src/extract/decisions.ts +32 -0
- package/src/extract/files.ts +46 -0
- package/src/extract/findings.ts +27 -0
- package/src/extract/goals.ts +41 -0
- package/src/extract/preferences.ts +30 -0
- package/src/hooks/before-compact.ts +141 -0
- package/src/sections.ts +11 -0
- package/src/tools/recall.ts +85 -0
- package/src/types.ts +14 -0
- package/tests/build-sections.test.ts +56 -0
- package/tests/compile.test.ts +50 -0
- package/tests/extract-decisions.test.ts +30 -0
- package/tests/extract-files.test.ts +62 -0
- package/tests/extract-findings.test.ts +39 -0
- package/tests/extract-goals.test.ts +86 -0
- package/tests/extract-preferences.test.ts +30 -0
- package/tests/filter-noise.test.ts +61 -0
- package/tests/fixtures.ts +61 -0
- package/tests/format-recall.test.ts +30 -0
- package/tests/format.test.ts +47 -0
- package/tests/normalize.test.ts +97 -0
- package/tests/real-sessions.test.ts +38 -0
- package/tests/render-entries.test.ts +40 -0
- package/tests/report.test.ts +54 -0
- package/tests/sanitize.test.ts +24 -0
- package/tests/search-entries.test.ts +33 -0
- package/tests/support/load-session.ts +23 -0
- package/tests/support/real-sessions.ts +51 -0
package/README.md
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
1
|
+
# pi-vcc
|
|
2
|
+
|
|
3
|
+
Algorithmic conversation compactor for [Pi](https://github.com/badlogic/pi-mono). No LLM calls -- produces structured, transcript-preserving summaries using pure extraction and formatting.
|
|
4
|
+
|
|
5
|
+
Inspired by [VCC](https://github.com/lllyasviel/VCC) (View-oriented Conversation Compiler).
|
|
6
|
+
|
|
7
|
+
## Why pi-vcc
|
|
8
|
+
|
|
9
|
+
| | Pi default | pi-vcc |
|
|
10
|
+
|---|---|---|
|
|
11
|
+
| **Method** | LLM-generated summary | Algorithmic extraction, no LLM |
|
|
12
|
+
| **Determinism** | Non-deterministic, can hallucinate | Same input = same output, always |
|
|
13
|
+
| **Token reduction** | Varies | ~58% measured (30k -> 12.5k) |
|
|
14
|
+
| **History after compaction** | Gone -- agent only sees summary | Fully searchable via `vcc_recall` |
|
|
15
|
+
| **Repeated compactions** | Each rewrite risks losing more | Sections merge and accumulate |
|
|
16
|
+
| **Cost** | Burns tokens on summarization call | Zero -- no API calls |
|
|
17
|
+
| **Structure** | Free-form prose | 7 typed sections (goal, turns, actions, evidence, files, context, prefs) |
|
|
18
|
+
|
|
19
|
+
### Real session metrics
|
|
20
|
+
|
|
21
|
+
| Compaction | Before | After | Reduction |
|
|
22
|
+
|---|---|---|---|
|
|
23
|
+
| 1st | 25,832 | 18,974 | 26.5% |
|
|
24
|
+
| 2nd (full cut) | 30,020 | 12,507 | 58.3% |
|
|
25
|
+
| 3rd (merge) | 15,915 | ~8,800 | ~45% |
|
|
26
|
+
|
|
27
|
+
## Features
|
|
28
|
+
|
|
29
|
+
- **No LLM** -- purely algorithmic, zero extra API cost
|
|
30
|
+
- **~58% token reduction** with transcript-preserving structured output
|
|
31
|
+
- **Lossless recall** -- `vcc_recall` reads raw session JSONL, history stays searchable across compactions
|
|
32
|
+
- **Incremental merge** -- turns, actions, evidence, files accumulate; only volatile context refreshes
|
|
33
|
+
- **VCC-style tool collapsing** -- tool calls become deduplicated one-liners
|
|
34
|
+
- **Fallback cut** -- works even when Pi core returns nothing to summarize
|
|
35
|
+
- **Redaction** -- strips passwords, API keys, secrets
|
|
36
|
+
- **`/pi-vcc`** -- manual compaction on demand
|
|
37
|
+
|
|
38
|
+
## Install
|
|
39
|
+
|
|
40
|
+
```bash
|
|
41
|
+
pi install npm:@sting8k/pi-vcc
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
Or from GitHub:
|
|
45
|
+
|
|
46
|
+
```bash
|
|
47
|
+
pi install https://github.com/sting8k/pi-vcc
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
Or try without installing:
|
|
51
|
+
|
|
52
|
+
```bash
|
|
53
|
+
pi -e https://github.com/sting8k/pi-vcc
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
## Usage
|
|
57
|
+
|
|
58
|
+
Once linked, pi-vcc hooks `session_before_compact` and handles compaction automatically. Output looks like:
|
|
59
|
+
|
|
60
|
+
```
|
|
61
|
+
[Session Goal]
|
|
62
|
+
- Fix the authentication bug in login flow
|
|
63
|
+
|
|
64
|
+
[Key Conversation Turns]
|
|
65
|
+
- [user] Fix the auth bug, users can't log in after password reset
|
|
66
|
+
- [assistant] Root cause is a missing token refresh. The session cookie...(truncated)
|
|
67
|
+
|
|
68
|
+
[Actions Taken]
|
|
69
|
+
- * Read "src/auth/session.ts"
|
|
70
|
+
- * Edit "src/auth/session.ts"
|
|
71
|
+
- * bash "bun test tests/auth.test.ts"
|
|
72
|
+
|
|
73
|
+
[Important Evidence]
|
|
74
|
+
- [Read] export function refreshSession(token) { if (!token) return null;...(truncated)
|
|
75
|
+
- [bash] Tests: 12 passed, 0 failed
|
|
76
|
+
|
|
77
|
+
[Files And Changes]
|
|
78
|
+
Read:
- src/auth/session.ts
Modified:
- src/auth/session.ts
|
|
82
|
+
|
|
83
|
+
[Outstanding Context]
|
|
84
|
+
- [bash] ERROR: lint check failed on line 42
|
|
85
|
+
|
|
86
|
+
[User Preferences]
|
|
87
|
+
- Prefer Vietnamese responses
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
Use `/pi-vcc` to trigger compaction manually.
|
|
91
|
+
|
|
92
|
+
## Recall (Lossless History)
|
|
93
|
+
|
|
94
|
+
Pi's default compaction discards old messages permanently. After compaction, the agent only sees the summary.
|
|
95
|
+
|
|
96
|
+
`vcc_recall` bypasses this by reading the raw session JSONL file directly. It parses every message entry in the file, renders each one into a searchable `RenderedEntry` with a stable index (matching the message's position in the JSONL), role, truncated summary, and associated file paths. This means entry `#41` always refers to the same message regardless of how many compactions have happened.
|
|
97
|
+
|
|
98
|
+
**Search** uses multi-term matching -- the query is split into terms and all must appear in the entry's role + summary + file paths. This searches across the entire session including compacted regions:
|
|
99
|
+
|
|
100
|
+
```
|
|
101
|
+
vcc_recall({ query: "auth token refresh" }) // all terms must match
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
**Browse** without a query returns the last 25 entries:
|
|
105
|
+
|
|
106
|
+
```
|
|
107
|
+
vcc_recall()
|
|
108
|
+
```
|
|
109
|
+
|
|
110
|
+
**Expand** switches to full mode -- entries are rendered without truncation, so you get the complete content for specific indices found via search:
|
|
111
|
+
|
|
112
|
+
```
|
|
113
|
+
vcc_recall({ expand: [41, 42] }) // full content, no clipping
|
|
114
|
+
```
|
|
115
|
+
|
|
116
|
+
Typical workflow: search brief -> find relevant entry indices -> expand those indices for full content.
|
|
117
|
+
|
|
118
|
+
> Some tool results are truncated by Pi core at save time. `expand` returns everything in the JSONL but can't recover what Pi already cut.
|
|
119
|
+
|
|
120
|
+
## Pipeline
|
|
121
|
+
|
|
122
|
+
1. **Normalize** -- raw Pi messages -> uniform blocks (user, assistant, tool_call, tool_result, thinking)
|
|
123
|
+
2. **Filter noise** -- strip system messages, empty blocks
|
|
124
|
+
3. **Build sections** -- extract goal, conversation turns (~128 tokens each), deduplicated tool one-liners, tool results as evidence, file paths, blockers, preferences
|
|
125
|
+
4. **Format** -- render into bracketed sections
|
|
126
|
+
5. **Redact** -- strip passwords, API keys, secrets
|
|
127
|
+
6. **Merge** -- if previous summary exists:
|
|
128
|
+
- Appendable (turns, actions, evidence, files, prefs): deduplicate and combine
|
|
129
|
+
- Volatile (outstanding context): replace with fresh
|
|
130
|
+
- Default (session goal): fresh wins
|
|
131
|
+
|
|
132
|
+
## Debug
|
|
133
|
+
|
|
134
|
+
Debug logging is off by default. Enable it in `~/.pi/agent/pi-vcc-config.json`:
|
|
135
|
+
|
|
136
|
+
```json
|
|
137
|
+
{ "debug": true }
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
When enabled, each compaction writes detailed info to `/tmp/pi-vcc-debug.json` -- message counts, cut boundary, summary preview, sections.
|
|
141
|
+
|
|
142
|
+
## Related Work
|
|
143
|
+
|
|
144
|
+
- [VCC](https://github.com/lllyasviel/VCC) -- the original transcript-preserving conversation compiler
|
|
145
|
+
- [Pi](https://github.com/badlogic/pi-mono) -- the AI coding agent this extension is built for
|
|
146
|
+
|
|
147
|
+
## License
|
|
148
|
+
|
|
149
|
+
MIT
|
package/index.ts
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
|
2
|
+
import { registerBeforeCompactHook } from "./src/hooks/before-compact";
|
|
3
|
+
import { registerPiVccCommand } from "./src/commands/pi-vcc";
|
|
4
|
+
import { registerRecallTool } from "./src/tools/recall";
|
|
5
|
+
|
|
6
|
+
export default (pi: ExtensionAPI) => {
|
|
7
|
+
registerBeforeCompactHook(pi);
|
|
8
|
+
registerPiVccCommand(pi);
|
|
9
|
+
registerRecallTool(pi);
|
|
10
|
+
};
|
package/package.json
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@sting8k/pi-vcc",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Structured conversation compactor for pi - replaces default compact summary with an agent-optimized structured format.",
|
|
5
|
+
"keywords": [
|
|
6
|
+
"pi-package"
|
|
7
|
+
],
|
|
8
|
+
"homepage": "https://github.com/sting8k/pi-vcc#readme",
|
|
9
|
+
"bugs": {
|
|
10
|
+
"url": "https://github.com/sting8k/pi-vcc/issues"
|
|
11
|
+
},
|
|
12
|
+
"repository": {
|
|
13
|
+
"type": "git",
|
|
14
|
+
"url": "git+https://github.com/sting8k/pi-vcc.git"
|
|
15
|
+
},
|
|
16
|
+
"license": "ISC",
|
|
17
|
+
"author": "sting8k",
|
|
18
|
+
"type": "commonjs",
|
|
19
|
+
"main": "index.ts",
|
|
20
|
+
"directories": {
|
|
21
|
+
"test": "tests"
|
|
22
|
+
},
|
|
23
|
+
"scripts": {
|
|
24
|
+
"test": "bun test",
|
|
25
|
+
"bench": "bun run ./scripts/benchmark-real-sessions.ts"
|
|
26
|
+
},
|
|
27
|
+
"dependencies": {
|
|
28
|
+
"@mariozechner/pi-coding-agent": "*"
|
|
29
|
+
},
|
|
30
|
+
"devDependencies": {},
|
|
31
|
+
"pi": {
|
|
32
|
+
"extensions": [
|
|
33
|
+
"./index.ts"
|
|
34
|
+
]
|
|
35
|
+
}
|
|
36
|
+
}
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
// Audit script: runs the pi-vcc compiler over a batch of real recorded
// sessions and heuristically flags quality problems in each compiled
// summary (empty/short goal, leaked secrets, raw-code leakage, verbose
// or oversized output). Prints one report per session to stdout.
import { basename, dirname } from "node:path";
import { compile } from "../src/core/summarize";
import { normalize } from "../src/core/normalize";
import { filterNoise } from "../src/core/filter-noise";
import { renderMessage } from "../src/core/render-entries";
import { prepareSessionSamples } from "../tests/support/real-sessions";
import { loadSessionMessages } from "../tests/support/load-session";

const SEP = "=".repeat(80);
// Sample up to 10 real sessions (copied to temp by the helper).
const samples = await prepareSessionSamples(10);

for (const sample of samples) {
  const loaded = loadSessionMessages(sample.copy);
  const { messages } = loaded;

  // Normalized/filtered block counts are reported for context only;
  // compile() runs its own pipeline from the raw messages.
  const rawBlocks = normalize(messages);
  const filteredBlocks = filterNoise(rawBlocks);
  const afterText = compile({ messages });

  // "Before" size = total characters of every rendered entry summary.
  const rendered = messages.map((m, i) => renderMessage(m, i));
  const beforeChars = rendered.reduce((s, e) => s + e.summary.length, 0);

  // Session files live under paths segmented with "--"; the last
  // segment of the directory is used as a project label.
  const project = dirname(sample.source).split("--").filter(Boolean).pop() ?? "unknown";

  // Pull individual bracketed sections out of the compiled summary.
  // NOTE(review): only [Session Goal] matches the section titles emitted
  // by src/core/format.ts ([Key Conversation Turns], [Actions Taken],
  // [Important Evidence], [Outstanding Context], ...); the other four
  // headers below look stale and would always yield "(empty)" — confirm
  // against the actual output of compile() in src/core/summarize.ts.
  const goalSection = afterText.match(/\[Session Goal\]\n([\s\S]*?)(?=\n\n\[|$)/)?.[1] ?? "(empty)";
  const stateSection = afterText.match(/\[Current State\]\n([\s\S]*?)(?=\n\n\[|$)/)?.[1] ?? "(empty)";
  const doneSection = afterText.match(/\[What Was Done\]\n([\s\S]*?)(?=\n\n\[|$)/)?.[1] ?? "(empty)";
  const problemsSection = afterText.match(/\[Open Problems\]\n([\s\S]*?)(?=\n\n\[|$)/)?.[1] ?? "(empty)";
  const nextSection = afterText.match(/\[Next Best Steps\]\n([\s\S]*?)(?=\n\n\[|$)/)?.[1] ?? "(empty)";

  const doneLines = doneSection.split("\n").filter(l => l.trim());
  const problemLines = problemsSection.split("\n").filter(l => l.trim());

  // Detect issues
  const issues: string[] = [];

  // 1. Goal quality
  const goalLines = goalSection.split("\n").map(l => l.replace(/^- /, "").trim()).filter(Boolean);
  if (goalLines[0] && goalLines[0].length < 5) issues.push(`GOAL_TOO_SHORT: "${goalLines[0]}"`);
  if (goalLines.length === 0) issues.push("GOAL_EMPTY");

  // 2. Sensitive data in What Was Done
  if (/sshpass|password|secret|token=|api[_-]?key/i.test(doneSection)) {
    issues.push("SENSITIVE_DATA_IN_DONE");
  }

  // 3. Raw code/minified JS in summary
  if (/\{[a-zA-Z$_]+:[a-zA-Z$_]+,[a-zA-Z$_]+:/.test(afterText) || /var [a-zA-Z]+=/.test(afterText)) {
    issues.push("RAW_CODE_LEAK");
  }

  // 4. Open problems count
  if (problemLines.length > 10) issues.push(`PROBLEMS_OVERCOUNT: ${problemLines.length}`);

  // 5. Next steps empty
  if (nextSection === "(empty)") issues.push("NEXT_STEPS_EMPTY");

  // 6. What Was Done too verbose
  if (doneLines.length > 15) issues.push(`DONE_TOO_VERBOSE: ${doneLines.length} lines`);

  // 7. Summary too large (>10K chars)
  if (afterText.length > 10000) issues.push(`SUMMARY_TOO_LARGE: ${afterText.length} chars`);

  // Human-readable per-session report.
  console.log(SEP);
  console.log(`PROJECT: ${project}`);
  console.log(`FILE: ${basename(sample.source)}`);
  console.log(`Size: ${(sample.size / 1024).toFixed(0)}KB | Msgs: ${messages.length} | Blocks raw: ${rawBlocks.length} -> filtered: ${filteredBlocks.length}`);
  console.log(`Before: ${beforeChars} chars | After: ${afterText.length} chars | Ratio: ${(beforeChars / afterText.length).toFixed(1)}x`);
  console.log(`Issues: ${issues.length === 0 ? "NONE" : issues.join(", ")}`);
  console.log("");
  console.log("--- GOAL ---");
  console.log(goalSection.slice(0, 300));
  console.log("");
  console.log("--- CURRENT STATE (first 300c) ---");
  console.log(stateSection.slice(0, 300));
  console.log("");
  console.log("--- WHAT WAS DONE (first 5 lines) ---");
  console.log(doneLines.slice(0, 5).join("\n"));
  console.log(`... (${doneLines.length} total lines)`);
  console.log("");
  console.log("--- OPEN PROBLEMS (first 5 lines) ---");
  console.log(problemLines.slice(0, 5).join("\n"));
  console.log(`... (${problemLines.length} total lines)`);
  console.log("");
  console.log("--- NEXT STEPS ---");
  console.log(nextSection.slice(0, 300));
  console.log("");
}
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
import { performance } from "node:perf_hooks";
|
|
2
|
+
import { basename } from "node:path";
|
|
3
|
+
import { buildCompactReport } from "../src/core/report";
|
|
4
|
+
import { prepareSessionSamples } from "../tests/support/real-sessions";
|
|
5
|
+
import { loadSessionMessages } from "../tests/support/load-session";
|
|
6
|
+
|
|
7
|
+
const samples = await prepareSessionSamples(2);
|
|
8
|
+
for (const sample of samples) {
|
|
9
|
+
const loaded = loadSessionMessages(sample.copy);
|
|
10
|
+
const start = performance.now();
|
|
11
|
+
const report = buildCompactReport({ messages: loaded.messages });
|
|
12
|
+
const elapsedMs = performance.now() - start;
|
|
13
|
+
console.log(JSON.stringify({
|
|
14
|
+
sourceFile: basename(sample.source),
|
|
15
|
+
sourceSizeBytes: sample.size,
|
|
16
|
+
copiedToTemp: true,
|
|
17
|
+
loadedMessages: loaded.messageCount,
|
|
18
|
+
skippedMessages: loaded.skippedCount,
|
|
19
|
+
compileMs: Number(elapsedMs.toFixed(2)),
|
|
20
|
+
before: report.before,
|
|
21
|
+
after: report.after,
|
|
22
|
+
compression: report.compression,
|
|
23
|
+
recall: report.recall,
|
|
24
|
+
}, null, 2));
|
|
25
|
+
}
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
import { basename } from "node:path";
|
|
2
|
+
import { compile } from "../src/core/summarize";
|
|
3
|
+
import { renderMessage } from "../src/core/render-entries";
|
|
4
|
+
import { clip } from "../src/core/content";
|
|
5
|
+
import { prepareSessionSamples } from "../tests/support/real-sessions";
|
|
6
|
+
import { loadSessionMessages } from "../tests/support/load-session";
|
|
7
|
+
|
|
8
|
+
const SEP = "=".repeat(80);
|
|
9
|
+
const samples = await prepareSessionSamples(2);
|
|
10
|
+
|
|
11
|
+
for (const sample of samples) {
|
|
12
|
+
const loaded = loadSessionMessages(sample.copy);
|
|
13
|
+
const { messages } = loaded;
|
|
14
|
+
|
|
15
|
+
const rendered = messages.map((m, i) => renderMessage(m, i));
|
|
16
|
+
const beforeLines = rendered.map(
|
|
17
|
+
(e) => `#${e.index} [${e.role}] ${clip(e.summary, 300)}`,
|
|
18
|
+
);
|
|
19
|
+
const beforeText = beforeLines.join("\n");
|
|
20
|
+
const afterText = compile({ messages });
|
|
21
|
+
|
|
22
|
+
console.log(SEP);
|
|
23
|
+
console.log(`FILE: ${basename(sample.source)}`);
|
|
24
|
+
console.log(`Messages: ${messages.length} | Before chars: ${beforeText.length} | After chars: ${afterText.length}`);
|
|
25
|
+
console.log(`Compression: ${(beforeText.length / afterText.length).toFixed(1)}x`);
|
|
26
|
+
console.log(SEP);
|
|
27
|
+
|
|
28
|
+
console.log("\n--- BEFORE (raw context, first 40 + last 20 entries) ---\n");
|
|
29
|
+
for (const line of beforeLines.slice(0, 40)) console.log(line);
|
|
30
|
+
if (beforeLines.length > 60) console.log(`\n... (${beforeLines.length - 60} entries omitted) ...\n`);
|
|
31
|
+
for (const line of beforeLines.slice(-20)) console.log(line);
|
|
32
|
+
|
|
33
|
+
console.log("\n--- AFTER (pi-vcc compiled summary) ---\n");
|
|
34
|
+
console.log(afterText);
|
|
35
|
+
console.log("\n");
|
|
36
|
+
}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
|
2
|
+
|
|
3
|
+
export const registerPiVccCommand = (pi: ExtensionAPI) => {
|
|
4
|
+
pi.registerCommand("pi-vcc", {
|
|
5
|
+
description: "Compact conversation with pi-vcc structured summary",
|
|
6
|
+
handler: async (_args, ctx) => {
|
|
7
|
+
ctx.compact();
|
|
8
|
+
ctx.ui.notify("Compacted with pi-vcc", "info");
|
|
9
|
+
},
|
|
10
|
+
});
|
|
11
|
+
};
|
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
import type { FileOps, NormalizedBlock } from "../types";
|
|
2
|
+
import { clip, firstLine, nonEmptyLines } from "./content";
|
|
3
|
+
import { redact } from "./redact";
|
|
4
|
+
import type { SectionData } from "../sections";
|
|
5
|
+
import { extractGoals } from "../extract/goals";
|
|
6
|
+
import { extractFiles } from "../extract/files";
|
|
7
|
+
import { extractFindings } from "../extract/findings";
|
|
8
|
+
import { extractPreferences } from "../extract/preferences";
|
|
9
|
+
import { extractPath } from "./tool-args";
|
|
10
|
+
|
|
11
|
+
/** Input to buildSections. */
export interface BuildSectionsInput {
  /** Normalized conversation blocks to summarize. */
  blocks: NormalizedBlock[];
  /** Optional file-operation info, forwarded to extractFiles. */
  fileOps?: FileOps;
}
|
|
15
|
+
|
|
16
|
+
const TOOL_SUMMARY_FIELDS: Record<string, string> = {
|
|
17
|
+
Read: "file_path", Edit: "file_path", Write: "file_path",
|
|
18
|
+
read: "file_path", edit: "file_path", write: "file_path",
|
|
19
|
+
Glob: "pattern", Grep: "pattern",
|
|
20
|
+
};
|
|
21
|
+
|
|
22
|
+
const toolOneLiner = (name: string, args: Record<string, unknown>): string => {
|
|
23
|
+
const field = TOOL_SUMMARY_FIELDS[name];
|
|
24
|
+
if (field && typeof args[field] === "string") {
|
|
25
|
+
return `* ${name} "${clip(args[field] as string, 60)}"`;
|
|
26
|
+
}
|
|
27
|
+
const path = extractPath(args);
|
|
28
|
+
if (path) return `* ${name} "${clip(path, 60)}"`;
|
|
29
|
+
if (name === "bash" || name === "Bash") {
|
|
30
|
+
const cmd = (args.command ?? args.description ?? "") as string;
|
|
31
|
+
return `* ${name} "${redact(clip(cmd, 80))}"`;
|
|
32
|
+
}
|
|
33
|
+
if (typeof args.query === "string") {
|
|
34
|
+
return `* ${name} "${clip(args.query as string, 60)}"`;
|
|
35
|
+
}
|
|
36
|
+
return `* ${name}`;
|
|
37
|
+
};
|
|
38
|
+
|
|
39
|
+
const extractActionsTaken = (blocks: NormalizedBlock[]): string[] => {
|
|
40
|
+
const raw: string[] = [];
|
|
41
|
+
for (const b of blocks) {
|
|
42
|
+
if (b.kind === "tool_call") raw.push(toolOneLiner(b.name, b.args));
|
|
43
|
+
}
|
|
44
|
+
const counts = new Map<string, number>();
|
|
45
|
+
for (const d of raw) counts.set(d, (counts.get(d) ?? 0) + 1);
|
|
46
|
+
return [...counts.entries()]
|
|
47
|
+
.map(([k, v]) => (v > 1 ? `${k} x${v}` : k))
|
|
48
|
+
.slice(0, 20);
|
|
49
|
+
};
|
|
50
|
+
|
|
51
|
+
const FILLER_RE = /^(ok|sure|done|got it|alright|let me|i('ll| will)|here'?s|understood)/i;
|
|
52
|
+
const BLOCKER_RE =
|
|
53
|
+
/\b(fail(ed|s|ure|ing)?|broken|cannot|can't|won't work|does not work|doesn't work|still (broken|failing|wrong)|blocked|blocker|not (fixed|resolved|working)|crash(es|ed|ing)?)\b/i;
|
|
54
|
+
|
|
55
|
+
const TRUNCATE_TOKENS = 128;
|
|
56
|
+
|
|
57
|
+
const truncateText = (text: string, limit = TRUNCATE_TOKENS): string => {
|
|
58
|
+
const words = text.split(/\s+/).filter(Boolean);
|
|
59
|
+
if (words.length <= limit) return text;
|
|
60
|
+
return words.slice(0, limit).join(" ") + "...(truncated)";
|
|
61
|
+
};
|
|
62
|
+
|
|
63
|
+
const extractKeyConversationTurns = (blocks: NormalizedBlock[]): string[] => {
|
|
64
|
+
const turns: string[] = [];
|
|
65
|
+
const conversational = blocks.filter(
|
|
66
|
+
(b) => b.kind === "user" || b.kind === "assistant",
|
|
67
|
+
);
|
|
68
|
+
const recent = conversational.slice(-12);
|
|
69
|
+
|
|
70
|
+
for (const b of recent) {
|
|
71
|
+
const text = b.text.trim();
|
|
72
|
+
if (!text || text.length < 10) continue;
|
|
73
|
+
if (b.kind === "user" && FILLER_RE.test(text)) continue;
|
|
74
|
+
const prefix = b.kind === "user" ? "[user] " : "[assistant] ";
|
|
75
|
+
turns.push(prefix + truncateText(text, TRUNCATE_TOKENS));
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
return turns.slice(-8);
|
|
79
|
+
};
|
|
80
|
+
|
|
81
|
+
const extractOutstandingContext = (blocks: NormalizedBlock[]): string[] => {
|
|
82
|
+
const items: string[] = [];
|
|
83
|
+
const tail = blocks.slice(-20);
|
|
84
|
+
|
|
85
|
+
for (const b of tail) {
|
|
86
|
+
if (b.kind === "tool_result" && b.isError) {
|
|
87
|
+
items.push(`[${b.name}] ${firstLine(b.text, 150)}`);
|
|
88
|
+
continue;
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
if (b.kind === "assistant" || b.kind === "user") {
|
|
92
|
+
for (const line of nonEmptyLines(b.text)) {
|
|
93
|
+
if (!BLOCKER_RE.test(line)) continue;
|
|
94
|
+
if (line.length < 15) continue;
|
|
95
|
+
const clipped = b.kind === "user" ? `[user] ${clip(line, 150)}` : clip(line, 150);
|
|
96
|
+
if (!items.includes(clipped)) items.push(clipped);
|
|
97
|
+
break;
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
return items.slice(0, 5);
|
|
103
|
+
};
|
|
104
|
+
|
|
105
|
+
export const buildSections = (input: BuildSectionsInput): SectionData => {
|
|
106
|
+
const { blocks, fileOps } = input;
|
|
107
|
+
const fa = extractFiles(blocks, fileOps);
|
|
108
|
+
return {
|
|
109
|
+
sessionGoal: extractGoals(blocks),
|
|
110
|
+
keyConversationTurns: extractKeyConversationTurns(blocks),
|
|
111
|
+
actionsTaken: extractActionsTaken(blocks),
|
|
112
|
+
importantEvidence: extractFindings(blocks),
|
|
113
|
+
filesRead: [...fa.read],
|
|
114
|
+
filesModified: [...fa.modified],
|
|
115
|
+
filesCreated: [...fa.created],
|
|
116
|
+
outstandingContext: extractOutstandingContext(blocks),
|
|
117
|
+
userPreferences: extractPreferences(blocks),
|
|
118
|
+
};
|
|
119
|
+
};
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
import type { Message } from "@mariozechner/pi-ai";
|
|
2
|
+
|
|
3
|
+
export const clip = (text: string, max = 200): string =>
|
|
4
|
+
text.slice(0, max);
|
|
5
|
+
|
|
6
|
+
export const nonEmptyLines = (text: string): string[] =>
|
|
7
|
+
text.split("\n").map((line) => line.trim()).filter(Boolean);
|
|
8
|
+
|
|
9
|
+
export const firstLine = (text: string, max = 200): string =>
|
|
10
|
+
clip(text.split("\n")[0] ?? "", max);
|
|
11
|
+
|
|
12
|
+
export const textParts = (content: Message["content"]): string[] => {
|
|
13
|
+
if (typeof content === "string") return [content];
|
|
14
|
+
return content
|
|
15
|
+
.filter((part) => part.type === "text")
|
|
16
|
+
.map((part) => part.text);
|
|
17
|
+
};
|
|
18
|
+
|
|
19
|
+
export const textOf = (content: Message["content"]): string =>
|
|
20
|
+
textParts(content).join("\n");
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
import type { NormalizedBlock } from "../types";
|
|
2
|
+
|
|
3
|
+
const NOISE_TOOLS = new Set([
|
|
4
|
+
"TodoWrite", "TodoRead", "ToolSearch", "WebSearch",
|
|
5
|
+
"AskUser", "ExitSpecMode", "GenerateDroid",
|
|
6
|
+
]);
|
|
7
|
+
|
|
8
|
+
const NOISE_STRINGS = [
|
|
9
|
+
"Continue from where you left off.",
|
|
10
|
+
"No response requested.",
|
|
11
|
+
"IMPORTANT: TodoWrite was not called yet.",
|
|
12
|
+
];
|
|
13
|
+
|
|
14
|
+
const XML_WRAPPER_RE = /<(system-reminder|ide_opened_file|command-message|context-window-usage)[^>]*>[\s\S]*?<\/\1>/g;
|
|
15
|
+
|
|
16
|
+
const isNoiseUserBlock = (text: string): boolean => {
|
|
17
|
+
const trimmed = text.trim();
|
|
18
|
+
if (NOISE_STRINGS.some((s) => trimmed.includes(s))) return true;
|
|
19
|
+
const stripped = trimmed.replace(XML_WRAPPER_RE, "").trim();
|
|
20
|
+
return stripped.length === 0;
|
|
21
|
+
};
|
|
22
|
+
|
|
23
|
+
const cleanUserText = (text: string): string =>
|
|
24
|
+
text.replace(XML_WRAPPER_RE, "").trim();
|
|
25
|
+
|
|
26
|
+
export const filterNoise = (blocks: NormalizedBlock[]): NormalizedBlock[] => {
|
|
27
|
+
const out: NormalizedBlock[] = [];
|
|
28
|
+
for (const b of blocks) {
|
|
29
|
+
if (b.kind === "thinking") continue;
|
|
30
|
+
if (b.kind === "tool_call" && NOISE_TOOLS.has(b.name)) continue;
|
|
31
|
+
if (b.kind === "tool_result" && NOISE_TOOLS.has(b.name)) continue;
|
|
32
|
+
if (b.kind === "user") {
|
|
33
|
+
if (isNoiseUserBlock(b.text)) continue;
|
|
34
|
+
const cleaned = cleanUserText(b.text);
|
|
35
|
+
if (!cleaned) continue;
|
|
36
|
+
out.push({ kind: "user", text: cleaned });
|
|
37
|
+
continue;
|
|
38
|
+
}
|
|
39
|
+
out.push(b);
|
|
40
|
+
}
|
|
41
|
+
return out;
|
|
42
|
+
};
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import type { RenderedEntry } from "./render-entries";
|
|
2
|
+
|
|
3
|
+
export const formatRecallOutput = (
|
|
4
|
+
entries: RenderedEntry[],
|
|
5
|
+
query?: string,
|
|
6
|
+
): string => {
|
|
7
|
+
if (entries.length === 0) {
|
|
8
|
+
return query
|
|
9
|
+
? `No matches for "${query}" in session history.`
|
|
10
|
+
: "No entries in session history.";
|
|
11
|
+
}
|
|
12
|
+
|
|
13
|
+
const header = query
|
|
14
|
+
? `Found ${entries.length} matches for "${query}":`
|
|
15
|
+
: `Session history (${entries.length} entries):`;
|
|
16
|
+
|
|
17
|
+
const lines = entries.map((e) => {
|
|
18
|
+
const fileSuffix = e.files?.length ? ` files:[${e.files.join(", ")}]` : "";
|
|
19
|
+
return `#${e.index} [${e.role}]${fileSuffix} ${e.summary}`;
|
|
20
|
+
});
|
|
21
|
+
|
|
22
|
+
return `${header}\n\n${lines.join("\n\n")}`;
|
|
23
|
+
};
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
import type { SectionData } from "../sections";
|
|
2
|
+
|
|
3
|
+
const section = (title: string, items: string[]): string => {
|
|
4
|
+
if (items.length === 0) return "";
|
|
5
|
+
const body = items.map((i) => `- ${i}`).join("\n");
|
|
6
|
+
return `[${title}]\n${body}`;
|
|
7
|
+
};
|
|
8
|
+
|
|
9
|
+
const filesSection = (data: SectionData): string => {
|
|
10
|
+
const parts: string[] = [];
|
|
11
|
+
if (data.filesRead.length > 0)
|
|
12
|
+
parts.push("Read:\n" + data.filesRead.map((f) => ` - ${f}`).join("\n"));
|
|
13
|
+
if (data.filesModified.length > 0)
|
|
14
|
+
parts.push("Modified:\n" + data.filesModified.map((f) => ` - ${f}`).join("\n"));
|
|
15
|
+
if (data.filesCreated.length > 0)
|
|
16
|
+
parts.push("Created:\n" + data.filesCreated.map((f) => ` - ${f}`).join("\n"));
|
|
17
|
+
if (parts.length === 0) return "";
|
|
18
|
+
return `[Files And Changes]\n${parts.join("\n")}`;
|
|
19
|
+
};
|
|
20
|
+
|
|
21
|
+
export const formatSummary = (data: SectionData): string => {
|
|
22
|
+
const parts = [
|
|
23
|
+
section("Session Goal", data.sessionGoal),
|
|
24
|
+
section("Key Conversation Turns", data.keyConversationTurns),
|
|
25
|
+
section("Actions Taken", data.actionsTaken),
|
|
26
|
+
section("Important Evidence", data.importantEvidence),
|
|
27
|
+
filesSection(data),
|
|
28
|
+
section("Outstanding Context", data.outstandingContext),
|
|
29
|
+
section("User Preferences", data.userPreferences),
|
|
30
|
+
];
|
|
31
|
+
return parts.filter(Boolean).join("\n\n");
|
|
32
|
+
};
|
|
33
|
+
|
|
34
|
+
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
import type { Message } from "@mariozechner/pi-ai";
|
|
2
|
+
import type { NormalizedBlock } from "../types";
|
|
3
|
+
import { textOf } from "./content";
|
|
4
|
+
import { sanitize } from "./sanitize";
|
|
5
|
+
|
|
6
|
+
/**
 * Converts one raw Pi message into zero or more normalized blocks.
 *
 * - user: one sanitized text block, plus an `[image: <mime>]` marker
 *   block per image part; a message that yields nothing still returns a
 *   single empty-text placeholder block (presumably so a later filter
 *   pass, not this function, decides what to drop — confirm against
 *   filterNoise).
 * - toolResult: a single tool_result block (name, sanitized text,
 *   isError).
 * - assistant: string content becomes one assistant block; part arrays
 *   map text/thinking/toolCall parts to assistant/thinking/tool_call
 *   blocks, and any other part type is dropped.
 * - any other role produces no blocks.
 */
const normalizeOne = (msg: Message): NormalizedBlock[] => {
  if (msg.role === "user") {
    const blocks: NormalizedBlock[] = [];
    const text = sanitize(textOf(msg.content));
    if (text) blocks.push({ kind: "user", text });
    // Image parts have no text; represent each with a mime-type marker.
    if (typeof msg.content !== "string") {
      for (const part of msg.content) {
        if (part.type === "image") {
          blocks.push({ kind: "user", text: `[image: ${part.mimeType}]` });
        }
      }
    }
    return blocks.length > 0 ? blocks : [{ kind: "user", text: "" }];
  }

  if (msg.role === "toolResult") {
    return [{
      kind: "tool_result",
      name: msg.toolName,
      text: sanitize(textOf(msg.content)),
      isError: msg.isError,
    }];
  }

  if (msg.role === "assistant") {
    if (typeof msg.content === "string") {
      return [{ kind: "assistant", text: sanitize(msg.content) }];
    }

    const blocks: NormalizedBlock[] = [];
    for (const part of msg.content) {
      if (part.type === "text") {
        blocks.push({ kind: "assistant", text: sanitize(part.text) });
      } else if (part.type === "thinking") {
        blocks.push({
          kind: "thinking",
          text: sanitize(part.thinking),
          // Some providers omit the flag; treat missing as not redacted.
          redacted: part.redacted ?? false,
        });
      } else if (part.type === "toolCall") {
        blocks.push({
          kind: "tool_call",
          name: part.name,
          args: part.arguments,
        });
      }
    }
    return blocks;
  }

  return [];
};
|
|
58
|
+
|
|
59
|
+
export const normalize = (messages: Message[]): NormalizedBlock[] =>
|
|
60
|
+
messages.flatMap(normalizeOne);
|
|
61
|
+
|
|
62
|
+
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
const SENSITIVE_RE =
|
|
2
|
+
/(?:sshpass\s+-p\s*'[^']*'|sshpass\s+-p\s*"[^"]*"|sshpass\s+-p\s*\S+|password[=:]\s*\S+|api[_-]?key[=:]\s*\S+|secret[=:]\s*\S+|token[=:]\s*[A-Za-z0-9_\-\.]{8,}|-i\s+\S+\.pem\b)/gi;
|
|
3
|
+
|
|
4
|
+
export const redact = (text: string): string =>
|
|
5
|
+
text.replace(SENSITIVE_RE, (m) => {
|
|
6
|
+
const prefix = m.split(/[=:\s]+/)[0];
|
|
7
|
+
return `${prefix} [REDACTED]`;
|
|
8
|
+
});
|