openrecall 0.2.2 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +8 -2
- package/src/agent.ts +97 -11
- package/src/dcp/auth.ts +37 -0
- package/src/dcp/commands/context.ts +265 -0
- package/src/dcp/commands/help.ts +73 -0
- package/src/dcp/commands/manual.ts +131 -0
- package/src/dcp/commands/stats.ts +73 -0
- package/src/dcp/commands/sweep.ts +263 -0
- package/src/dcp/config.ts +981 -0
- package/src/dcp/hooks.ts +224 -0
- package/src/dcp/index.ts +123 -0
- package/src/dcp/logger.ts +211 -0
- package/src/dcp/messages/index.ts +2 -0
- package/src/dcp/messages/inject.ts +316 -0
- package/src/dcp/messages/prune.ts +217 -0
- package/src/dcp/messages/utils.ts +269 -0
- package/src/dcp/prompts/_codegen/compress-nudge.generated.ts +15 -0
- package/src/dcp/prompts/_codegen/compress.generated.ts +56 -0
- package/src/dcp/prompts/_codegen/distill.generated.ts +33 -0
- package/src/dcp/prompts/_codegen/nudge.generated.ts +17 -0
- package/src/dcp/prompts/_codegen/prune.generated.ts +23 -0
- package/src/dcp/prompts/_codegen/system.generated.ts +57 -0
- package/src/dcp/prompts/index.ts +59 -0
- package/src/dcp/protected-file-patterns.ts +113 -0
- package/src/dcp/shared-utils.ts +26 -0
- package/src/dcp/state/index.ts +3 -0
- package/src/dcp/state/persistence.ts +196 -0
- package/src/dcp/state/state.ts +143 -0
- package/src/dcp/state/tool-cache.ts +112 -0
- package/src/dcp/state/types.ts +55 -0
- package/src/dcp/state/utils.ts +55 -0
- package/src/dcp/strategies/deduplication.ts +123 -0
- package/src/dcp/strategies/index.ts +4 -0
- package/src/dcp/strategies/purge-errors.ts +84 -0
- package/src/dcp/strategies/supersede-writes.ts +115 -0
- package/src/dcp/strategies/utils.ts +135 -0
- package/src/dcp/tools/compress.ts +218 -0
- package/src/dcp/tools/distill.ts +60 -0
- package/src/dcp/tools/index.ts +4 -0
- package/src/dcp/tools/prune-shared.ts +174 -0
- package/src/dcp/tools/prune.ts +36 -0
- package/src/dcp/tools/types.ts +11 -0
- package/src/dcp/tools/utils.ts +244 -0
- package/src/dcp/ui/notification.ts +273 -0
- package/src/dcp/ui/utils.ts +133 -0
- package/src/index.ts +101 -49
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
import type { PluginConfig } from "../config"
|
|
2
|
+
import type { Logger } from "../logger"
|
|
3
|
+
import type { SessionState, WithParts } from "../state"
|
|
4
|
+
import { getFilePathsFromParameters, isProtected } from "../protected-file-patterns"
|
|
5
|
+
import { getTotalToolTokens } from "./utils"
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Deduplication strategy - prunes older tool calls that have identical
|
|
9
|
+
* tool name and parameters, keeping only the most recent occurrence.
|
|
10
|
+
* Modifies the session state in place to add pruned tool call IDs.
|
|
11
|
+
*/
|
|
12
|
+
export const deduplicate = (
|
|
13
|
+
state: SessionState,
|
|
14
|
+
logger: Logger,
|
|
15
|
+
config: PluginConfig,
|
|
16
|
+
messages: WithParts[],
|
|
17
|
+
): void => {
|
|
18
|
+
if (state.manualMode && !config.manualMode.automaticStrategies) {
|
|
19
|
+
return
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
if (!config.strategies.deduplication.enabled) {
|
|
23
|
+
return
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
const allToolIds = state.toolIdList
|
|
27
|
+
if (allToolIds.length === 0) {
|
|
28
|
+
return
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
// Filter out IDs already pruned
|
|
32
|
+
const unprunedIds = allToolIds.filter((id) => !state.prune.tools.has(id))
|
|
33
|
+
|
|
34
|
+
if (unprunedIds.length === 0) {
|
|
35
|
+
return
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
const protectedTools = config.strategies.deduplication.protectedTools
|
|
39
|
+
|
|
40
|
+
// Group by signature (tool name + normalized parameters)
|
|
41
|
+
const signatureMap = new Map<string, string[]>()
|
|
42
|
+
|
|
43
|
+
for (const id of unprunedIds) {
|
|
44
|
+
const metadata = state.toolParameters.get(id)
|
|
45
|
+
if (!metadata) {
|
|
46
|
+
// logger.warn(`Missing metadata for tool call ID: ${id}`)
|
|
47
|
+
continue
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
// Skip protected tools
|
|
51
|
+
if (protectedTools.includes(metadata.tool)) {
|
|
52
|
+
continue
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
const filePaths = getFilePathsFromParameters(metadata.tool, metadata.parameters)
|
|
56
|
+
if (isProtected(filePaths, config.protectedFilePatterns)) {
|
|
57
|
+
continue
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
const signature = createToolSignature(metadata.tool, metadata.parameters)
|
|
61
|
+
if (!signatureMap.has(signature)) {
|
|
62
|
+
signatureMap.set(signature, [])
|
|
63
|
+
}
|
|
64
|
+
const ids = signatureMap.get(signature)
|
|
65
|
+
if (ids) {
|
|
66
|
+
ids.push(id)
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
// Find duplicates - keep only the most recent (last) in each group
|
|
71
|
+
const newPruneIds: string[] = []
|
|
72
|
+
|
|
73
|
+
for (const [, ids] of signatureMap.entries()) {
|
|
74
|
+
if (ids.length > 1) {
|
|
75
|
+
// All except last (most recent) should be pruned
|
|
76
|
+
const idsToRemove = ids.slice(0, -1)
|
|
77
|
+
newPruneIds.push(...idsToRemove)
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
state.stats.totalPruneTokens += getTotalToolTokens(state, newPruneIds)
|
|
82
|
+
|
|
83
|
+
if (newPruneIds.length > 0) {
|
|
84
|
+
for (const id of newPruneIds) {
|
|
85
|
+
const entry = state.toolParameters.get(id)
|
|
86
|
+
state.prune.tools.set(id, entry?.tokenCount ?? 0)
|
|
87
|
+
}
|
|
88
|
+
logger.debug(`Marked ${newPruneIds.length} duplicate tool calls for pruning`)
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
function createToolSignature(tool: string, parameters?: any): string {
|
|
93
|
+
if (!parameters) {
|
|
94
|
+
return tool
|
|
95
|
+
}
|
|
96
|
+
const normalized = normalizeParameters(parameters)
|
|
97
|
+
const sorted = sortObjectKeys(normalized)
|
|
98
|
+
return `${tool}::${JSON.stringify(sorted)}`
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
function normalizeParameters(params: any): any {
|
|
102
|
+
if (typeof params !== "object" || params === null) return params
|
|
103
|
+
if (Array.isArray(params)) return params
|
|
104
|
+
|
|
105
|
+
const normalized: any = {}
|
|
106
|
+
for (const [key, value] of Object.entries(params)) {
|
|
107
|
+
if (value !== undefined && value !== null) {
|
|
108
|
+
normalized[key] = value
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
return normalized
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
function sortObjectKeys(obj: any): any {
|
|
115
|
+
if (typeof obj !== "object" || obj === null) return obj
|
|
116
|
+
if (Array.isArray(obj)) return obj.map(sortObjectKeys)
|
|
117
|
+
|
|
118
|
+
const sorted: any = {}
|
|
119
|
+
for (const key of Object.keys(obj).sort()) {
|
|
120
|
+
sorted[key] = sortObjectKeys(obj[key])
|
|
121
|
+
}
|
|
122
|
+
return sorted
|
|
123
|
+
}
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
import type { PluginConfig } from "../config"
|
|
2
|
+
import type { Logger } from "../logger"
|
|
3
|
+
import type { SessionState, WithParts } from "../state"
|
|
4
|
+
import { getFilePathsFromParameters, isProtected } from "../protected-file-patterns"
|
|
5
|
+
import { getTotalToolTokens } from "./utils"
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Purge Errors strategy - prunes tool inputs for tools that errored
|
|
9
|
+
* after they are older than a configurable number of turns.
|
|
10
|
+
* The error message is preserved, but the (potentially large) inputs
|
|
11
|
+
* are removed to save context.
|
|
12
|
+
*
|
|
13
|
+
* Modifies the session state in place to add pruned tool call IDs.
|
|
14
|
+
*/
|
|
15
|
+
export const purgeErrors = (
|
|
16
|
+
state: SessionState,
|
|
17
|
+
logger: Logger,
|
|
18
|
+
config: PluginConfig,
|
|
19
|
+
messages: WithParts[],
|
|
20
|
+
): void => {
|
|
21
|
+
if (state.manualMode && !config.manualMode.automaticStrategies) {
|
|
22
|
+
return
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
if (!config.strategies.purgeErrors.enabled) {
|
|
26
|
+
return
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
const allToolIds = state.toolIdList
|
|
30
|
+
if (allToolIds.length === 0) {
|
|
31
|
+
return
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
// Filter out IDs already pruned
|
|
35
|
+
const unprunedIds = allToolIds.filter((id) => !state.prune.tools.has(id))
|
|
36
|
+
|
|
37
|
+
if (unprunedIds.length === 0) {
|
|
38
|
+
return
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
const protectedTools = config.strategies.purgeErrors.protectedTools
|
|
42
|
+
const turnThreshold = config.strategies.purgeErrors.turns
|
|
43
|
+
|
|
44
|
+
const newPruneIds: string[] = []
|
|
45
|
+
|
|
46
|
+
for (const id of unprunedIds) {
|
|
47
|
+
const metadata = state.toolParameters.get(id)
|
|
48
|
+
if (!metadata) {
|
|
49
|
+
continue
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
// Skip protected tools
|
|
53
|
+
if (protectedTools.includes(metadata.tool)) {
|
|
54
|
+
continue
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
const filePaths = getFilePathsFromParameters(metadata.tool, metadata.parameters)
|
|
58
|
+
if (isProtected(filePaths, config.protectedFilePatterns)) {
|
|
59
|
+
continue
|
|
60
|
+
}
|
|
61
|
+
|
|
62
|
+
// Only process error tools
|
|
63
|
+
if (metadata.status !== "error") {
|
|
64
|
+
continue
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
// Check if the tool is old enough to prune
|
|
68
|
+
const turnAge = state.currentTurn - metadata.turn
|
|
69
|
+
if (turnAge >= turnThreshold) {
|
|
70
|
+
newPruneIds.push(id)
|
|
71
|
+
}
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
if (newPruneIds.length > 0) {
|
|
75
|
+
state.stats.totalPruneTokens += getTotalToolTokens(state, newPruneIds)
|
|
76
|
+
for (const id of newPruneIds) {
|
|
77
|
+
const entry = state.toolParameters.get(id)
|
|
78
|
+
state.prune.tools.set(id, entry?.tokenCount ?? 0)
|
|
79
|
+
}
|
|
80
|
+
logger.debug(
|
|
81
|
+
`Marked ${newPruneIds.length} error tool calls for pruning (older than ${turnThreshold} turns)`,
|
|
82
|
+
)
|
|
83
|
+
}
|
|
84
|
+
}
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
import type { PluginConfig } from "../config"
|
|
2
|
+
import type { Logger } from "../logger"
|
|
3
|
+
import type { SessionState, WithParts } from "../state"
|
|
4
|
+
import { getFilePathsFromParameters, isProtected } from "../protected-file-patterns"
|
|
5
|
+
import { getTotalToolTokens } from "./utils"
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Supersede Writes strategy - prunes write tool inputs for files that have
|
|
9
|
+
* subsequently been read. When a file is written and later read, the original
|
|
10
|
+
* write content becomes redundant since the current file state is captured
|
|
11
|
+
* in the read result.
|
|
12
|
+
*
|
|
13
|
+
* Modifies the session state in place to add pruned tool call IDs.
|
|
14
|
+
*/
|
|
15
|
+
export const supersedeWrites = (
|
|
16
|
+
state: SessionState,
|
|
17
|
+
logger: Logger,
|
|
18
|
+
config: PluginConfig,
|
|
19
|
+
messages: WithParts[],
|
|
20
|
+
): void => {
|
|
21
|
+
if (state.manualMode && !config.manualMode.automaticStrategies) {
|
|
22
|
+
return
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
if (!config.strategies.supersedeWrites.enabled) {
|
|
26
|
+
return
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
const allToolIds = state.toolIdList
|
|
30
|
+
if (allToolIds.length === 0) {
|
|
31
|
+
return
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
// Filter out IDs already pruned
|
|
35
|
+
const unprunedIds = allToolIds.filter((id) => !state.prune.tools.has(id))
|
|
36
|
+
if (unprunedIds.length === 0) {
|
|
37
|
+
return
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
// Track write tools by file path: filePath -> [{ id, index }]
|
|
41
|
+
// We track index to determine chronological order
|
|
42
|
+
const writesByFile = new Map<string, { id: string; index: number }[]>()
|
|
43
|
+
|
|
44
|
+
// Track read file paths with their index
|
|
45
|
+
const readsByFile = new Map<string, number[]>()
|
|
46
|
+
|
|
47
|
+
for (let i = 0; i < allToolIds.length; i++) {
|
|
48
|
+
const id = allToolIds[i]!
|
|
49
|
+
const metadata = state.toolParameters.get(id)
|
|
50
|
+
if (!metadata) {
|
|
51
|
+
continue
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
const filePaths = getFilePathsFromParameters(metadata.tool, metadata.parameters)
|
|
55
|
+
if (filePaths.length === 0) {
|
|
56
|
+
continue
|
|
57
|
+
}
|
|
58
|
+
const filePath = filePaths[0]!
|
|
59
|
+
|
|
60
|
+
if (isProtected(filePaths, config.protectedFilePatterns)) {
|
|
61
|
+
continue
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
if (metadata.tool === "write") {
|
|
65
|
+
if (!writesByFile.has(filePath)) {
|
|
66
|
+
writesByFile.set(filePath, [])
|
|
67
|
+
}
|
|
68
|
+
const writes = writesByFile.get(filePath)
|
|
69
|
+
if (writes) {
|
|
70
|
+
writes.push({ id, index: i })
|
|
71
|
+
}
|
|
72
|
+
} else if (metadata.tool === "read") {
|
|
73
|
+
if (!readsByFile.has(filePath)) {
|
|
74
|
+
readsByFile.set(filePath, [])
|
|
75
|
+
}
|
|
76
|
+
const reads = readsByFile.get(filePath)
|
|
77
|
+
if (reads) {
|
|
78
|
+
reads.push(i)
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
// Find writes that are superseded by subsequent reads
|
|
84
|
+
const newPruneIds: string[] = []
|
|
85
|
+
|
|
86
|
+
for (const [filePath, writes] of writesByFile.entries()) {
|
|
87
|
+
const reads = readsByFile.get(filePath)
|
|
88
|
+
if (!reads || reads.length === 0) {
|
|
89
|
+
continue
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
// For each write, check if there's a read that comes after it
|
|
93
|
+
for (const write of writes) {
|
|
94
|
+
// Skip if already pruned
|
|
95
|
+
if (state.prune.tools.has(write.id)) {
|
|
96
|
+
continue
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
// Check if any read comes after this write
|
|
100
|
+
const hasSubsequentRead = reads.some((readIndex) => readIndex > write.index)
|
|
101
|
+
if (hasSubsequentRead) {
|
|
102
|
+
newPruneIds.push(write.id)
|
|
103
|
+
}
|
|
104
|
+
}
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
if (newPruneIds.length > 0) {
|
|
108
|
+
state.stats.totalPruneTokens += getTotalToolTokens(state, newPruneIds)
|
|
109
|
+
for (const id of newPruneIds) {
|
|
110
|
+
const entry = state.toolParameters.get(id)
|
|
111
|
+
state.prune.tools.set(id, entry?.tokenCount ?? 0)
|
|
112
|
+
}
|
|
113
|
+
logger.debug(`Marked ${newPruneIds.length} superseded write tool calls for pruning`)
|
|
114
|
+
}
|
|
115
|
+
}
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
import type { SessionState, WithParts } from "../state"
|
|
2
|
+
import type { AssistantMessage, UserMessage } from "@opencode-ai/sdk/v2"
|
|
3
|
+
import type { Logger } from "../logger"
|
|
4
|
+
import { countTokens as anthropicCountTokens } from "@anthropic-ai/tokenizer"
|
|
5
|
+
import { getLastUserMessage } from "../shared-utils"
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* Get current token usage from the last assistant message.
|
|
9
|
+
* Returns total tokens (input + output + reasoning + cache).
|
|
10
|
+
*/
|
|
11
|
+
export function getCurrentTokenUsage(messages: WithParts[]): number {
|
|
12
|
+
for (let i = messages.length - 1; i >= 0; i--) {
|
|
13
|
+
const msg = messages[i]!
|
|
14
|
+
if (msg.info.role === "assistant") {
|
|
15
|
+
const assistantInfo = msg.info as AssistantMessage
|
|
16
|
+
if (assistantInfo.tokens?.output > 0) {
|
|
17
|
+
const input = assistantInfo.tokens?.input || 0
|
|
18
|
+
const output = assistantInfo.tokens?.output || 0
|
|
19
|
+
const reasoning = assistantInfo.tokens?.reasoning || 0
|
|
20
|
+
const cacheRead = assistantInfo.tokens?.cache?.read || 0
|
|
21
|
+
const cacheWrite = assistantInfo.tokens?.cache?.write || 0
|
|
22
|
+
return input + output + reasoning + cacheRead + cacheWrite
|
|
23
|
+
}
|
|
24
|
+
}
|
|
25
|
+
}
|
|
26
|
+
return 0
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
export function getCurrentParams(
|
|
30
|
+
state: SessionState,
|
|
31
|
+
messages: WithParts[],
|
|
32
|
+
logger: Logger,
|
|
33
|
+
): {
|
|
34
|
+
providerId: string | undefined
|
|
35
|
+
modelId: string | undefined
|
|
36
|
+
agent: string | undefined
|
|
37
|
+
variant: string | undefined
|
|
38
|
+
} {
|
|
39
|
+
const userMsg = getLastUserMessage(messages)
|
|
40
|
+
if (!userMsg) {
|
|
41
|
+
logger.debug("No user message found when determining current params")
|
|
42
|
+
return {
|
|
43
|
+
providerId: undefined,
|
|
44
|
+
modelId: undefined,
|
|
45
|
+
agent: undefined,
|
|
46
|
+
variant: state.variant,
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
const userInfo = userMsg.info as UserMessage
|
|
50
|
+
const agent: string = userInfo.agent
|
|
51
|
+
const providerId: string | undefined = userInfo.model.providerID
|
|
52
|
+
const modelId: string | undefined = userInfo.model.modelID
|
|
53
|
+
const variant: string | undefined = state.variant ?? userInfo.variant
|
|
54
|
+
|
|
55
|
+
return { providerId, modelId, agent, variant }
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
export function countTokens(text: string): number {
|
|
59
|
+
if (!text) return 0
|
|
60
|
+
try {
|
|
61
|
+
return anthropicCountTokens(text)
|
|
62
|
+
} catch {
|
|
63
|
+
return Math.round(text.length / 4)
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
export function estimateTokensBatch(texts: string[]): number {
|
|
68
|
+
if (texts.length === 0) return 0
|
|
69
|
+
return countTokens(texts.join(" "))
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
export function extractToolContent(part: any): string[] {
|
|
73
|
+
const contents: string[] = []
|
|
74
|
+
|
|
75
|
+
if (part.tool === "question") {
|
|
76
|
+
const questions = part.state?.input?.questions
|
|
77
|
+
if (questions !== undefined) {
|
|
78
|
+
const content = typeof questions === "string" ? questions : JSON.stringify(questions)
|
|
79
|
+
contents.push(content)
|
|
80
|
+
}
|
|
81
|
+
return contents
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
if (part.tool === "edit" || part.tool === "write") {
|
|
85
|
+
if (part.state?.input) {
|
|
86
|
+
const inputContent =
|
|
87
|
+
typeof part.state.input === "string"
|
|
88
|
+
? part.state.input
|
|
89
|
+
: JSON.stringify(part.state.input)
|
|
90
|
+
contents.push(inputContent)
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
if (part.state?.status === "completed" && part.state?.output) {
|
|
95
|
+
const content =
|
|
96
|
+
typeof part.state.output === "string"
|
|
97
|
+
? part.state.output
|
|
98
|
+
: JSON.stringify(part.state.output)
|
|
99
|
+
contents.push(content)
|
|
100
|
+
} else if (part.state?.status === "error" && part.state?.error) {
|
|
101
|
+
const content =
|
|
102
|
+
typeof part.state.error === "string"
|
|
103
|
+
? part.state.error
|
|
104
|
+
: JSON.stringify(part.state.error)
|
|
105
|
+
contents.push(content)
|
|
106
|
+
}
|
|
107
|
+
|
|
108
|
+
return contents
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
export function countToolTokens(part: any): number {
|
|
112
|
+
const contents = extractToolContent(part)
|
|
113
|
+
return estimateTokensBatch(contents)
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
export function getTotalToolTokens(state: SessionState, toolIds: string[]): number {
|
|
117
|
+
let total = 0
|
|
118
|
+
for (const id of toolIds) {
|
|
119
|
+
const entry = state.toolParameters.get(id)
|
|
120
|
+
total += entry?.tokenCount ?? 0
|
|
121
|
+
}
|
|
122
|
+
return total
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
export function countMessageTextTokens(msg: WithParts): number {
|
|
126
|
+
const texts: string[] = []
|
|
127
|
+
const parts = Array.isArray(msg.parts) ? msg.parts : []
|
|
128
|
+
for (const part of parts) {
|
|
129
|
+
if (part.type === "text") {
|
|
130
|
+
texts.push(part.text)
|
|
131
|
+
}
|
|
132
|
+
}
|
|
133
|
+
if (texts.length === 0) return 0
|
|
134
|
+
return estimateTokensBatch(texts)
|
|
135
|
+
}
|
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
import { tool } from "@opencode-ai/plugin"
|
|
2
|
+
import type { WithParts, CompressSummary } from "../state"
|
|
3
|
+
import type { PruneToolContext } from "./types"
|
|
4
|
+
import { ensureSessionInitialized } from "../state"
|
|
5
|
+
import { saveSessionState } from "../state/persistence"
|
|
6
|
+
import { loadPrompt } from "../prompts"
|
|
7
|
+
import { getCurrentParams, getTotalToolTokens, countMessageTextTokens } from "../strategies/utils"
|
|
8
|
+
import { findStringInMessages, collectToolIdsInRange, collectMessageIdsInRange } from "./utils"
|
|
9
|
+
import { sendCompressNotification } from "../ui/notification"
|
|
10
|
+
import { prune as applyPruneTransforms } from "../messages/prune"
|
|
11
|
+
|
|
12
|
+
// Tool description text is loaded from the shared prompt catalog.
const COMPRESS_TOOL_DESCRIPTION = loadPrompt("compress-tool-spec")
// Prefix prepended to every stored summary so injected text is identifiable.
const COMPRESS_SUMMARY_PREFIX = "[Compressed conversation block]\n\n"

/**
 * Build the "compress" tool: the model designates a conversation range via
 * unique start/end strings plus a replacement summary; the tool marks every
 * message and tool call in that range as pruned and stores the summary
 * (anchored to the range's first message) for later injection.
 *
 * Side effects: mutates `ctx.state` (compressSummaries, prune maps, stats,
 * nudgeCounter), sends a UI notification, and persists state asynchronously.
 */
export function createCompressTool(ctx: PruneToolContext): ReturnType<typeof tool> {
  return tool({
    description: COMPRESS_TOOL_DESCRIPTION,
    args: {
      topic: tool.schema
        .string()
        .describe("Short label (3-5 words) for display - e.g., 'Auth System Exploration'"),
      content: tool.schema
        .object({
          startString: tool.schema
            .string()
            .describe("Unique text from conversation marking the beginning of range"),
          endString: tool.schema
            .string()
            .describe("Unique text marking the end of range"),
          summary: tool.schema
            .string()
            .describe("Complete technical summary replacing all content in range"),
        })
        .describe("The compression details: boundaries and replacement summary"),
    },
    async execute(args, toolCtx) {
      const { client, state, logger } = ctx
      const sessionId = toolCtx.sessionID

      // Ask for permission before mutating the conversation.
      await toolCtx.ask({
        permission: "compress",
        patterns: ["*"],
        always: ["*"],
        metadata: {},
      })

      const { topic, content } = args
      const { startString, endString, summary } = content || {}

      // Runtime validation on top of the schema: reject empty strings too.
      if (!topic || typeof topic !== "string") {
        throw new Error("topic is required and must be a non-empty string")
      }
      if (!startString || typeof startString !== "string") {
        throw new Error("content.startString is required and must be a non-empty string")
      }
      if (!endString || typeof endString !== "string") {
        throw new Error("content.endString is required and must be a non-empty string")
      }
      if (!summary || typeof summary !== "string") {
        throw new Error("content.summary is required and must be a non-empty string")
      }

      // logger.info("Compress tool invoked")
      // logger.info(
      //   JSON.stringify({
      //     startString: startString?.substring(0, 50) + "...",
      //     endString: endString?.substring(0, 50) + "...",
      //     topic: topic,
      //     summaryLength: summary?.length,
      //   }),
      // )

      // Fetch the raw session transcript from the server.
      const messagesResponse = await client.session.messages({
        path: { id: sessionId },
      })
      const messages: WithParts[] = messagesResponse.data || messagesResponse

      await ensureSessionInitialized(
        client,
        state,
        sessionId,
        logger,
        messages,
        ctx.config.manualMode.enabled,
      )

      // Search on a pruned copy: boundaries are matched against what the
      // model actually sees, not against already-pruned raw content.
      const transformedMessages = structuredClone(messages) as WithParts[]
      applyPruneTransforms(state, logger, ctx.config, transformedMessages)

      const startResult = findStringInMessages(
        transformedMessages,
        startString,
        logger,
        "startString",
      )
      const endResult = findStringInMessages(
        transformedMessages,
        endString,
        logger,
        "endString",
      )

      // Map matched message IDs back onto indices in the raw transcript.
      let rawStartIndex = messages.findIndex((m) => m.info.id === startResult.messageId)
      let rawEndIndex = messages.findIndex((m) => m.info.id === endResult.messageId)

      // If a boundary matched inside a synthetic compress summary message,
      // resolve it back to the summary's anchor message in the raw messages
      if (rawStartIndex === -1) {
        // NOTE: this `summary` (a CompressSummary record) shadows the
        // destructured `summary` string from the tool args.
        const summary = state.compressSummaries.find((s) => s.summary.includes(startString))
        if (summary) {
          rawStartIndex = messages.findIndex((m) => m.info.id === summary.anchorMessageId)
        }
      }
      if (rawEndIndex === -1) {
        // Same shadowing as above, for the end boundary.
        const summary = state.compressSummaries.find((s) => s.summary.includes(endString))
        if (summary) {
          rawEndIndex = messages.findIndex((m) => m.info.id === summary.anchorMessageId)
        }
      }

      if (rawStartIndex === -1 || rawEndIndex === -1) {
        throw new Error(`Failed to map boundary matches back to raw messages`)
      }

      if (rawStartIndex > rawEndIndex) {
        throw new Error(
          `startString appears after endString in the conversation. Start must come before end.`,
        )
      }

      const containedToolIds = collectToolIdsInRange(messages, rawStartIndex, rawEndIndex)

      const containedMessageIds = collectMessageIdsInRange(
        messages,
        rawStartIndex,
        rawEndIndex,
      )

      // Remove any existing summaries whose anchors are now inside this range
      // This prevents duplicate injections when a larger compress subsumes a smaller one
      const removedSummaries = state.compressSummaries.filter((s) =>
        containedMessageIds.includes(s.anchorMessageId),
      )
      if (removedSummaries.length > 0) {
        state.compressSummaries = state.compressSummaries.filter(
          (s) => !containedMessageIds.includes(s.anchorMessageId),
        )
      }

      // Record the new summary, anchored to the first message of the range.
      const compressSummary: CompressSummary = {
        anchorMessageId: startResult.messageId,
        summary: COMPRESS_SUMMARY_PREFIX + summary,
      }
      state.compressSummaries.push(compressSummary)

      // Only count/mark items not already pruned by another mechanism.
      const compressedMessageIds = containedMessageIds.filter(
        (id) => !state.prune.messages.has(id),
      )
      const compressedToolIds = containedToolIds.filter((id) => !state.prune.tools.has(id))

      // Token accounting: text parts of compressed messages...
      let textTokens = 0
      for (const msgId of compressedMessageIds) {
        const msg = messages.find((m) => m.info.id === msgId)
        if (msg) {
          const tokens = countMessageTextTokens(msg)
          textTokens += tokens
          state.prune.messages.set(msgId, tokens)
        }
      }
      // ...plus the recorded cost of compressed tool calls.
      const toolTokens = getTotalToolTokens(state, compressedToolIds)
      for (const id of compressedToolIds) {
        const entry = state.toolParameters.get(id)
        state.prune.tools.set(id, entry?.tokenCount ?? 0)
      }
      const estimatedCompressedTokens = textTokens + toolTokens

      state.stats.pruneTokenCounter += estimatedCompressedTokens

      const rawStartResult = { messageId: startResult.messageId, messageIndex: rawStartIndex }
      const rawEndResult = { messageId: endResult.messageId, messageIndex: rawEndIndex }

      const currentParams = getCurrentParams(state, messages, logger)
      await sendCompressNotification(
        client,
        logger,
        ctx.config,
        state,
        sessionId,
        compressedToolIds,
        compressedMessageIds,
        topic,
        summary,
        rawStartResult,
        rawEndResult,
        messages.length,
        currentParams,
      )

      // Roll the per-operation counter into the running total and reset
      // the nudge counter now that a compression has happened.
      state.stats.totalPruneTokens += state.stats.pruneTokenCounter
      state.stats.pruneTokenCounter = 0
      state.nudgeCounter = 0

      // logger.info("Compress range created", {
      //   startMessageId: startResult.messageId,
      //   endMessageId: endResult.messageId,
      //   toolIdsRemoved: containedToolIds.length,
      //   messagesInRange: containedMessageIds.length,
      //   estimatedTokens: estimatedCompressedTokens,
      // })

      // Fire-and-forget persistence; failures are logged, not thrown.
      saveSessionState(state, logger).catch((err) =>
        logger.error("Failed to persist state", { error: err.message }),
      )

      return `Compressed ${compressedMessageIds.length} messages (${compressedToolIds.length} tool calls) into summary. The content will be replaced with your summary.`
    },
  })
}
|