pi-honcho-memory 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,162 @@
1
+ import { StringEnum } from "@mariozechner/pi-ai";
2
+ import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
3
+ import { Type } from "@sinclair/typebox";
4
+ import { ensureSession, getHandles } from "./client.js";
5
+ import { type ReasoningLevel, getRecallMode } from "./config.js";
6
+
7
+ const REASONING_LEVELS: readonly ReasoningLevel[] = ["minimal", "low", "medium", "high", "max"];
8
+
9
+ const bumpLevel = (level: ReasoningLevel, steps: number): ReasoningLevel => {
10
+ const idx = REASONING_LEVELS.indexOf(level);
11
+ const target = Math.min(idx + steps, REASONING_LEVELS.length - 1);
12
+ return REASONING_LEVELS[Math.max(0, target)];
13
+ };
14
+
15
+ const nextLevel = (level: string): ReasoningLevel | null => {
16
+ const idx = REASONING_LEVELS.indexOf(level as ReasoningLevel);
17
+ return idx >= 0 && idx < REASONING_LEVELS.length - 1 ? REASONING_LEVELS[idx + 1] : null;
18
+ };
19
+
20
+ export const dynamicLevel = (
21
+ query: string,
22
+ baseLevel: ReasoningLevel,
23
+ dynamic: boolean,
24
+ cap: ReasoningLevel | null,
25
+ ): ReasoningLevel => {
26
+ if (!dynamic) return baseLevel;
27
+ const len = query.length;
28
+ let level = baseLevel;
29
+ if (len >= 120) level = bumpLevel(level, 1);
30
+ if (len >= 400) level = bumpLevel(level, 1);
31
+ if (cap) {
32
+ const capIdx = REASONING_LEVELS.indexOf(cap);
33
+ const levelIdx = REASONING_LEVELS.indexOf(level);
34
+ if (levelIdx > capIdx) level = cap;
35
+ }
36
+ return level;
37
+ };
38
+
39
+ const ensureHandles = async () => {
40
+ if (getRecallMode() === "context") throw new Error("Memory tools are disabled in context-only recall mode.");
41
+ const handles = getHandles();
42
+ if (!handles) throw new Error("Honcho is not connected. Run /honcho:setup first.");
43
+ await ensureSession(handles);
44
+ return handles;
45
+ };
46
+
47
+ const formatSearch = (results: Array<{ peerId: string; content: string }>, preview: number): string => {
48
+ if (results.length === 0) return "No relevant memory found.";
49
+ return results
50
+ .map((entry, index) => `${index + 1}. [${entry.peerId}] ${entry.content.slice(0, preview)}`)
51
+ .join("\n\n");
52
+ };
53
+
54
// NOTE(review): named as a token budget, but honcho_search uses it to cap
// config.searchLimit, which is passed as the result-count `limit` to
// session.search — confirm the intended unit against the Honcho search API.
const SEARCH_MAX_TOKENS = 2000;
55
+
56
/**
 * Register the Honcho memory tools with the PI extension API: profile
 * retrieval, memory search, context synthesis, durable conclusions, and
 * AI-identity seeding. Every tool requires a connected session —
 * ensureHandles throws when Honcho is not set up or recall is context-only.
 */
export const registerTools = (pi: ExtensionAPI): void => {
	// honcho_profile: read back Honcho's current representation of the user,
	// from the AI peer's perspective, without a session summary.
	pi.registerTool({
		name: "honcho_profile",
		label: "Honcho Profile",
		description: "Retrieve what Honcho currently knows about the user profile.",
		parameters: Type.Object({}),
		async execute(_toolCallId, _params, _signal, _onUpdate, _ctx) {
			const handles = await ensureHandles();
			const context = await handles.session!.context({
				summary: false,
				peerPerspective: handles.aiPeer,
				peerTarget: handles.userPeer,
				tokens: handles.config.contextTokens,
			});
			return {
				content: [{ type: "text", text: context.peerRepresentation?.trim() || "No profile memory available yet." }],
				details: {},
			};
		},
	});

	// honcho_search: search over the session's durable memory.
	pi.registerTool({
		name: "honcho_search",
		label: "Honcho Search",
		description: "Search durable memory for prior conversations, facts, and decisions.",
		parameters: Type.Object({ query: Type.String({ description: "Search query" }) }),
		async execute(_toolCallId, params, _signal, _onUpdate, _ctx) {
			const handles = await ensureHandles();
			// NOTE(review): SEARCH_MAX_TOKENS is named as a token budget but caps
			// `searchLimit`, which is passed as the result-count `limit` below —
			// confirm the intended unit against the Honcho search API.
			const limit = Math.min(handles.config.searchLimit, SEARCH_MAX_TOKENS);
			const results = await handles.session!.search(params.query, { limit });
			return {
				content: [{ type: "text", text: formatSearch(results, handles.config.toolPreviewLength) }],
				details: { count: results.length },
			};
		},
	});

	// honcho_context: dialectic query — ask the AI peer to synthesize an answer
	// about the user from long-term memory, with adaptive reasoning effort.
	pi.registerTool({
		name: "honcho_context",
		label: "Honcho Context",
		description: "Ask Honcho to synthesize memory context for the current question.",
		parameters: Type.Object({
			query: Type.String({ description: "Question to ask about long-term memory" }),
			reasoningLevel: Type.Optional(StringEnum(["minimal", "low", "medium", "high", "max"] as const)),
		}),
		async execute(_toolCallId, params, _signal, _onUpdate, _ctx) {
			const handles = await ensureHandles();
			// Bound the input size; an explicit reasoningLevel param overrides the
			// dynamically computed one.
			const truncatedQuery = params.query.slice(0, handles.config.dialecticMaxInputChars);
			const level = params.reasoningLevel
				?? dynamicLevel(truncatedQuery, handles.config.reasoningLevel, handles.config.dialecticDynamic, handles.config.reasoningLevelCap);
			let reply = await handles.aiPeer.chat(truncatedQuery, {
				target: handles.userPeer,
				session: handles.session!,
				reasoningLevel: level,
			});
			// Empty reply: retry once at the next-higher reasoning level, if any.
			if (!reply?.trim()) {
				const bumped = nextLevel(level);
				if (bumped) {
					reply = await handles.aiPeer.chat(truncatedQuery, {
						target: handles.userPeer,
						session: handles.session!,
						reasoningLevel: bumped,
					});
				}
			}
			// Bound the output size before handing it back to the agent.
			const result = reply?.slice(0, handles.config.dialecticMaxChars) ?? "No additional context available.";
			return {
				content: [{ type: "text", text: result }],
				details: {},
			};
		},
	});

	// honcho_conclude: persist an explicit durable fact/preference/decision.
	pi.registerTool({
		name: "honcho_conclude",
		label: "Honcho Conclude",
		description: "Store a durable preference, fact, or decision in Honcho.",
		parameters: Type.Object({ content: Type.String({ description: "Durable memory to store" }) }),
		async execute(_toolCallId, params, _signal, _onUpdate, _ctx) {
			const handles = await ensureHandles();
			// NOTE(review): `sessionId` is given the session object itself rather
			// than an id string — confirm the Honcho SDK accepts a session instance here.
			await handles.aiPeer.conclusionsOf(handles.userPeer).create({
				content: params.content,
				sessionId: handles.session!,
			});
			return {
				content: [{ type: "text", text: `Saved durable memory: ${params.content}` }],
				details: {},
			};
		},
	});

	// honcho_seed_identity: write a tagged identity message into the session as
	// the AI peer, so Honcho can build the AI's self-representation from it.
	pi.registerTool({
		name: "honcho_seed_identity",
		label: "Honcho Seed Identity",
		description: "Seed the AI peer's identity representation in Honcho.",
		parameters: Type.Object({ content: Type.String({ description: "AI identity description" }) }),
		async execute(_toolCallId, params, _signal, _onUpdate, _ctx) {
			const handles = await ensureHandles();
			const tagged = `<ai_identity_seed source="manual">\n${params.content}\n</ai_identity_seed>`;
			await handles.session!.addMessages([handles.aiPeer.message(tagged)]);
			return {
				content: [{ type: "text", text: "AI identity seeded." }],
				details: {},
			};
		},
	});
};
@@ -0,0 +1,216 @@
1
+ import type { AgentMessage } from "@mariozechner/pi-agent-core";
2
+ import type { WriteFrequency } from "./config.js";
3
+ import type { HonchoHandles } from "./client.js";
4
+
5
+ // ---------------------------------------------------------------------------
6
+ // Layer 1: Credential sanitization
7
+ // ---------------------------------------------------------------------------
8
+
9
+ const REDACT_PLACEHOLDER = "<REDACTED>";
10
+
11
+ /** Patterns that match credential values in context (keyword + value). */
12
+ const CONTEXTUAL_PATTERNS: Array<{ re: RegExp; label: string }> = [
13
+ // key=value or key: value patterns for known credential keywords
14
+ { re: /(?:api[_-]?key|apikey|secret[_-]?key|access[_-]?key|auth[_-]?token|bearer|password|passphrase|private[_-]?key|client[_-]?secret)\s*[:=]\s*['"]?([^\s'"`,;}{]{8,})['"]?/gi, label: "CREDENTIAL" },
15
+ // export VAR="value" for known env var names
16
+ { re: /(?:export\s+)?(?:API_KEY|SECRET_KEY|ACCESS_KEY|AUTH_TOKEN|PASSWORD|PASSPHRASE|PRIVATE_KEY|CLIENT_SECRET|DATABASE_URL|OPENAI_API_KEY|ANTHROPIC_API_KEY|EXA_API_KEY|HONCHO_API_KEY|AWS_SECRET_ACCESS_KEY|GITHUB_TOKEN|GITLAB_TOKEN|NPM_TOKEN)\s*=\s*['"]?([^\s'"`,;}{]{8,})['"]?/gi, label: "ENV_SECRET" },
17
+ ];
18
+
19
+ /** Patterns that match standalone credential formats (no keyword context needed). */
20
+ const STANDALONE_PATTERNS: Array<{ re: RegExp; label: string }> = [
21
+ // AWS access key IDs
22
+ { re: /\bAKIA[0-9A-Z]{16}\b/g, label: "AWS_KEY" },
23
+ // Bearer tokens
24
+ { re: /\bBearer\s+[A-Za-z0-9\-._~+/]{20,}=*/g, label: "BEARER_TOKEN" },
25
+ // GitHub personal access tokens
26
+ { re: /\bgh[ps]_[A-Za-z0-9]{36,}\b/g, label: "GITHUB_TOKEN" },
27
+ // GitLab tokens
28
+ { re: /\bglpat-[A-Za-z0-9\-]{20,}\b/g, label: "GITLAB_TOKEN" },
29
+ // Honcho API keys
30
+ { re: /\bhch-v\d+-[A-Za-z0-9]{20,}\b/g, label: "HONCHO_KEY" },
31
+ // npm tokens
32
+ { re: /\bnpm_[A-Za-z0-9]{36,}\b/g, label: "NPM_TOKEN" },
33
+ // Slack tokens
34
+ { re: /\bxox[bpors]-[A-Za-z0-9\-]{10,}\b/g, label: "SLACK_TOKEN" },
35
+ // OpenAI API keys
36
+ { re: /\bsk-[A-Za-z0-9\-]{20,}\b/g, label: "OPENAI_KEY" },
37
+ // Generic long hex secrets (64+ chars, likely SHA/HMAC keys)
38
+ { re: /\b[0-9a-f]{64,}\b/gi, label: "HEX_SECRET" },
39
+ ];
40
+
41
+ export const sanitizeCredentials = (text: string): string => {
42
+ let result = text;
43
+ for (const { re, label } of CONTEXTUAL_PATTERNS) {
44
+ re.lastIndex = 0;
45
+ result = result.replace(re, (match, value) =>
46
+ match.replace(value, `${REDACT_PLACEHOLDER}:${label}`),
47
+ );
48
+ }
49
+ for (const { re, label } of STANDALONE_PATTERNS) {
50
+ re.lastIndex = 0;
51
+ result = result.replace(re, `${REDACT_PLACEHOLDER}:${label}`);
52
+ }
53
+ return result;
54
+ };
55
+
56
+ // ---------------------------------------------------------------------------
57
+ // Layer 2: Tool output filtering
58
+ // ---------------------------------------------------------------------------
59
+
60
+ /** Detect and strip content that looks like raw file dumps or command output. */
61
+ export const stripToolOutput = (text: string): string => {
62
+ // Strip content between common tool output markers
63
+ let result = text;
64
+ // Remove fenced code blocks that look like file contents (```\nFILE_CONTENT\n```)
65
+ // but keep short code blocks (likely explanations, not dumps)
66
+ result = result.replace(/```[\w]*\n([\s\S]{500,}?)```/g, (match) => {
67
+ // Check if it contains credential-like patterns — if so, redact the whole block
68
+ if (/(?:password|secret|key|token|apikey)\s*[:=]/i.test(match)) {
69
+ return "```\n[tool output redacted — contained potential credentials]\n```";
70
+ }
71
+ return match;
72
+ });
73
+ return result;
74
+ };
75
+
76
+ // ---------------------------------------------------------------------------
77
+ // Text extraction
78
+ // ---------------------------------------------------------------------------
79
+
80
+ const extractText = (content: unknown): string => {
81
+ if (typeof content === "string") return content.trim();
82
+ if (!Array.isArray(content)) return "";
83
+ return content
84
+ .flatMap((entry) => {
85
+ if (typeof entry === "string") return [entry];
86
+ if (entry && typeof entry === "object" && "type" in entry && "text" in entry) {
87
+ const block = entry as { type?: string; text?: string };
88
+ if (block.type === "text" && typeof block.text === "string") return [block.text];
89
+ }
90
+ return [];
91
+ })
92
+ .join("\n")
93
+ .trim();
94
+ };
95
+
96
+ export const chunkTextSmart = (text: string, maxLen: number): string[] => {
97
+ if (text.length <= maxLen) return [text];
98
+ const chunks: string[] = [];
99
+ let remaining = text;
100
+ while (remaining.length > 0) {
101
+ if (remaining.length <= maxLen) { chunks.push(remaining); break; }
102
+ let cut = -1;
103
+ const search = remaining.slice(0, maxLen);
104
+ // Try paragraph boundary
105
+ const para = search.lastIndexOf("\n\n");
106
+ if (para > 0) cut = para + 2;
107
+ // Fall back to sentence
108
+ if (cut <= 0) {
109
+ const sent = search.lastIndexOf(". ");
110
+ if (sent > 0) cut = sent + 2;
111
+ }
112
+ // Fall back to word
113
+ if (cut <= 0) {
114
+ const word = search.lastIndexOf(" ");
115
+ if (word > 0) cut = word + 1;
116
+ }
117
+ // Hard cut
118
+ if (cut <= 0) cut = maxLen;
119
+ chunks.push(remaining.slice(0, cut));
120
+ remaining = remaining.slice(cut);
121
+ }
122
+ return chunks.map((c, i) => i === 0 ? c : `[continued] ${c}`);
123
+ };
124
+
125
/** A sanitized, chunked batch of conversation text waiting to be uploaded. */
interface PendingWrite {
	// Connection handles captured at the time the turn ended.
	handles: HonchoHandles;
	// Ordered turns; role selects which Honcho peer authors the message on send.
	payload: Array<{ role: "user" | "assistant"; text: string }>;
}
129
+
130
/**
 * Buffers and uploads sanitized conversation turns to a Honcho session.
 *
 * Behavior by `frequency`:
 *  - "async":   uploads are chained onto an internal promise queue and not
 *               awaited by the caller (errors are logged, not thrown);
 *  - "turn":    each turn's payload is uploaded, and awaited, immediately;
 *  - "session": payloads accumulate until flush() is called;
 *  - number N:  payloads accumulate and are flushed every N turns.
 */
export class WriteScheduler {
	// Batches accumulated in "session" / N-turn modes, oldest first.
	private pending: PendingWrite[] = [];
	// Turns processed since construction or reset(); drives N-turn flushing.
	private turnCount = 0;
	// Serializes "async"-mode uploads so they reach Honcho in order.
	private asyncQueue: Promise<void> = Promise.resolve();

	constructor(private frequency: WriteFrequency | number) {}

	/**
	 * Convert a payload into peer messages and upload it, retrying once after
	 * a 2s pause. A failure of the retry propagates to the caller.
	 */
	private async sendWithRetry(handles: HonchoHandles, payload: PendingWrite["payload"]): Promise<void> {
		const messages = payload.map((m) =>
			m.role === "user" ? handles.userPeer.message(m.text) : handles.aiPeer.message(m.text),
		);
		try {
			await handles.session!.addMessages(messages);
		} catch (firstError) {
			console.warn("[honcho-memory] upload failed, retrying in 2s:",
				firstError instanceof Error ? firstError.message : firstError);
			await new Promise((r) => setTimeout(r, 2000));
			await handles.session!.addMessages(messages);
		}
	}

	/** Chain a write onto the async queue; errors are logged and swallowed. */
	private enqueueAsync(write: PendingWrite): void {
		this.asyncQueue = this.asyncQueue
			.then(() => this.sendWithRetry(write.handles, write.payload))
			.catch((error) => {
				console.error("[honcho-memory] upload queue error:", error instanceof Error ? error.message : error);
			});
	}

	/**
	 * Reduce raw agent messages to uploadable text: keep user/assistant turns,
	 * extract their text, scrub tool dumps and credentials, drop anything left
	 * empty, and split oversized texts into chunks within maxMessageLength.
	 */
	private preparePayload(handles: HonchoHandles, messages: AgentMessage[]): PendingWrite["payload"] {
		return messages
			.filter((m) => m.role === "user" || m.role === "assistant")
			.map((m) => ({ role: m.role as "user" | "assistant", text: extractText(m.content) }))
			.filter((m) => m.text.length > 0)
			.map((m) => ({ role: m.role, text: sanitizeCredentials(stripToolOutput(m.text)) }))
			.filter((m) => m.text.length > 0)
			.flatMap((m) =>
				chunkTextSmart(m.text, handles.config.maxMessageLength).map((chunk) => ({ role: m.role, text: chunk })),
			);
	}

	/** Record a finished turn and upload or buffer its payload per `frequency`. */
	async onTurnEnd(handles: HonchoHandles, messages: AgentMessage[]): Promise<void> {
		const payload = this.preparePayload(handles, messages);
		if (payload.length === 0) return;

		this.turnCount++;

		if (this.frequency === "async") {
			this.enqueueAsync({ handles, payload });
			return;
		}

		if (this.frequency === "turn") {
			await this.sendWithRetry(handles, payload);
			return;
		}

		// "session" or N-turn: accumulate
		this.pending.push({ handles, payload });

		// NOTE(review): a numeric frequency of 0 makes `turnCount % 0` NaN, so
		// this branch never fires and 0 silently behaves like "session" —
		// confirm that is the intended configuration semantics.
		if (typeof this.frequency === "number" && this.turnCount % this.frequency === 0) {
			await this.flushPending();
		}
	}

	/** Upload all buffered batches; per-batch failures are logged, not rethrown. */
	private async flushPending(): Promise<void> {
		const batch = this.pending.splice(0);
		for (const write of batch) {
			try {
				await this.sendWithRetry(write.handles, write.payload);
			} catch (error) {
				console.error("[honcho-memory] batch flush error:", error instanceof Error ? error.message : error);
			}
		}
	}

	/** Drain buffered batches, then wait for queued async uploads to settle. */
	async flush(): Promise<void> {
		await this.flushPending();
		await this.asyncQueue;
	}

	/**
	 * Discard buffered batches and counters.
	 * NOTE(review): replaces asyncQueue without awaiting it, so uploads already
	 * in flight are no longer tracked by a later flush() — confirm intended.
	 */
	reset(): void {
		this.pending = [];
		this.turnCount = 0;
		this.asyncQueue = Promise.resolve();
	}
}
package/package.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "name": "pi-honcho-memory",
3
+ "version": "0.1.0",
4
+ "description": "Honcho-backed persistent memory extension for PI coding agent. Full Hermes feature parity.",
5
+ "type": "module",
6
+ "license": "MIT",
7
+ "author": "acsezen",
8
+ "repository": {
9
+ "type": "git",
10
+ "url": "git+https://github.com/acsezen/pi-memory-honcho.git"
11
+ },
12
+ "homepage": "https://github.com/acsezen/pi-memory-honcho#readme",
13
+ "keywords": [
14
+ "pi-package",
15
+ "pi",
16
+ "honcho",
17
+ "hermes",
18
+ "memory",
19
+ "persistent-memory",
20
+ "ai-agent"
21
+ ],
22
+ "scripts": {
23
+ "typecheck": "tsc -p tsconfig.json --noEmit",
24
+ "test": "vitest run"
25
+ },
26
+ "dependencies": {
27
+ "@honcho-ai/sdk": "^2.0.1"
28
+ },
29
+ "devDependencies": {
30
+ "@types/node": "^22.0.0",
31
+ "vitest": "^4.1.2"
32
+ },
33
+ "peerDependencies": {
34
+ "@mariozechner/pi-ai": "*",
35
+ "@mariozechner/pi-coding-agent": "*",
36
+ "@sinclair/typebox": "*"
37
+ },
38
+ "pi": {
39
+ "extensions": [
40
+ "./extensions"
41
+ ]
42
+ }
43
+ }
@@ -0,0 +1,3 @@
1
+ // All major dependencies ship their own types.
2
+ // This file is kept as a placeholder — add shims here only for
3
+ // packages that genuinely lack type definitions.