@oh-my-pi/pi-coding-agent 14.6.2 → 14.6.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/CHANGELOG.md +71 -2
  2. package/README.md +21 -0
  3. package/package.json +23 -7
  4. package/src/cli/grievances-cli.ts +89 -4
  5. package/src/commands/grievances.ts +33 -7
  6. package/src/config/prompt-templates.ts +14 -7
  7. package/src/config/settings-schema.ts +585 -100
  8. package/src/config/settings.ts +42 -0
  9. package/src/discovery/helpers.ts +13 -6
  10. package/src/edit/index.ts +3 -3
  11. package/src/edit/line-hash.ts +73 -25
  12. package/src/edit/modes/hashline.lark +10 -3
  13. package/src/edit/modes/hashline.ts +104 -38
  14. package/src/edit/renderer.ts +3 -3
  15. package/src/hindsight/backend.ts +444 -0
  16. package/src/hindsight/bank.ts +131 -0
  17. package/src/hindsight/client.ts +445 -0
  18. package/src/hindsight/config.ts +165 -0
  19. package/src/hindsight/content.ts +205 -0
  20. package/src/hindsight/index.ts +6 -0
  21. package/src/hindsight/retain-queue.ts +166 -0
  22. package/src/hindsight/transcript.ts +71 -0
  23. package/src/main.ts +7 -10
  24. package/src/memories/index.ts +1 -1
  25. package/src/memory-backend/index.ts +4 -0
  26. package/src/memory-backend/local-backend.ts +30 -0
  27. package/src/memory-backend/off-backend.ts +16 -0
  28. package/src/memory-backend/resolve.ts +24 -0
  29. package/src/memory-backend/types.ts +69 -0
  30. package/src/modes/components/settings-defs.ts +50 -451
  31. package/src/modes/components/settings-selector.ts +2 -2
  32. package/src/modes/components/status-line/presets.ts +1 -1
  33. package/src/modes/controllers/command-controller.ts +6 -5
  34. package/src/modes/controllers/event-controller.ts +12 -0
  35. package/src/modes/controllers/selector-controller.ts +3 -12
  36. package/src/modes/theme/theme.ts +4 -0
  37. package/src/prompts/tools/github.md +3 -0
  38. package/src/prompts/tools/hashline.md +20 -16
  39. package/src/prompts/tools/read.md +10 -6
  40. package/src/prompts/tools/recall.md +5 -0
  41. package/src/prompts/tools/reflect.md +5 -0
  42. package/src/prompts/tools/retain.md +5 -0
  43. package/src/prompts/tools/search.md +1 -1
  44. package/src/sdk.ts +12 -9
  45. package/src/session/agent-session.ts +75 -3
  46. package/src/slash-commands/builtin-registry.ts +2 -12
  47. package/src/tools/ast-edit.ts +14 -5
  48. package/src/tools/ast-grep.ts +12 -3
  49. package/src/tools/find.ts +47 -7
  50. package/src/tools/gh-renderer.ts +10 -1
  51. package/src/tools/gh.ts +233 -5
  52. package/src/tools/hindsight-recall.ts +70 -0
  53. package/src/tools/hindsight-reflect.ts +57 -0
  54. package/src/tools/hindsight-retain.ts +63 -0
  55. package/src/tools/index.ts +17 -0
  56. package/src/tools/path-utils.ts +55 -0
  57. package/src/tools/read.ts +1 -1
  58. package/src/tools/search.ts +45 -8
@@ -0,0 +1,205 @@
1
+ /**
2
+ * Pure content utilities for the Hindsight backend.
3
+ *
4
+ * Ports the semantics of the upstream OpenCode plugin
5
+ * (vectorize-io/hindsight @ hindsight-integrations/opencode/src/content.ts):
6
+ * - tag stripping for anti-feedback (a recalled <memories> block must
7
+ * never end up retained as a new memory)
8
+ * - recall query composition + truncation under a character budget
9
+ * - retention transcript framing
10
+ */
11
+
12
+ export interface HindsightMessage {
13
+ role: string;
14
+ content: string;
15
+ }
16
+
17
+ export interface RecallResultLike {
18
+ text: string;
19
+ type?: string | null;
20
+ mentioned_at?: string | null;
21
+ }
22
+
23
+ const MEMORIES_REGEX = /<memories>[\s\S]*?<\/memories>/g;
24
+ const LEGACY_HINDSIGHT_MEMORIES_REGEX = /<hindsight_memories>[\s\S]*?<\/hindsight_memories>/g;
25
+ const LEGACY_RELEVANT_MEMORIES_REGEX = /<relevant_memories>[\s\S]*?<\/relevant_memories>/g;
26
+
27
+ /**
28
+ * Strip `<memories>` and legacy memory blocks.
29
+ *
30
+ * The recall path injects these tags into the system prompt; if they leak back
31
+ * into the retention transcript, every retain becomes a tighter feedback loop
32
+ * around the same memories. Always strip before retaining.
33
+ */
34
+ export function stripMemoryTags(content: string): string {
35
+ return content
36
+ .replace(MEMORIES_REGEX, "")
37
+ .replace(LEGACY_HINDSIGHT_MEMORIES_REGEX, "")
38
+ .replace(LEGACY_RELEVANT_MEMORIES_REGEX, "");
39
+ }
40
+
41
+ /** Format recall results into a bullet list for context injection. */
42
+ export function formatMemories(results: RecallResultLike[]): string {
43
+ if (results.length === 0) return "";
44
+ return results
45
+ .map(r => {
46
+ const typeStr = r.type ? ` [${r.type}]` : "";
47
+ const dateStr = r.mentioned_at ? ` (${r.mentioned_at})` : "";
48
+ return `- ${r.text}${typeStr}${dateStr}`;
49
+ })
50
+ .join("\n\n");
51
+ }
52
+
53
+ /** Format current UTC time for the recall preamble. */
54
+ export function formatCurrentTime(now: Date = new Date()): string {
55
+ const y = now.getUTCFullYear();
56
+ const m = String(now.getUTCMonth() + 1).padStart(2, "0");
57
+ const d = String(now.getUTCDate()).padStart(2, "0");
58
+ const h = String(now.getUTCHours()).padStart(2, "0");
59
+ const min = String(now.getUTCMinutes()).padStart(2, "0");
60
+ return `${y}-${m}-${d} ${h}:${min}`;
61
+ }
62
+
63
+ /**
64
+ * Slice messages to the last N turns, where a turn boundary is a user message.
65
+ * Returns the trailing tail starting at the (N-th from the end) user message.
66
+ */
67
+ export function sliceLastTurnsByUserBoundary(messages: HindsightMessage[], turns: number): HindsightMessage[] {
68
+ if (messages.length === 0 || turns <= 0) return [];
69
+
70
+ let userTurnsSeen = 0;
71
+ let startIndex = -1;
72
+
73
+ for (let i = messages.length - 1; i >= 0; i--) {
74
+ if (messages[i].role === "user") {
75
+ userTurnsSeen += 1;
76
+ if (userTurnsSeen >= turns) {
77
+ startIndex = i;
78
+ break;
79
+ }
80
+ }
81
+ }
82
+
83
+ return startIndex === -1 ? [...messages] : messages.slice(startIndex);
84
+ }
85
+
86
+ /**
87
+ * Compose a recall query from the latest user prompt plus optional prior context.
88
+ *
89
+ * When `recallContextTurns <= 1` the query is just the trimmed latest prompt.
90
+ * Otherwise we prepend a `Prior context:` block built from the trailing
91
+ * `recallContextTurns` user-bounded turns (memory tags stripped, latest prompt
92
+ * suppressed to avoid duplicating it inside the context block).
93
+ */
94
+ export function composeRecallQuery(
95
+ latestQuery: string,
96
+ messages: HindsightMessage[],
97
+ recallContextTurns: number,
98
+ ): string {
99
+ const latest = latestQuery.trim();
100
+ if (recallContextTurns <= 1 || messages.length === 0) return latest;
101
+
102
+ const contextual = sliceLastTurnsByUserBoundary(messages, recallContextTurns);
103
+ const contextLines: string[] = [];
104
+
105
+ for (const msg of contextual) {
106
+ const content = stripMemoryTags(msg.content).trim();
107
+ if (!content) continue;
108
+ if (msg.role === "user" && content === latest) continue;
109
+ contextLines.push(`${msg.role}: ${content}`);
110
+ }
111
+
112
+ if (contextLines.length === 0) return latest;
113
+ return ["Prior context:", contextLines.join("\n"), latest].join("\n\n");
114
+ }
115
+
116
+ /**
117
+ * Truncate a composed recall query to `maxChars`.
118
+ *
119
+ * Always preserves the latest user message. Drops oldest context lines first
120
+ * and degrades gracefully when even the latest message exceeds the budget.
121
+ */
122
+ export function truncateRecallQuery(query: string, latestQuery: string, maxChars: number): string {
123
+ if (maxChars <= 0 || query.length <= maxChars) return query;
124
+
125
+ const latest = latestQuery.trim();
126
+ const latestOnly = latest.length > maxChars ? latest.slice(0, maxChars) : latest;
127
+
128
+ if (!query.includes("Prior context:")) return latestOnly;
129
+
130
+ const contextMarker = "Prior context:\n\n";
131
+ const markerIndex = query.indexOf(contextMarker);
132
+ if (markerIndex === -1) return latestOnly;
133
+
134
+ const suffix = `\n\n${latest}`;
135
+ const suffixIndex = query.lastIndexOf(suffix);
136
+ if (suffixIndex === -1) return latestOnly;
137
+ if (suffix.length >= maxChars) return latestOnly;
138
+
139
+ const contextBody = query.slice(markerIndex + contextMarker.length, suffixIndex);
140
+ const contextLines = contextBody.split("\n").filter(Boolean);
141
+
142
+ const kept: string[] = [];
143
+ for (let i = contextLines.length - 1; i >= 0; i--) {
144
+ kept.unshift(contextLines[i]);
145
+ const candidate = `${contextMarker}${kept.join("\n")}${suffix}`;
146
+ if (candidate.length > maxChars) {
147
+ kept.shift();
148
+ break;
149
+ }
150
+ }
151
+
152
+ if (kept.length > 0) return `${contextMarker}${kept.join("\n")}${suffix}`;
153
+ return latestOnly;
154
+ }
155
+
156
+ export interface RetentionTranscript {
157
+ transcript: string | null;
158
+ messageCount: number;
159
+ }
160
+
161
+ /**
162
+ * Format messages into a retention transcript using `[role: ...]` markers.
163
+ *
164
+ * - When `retainFullWindow` is true, all messages are included (used when the
165
+ * caller pre-sliced the window itself).
166
+ * - Otherwise, only the last user turn (last user message → end) is retained.
167
+ *
168
+ * Messages are tag-stripped before framing to break the recall→retain loop.
169
+ * Returns `{ transcript: null }` when nothing meaningful survives.
170
+ */
171
+ export function prepareRetentionTranscript(
172
+ messages: HindsightMessage[],
173
+ retainFullWindow = false,
174
+ ): RetentionTranscript {
175
+ if (messages.length === 0) return { transcript: null, messageCount: 0 };
176
+
177
+ let targetMessages: HindsightMessage[];
178
+ if (retainFullWindow) {
179
+ targetMessages = messages;
180
+ } else {
181
+ let lastUserIdx = -1;
182
+ for (let i = messages.length - 1; i >= 0; i--) {
183
+ if (messages[i].role === "user") {
184
+ lastUserIdx = i;
185
+ break;
186
+ }
187
+ }
188
+ if (lastUserIdx === -1) return { transcript: null, messageCount: 0 };
189
+ targetMessages = messages.slice(lastUserIdx);
190
+ }
191
+
192
+ const parts: string[] = [];
193
+ for (const msg of targetMessages) {
194
+ const content = stripMemoryTags(msg.content).trim();
195
+ if (!content) continue;
196
+ parts.push(`[role: ${msg.role}]\n${content}\n[${msg.role}:end]`);
197
+ }
198
+
199
+ if (parts.length === 0) return { transcript: null, messageCount: 0 };
200
+
201
+ const transcript = parts.join("\n\n");
202
+ if (transcript.trim().length < 10) return { transcript: null, messageCount: 0 };
203
+
204
+ return { transcript, messageCount: parts.length };
205
+ }
@@ -0,0 +1,6 @@
1
+ export * from "./backend";
2
+ export * from "./bank";
3
+ export * from "./client";
4
+ export * from "./config";
5
+ export * from "./content";
6
+ export * from "./transcript";
@@ -0,0 +1,166 @@
1
+ /**
2
+ * Global, debounced batch queue for tool-initiated `retain` calls.
3
+ *
4
+ * The `retain` tool used to block on a single-item HTTP round trip per
5
+ * invocation. Now it pushes onto a per-session queue and returns immediately;
6
+ * a flush fires when:
7
+ * 1. the queue reaches `FLUSH_BATCH_SIZE`, or
8
+ * 2. `FLUSH_INTERVAL_MS` elapses since the queue first became non-empty.
9
+ *
10
+ * On batch failure we surface a UI-only notice via `session.emitNotice` —
11
+ * a single yellow "Hindsight: memory retention failed …" line in the TUI.
12
+ * The LLM is NOT told; the agent already received "Memory queued" and has
13
+ * moved on. This is purely so the user knows their facts didn't persist.
14
+ *
15
+ * Auto-retain (`retainSession` in backend.ts) is intentionally NOT routed
16
+ * through this queue — it submits a full transcript as one large item and
17
+ * already runs `async: true` server-side.
18
+ */
19
+
20
+ import { logger } from "@oh-my-pi/pi-utils";
21
+ import { getHindsightSessionState, type HindsightSessionState } from "./backend";
22
+ import { ensureBankMission } from "./bank";
23
+ import type { MemoryItemInput } from "./client";
24
+
25
+ const FLUSH_BATCH_SIZE = 16;
26
+ const FLUSH_INTERVAL_MS = 5_000;
27
+
28
+ interface PendingItem {
29
+ content: string;
30
+ context?: string;
31
+ }
32
+
33
+ interface SessionQueue {
34
+ items: PendingItem[];
35
+ timer?: NodeJS.Timeout;
36
+ /** Currently in-flight flush; subsequent flushes await it before running. */
37
+ flushing?: Promise<void>;
38
+ }
39
+
40
+ const QUEUES = new Map<string, SessionQueue>();
41
+
42
+ /** Push a memory item onto the session's retain queue. Returns immediately. */
43
+ export function enqueueRetain(sessionId: string, content: string, context?: string): void {
44
+ const queue = QUEUES.get(sessionId) ?? createQueue(sessionId);
45
+ queue.items.push({ content, context });
46
+
47
+ if (queue.items.length >= FLUSH_BATCH_SIZE) {
48
+ void flushSessionQueue(sessionId);
49
+ return;
50
+ }
51
+ if (!queue.timer) {
52
+ queue.timer = setTimeout(() => {
53
+ void flushSessionQueue(sessionId);
54
+ }, FLUSH_INTERVAL_MS);
55
+ // Don't pin the event loop alive just for a pending retain flush.
56
+ queue.timer.unref?.();
57
+ }
58
+ }
59
+
60
+ /** Flush a single session's queue. Safe to call when empty or already in flight. */
61
+ export async function flushSessionQueue(sessionId: string): Promise<void> {
62
+ const queue = QUEUES.get(sessionId);
63
+ if (!queue) return;
64
+
65
+ if (queue.timer) {
66
+ clearTimeout(queue.timer);
67
+ queue.timer = undefined;
68
+ }
69
+
70
+ if (queue.flushing) {
71
+ // Coalesce: wait for the in-flight flush, then drain anything that
72
+ // landed after it started so we don't strand items.
73
+ await queue.flushing;
74
+ if (queue.items.length > 0) {
75
+ await flushSessionQueue(sessionId);
76
+ }
77
+ return;
78
+ }
79
+
80
+ if (queue.items.length === 0) {
81
+ QUEUES.delete(sessionId);
82
+ return;
83
+ }
84
+
85
+ const items = queue.items.splice(0);
86
+ const flushPromise = doFlush(sessionId, items);
87
+ queue.flushing = flushPromise;
88
+ try {
89
+ await flushPromise;
90
+ } finally {
91
+ queue.flushing = undefined;
92
+ if (queue.items.length === 0 && !queue.timer) {
93
+ QUEUES.delete(sessionId);
94
+ }
95
+ }
96
+ }
97
+
98
+ /** Flush every pending session queue. Called from `clear`/`enqueue` backend hooks. */
99
+ export async function flushAllRetainQueues(): Promise<void> {
100
+ const ids = [...QUEUES.keys()];
101
+ await Promise.all(ids.map(id => flushSessionQueue(id)));
102
+ }
103
+
104
+ /** Test helper: clear timers and pending items without triggering flushes. */
105
+ export function clearRetainQueueForTest(): void {
106
+ for (const queue of QUEUES.values()) {
107
+ if (queue.timer) clearTimeout(queue.timer);
108
+ }
109
+ QUEUES.clear();
110
+ }
111
+
112
+ /** Test helper: peek at queued count for a session. */
113
+ export function getRetainQueueDepthForTest(sessionId: string): number {
114
+ return QUEUES.get(sessionId)?.items.length ?? 0;
115
+ }
116
+
117
+ async function doFlush(sessionId: string, items: PendingItem[]): Promise<void> {
118
+ const state = getHindsightSessionState(sessionId);
119
+ if (!state) {
120
+ // Session went away before we could flush. We can't notify anyone, so
121
+ // log and drop — these are best-effort facts, not transactional writes.
122
+ logger.warn("Hindsight retain queue: session vanished, dropping batch", {
123
+ sessionId,
124
+ items: items.length,
125
+ });
126
+ return;
127
+ }
128
+
129
+ try {
130
+ await ensureBankMission(state.client, state.bankId, state.config, state.missionsSet);
131
+ const batch: MemoryItemInput[] = items.map(item => ({
132
+ content: item.content,
133
+ context: item.context ?? state.config.retainContext,
134
+ metadata: { session_id: sessionId },
135
+ tags: state.retainTags,
136
+ }));
137
+ await state.client.retainBatch(state.bankId, batch, { async: true });
138
+ if (state.config.debug) {
139
+ logger.debug("Hindsight retain queue: batch flushed", {
140
+ sessionId,
141
+ bankId: state.bankId,
142
+ items: items.length,
143
+ });
144
+ }
145
+ } catch (err) {
146
+ const errorText = err instanceof Error ? err.message : String(err);
147
+ logger.warn("Hindsight retain queue: batch flush failed", {
148
+ sessionId,
149
+ bankId: state.bankId,
150
+ items: items.length,
151
+ error: errorText,
152
+ });
153
+ notifyRetainFailure(state, items.length, errorText);
154
+ }
155
+ }
156
+
157
+ function notifyRetainFailure(state: HindsightSessionState, count: number, errorText: string): void {
158
+ const noun = count === 1 ? "memory" : "memories";
159
+ state.session.emitNotice("warning", `Memory retention failed for ${count} ${noun}: ${errorText}`, "Hindsight");
160
+ }
161
+
162
+ function createQueue(sessionId: string): SessionQueue {
163
+ const queue: SessionQueue = { items: [] };
164
+ QUEUES.set(sessionId, queue);
165
+ return queue;
166
+ }
@@ -0,0 +1,71 @@
1
+ /**
2
+ * Pull plain-text user/assistant messages out of a session manager.
3
+ *
4
+ * The Hindsight retain/recall API only takes flat `{role, content}` records,
5
+ * so we drop tool calls, tool results, bash execution wrappers, custom
6
+ * messages, and anything else that isn't a primary conversation turn. Each
7
+ * surviving message's `TextContent` parts are joined with newlines.
8
+ */
9
+
10
+ import type { AssistantMessage } from "@oh-my-pi/pi-ai";
11
+ import type { SessionEntry } from "../session/session-manager";
12
+ import type { HindsightMessage } from "./content";
13
+
14
+ export interface ReadonlySessionManagerLike {
15
+ getEntries(): SessionEntry[];
16
+ }
17
+
18
+ /**
19
+ * Walk session entries top-to-bottom, returning a flat user/assistant list.
20
+ *
21
+ * Implementation choices:
22
+ * - Skip entries whose type isn't `"message"` (compaction, branch_summary,
23
+ * custom_message, tool exec records, ...). Those don't represent a
24
+ * conversational turn, only the LLM's plain-text utterances do.
25
+ * - Skip messages whose role isn't `"user"` or `"assistant"`. We deliberately
26
+ * ignore `toolResult`, `bashExecution`, `hookMessage`, etc. — they're noise
27
+ * for memory purposes.
28
+ * - For assistant messages, only `text` blocks contribute. Thinking and
29
+ * toolCall blocks are intentionally dropped: the user never saw them, so
30
+ * retaining them would prime recall on internal monologue.
31
+ */
32
+ export function extractMessages(sessionManager: ReadonlySessionManagerLike): HindsightMessage[] {
33
+ const messages: HindsightMessage[] = [];
34
+
35
+ for (const entry of sessionManager.getEntries()) {
36
+ if (entry.type !== "message") continue;
37
+ const msg = entry.message;
38
+ const role = msg.role;
39
+ if (role !== "user" && role !== "assistant") continue;
40
+
41
+ const text = role === "user" ? extractUserText(msg) : extractAssistantText(msg as AssistantMessage);
42
+ if (text.length === 0) continue;
43
+ messages.push({ role, content: text });
44
+ }
45
+
46
+ return messages;
47
+ }
48
+
49
+ function extractUserText(msg: { content: unknown }): string {
50
+ const content = msg.content;
51
+ if (typeof content === "string") return content;
52
+ if (!Array.isArray(content)) return "";
53
+
54
+ const parts: string[] = [];
55
+ for (const block of content) {
56
+ if (!block || typeof block !== "object") continue;
57
+ const maybeText = block as { type?: unknown; text?: unknown };
58
+ if (maybeText.type === "text" && typeof maybeText.text === "string") {
59
+ parts.push(maybeText.text);
60
+ }
61
+ }
62
+ return parts.join("\n");
63
+ }
64
+
65
+ function extractAssistantText(msg: AssistantMessage): string {
66
+ const parts: string[] = [];
67
+ for (const block of msg.content) {
68
+ if (block.type === "text" && block.text) parts.push(block.text);
69
+ }
70
+ return parts.join("\n");
71
+ }
package/src/main.ts CHANGED
@@ -11,9 +11,8 @@ import * as os from "node:os";
11
11
  import * as path from "node:path";
12
12
  import { createInterface } from "node:readline/promises";
13
13
  import type { ImageContent } from "@oh-my-pi/pi-ai";
14
- import { $env, getConfigDirName, getProjectDir, logger, postmortem, setProjectDir, VERSION } from "@oh-my-pi/pi-utils";
14
+ import { $env, getProjectDir, logger, postmortem, setProjectDir, VERSION } from "@oh-my-pi/pi-utils";
15
15
  import chalk from "chalk";
16
- import { invalidate as invalidateFsCache } from "./capability/fs";
17
16
  import type { Args } from "./cli/args";
18
17
  import { processFileArguments } from "./cli/file-processor";
19
18
  import { buildInitialMessage } from "./cli/initial-message";
@@ -25,7 +24,7 @@ import { resolveCliModel, resolveModelRoleValue, resolveModelScope, type ScopedM
25
24
  import { getDefault, type SettingPath, Settings, settings } from "./config/settings";
26
25
  import { initializeWithSettings } from "./discovery";
27
26
  import {
28
- clearClaudePluginRootsCache,
27
+ clearPluginRootsAndCaches,
29
28
  injectPluginDirRoots,
30
29
  preloadPluginRoots,
31
30
  resolveActiveProjectRegistryPath,
@@ -90,6 +89,10 @@ const RPC_DEFAULTED_SETTING_PATHS: SettingPath[] = [
90
89
  "task.maxRecursionDepth",
91
90
  "task.disabledAgents",
92
91
  "task.agentModelOverrides",
92
+ // Memory subsystems are off-by-default for RPC hosts; embedders that want
93
+ // memory should opt in explicitly through their own settings layer.
94
+ "memory.backend",
95
+ "memories.enabled",
93
96
  ];
94
97
 
95
98
  function applyRpcDefaultSettingOverrides(): void {
@@ -758,13 +761,7 @@ export async function runRootCommand(parsed: Args, rawArgs: string[]): Promise<v
758
761
  projectInstalledRegistryPath: (await resolveActiveProjectRegistryPath(getProjectDir())) ?? undefined,
759
762
  marketplacesCacheDir: getMarketplacesCacheDir(),
760
763
  pluginsCacheDir: getPluginsCacheDir(),
761
- clearPluginRootsCache: (extraPaths?: readonly string[]) => {
762
- const h = os.homedir();
763
- invalidateFsCache(path.join(h, ".claude", "plugins", "installed_plugins.json"));
764
- invalidateFsCache(path.join(h, getConfigDirName(), "plugins", "installed_plugins.json"));
765
- for (const p of extraPaths ?? []) invalidateFsCache(p);
766
- clearClaudePluginRootsCache();
767
- },
764
+ clearPluginRootsCache: clearPluginRootsAndCaches,
768
765
  });
769
766
  await mgr.refreshStaleMarketplaces();
770
767
  const updates = await mgr.checkForUpdates();
@@ -1077,7 +1077,7 @@ async function resolveMemoryModel(options: {
1077
1077
 
1078
1078
  function loadMemoryConfig(settings: Settings): MemoryRuntimeConfig {
1079
1079
  return {
1080
- enabled: settings.get("memories.enabled") ?? DEFAULTS.enabled,
1080
+ enabled: settings.get("memory.backend") === "local" || settings.get("memories.enabled") === true,
1081
1081
  maxRolloutsPerStartup: settings.get("memories.maxRolloutsPerStartup") ?? DEFAULTS.maxRolloutsPerStartup,
1082
1082
  maxRolloutAgeDays: settings.get("memories.maxRolloutAgeDays") ?? DEFAULTS.maxRolloutAgeDays,
1083
1083
  minRolloutIdleHours: settings.get("memories.minRolloutIdleHours") ?? DEFAULTS.minRolloutIdleHours,
@@ -0,0 +1,4 @@
1
+ export * from "./local-backend";
2
+ export * from "./off-backend";
3
+ export * from "./resolve";
4
+ export * from "./types";
@@ -0,0 +1,30 @@
1
+ import {
2
+ buildMemoryToolDeveloperInstructions,
3
+ clearMemoryData,
4
+ enqueueMemoryConsolidation,
5
+ startMemoryStartupTask,
6
+ } from "../memories";
7
+ import type { MemoryBackend } from "./types";
8
+
9
+ /**
10
+ * Wraps the existing `memories/` module as a `MemoryBackend`.
11
+ *
12
+ * No behavioural change — every call delegates to the legacy entry points so
13
+ * the local memory pipeline (rollout summarisation → SQLite → memory_summary.md)
14
+ * keeps working exactly as before.
15
+ */
16
+ export const localBackend: MemoryBackend = {
17
+ id: "local",
18
+ start(options) {
19
+ startMemoryStartupTask(options);
20
+ },
21
+ async buildDeveloperInstructions(agentDir, settings) {
22
+ return buildMemoryToolDeveloperInstructions(agentDir, settings);
23
+ },
24
+ async clear(agentDir, cwd) {
25
+ await clearMemoryData(agentDir, cwd);
26
+ },
27
+ async enqueue(agentDir, cwd) {
28
+ enqueueMemoryConsolidation(agentDir, cwd);
29
+ },
30
+ };
@@ -0,0 +1,16 @@
1
+ import type { MemoryBackend } from "./types";
2
+
3
+ /**
4
+ * No-op memory backend.
5
+ *
6
+ * Selected when `memory.backend` is `"off"`.
7
+ */
8
+ export const offBackend: MemoryBackend = {
9
+ id: "off",
10
+ async start() {},
11
+ async buildDeveloperInstructions() {
12
+ return undefined;
13
+ },
14
+ async clear() {},
15
+ async enqueue() {},
16
+ };
@@ -0,0 +1,24 @@
1
+ import type { Settings } from "../config/settings";
2
+ import { hindsightBackend } from "../hindsight";
3
+ import { localBackend } from "./local-backend";
4
+ import { offBackend } from "./off-backend";
5
+ import type { MemoryBackend } from "./types";
6
+
7
+ /**
8
+ * Pick the active memory backend for a Settings instance.
9
+ *
10
+ * Selection rules (single source of truth — every memory consumer routes
11
+ * through this):
12
+ * - `memory.backend === "hindsight"` → Hindsight remote memory
13
+ * - `memory.backend === "local"` → local pipeline
14
+ * - everything else → no-op
15
+ *
16
+ * `memories.enabled` remains accepted only as a legacy migration input. Once
17
+ * a config is loaded, `memory.backend` is the sole runtime selector.
18
+ */
19
+ export function resolveMemoryBackend(settings: Settings): MemoryBackend {
20
+ const id = settings.get("memory.backend");
21
+ if (id === "hindsight") return hindsightBackend;
22
+ if (id === "local") return localBackend;
23
+ return offBackend;
24
+ }
@@ -0,0 +1,69 @@
1
+ /**
2
+ * Memory backend abstraction.
3
+ *
4
+ * Backends are mutually exclusive — `resolveMemoryBackend(settings)` returns
5
+ * exactly one. Implementations MUST be self-contained: they own the per-session
6
+ * state they create in `start()` and tear it down on `clear()`.
7
+ */
8
+
9
+ import type { AgentMessage } from "@oh-my-pi/pi-agent-core";
10
+ import type { ModelRegistry } from "../config/model-registry";
11
+ import type { Settings } from "../config/settings";
12
+ import type { AgentSession } from "../session/agent-session";
13
+
14
+ export type MemoryBackendId = "off" | "local" | "hindsight";
15
+
16
+ export interface MemoryBackendStartOptions {
17
+ session: AgentSession;
18
+ settings: Settings;
19
+ modelRegistry: ModelRegistry;
20
+ agentDir: string;
21
+ taskDepth: number;
22
+ }
23
+
24
+ export interface MemoryBackend {
25
+ readonly id: MemoryBackendId;
26
+
27
+ /**
28
+ * Wire any background work or session subscriptions for this backend.
29
+ *
30
+ * Called once per agent session at startup. Implementations MUST be
31
+ * non-throwing: failures should be logged and swallowed so a misconfigured
32
+ * memory backend cannot break the agent loop.
33
+ */
34
+ start(options: MemoryBackendStartOptions): void | Promise<void>;
35
+
36
+ /**
37
+ * Markdown injected as the system-prompt append section.
38
+ * Returned on every prompt rebuild via `refreshBaseSystemPrompt()`.
39
+ */
40
+ buildDeveloperInstructions(agentDir: string, settings: Settings): Promise<string | undefined>;
41
+
42
+ /** Wipe all persisted state for this backend (slash `/memory clear`). */
43
+ clear(agentDir: string, cwd: string): Promise<void>;
44
+
45
+ /** Force consolidation/retain to happen now (slash `/memory enqueue`). */
46
+ enqueue(agentDir: string, cwd: string): Promise<void>;
47
+
48
+ /**
49
+ * Optional hook to inject a backend-specific block into the current turn's
50
+ * system prompt before the agent starts generating.
51
+ *
52
+ * This is the only place a backend can affect the very first answer of a
53
+ * fresh session. The returned text is appended to the already-built base
54
+ * system prompt for this turn only; callers may separately cache it and
55
+ * surface it through `buildDeveloperInstructions()` on later rebuilds.
56
+ */
57
+ beforeAgentStartPrompt?(session: AgentSession, promptText: string): Promise<string | undefined>;
58
+
59
+ /**
60
+ * Optional hook to splice extra context into a compaction summarization.
61
+ *
62
+ * Called from the compaction call site before the LLM summary is requested.
63
+ * Returning a string appends one entry to the compaction's `extraContext`
64
+ * list (which becomes part of the summarization prompt). Return `undefined`
65
+ * to inject nothing — the local backend takes this branch because its
66
+ * summary is already part of the system prompt.
67
+ */
68
+ preCompactionContext?(messages: AgentMessage[], settings: Settings): Promise<string | undefined>;
69
+ }