@phren/cli 0.0.11 → 0.0.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/README.md +9 -9
  2. package/mcp/dist/capabilities/cli.js +1 -1
  3. package/mcp/dist/capabilities/mcp.js +1 -1
  4. package/mcp/dist/capabilities/vscode.js +1 -1
  5. package/mcp/dist/capabilities/web-ui.js +1 -1
  6. package/mcp/dist/cli-actions.js +54 -67
  7. package/mcp/dist/cli-config.js +4 -5
  8. package/mcp/dist/cli-extract.js +3 -2
  9. package/mcp/dist/cli-graph.js +17 -3
  10. package/mcp/dist/cli-hooks-output.js +1 -1
  11. package/mcp/dist/cli-hooks-session.js +1 -1
  12. package/mcp/dist/cli-hooks.js +5 -3
  13. package/mcp/dist/cli.js +1 -1
  14. package/mcp/dist/content-archive.js +21 -12
  15. package/mcp/dist/content-citation.js +13 -2
  16. package/mcp/dist/content-learning.js +6 -4
  17. package/mcp/dist/content-metadata.js +10 -0
  18. package/mcp/dist/core-finding.js +1 -1
  19. package/mcp/dist/data-access.js +10 -31
  20. package/mcp/dist/data-tasks.js +5 -26
  21. package/mcp/dist/embedding.js +0 -1
  22. package/mcp/dist/entrypoint.js +4 -0
  23. package/mcp/dist/finding-impact.js +1 -32
  24. package/mcp/dist/finding-journal.js +1 -1
  25. package/mcp/dist/finding-lifecycle.js +2 -7
  26. package/mcp/dist/governance-locks.js +6 -0
  27. package/mcp/dist/governance-policy.js +1 -7
  28. package/mcp/dist/governance-scores.js +1 -7
  29. package/mcp/dist/hooks.js +23 -0
  30. package/mcp/dist/init-config.js +1 -1
  31. package/mcp/dist/init-preferences.js +1 -1
  32. package/mcp/dist/init-setup.js +1 -50
  33. package/mcp/dist/init-shared.js +53 -1
  34. package/mcp/dist/init.js +21 -6
  35. package/mcp/dist/link-context.js +1 -1
  36. package/mcp/dist/link-doctor.js +11 -54
  37. package/mcp/dist/link.js +4 -53
  38. package/mcp/dist/mcp-extract-facts.js +11 -6
  39. package/mcp/dist/mcp-finding.js +10 -14
  40. package/mcp/dist/mcp-graph.js +6 -6
  41. package/mcp/dist/mcp-hooks.js +1 -1
  42. package/mcp/dist/mcp-search.js +3 -8
  43. package/mcp/dist/mcp-session.js +12 -2
  44. package/mcp/dist/memory-ui-assets.js +1 -36
  45. package/mcp/dist/memory-ui-graph.js +152 -50
  46. package/mcp/dist/memory-ui-page.js +7 -5
  47. package/mcp/dist/memory-ui-scripts.js +42 -36
  48. package/mcp/dist/phren-core.js +2 -0
  49. package/mcp/dist/phren-paths.js +1 -2
  50. package/mcp/dist/proactivity.js +5 -5
  51. package/mcp/dist/project-config.js +1 -1
  52. package/mcp/dist/provider-adapters.js +1 -1
  53. package/mcp/dist/query-correlation.js +22 -19
  54. package/mcp/dist/session-checkpoints.js +14 -14
  55. package/mcp/dist/shared-data-utils.js +28 -0
  56. package/mcp/dist/shared-fragment-graph.js +11 -11
  57. package/mcp/dist/shared-governance.js +1 -1
  58. package/mcp/dist/shared-retrieval.js +2 -10
  59. package/mcp/dist/shared-search-fallback.js +2 -12
  60. package/mcp/dist/shared.js +2 -3
  61. package/mcp/dist/shell-entry.js +1 -1
  62. package/mcp/dist/shell-input.js +62 -52
  63. package/mcp/dist/shell-palette.js +6 -1
  64. package/mcp/dist/shell-render.js +9 -5
  65. package/mcp/dist/shell-state-store.js +1 -4
  66. package/mcp/dist/shell-view.js +4 -4
  67. package/mcp/dist/shell.js +4 -54
  68. package/mcp/dist/status.js +2 -8
  69. package/mcp/dist/utils.js +1 -1
  70. package/package.json +1 -2
  71. package/skills/docs.md +11 -11
  72. package/starter/README.md +1 -1
  73. package/starter/global/CLAUDE.md +2 -2
  74. package/starter/global/skills/audit.md +10 -10
  75. package/mcp/dist/cli-hooks-retrieval.js +0 -2
  76. package/mcp/dist/impact-scoring.js +0 -22
package/README.md CHANGED
@@ -10,7 +10,7 @@
10
10
  </p>
11
11
 
12
12
  <p align="center">
13
- Every time you start a new session, your AI agent forgets everything it learned. Phren fixes that — findings, decisions, and patterns persist as markdown in a git repo you control. No database, no hosted service, no vendor lock-in. Works across sessions, projects, and machines.
13
+ Every time you start a new session, your AI agent forgets everything it learned. Phren fixes that. Findings, decisions, and patterns persist as markdown in a git repo you control. No database, no hosted service, no vendor lock-in.
14
14
  </p>
15
15
 
16
16
  ---
@@ -25,18 +25,18 @@ That single command creates `~/.phren`, wires up MCP, installs hooks, and gives
25
25
 
26
26
  ## What phren tracks
27
27
 
28
- - **Findings** bugs hit, patterns discovered, decisions and their reasoning. Tagged by type (`[pattern]`, `[decision]`, `[pitfall]`, `[observation]`) with per-type decay rates
29
- - **Fragments** named concepts (auth, build, React) that connect findings across projects. When you search for a topic, phren pulls in everything linked to that fragment
30
- - **Tasks** work items that persist across sessions with priority, pinning, and GitHub issue linking
31
- - **Sessions** conversation boundaries with summaries and checkpoints, so the next session picks up where this one left off
32
- - **Skills** reusable slash commands you teach phren. Drop them in `~/.phren/global/skills/` and they work everywhere
28
+ - **Findings**: bugs hit, patterns discovered, decisions and their reasoning. Tagged by type (`[pattern]`, `[decision]`, `[pitfall]`, `[observation]`) with per-type decay rates
29
+ - **Fragments**: named concepts (auth, build, React) that connect findings across projects. Search for a topic and phren pulls in everything linked to that fragment
30
+ - **Tasks**: work items that persist across sessions with priority, pinning, and GitHub issue linking
31
+ - **Sessions**: conversation boundaries with summaries and checkpoints, so the next session picks up where the last one left off
32
+ - **Skills**: reusable slash commands you teach phren. Drop them in `~/.phren/global/skills/` and they work everywhere
33
33
 
34
34
  ## How it works
35
35
 
36
36
  - **Surfaces relevant context on every prompt** via hooks. Agents build on what they know instead of starting fresh
37
- - **Trust scores decay over time** old findings lose confidence. Decisions never decay. Observations expire in 14 days
37
+ - **Trust scores decay over time.** Old findings lose confidence. Decisions never decay. Observations expire in 14 days
38
38
  - **Syncs across machines** through git push/pull. No coordination service
39
- - **Works with Claude Code, Copilot, Cursor, and Codex** one memory, every agent
39
+ - **Works with Claude Code, Copilot, Cursor, and Codex.** One store, every agent
40
40
  - **Shell and web UI** for browsing, searching, and triaging (`phren` or `phren web-ui`)
41
41
 
42
42
  ## Quick start
@@ -45,7 +45,7 @@ That single command creates `~/.phren`, wires up MCP, installs hooks, and gives
45
45
  npx @phren/cli init # set up phren (interactive walkthrough)
46
46
  ```
47
47
 
48
- Init detects your tools, registers MCP servers, and installs lifecycle hooks. After it finishes, open a prompt in any tracked project phren is already injecting context.
48
+ Init detects your tools, registers MCP servers, and installs lifecycle hooks. After it finishes, open a prompt in any tracked project. Phren is already injecting context.
49
49
 
50
50
  To add a project later, run `phren add` from that directory. To browse what phren knows, run `phren` to open the interactive shell.
51
51
 
@@ -1,6 +1,6 @@
1
1
  export const cliManifest = {
2
2
  surface: "cli",
3
- version: "1.31.1",
3
+ version: "0.0.13",
4
4
  actions: {
5
5
  // Finding management
6
6
  "finding.add": { implemented: true, handler: "cli-actions.ts:handleAddFinding" },
@@ -1,6 +1,6 @@
1
1
  export const mcpManifest = {
2
2
  surface: "mcp",
3
- version: "1.31.1",
3
+ version: "0.0.13",
4
4
  actions: {
5
5
  // Finding management
6
6
  "finding.add": { implemented: true, handler: "index.ts:add_finding" },
@@ -1,6 +1,6 @@
1
1
  export const vscodeManifest = {
2
2
  surface: "vscode",
3
- version: "1.31.1",
3
+ version: "0.0.13",
4
4
  actions: {
5
5
  // Finding management
6
6
  "finding.add": { implemented: true, handler: "extension.ts:phren.addFinding" },
@@ -1,6 +1,6 @@
1
1
  export const webUiManifest = {
2
2
  surface: "web-ui",
3
- version: "1.31.1",
3
+ version: "0.0.13",
4
4
  actions: {
5
5
  // Finding management
6
6
  "finding.add": { implemented: false, reason: "Web UI is read-only for findings (review queue only)" },
@@ -14,35 +14,27 @@ import { runSearch, runFragmentSearch, parseFragmentSearchArgs, runRelatedDocs,
14
14
  import { resolveRuntimeProfile } from "./runtime-profile.js";
15
15
  import { getProjectConsolidationStatus, CONSOLIDATION_ENTRY_THRESHOLD } from "./content-validate.js";
16
16
  import { listAllSessions } from "./mcp-session.js";
17
- export async function handleSearch(opts, profile) {
18
- const result = await runSearch(opts, getPhrenPath(), profile);
19
- if (result.lines.length > 0) {
17
+ async function runAndPrint(fn) {
18
+ const result = await fn();
19
+ if (result.lines.length > 0)
20
20
  console.log(result.lines.join("\n"));
21
- }
22
21
  if (result.exitCode !== 0)
23
22
  process.exit(result.exitCode);
24
23
  }
24
+ export async function handleSearch(opts, profile) {
25
+ await runAndPrint(() => runSearch(opts, getPhrenPath(), profile));
26
+ }
25
27
  export async function handleFragmentSearch(args, profile) {
26
28
  const opts = parseFragmentSearchArgs(args);
27
29
  if (!opts)
28
30
  return;
29
- const result = await runFragmentSearch(opts.query, getPhrenPath(), profile, opts);
30
- if (result.lines.length > 0) {
31
- console.log(result.lines.join("\n"));
32
- }
33
- if (result.exitCode !== 0)
34
- process.exit(result.exitCode);
31
+ await runAndPrint(() => runFragmentSearch(opts.query, getPhrenPath(), profile, opts));
35
32
  }
36
33
  export async function handleRelatedDocs(args, profile) {
37
34
  const opts = parseRelatedDocsArgs(args);
38
35
  if (!opts)
39
36
  return;
40
- const result = await runRelatedDocs(opts.entity, getPhrenPath(), profile, opts);
41
- if (result.lines.length > 0) {
42
- console.log(result.lines.join("\n"));
43
- }
44
- if (result.exitCode !== 0)
45
- process.exit(result.exitCode);
37
+ await runAndPrint(() => runRelatedDocs(opts.entity, getPhrenPath(), profile, opts));
46
38
  }
47
39
  export async function handleAddFinding(project, learning) {
48
40
  if (!project || !learning) {
@@ -184,41 +176,46 @@ export async function handleDoctor(args) {
184
176
  if ((process.env.PHREN_DEBUG))
185
177
  process.stderr.write(`[phren] doctor searchMissAnalysis: ${errorMessage(err)}\n`);
186
178
  }
179
+ const semStatus = await getSemanticSearchStatus(getPhrenPath(), profile || undefined);
180
+ if (!semStatus.ollamaUrl) {
181
+ console.log("- ok semantic-search: disabled (optional; enable for fuzzy/paraphrase-heavy retrieval)");
182
+ }
183
+ else if (!semStatus.available) {
184
+ console.log(`- warn semantic-search: Ollama not running at ${semStatus.ollamaUrl} (start Ollama or set PHREN_OLLAMA_URL=off to disable)`);
185
+ }
186
+ else if (!semStatus.modelReady) {
187
+ console.log(`- warn semantic-search: model ${semStatus.model} not pulled (run: ollama pull ${semStatus.model})`);
188
+ }
189
+ else {
190
+ console.log(`- ok semantic-search: ${semStatus.model} ready, ${semStatus.coverage}`);
191
+ }
192
+ process.exit(result.ok ? 0 : 1);
193
+ }
194
+ async function getSemanticSearchStatus(phrenPath, profile) {
187
195
  try {
188
196
  const { checkOllamaAvailable, checkModelAvailable, getOllamaUrl, getEmbeddingModel } = await import("./shared-ollama.js");
189
197
  const { getEmbeddingCache, formatEmbeddingCoverage } = await import("./shared-embedding-cache.js");
190
198
  const { listIndexedDocumentPaths } = await import("./shared-index.js");
191
199
  const ollamaUrl = getOllamaUrl();
192
- if (!ollamaUrl) {
193
- console.log("- ok semantic-search: disabled (optional; enable for fuzzy/paraphrase-heavy retrieval)");
194
- }
195
- else {
196
- const available = await checkOllamaAvailable();
197
- if (!available) {
198
- console.log(`- warn semantic-search: Ollama not running at ${ollamaUrl} (start Ollama or set PHREN_OLLAMA_URL=off to disable)`);
199
- }
200
- else {
201
- const model = getEmbeddingModel();
202
- const modelReady = await checkModelAvailable();
203
- if (!modelReady) {
204
- console.log(`- warn semantic-search: model ${model} not pulled (run: ollama pull ${model})`);
205
- }
206
- else {
207
- const phrenPath = getPhrenPath();
208
- const cache = getEmbeddingCache(phrenPath);
209
- await cache.load().catch(() => { });
210
- const allPaths = listIndexedDocumentPaths(phrenPath, profile || undefined);
211
- const coverage = cache.coverage(allPaths);
212
- console.log(`- ok semantic-search: ${model} ready, ${formatEmbeddingCoverage(coverage)}`);
213
- }
214
- }
215
- }
200
+ if (!ollamaUrl)
201
+ return { ollamaUrl: null };
202
+ const available = await checkOllamaAvailable();
203
+ if (!available)
204
+ return { ollamaUrl, available: false };
205
+ const model = getEmbeddingModel();
206
+ const modelReady = await checkModelAvailable();
207
+ if (!modelReady)
208
+ return { ollamaUrl, available: true, modelReady: false, model };
209
+ const cache = getEmbeddingCache(phrenPath);
210
+ await cache.load().catch(() => { });
211
+ const coverage = formatEmbeddingCoverage(cache.coverage(listIndexedDocumentPaths(phrenPath, profile)));
212
+ return { ollamaUrl, available: true, modelReady: true, model, coverage };
216
213
  }
217
214
  catch (err) {
218
215
  if ((process.env.PHREN_DEBUG))
219
- process.stderr.write(`[phren] doctor ollamaStatus: ${errorMessage(err)}\n`);
216
+ process.stderr.write(`[phren] getSemanticSearchStatus: ${errorMessage(err)}\n`);
217
+ return { ollamaUrl: null, status: "error", error: errorMessage(err) };
220
218
  }
221
- process.exit(result.ok ? 0 : 1);
222
219
  }
223
220
  export async function handleStatus() {
224
221
  const phrenPath = getPhrenPath();
@@ -231,34 +228,24 @@ export async function handleStatus() {
231
228
  console.log(`unsynced commits: ${runtime.lastSync?.unsyncedCommits ?? 0}`);
232
229
  if (runtime.lastSync?.lastPushDetail)
233
230
  console.log(`push detail: ${runtime.lastSync.lastPushDetail}`);
234
- try {
235
- const { getOllamaUrl, checkOllamaAvailable, checkModelAvailable, getEmbeddingModel } = await import("./shared-ollama.js");
236
- const { getEmbeddingCache, formatEmbeddingCoverage } = await import("./shared-embedding-cache.js");
237
- const { listIndexedDocumentPaths } = await import("./shared-index.js");
238
- const ollamaUrl = getOllamaUrl();
239
- if (!ollamaUrl) {
240
- console.log("semantic-search: disabled (optional)");
241
- return;
242
- }
243
- const available = await checkOllamaAvailable();
244
- if (!available) {
245
- console.log(`semantic-search: offline (${ollamaUrl})`);
246
- return;
231
+ const semStatus = await getSemanticSearchStatus(phrenPath, profile || undefined);
232
+ if (!semStatus.ollamaUrl) {
233
+ const errStatus = semStatus;
234
+ if (errStatus.status === "error") {
235
+ console.log(`semantic-search: error (${errStatus.error ?? "unknown"})`);
247
236
  }
248
- const model = getEmbeddingModel();
249
- const modelReady = await checkModelAvailable();
250
- if (!modelReady) {
251
- console.log(`semantic-search: model missing (${model})`);
252
- return;
237
+ else {
238
+ console.log("semantic-search: disabled (optional)");
253
239
  }
254
- const cache = getEmbeddingCache(phrenPath);
255
- await cache.load().catch(() => { });
256
- const coverage = cache.coverage(listIndexedDocumentPaths(phrenPath, profile || undefined));
257
- console.log(`semantic-search: ${model} ready, ${formatEmbeddingCoverage(coverage)}`);
258
240
  }
259
- catch (err) {
260
- if ((process.env.PHREN_DEBUG))
261
- process.stderr.write(`[phren] handleStatus semanticSearch: ${errorMessage(err)}\n`);
241
+ else if (!semStatus.available) {
242
+ console.log(`semantic-search: offline (${semStatus.ollamaUrl})`);
243
+ }
244
+ else if (!semStatus.modelReady) {
245
+ console.log(`semantic-search: model missing (${semStatus.model})`);
246
+ }
247
+ else {
248
+ console.log(`semantic-search: ${semStatus.model} ready, ${semStatus.coverage}`);
262
249
  }
263
250
  }
264
251
  export async function handleQualityFeedback(args) {
@@ -253,7 +253,6 @@ function handleConfigSynonyms(args) {
253
253
  printSynonymsUsage();
254
254
  process.exit(1);
255
255
  }
256
- const proactivityConfigSnapshot = buildProactivitySnapshot;
257
256
  function handleConfigProactivity(subcommand, args) {
258
257
  const phrenPath = getPhrenPath();
259
258
  const { project: projectArg, rest: filteredArgs } = parseProjectArg(args);
@@ -273,7 +272,7 @@ function handleConfigProactivity(subcommand, args) {
273
272
  }, null, 2));
274
273
  return;
275
274
  }
276
- console.log(JSON.stringify(proactivityConfigSnapshot(phrenPath), null, 2));
275
+ console.log(JSON.stringify(buildProactivitySnapshot(phrenPath), null, 2));
277
276
  return;
278
277
  }
279
278
  if (filteredArgs.length !== 1) {
@@ -315,7 +314,7 @@ function handleConfigProactivity(subcommand, args) {
315
314
  writeGovernanceInstallPreferences(phrenPath, { proactivityTask: level });
316
315
  break;
317
316
  }
318
- console.log(JSON.stringify(proactivityConfigSnapshot(phrenPath), null, 2));
317
+ console.log(JSON.stringify(buildProactivitySnapshot(phrenPath), null, 2));
319
318
  }
320
319
  function projectOwnershipConfigSnapshot(phrenPath) {
321
320
  const prefs = readInstallPreferences(phrenPath);
@@ -545,7 +544,7 @@ export async function handleIndexPolicy(args) {
545
544
  }
546
545
  const result = updateIndexPolicy(getPhrenPath(), patch);
547
546
  if (!result.ok) {
548
- console.log(result.error);
547
+ console.error(result.error);
549
548
  if (result.code === "PERMISSION_DENIED")
550
549
  process.exit(1);
551
550
  return;
@@ -635,7 +634,7 @@ export async function handleRetentionPolicy(args) {
635
634
  }
636
635
  const result = updateRetentionPolicy(phrenPath, patch);
637
636
  if (!result.ok) {
638
- console.log(result.error);
637
+ console.error(result.error);
639
638
  if (result.code === "PERMISSION_DENIED")
640
639
  process.exit(1);
641
640
  return;
@@ -240,9 +240,10 @@ export async function handleExtractMemories(projectArg, cwdArg, silent = false,
240
240
  console.log(`Skipped memory extraction for ${project}: findings proactivity is low.`);
241
241
  return;
242
242
  }
243
- const days = Number.parseInt((process.env.PHREN_MEMORY_EXTRACT_WINDOW_DAYS) || "30", 10);
243
+ const rawDays = Number.parseInt((process.env.PHREN_MEMORY_EXTRACT_WINDOW_DAYS) || "30", 10);
244
+ const days = Number.isNaN(rawDays) ? 30 : Math.max(1, rawDays);
244
245
  const threshold = Number.parseFloat((process.env.PHREN_MEMORY_AUTO_ACCEPT) || String(getRetentionPolicy(getPhrenPath()).autoAcceptThreshold));
245
- const records = parseGitLogRecords(repoRoot, Number.isNaN(days) ? 30 : days);
246
+ const records = parseGitLogRecords(repoRoot, days);
246
247
  const ghCandidates = isFeatureEnabled("PHREN_FEATURE_GH_MINING", false)
247
248
  ? await mineGithubCandidates(repoRoot)
248
249
  : [];
@@ -1,7 +1,7 @@
1
1
  import { getPhrenPath } from "./shared.js";
2
2
  import { buildIndex, queryRows } from "./shared-index.js";
3
3
  import { resolveRuntimeProfile } from "./runtime-profile.js";
4
- import { errorMessage } from "./utils.js";
4
+ import { isValidProjectName, errorMessage } from "./utils.js";
5
5
  /**
6
6
  * CLI: phren graph [--project <name>] [--limit <n>]
7
7
  * Displays the fragment knowledge graph as a table.
@@ -19,6 +19,10 @@ export async function handleGraphRead(args) {
19
19
  limit = Math.min(Math.max(parseInt(args[++i], 10) || 20, 1), 200);
20
20
  }
21
21
  }
22
+ if (project && !isValidProjectName(project)) {
23
+ console.error(`Invalid project name: "${project}".`);
24
+ process.exit(1);
25
+ }
22
26
  const db = await buildIndex(phrenPath, profile);
23
27
  let sql;
24
28
  let params;
@@ -75,6 +79,10 @@ export async function handleGraphLink(args) {
75
79
  process.exit(1);
76
80
  }
77
81
  const [project, findingText, fragmentName] = args;
82
+ if (!isValidProjectName(project)) {
83
+ console.error(`Invalid project name: "${project}".`);
84
+ process.exit(1);
85
+ }
78
86
  const phrenPath = getPhrenPath();
79
87
  const profile = resolveRuntimeProfile(phrenPath);
80
88
  const db = await buildIndex(phrenPath, profile);
@@ -95,7 +103,10 @@ export async function handleGraphLink(args) {
95
103
  try {
96
104
  db.run("INSERT OR IGNORE INTO entities (name, type, first_seen_at) VALUES (?, ?, ?)", [normalizedFragment, "fragment", new Date().toISOString().slice(0, 10)]);
97
105
  }
98
- catch { /* best effort */ }
106
+ catch (err) {
107
+ if (process.env.PHREN_DEBUG)
108
+ process.stderr.write(`[phren] graph link insert fragment: ${errorMessage(err)}\n`);
109
+ }
99
110
  const fragmentResult = db.exec("SELECT id FROM entities WHERE name = ? AND type = ?", [normalizedFragment, "fragment"]);
100
111
  if (!fragmentResult?.length || !fragmentResult[0]?.values?.length) {
101
112
  console.error("Failed to create fragment.");
@@ -105,7 +116,10 @@ export async function handleGraphLink(args) {
105
116
  try {
106
117
  db.run("INSERT OR IGNORE INTO entities (name, type, first_seen_at) VALUES (?, ?, ?)", [sourceDoc, "document", new Date().toISOString().slice(0, 10)]);
107
118
  }
108
- catch { /* best effort */ }
119
+ catch (err) {
120
+ if (process.env.PHREN_DEBUG)
121
+ process.stderr.write(`[phren] graph link insert document: ${errorMessage(err)}\n`);
122
+ }
109
123
  const docResult = db.exec("SELECT id FROM entities WHERE name = ? AND type = ?", [sourceDoc, "document"]);
110
124
  if (!docResult?.length || !docResult[0]?.values?.length) {
111
125
  console.error("Failed to create document fragment.");
@@ -116,7 +116,7 @@ export function buildHookOutput(selected, usedTokens, intent, gitCtx, detectedPr
116
116
  }
117
117
  }
118
118
  logImpact(phrenPathLocal, impactEntries);
119
- parts.push("<phren-context>");
119
+ parts.push("</phren-context>");
120
120
  const changedCount = gitCtx?.changedFiles.size ?? 0;
121
121
  if (gitCtx) {
122
122
  const fileHits = selected.filter((r) => fileRelevanceBoost(r.doc.path, gitCtx.changedFiles) > 0).length;
@@ -749,7 +749,7 @@ export async function handleHookStop() {
749
749
  }
750
750
  catch (err) {
751
751
  if (process.env.PHREN_DEBUG)
752
- process.stderr.write(`[phren] hookSessionStart transcriptParse: ${errorMessage(err)}\n`);
752
+ process.stderr.write(`[phren] hookStop transcriptParse: ${errorMessage(err)}\n`);
753
753
  }
754
754
  }
755
755
  captureInput = assistantTexts.join("\n");
@@ -164,7 +164,9 @@ export async function handleHookPrompt() {
164
164
  for (const kw of keywordEntries) {
165
165
  sessionTopics[kw] = (sessionTopics[kw] ?? 0) + 1;
166
166
  }
167
- fs.writeFileSync(topicFile, JSON.stringify(sessionTopics));
167
+ const topicTmp = `${topicFile}.tmp-${process.pid}`;
168
+ fs.writeFileSync(topicTmp, JSON.stringify(sessionTopics));
169
+ fs.renameSync(topicTmp, topicFile);
168
170
  // Find hot topics (3+ mentions this session)
169
171
  hotTopics = Object.entries(sessionTopics)
170
172
  .filter(([, count]) => count >= 3)
@@ -352,7 +354,7 @@ export async function handleHookPrompt() {
352
354
  parts.push(`Findings ready for consolidation:`);
353
355
  parts.push(notices.join("\n"));
354
356
  parts.push(`Run phren-consolidate when ready.`);
355
- parts.push(`<phren-notice>`);
357
+ parts.push(`</phren-notice>`);
356
358
  }
357
359
  if (noticeFile) {
358
360
  try {
@@ -374,7 +376,7 @@ export async function handleHookPrompt() {
374
376
  }
375
377
  catch (err) {
376
378
  const msg = errorMessage(err);
377
- process.stdout.write(`\n<phren-error>phren hook failed: ${msg}. Check ~/.phren/.runtime/debug.log for details.<phren-error>\n`);
379
+ process.stdout.write(`\n<phren-error>phren hook failed: ${msg}. Check ~/.phren/.runtime/debug.log for details.</phren-error>\n`);
378
380
  debugLog(`hook-prompt error: ${msg}`);
379
381
  process.exit(0);
380
382
  }
package/mcp/dist/cli.js CHANGED
@@ -108,7 +108,7 @@ export async function runCliCommand(command, args) {
108
108
  case "session-context":
109
109
  return handleSessionContext();
110
110
  default:
111
- console.error(`Unknown command: ${command}`);
111
+ console.error(`Unknown command: ${command}\nRun 'phren --help' for available commands.`);
112
112
  process.exit(1);
113
113
  }
114
114
  }
@@ -77,12 +77,11 @@ function parseActiveEntries(content) {
77
77
  /**
78
78
  * Check whether a bullet already exists in a reference file (already archived).
79
79
  */
80
- function isAlreadyArchived(referenceDir, bullet) {
80
+ /** Build a Set of normalized bullet strings from all .md files in referenceDir. */
81
+ function buildArchivedBulletSet(referenceDir) {
82
+ const bulletSet = new Set();
81
83
  if (!fs.existsSync(referenceDir))
82
- return false;
83
- const normalizedBullet = stripComments(bullet).replace(/^-\s+/, "").trim().toLowerCase();
84
- if (!normalizedBullet)
85
- return false;
84
+ return bulletSet;
86
85
  try {
87
86
  const stack = [referenceDir];
88
87
  while (stack.length > 0) {
@@ -100,17 +99,23 @@ function isAlreadyArchived(referenceDir, bullet) {
100
99
  if (!line.startsWith("- "))
101
100
  continue;
102
101
  const normalizedLine = stripComments(line).replace(/^-\s+/, "").trim().toLowerCase();
103
- if (normalizedLine === normalizedBullet)
104
- return true;
102
+ if (normalizedLine)
103
+ bulletSet.add(normalizedLine);
105
104
  }
106
105
  }
107
106
  }
108
107
  }
109
108
  catch (err) {
110
109
  if ((process.env.PHREN_DEBUG))
111
- process.stderr.write(`[phren] isDuplicateInReference: ${errorMessage(err)}\n`);
110
+ process.stderr.write(`[phren] buildArchivedBulletSet: ${errorMessage(err)}\n`);
112
111
  }
113
- return false;
112
+ return bulletSet;
113
+ }
114
+ function isAlreadyArchived(archivedSet, bullet) {
115
+ const normalizedBullet = stripComments(bullet).replace(/^-\s+/, "").trim().toLowerCase();
116
+ if (!normalizedBullet)
117
+ return false;
118
+ return archivedSet.has(normalizedBullet);
114
119
  }
115
120
  /**
116
121
  * Archive the oldest entries from FINDINGS.md into reference/{topic}.md files.
@@ -152,8 +157,11 @@ export function autoArchiveToReference(phrenPath, project, keepCount) {
152
157
  throw wxErr;
153
158
  }
154
159
  }
155
- catch {
156
- return phrenErr("Consolidation already running", PhrenError.LOCK_TIMEOUT);
160
+ catch (innerErr) {
161
+ if (innerErr.code === "EEXIST" || innerErr.code === "ENOENT") {
162
+ return phrenErr("Consolidation already running", PhrenError.LOCK_TIMEOUT);
163
+ }
164
+ throw innerErr;
157
165
  }
158
166
  }
159
167
  else {
@@ -173,9 +181,10 @@ export function autoArchiveToReference(phrenPath, project, keepCount) {
173
181
  const referenceDir = path.join(resolvedDir, "reference");
174
182
  const { topics } = readProjectTopics(phrenPath, project);
175
183
  const today = new Date().toISOString().slice(0, 10);
184
+ const archivedSet = buildArchivedBulletSet(referenceDir);
176
185
  const actuallyArchived = [];
177
186
  for (const entry of toArchive) {
178
- if (isAlreadyArchived(referenceDir, entry.bullet)) {
187
+ if (isAlreadyArchived(archivedSet, entry.bullet)) {
179
188
  debugLog(`auto_archive: skipping already-archived entry: "${entry.bullet.slice(0, 60)}"`);
180
189
  continue;
181
190
  }
@@ -1,5 +1,4 @@
1
1
  import * as fs from "fs";
2
- import { statSync } from "fs";
3
2
  import * as path from "path";
4
3
  import { debugLog, EXEC_TIMEOUT_MS, EXEC_TIMEOUT_QUICK_MS } from "./shared.js";
5
4
  import { errorMessage, runGitOrThrow } from "./utils.js";
@@ -170,8 +169,16 @@ function resolveCitationFile(citation) {
170
169
  }
171
170
  // Session-scoped caches for git I/O during citation validation.
172
171
  // Keyed by "repo\0commit" and "repo\0file\0line" respectively.
172
+ const MAX_CACHE_ENTRIES = 500;
173
173
  const commitExistsCache = new Map();
174
174
  const blameCache = new Map();
175
+ function evictOldest(cache) {
176
+ if (cache.size <= MAX_CACHE_ENTRIES)
177
+ return;
178
+ const first = cache.keys().next().value;
179
+ if (first !== undefined)
180
+ cache.delete(first);
181
+ }
175
182
  function commitExists(repoPath, commit) {
176
183
  const key = `${repoPath}\0${commit}`;
177
184
  const cached = commitExistsCache.get(key);
@@ -180,11 +187,13 @@ function commitExists(repoPath, commit) {
180
187
  try {
181
188
  runGitOrThrow(repoPath, ["cat-file", "-e", `${commit}^{commit}`], EXEC_TIMEOUT_QUICK_MS);
182
189
  commitExistsCache.set(key, true);
190
+ evictOldest(commitExistsCache);
183
191
  return true;
184
192
  }
185
193
  catch (err) {
186
194
  debugLog(`commitExists: commit ${commit} not found in ${repoPath}: ${errorMessage(err)}`);
187
195
  commitExistsCache.set(key, false);
196
+ evictOldest(commitExistsCache);
188
197
  return false;
189
198
  }
190
199
  }
@@ -197,11 +206,13 @@ function cachedBlame(repoPath, relFile, line) {
197
206
  const out = runGitOrThrow(repoPath, ["blame", "-L", `${line},${line}`, "--porcelain", relFile], 10_000).trim();
198
207
  const first = out.split("\n")[0] || "";
199
208
  blameCache.set(key, first);
209
+ evictOldest(blameCache);
200
210
  return first;
201
211
  }
202
212
  catch (err) {
203
213
  debugLog(`cachedBlame: git blame failed for ${relFile}:${line}: ${errorMessage(err)}`);
204
214
  blameCache.set(key, false);
215
+ evictOldest(blameCache);
205
216
  return false;
206
217
  }
207
218
  }
@@ -274,7 +285,7 @@ function confidenceForAge(ageDays, decay) {
274
285
  }
275
286
  function wasFileModifiedAfter(filePath, findingDate) {
276
287
  try {
277
- const stat = statSync(filePath);
288
+ const stat = fs.statSync(filePath);
278
289
  const fileModified = stat.mtime.toISOString().slice(0, 10);
279
290
  return fileModified > findingDate;
280
291
  }
@@ -378,7 +378,7 @@ export function addFindingToFile(phrenPath, project, learning, citationInput, op
378
378
  if (!result.ok)
379
379
  return result;
380
380
  if (typeof result.data === "string")
381
- return phrenOk(result.data);
381
+ return phrenOk({ message: result.data, status: "skipped" });
382
382
  appendAuditLog(phrenPath, "add_finding", `project=${project}${result.data.created ? " created=true" : ""} citation_commit=${result.data.citation.commit ?? "none"} citation_file=${result.data.citation.file ?? "none"}`);
383
383
  const cap = Number.parseInt((process.env.PHREN_FINDINGS_CAP) || "", 10) || DEFAULT_FINDINGS_CAP;
384
384
  const activeCount = countActiveFindings(result.data.content);
@@ -390,10 +390,12 @@ export function addFindingToFile(phrenPath, project, learning, citationInput, op
390
390
  }
391
391
  if (result.data.created) {
392
392
  const createdMsg = `Created FINDINGS.md for "${project}" and added insight.`;
393
- return phrenOk(result.data.tagWarning ? `${createdMsg} Warning: ${result.data.tagWarning}` : createdMsg);
393
+ const message = result.data.tagWarning ? `${createdMsg} Warning: ${result.data.tagWarning}` : createdMsg;
394
+ return phrenOk({ message, status: "created" });
394
395
  }
395
396
  const addedMsg = `Added finding to ${project}: ${result.data.bullet} (with citation metadata)`;
396
- return phrenOk(result.data.tagWarning ? `${addedMsg} Warning: ${result.data.tagWarning}` : addedMsg);
397
+ const message = result.data.tagWarning ? `${addedMsg} Warning: ${result.data.tagWarning}` : addedMsg;
398
+ return phrenOk({ message, status: "added" });
397
399
  }
398
400
  export function addFindingsToFile(phrenPath, project, learnings, opts) {
399
401
  if (!isValidProjectName(project))
@@ -402,8 +404,8 @@ export function addFindingsToFile(phrenPath, project, learnings, opts) {
402
404
  if (!resolvedDir)
403
405
  return phrenErr(`Invalid project name: "${project}".`, PhrenError.INVALID_PROJECT_NAME);
404
406
  const learningsPath = path.join(resolvedDir, "FINDINGS.md");
405
- const today = new Date().toISOString().slice(0, 10);
406
407
  const nowIso = new Date().toISOString();
408
+ const today = nowIso.slice(0, 10);
407
409
  const resolvedCitationInputResult = resolveFindingCitationInput(phrenPath, project);
408
410
  if (!resolvedCitationInputResult.ok)
409
411
  return resolvedCitationInputResult;
@@ -172,6 +172,16 @@ export function stripAllMetadata(line) {
172
172
  export function stripComments(text) {
173
173
  return text.replace(METADATA_REGEX.anyComment, "").trim();
174
174
  }
175
+ /** Normalize finding text for comparison: strips bullet prefix, HTML comments, confidence tags, normalizes whitespace, lowercases. */
176
+ export function normalizeFindingText(raw) {
177
+ return raw
178
+ .replace(/^-\s+/, "")
179
+ .replace(/<!--.*?-->/g, " ")
180
+ .replace(/\[confidence\s+[01](?:\.\d+)?\]/gi, " ")
181
+ .replace(/\s+/g, " ")
182
+ .trim()
183
+ .toLowerCase();
184
+ }
175
185
  // ---------------------------------------------------------------------------
176
186
  // Add helpers — append metadata comments to a line
177
187
  // ---------------------------------------------------------------------------
@@ -18,7 +18,7 @@ export function addFinding(phrenPath, project, finding, citation, findingType) {
18
18
  if (!result.ok) {
19
19
  return { ok: false, message: result.error };
20
20
  }
21
- return { ok: true, message: result.data, data: { project, finding: taggedFinding } };
21
+ return { ok: true, message: result.data.message, data: { project, finding: taggedFinding } };
22
22
  }
23
23
  /**
24
24
  * Remove a finding by partial text match.