@phren/cli 0.0.5 → 0.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -150,6 +150,26 @@ export async function handleHookPrompt() {
150
150
  if (!keywords)
151
151
  process.exit(0);
152
152
  debugLog(`hook-prompt keywords: "${keywords}"`);
153
+ // Session momentum: track topic frequencies within the session
154
+ let hotTopics = [];
155
+ if (sessionId) {
156
+ const topicFile = sessionMarker(getPhrenPath(), `topics-${sessionId}.json`);
157
+ let sessionTopics = {};
158
+ try {
159
+ if (fs.existsSync(topicFile)) {
160
+ sessionTopics = JSON.parse(fs.readFileSync(topicFile, 'utf8'));
161
+ }
162
+ }
163
+ catch { /* ignore parse errors */ }
164
+ for (const kw of keywordEntries) {
165
+ sessionTopics[kw] = (sessionTopics[kw] ?? 0) + 1;
166
+ }
167
+ fs.writeFileSync(topicFile, JSON.stringify(sessionTopics));
168
+ // Find hot topics (3+ mentions this session)
169
+ hotTopics = Object.entries(sessionTopics)
170
+ .filter(([, count]) => count >= 3)
171
+ .map(([topic]) => topic);
172
+ }
153
173
  const tIndex0 = Date.now();
154
174
  const db = await buildIndex(getPhrenPath(), profile);
155
175
  stage.indexMs = Date.now() - tIndex0;
@@ -197,20 +217,18 @@ export async function handleHookPrompt() {
197
217
  stage.rankMs = Date.now() - tRank0;
198
218
  if (!rows.length)
199
219
  process.exit(0);
200
- const safeTokenBudget = clampInt(process.env.PHREN_CONTEXT_TOKEN_BUDGET, 550, 180, 10000);
220
+ let safeTokenBudget = clampInt(process.env.PHREN_CONTEXT_TOKEN_BUDGET, 550, 180, 10000);
201
221
  const safeLineBudget = clampInt(process.env.PHREN_CONTEXT_SNIPPET_LINES, 6, 2, 100);
202
222
  const safeCharBudget = clampInt(process.env.PHREN_CONTEXT_SNIPPET_CHARS, 520, 120, 10000);
223
+ // Session momentum: boost token budget for hot topics
224
+ if (hotTopics.length > 0) {
225
+ safeTokenBudget = Math.min(Math.floor(safeTokenBudget * 1.3), parseInt(process.env.PHREN_MAX_INJECT_TOKENS ?? '2000', 10));
226
+ }
203
227
  const tSelect0 = Date.now();
204
228
  const { selected, usedTokens } = selectSnippets(rows, keywords, safeTokenBudget, safeLineBudget, safeCharBudget);
205
229
  stage.selectMs = Date.now() - tSelect0;
206
230
  if (!selected.length)
207
231
  process.exit(0);
208
- // Log query-to-finding correlations for future pre-warming (gated by env var)
209
- try {
210
- const { logCorrelations: logCorr } = await import("./query-correlation.js");
211
- logCorr(getPhrenPath(), keywords, selected, sessionId);
212
- }
213
- catch { /* non-fatal */ }
214
232
  // Injection budget: cap total injected tokens across all content
215
233
  const maxInjectTokens = clampInt(process.env.PHREN_MAX_INJECT_TOKENS, 2000, 200, 20000);
216
234
  let budgetSelected = selected;
@@ -5,7 +5,7 @@ import { debugLog, EXEC_TIMEOUT_MS, EXEC_TIMEOUT_QUICK_MS } from "./shared.js";
5
5
  import { errorMessage, runGitOrThrow } from "./utils.js";
6
6
  import { findingIdFromLine } from "./finding-impact.js";
7
7
  import { METADATA_REGEX, isArchiveStart, isArchiveEnd } from "./content-metadata.js";
8
- import { FINDING_TYPE_DECAY, extractFindingType } from "./finding-lifecycle.js";
8
+ import { FINDING_TYPE_DECAY, extractFindingType, parseFindingLifecycle } from "./finding-lifecycle.js";
9
9
  export const FINDING_PROVENANCE_SOURCES = [
10
10
  "human",
11
11
  "agent",
@@ -294,6 +294,7 @@ export function filterTrustedFindingsDetailed(content, opts) {
294
294
  ...(options.decay || {}),
295
295
  };
296
296
  const highImpactFindingIds = options.highImpactFindingIds;
297
+ const impactCounts = options.impactCounts;
297
298
  const project = options.project;
298
299
  const lines = content.split("\n");
299
300
  const out = [];
@@ -412,9 +413,29 @@ export function filterTrustedFindingsDetailed(content, opts) {
412
413
  confidence *= 0.9;
413
414
  if (project && highImpactFindingIds?.size) {
414
415
  const findingId = findingIdFromLine(line);
415
- if (highImpactFindingIds.has(findingId))
416
- confidence *= 1.15;
416
+ if (highImpactFindingIds.has(findingId)) {
417
+ // Get surface count for graduated boost
418
+ const surfaceCount = impactCounts?.get(findingId) ?? 3;
419
+ // Log-scaled: 3→1.15x, 10→1.28x, 30→1.38x, capped at 1.4x
420
+ const boost = Math.min(1.4, 1 + 0.1 * Math.log2(Math.max(3, surfaceCount)));
421
+ confidence *= boost;
422
+ // Decay resistance: confirmed findings decay 3x slower
423
+ if (effectiveDate) {
424
+ const realAge = ageDaysForDate(effectiveDate);
425
+ if (realAge !== null) {
426
+ const slowedAge = Math.floor(realAge / 3);
427
+ confidence = Math.max(confidence, confidenceForAge(slowedAge, decay));
428
+ }
429
+ }
430
+ }
417
431
  }
432
+ const lifecycle = parseFindingLifecycle(line);
433
+ if (lifecycle?.status === "superseded")
434
+ confidence *= 0.25;
435
+ if (lifecycle?.status === "retracted")
436
+ confidence *= 0.1;
437
+ if (lifecycle?.status === "contradicted")
438
+ confidence *= 0.4;
418
439
  confidence = Math.max(0, Math.min(1, confidence));
419
440
  if (confidence < minConfidence) {
420
441
  issues.push({ date: effectiveDate || "unknown", bullet: line, reason: "stale" });
@@ -11,7 +11,7 @@ import { isDuplicateFinding, scanForSecrets, normalizeObservationTags, resolveCo
11
11
  import { validateFindingsFormat, validateFinding } from "./content-validate.js";
12
12
  import { countActiveFindings, autoArchiveToReference } from "./content-archive.js";
13
13
  import { resolveAutoFindingTaskItem, resolveFindingTaskReference, resolveFindingSessionId, } from "./finding-context.js";
14
- import { buildLifecycleComments, parseFindingLifecycle, stripLifecycleComments, } from "./finding-lifecycle.js";
14
+ import { buildLifecycleComments, extractFindingType, parseFindingLifecycle, stripLifecycleComments, } from "./finding-lifecycle.js";
15
15
  import { METADATA_REGEX, } from "./content-metadata.js";
16
16
  /** Default cap for active findings before auto-archiving is triggered. */
17
17
  const DEFAULT_FINDINGS_CAP = 20;
@@ -107,6 +107,22 @@ function resolveFindingCitationInput(phrenPath, project, citationInput) {
107
107
  }
108
108
  return phrenOk(Object.keys(resolved).length > 0 ? resolved : undefined);
109
109
  }
110
/**
 * Heuristically classify a finding's type from its free text.
 *
 * Checks a fixed list of keyword patterns in priority order (decision,
 * bug, workaround, pattern, pitfall, context) and returns the first
 * matching type, or null when nothing matches.
 *
 * Fix: alternatives ending in ":" (e.g. "bug:", "workaround:") were
 * previously wrapped in a trailing \b; since ":" is a non-word character
 * that boundary fails when the colon is followed by a space, so prefixes
 * like "Workaround: use X" were never detected. Colon-terminated keywords
 * now use their own boundary-free alternatives.
 *
 * @param {string} text - Raw finding text.
 * @returns {string|null} Detected finding type, or null if unclassified.
 */
export function autoDetectFindingType(text) {
    const lower = text.toLowerCase();
    // Ordered: first match wins, so more specific signals come first.
    const rules = [
        ['decision', /\bdecision:|\b(we decided|chose .+ over|went with)\b/],
        ['bug', /\bbug:|\b(bug in|found a bug|broken|crashes|fails when)\b/],
        ['workaround', /\b(workaround|hack):|\b(work around|temporary fix)\b/],
        ['pattern', /\bpattern:|\b(always .+ before|never .+ without|best practice)\b/],
        ['pitfall', /\b(pitfall|gotcha|trap):|\b(watch out|careful with)\b/],
        ['context', /\bobservation:|\b(currently|as of|right now|at the moment)\b/],
    ];
    for (const [type, re] of rules) {
        if (re.test(lower))
            return type;
    }
    return null;
}
110
126
  function prepareFinding(learning, project, fullHistory, extraAnnotations, citationInput, source, nowIso, inferredRepo, headCommit, phrenPath) {
111
127
  const secretType = scanForSecrets(learning);
112
128
  if (secretType) {
@@ -114,10 +130,17 @@ function prepareFinding(learning, project, fullHistory, extraAnnotations, citati
114
130
  }
115
131
  const today = (nowIso ?? new Date().toISOString()).slice(0, 10);
116
132
  const { text: tagNormalized, warning: tagWarning } = normalizeObservationTags(learning);
117
- const normalizedLearning = resolveCoref(tagNormalized, {
133
+ let normalizedLearning = resolveCoref(tagNormalized, {
118
134
  project,
119
135
  file: citationInput?.file,
120
136
  });
137
+ const existingType = extractFindingType('- ' + normalizedLearning);
138
+ if (!existingType) {
139
+ const detected = autoDetectFindingType(normalizedLearning);
140
+ if (detected) {
141
+ normalizedLearning = `[${detected}] ${normalizedLearning}`;
142
+ }
143
+ }
121
144
  const fid = crypto.randomBytes(4).toString("hex");
122
145
  const fidComment = `<!-- fid:${fid} -->`;
123
146
  const createdComment = `<!-- created: ${today} -->`;
@@ -54,6 +54,46 @@ function normalizeFindingGroupKey(item) {
54
54
  function findingTimelineDate(item) {
55
55
  return item.status_updated || item.date || "0000-00-00";
56
56
  }
57
/**
 * Scan raw file lines and collect every finding bullet (lines starting
 * with "- "), recording each bullet's line index and whether it sits
 * inside an archive block. Archive start/end marker lines themselves are
 * never collected; they only toggle the archived flag.
 *
 * @param {string[]} lines - File content split on newlines.
 * @returns {{line: string, i: number, archived: boolean}[]}
 */
function collectFindingBulletLines(lines) {
    const collected = [];
    let insideArchive = false;
    lines.forEach((line, index) => {
        if (isArchiveStart(line)) {
            insideArchive = true;
            return;
        }
        if (isArchiveEnd(line)) {
            insideArchive = false;
            return;
        }
        if (line.startsWith("- ")) {
            collected.push({ line, i: index, archived: insideArchive });
        }
    });
    return collected;
}
76
/**
 * Resolve a user-supplied needle to a single finding bullet.
 *
 * Match tiers, evaluated lazily in priority order:
 *   0. stable finding id ("fid:XXXXXXXX" or the bare 8-char hex),
 *   1. exact text match (bullet prefix and metadata comments stripped),
 *   2. unique partial substring match.
 *
 * Fix: the previous version built a fresh RegExp inside the filter
 * callback for every bullet line and eagerly computed all three tiers
 * even when tier 0 already resolved; the id regex is now built once and
 * lower tiers only run when needed.
 *
 * @param {{line: string, i: number}[]} bulletLines - Candidate bullets.
 * @param {string} needle - Lowercased, trimmed search text.
 * @param {string} match - Original user input, used in error messages.
 * @returns {{kind: "found", idx: number} |
 *           {kind: "ambiguous", error: string} |
 *           {kind: "not_found"}}
 */
function findMatchingFindingBullet(bulletLines, needle, match) {
    // Tier 0: stable finding id match.
    const fidNeedle = needle.replace(/^fid:/, "");
    if (/^[a-z0-9]{8}$/.test(fidNeedle)) {
        const fidRe = new RegExp(`<!--\\s*fid:${fidNeedle}\\s*-->`);
        const fidMatches = bulletLines.filter(({ line }) => fidRe.test(line));
        if (fidMatches.length === 1)
            return { kind: "found", idx: fidMatches[0].i };
    }
    // Tier 1: exact text match after stripping "- " and <!-- --> metadata.
    const exactMatches = bulletLines.filter(({ line }) => line.replace(/^-\s+/, "").replace(/<!--.*?-->/g, "").trim().toLowerCase() === needle);
    if (exactMatches.length === 1)
        return { kind: "found", idx: exactMatches[0].i };
    if (exactMatches.length > 1) {
        return { kind: "ambiguous", error: `"${match}" is ambiguous (${exactMatches.length} exact matches). Use a more specific phrase.` };
    }
    // Tier 2: unique partial substring match.
    const partialMatches = bulletLines.filter(({ line }) => line.toLowerCase().includes(needle));
    if (partialMatches.length === 1)
        return { kind: "found", idx: partialMatches[0].i };
    if (partialMatches.length > 1) {
        return { kind: "ambiguous", error: `"${match}" is ambiguous (${partialMatches.length} partial matches). Use a more specific phrase.` };
    }
    return { kind: "not_found" };
}
57
97
  export function readFindings(phrenPath, project, opts = {}) {
58
98
  const ensured = ensureProject(phrenPath, project);
59
99
  if (!ensured.ok)
@@ -210,35 +250,19 @@ export function removeFinding(phrenPath, project, match) {
210
250
  return withSafeLock(filePath, () => {
211
251
  const lines = fs.readFileSync(filePath, "utf8").split("\n");
212
252
  const needle = match.trim().toLowerCase();
213
- const bulletLines = lines.map((line, i) => ({ line, i })).filter(({ line }) => line.startsWith("- "));
214
- // 0) Stable finding ID match (fid:XXXXXXXX or just the 8-char hex)
215
- const fidNeedle = needle.replace(/^fid:/, "");
216
- const fidMatch = /^[a-z0-9]{8}$/.test(fidNeedle)
217
- ? bulletLines.filter(({ line }) => new RegExp(`<!--\\s*fid:${fidNeedle}\\s*-->`).test(line))
218
- : [];
219
- // 1) Exact text match (strip bullet prefix + metadata for comparison)
220
- const exactMatches = bulletLines.filter(({ line }) => line.replace(/^-\s+/, "").replace(/<!--.*?-->/g, "").trim().toLowerCase() === needle);
221
- // 2) Unique partial substring match
222
- const partialMatches = bulletLines.filter(({ line }) => line.toLowerCase().includes(needle));
223
- let idx;
224
- if (fidMatch.length === 1) {
225
- idx = fidMatch[0].i;
226
- }
227
- else if (exactMatches.length === 1) {
228
- idx = exactMatches[0].i;
229
- }
230
- else if (exactMatches.length > 1) {
231
- return phrenErr(`"${match}" is ambiguous (${exactMatches.length} exact matches). Use a more specific phrase.`, PhrenError.AMBIGUOUS_MATCH);
232
- }
233
- else if (partialMatches.length === 1) {
234
- idx = partialMatches[0].i;
235
- }
236
- else if (partialMatches.length > 1) {
237
- return phrenErr(`"${match}" is ambiguous (${partialMatches.length} partial matches). Use a more specific phrase.`, PhrenError.AMBIGUOUS_MATCH);
253
+ const bulletLines = collectFindingBulletLines(lines);
254
+ const activeMatch = findMatchingFindingBullet(bulletLines.filter(({ archived }) => !archived), needle, match);
255
+ if (activeMatch.kind === "ambiguous") {
256
+ return phrenErr(activeMatch.error, PhrenError.AMBIGUOUS_MATCH);
238
257
  }
239
- else {
258
+ if (activeMatch.kind === "not_found") {
259
+ const archivedMatch = findMatchingFindingBullet(bulletLines.filter(({ archived }) => archived), needle, match);
260
+ if (archivedMatch.kind === "ambiguous" || archivedMatch.kind === "found") {
261
+ return phrenErr(`Finding "${match}" is archived and read-only. Restore or re-add it before mutating history.`, PhrenError.VALIDATION_ERROR);
262
+ }
240
263
  return phrenErr(`No finding matching "${match}" in project "${project}". Try a different search term or check :findings view.`, PhrenError.NOT_FOUND);
241
264
  }
265
+ const idx = activeMatch.idx;
242
266
  const removeCount = isCitationLine(lines[idx + 1] || "") ? 2 : 1;
243
267
  const matched = lines[idx];
244
268
  lines.splice(idx, removeCount);
@@ -260,33 +284,19 @@ export function editFinding(phrenPath, project, oldText, newText) {
260
284
  return withSafeLock(findingsPath, () => {
261
285
  const lines = fs.readFileSync(findingsPath, "utf8").split("\n");
262
286
  const needle = oldText.trim().toLowerCase();
263
- const bulletLines = lines.map((line, i) => ({ line, i })).filter(({ line }) => line.startsWith("- "));
264
- // Stable finding ID match
265
- const fidNeedle = needle.replace(/^fid:/, "");
266
- const fidMatch = /^[a-z0-9]{8}$/.test(fidNeedle)
267
- ? bulletLines.filter(({ line }) => new RegExp(`<!--\\s*fid:${fidNeedle}\\s*-->`).test(line))
268
- : [];
269
- const exactMatches = bulletLines.filter(({ line }) => line.replace(/^-\s+/, "").replace(/<!--.*?-->/g, "").trim().toLowerCase() === needle);
270
- const partialMatches = bulletLines.filter(({ line }) => line.toLowerCase().includes(needle));
271
- let idx;
272
- if (fidMatch.length === 1) {
273
- idx = fidMatch[0].i;
274
- }
275
- else if (exactMatches.length === 1) {
276
- idx = exactMatches[0].i;
277
- }
278
- else if (exactMatches.length > 1) {
279
- return phrenErr(`"${oldText}" is ambiguous (${exactMatches.length} exact matches). Use a more specific phrase.`, PhrenError.AMBIGUOUS_MATCH);
287
+ const bulletLines = collectFindingBulletLines(lines);
288
+ const activeMatch = findMatchingFindingBullet(bulletLines.filter(({ archived }) => !archived), needle, oldText);
289
+ if (activeMatch.kind === "ambiguous") {
290
+ return phrenErr(activeMatch.error, PhrenError.AMBIGUOUS_MATCH);
280
291
  }
281
- else if (partialMatches.length === 1) {
282
- idx = partialMatches[0].i;
283
- }
284
- else if (partialMatches.length > 1) {
285
- return phrenErr(`"${oldText}" is ambiguous (${partialMatches.length} partial matches). Use a more specific phrase.`, PhrenError.AMBIGUOUS_MATCH);
286
- }
287
- else {
292
+ if (activeMatch.kind === "not_found") {
293
+ const archivedMatch = findMatchingFindingBullet(bulletLines.filter(({ archived }) => archived), needle, oldText);
294
+ if (archivedMatch.kind === "ambiguous" || archivedMatch.kind === "found") {
295
+ return phrenErr(`Finding "${oldText}" is archived and read-only. Restore or re-add it before mutating history.`, PhrenError.VALIDATION_ERROR);
296
+ }
288
297
  return phrenErr(`No finding matching "${oldText}" in project "${project}".`, PhrenError.NOT_FOUND);
289
298
  }
299
+ const idx = activeMatch.idx;
290
300
  // Preserve existing metadata comment (fid, citations, etc.)
291
301
  const existing = lines[idx];
292
302
  const metaMatch = existing.match(/(<!--.*?-->)/g);
@@ -144,6 +144,29 @@ export function getHighImpactFindings(phrenPath, minSurfaceCount = 3) {
144
144
  };
145
145
  return new Set(ids);
146
146
  }
147
/**
 * Count how many times each finding id appears in the impact log.
 *
 * Reads the newline-delimited JSON impact log, tallies occurrences of
 * `findingId`, then drops every finding below the `minSurfaces`
 * threshold. Malformed log lines are skipped (best-effort parsing).
 *
 * @param {string} phrenPath - Root phren data directory.
 * @param {number} [minSurfaces=1] - Minimum count required to be included.
 * @returns {Map<string, number>} findingId -> surface count (>= minSurfaces)
 */
export function getImpactSurfaceCounts(phrenPath, minSurfaces = 1) {
    const logPath = impactLogFile(phrenPath);
    if (!fs.existsSync(logPath))
        return new Map();
    const tally = new Map();
    for (const raw of fs.readFileSync(logPath, "utf8").split("\n")) {
        if (!raw)
            continue;
        let entry;
        try {
            entry = JSON.parse(raw);
        }
        catch {
            continue; // best-effort: skip malformed log lines
        }
        if (entry.findingId) {
            tally.set(entry.findingId, (tally.get(entry.findingId) ?? 0) + 1);
        }
    }
    // Keep only findings that surfaced at least minSurfaces times.
    const qualifying = new Map();
    for (const [findingId, count] of tally) {
        if (count >= minSurfaces)
            qualifying.set(findingId, count);
    }
    return qualifying;
}
147
170
  export function markImpactEntriesCompletedForSession(phrenPath, sessionId, project) {
148
171
  if (!sessionId)
149
172
  return 0;
@@ -169,7 +169,7 @@ function findMostRecentSummaryWithProject(phrenPath) {
169
169
  if (fs.existsSync(fastPath)) {
170
170
  const data = JSON.parse(fs.readFileSync(fastPath, "utf-8"));
171
171
  if (data.summary)
172
- return { summary: data.summary, project: data.project };
172
+ return { summary: data.summary, project: data.project, endedAt: data.endedAt };
173
173
  }
174
174
  }
175
175
  catch (err) {
@@ -181,7 +181,7 @@ function findMostRecentSummaryWithProject(phrenPath) {
181
181
  if (results.length === 0)
182
182
  return { summary: null };
183
183
  const best = results[0]; // already sorted newest-mtime-first
184
- return { summary: best.data.summary, project: best.data.project };
184
+ return { summary: best.data.summary, project: best.data.project, endedAt: best.data.endedAt };
185
185
  }
186
186
  /** Resolve session file from an explicit sessionId or a previously-bound connectionId. */
187
187
  function resolveSessionFile(phrenPath, sessionId, connectionId) {
@@ -338,6 +338,45 @@ function hasCompletedTasksInSession(phrenPath, sessionId, project) {
338
338
  const artifacts = getSessionArtifacts(phrenPath, sessionId, project);
339
339
  return artifacts.tasks.some((task) => task.section === "Done" && task.checked);
340
340
  }
341
/**
 * Compute what changed in a project's knowledge files since the last
 * session ended.
 *
 * Findings: bullets under a `## YYYY-MM-DD` heading in FINDINGS.md are
 * counted when the heading date is on or after the calendar day the last
 * session ended (same-day findings are included, so counts may slightly
 * overlap the prior session). Superseded findings are detected via their
 * `status "superseded"` marker text.
 *
 * Tasks: counts all bullets in the `## Done` section of TASKS.md.
 * NOTE(review): this is a running total, not a since-last-session delta —
 * the Done section visible here carries no per-task completion dates to
 * filter by.
 *
 * @param {string} phrenPath - Root phren data directory.
 * @param {string} project - Project directory name.
 * @param {string} lastSessionEnd - ISO timestamp of the prior session end.
 * @returns {{newFindings: number, superseded: number, tasksCompleted: number}}
 */
export function computeSessionDiff(phrenPath, project, lastSessionEnd) {
    const projectDir = path.join(phrenPath, project);
    const findingsPath = path.join(projectDir, "FINDINGS.md");
    if (!fs.existsSync(findingsPath))
        return { newFindings: 0, superseded: 0, tasksCompleted: 0 };
    const content = fs.readFileSync(findingsPath, "utf8");
    const lines = content.split("\n");
    let currentDate = null;
    let newFindings = 0;
    let superseded = 0;
    const cutoff = lastSessionEnd.slice(0, 10); // compare by calendar day
    for (const line of lines) {
        const dateMatch = line.match(/^## (\d{4}-\d{2}-\d{2})$/);
        if (dateMatch) {
            currentDate = dateMatch[1];
            continue;
        }
        if (!line.startsWith("- ") || !currentDate)
            continue;
        if (currentDate >= cutoff) {
            newFindings++;
            if (line.includes('status "superseded"'))
                superseded++;
        }
    }
    // Count bullets in the Done section of TASKS.md. The previous regex
    // (/## Done[\s\S]*/) ran to end-of-file and so miscounted bullets from
    // any section written after "## Done"; stop at the next "## " heading.
    const tasksPath = path.join(projectDir, "TASKS.md");
    let tasksCompleted = 0;
    if (fs.existsSync(tasksPath)) {
        const taskContent = fs.readFileSync(tasksPath, "utf8");
        const doneMatch = taskContent.match(/## Done([\s\S]*?)(?=\n## |$)/);
        if (doneMatch) {
            tasksCompleted = doneMatch[1].split("\n").filter(l => l.startsWith("- ")).length;
        }
    }
    return { newFindings, superseded, tasksCompleted };
}
341
380
  export function register(server, ctx) {
342
381
  const { phrenPath } = ctx;
343
382
  server.registerTool("session_start", {
@@ -364,6 +403,7 @@ export function register(server, ctx) {
364
403
  const priorEnded = prior ? null : findMostRecentSummaryWithProject(phrenPath);
365
404
  const priorSummary = prior?.summary ?? priorEnded?.summary ?? null;
366
405
  const priorProject = prior?.project ?? priorEnded?.project;
406
+ const priorEndedAt = prior?.endedAt ?? priorEnded?.endedAt;
367
407
  // Create new session with unique ID in its own file
368
408
  const sessionId = crypto.randomUUID();
369
409
  const next = {
@@ -447,6 +487,25 @@ export function register(server, ctx) {
447
487
  debugError("session_start checkpointsRead", err);
448
488
  }
449
489
  }
490
+ // Compute context diff since last session
491
+ if (activeProject && isValidProjectName(activeProject) && priorEndedAt) {
492
+ try {
493
+ const diff = computeSessionDiff(phrenPath, activeProject, priorEndedAt);
494
+ if (diff.newFindings > 0 || diff.superseded > 0 || diff.tasksCompleted > 0) {
495
+ const diffParts = [];
496
+ if (diff.newFindings > 0)
497
+ diffParts.push(`${diff.newFindings} new finding${diff.newFindings === 1 ? "" : "s"}`);
498
+ if (diff.superseded > 0)
499
+ diffParts.push(`${diff.superseded} superseded`);
500
+ if (diff.tasksCompleted > 0)
501
+ diffParts.push(`${diff.tasksCompleted} task${diff.tasksCompleted === 1 ? "" : "s"} in done`);
502
+ parts.push(`## Since last session\n${diffParts.join(", ")}.`);
503
+ }
504
+ }
505
+ catch (err) {
506
+ debugError("session_start contextDiff", err);
507
+ }
508
+ }
450
509
  const message = parts.length > 0
451
510
  ? `Session started (${sessionId.slice(0, 8)}).\n\n${parts.join("\n\n")}`
452
511
  : `Session started (${sessionId.slice(0, 8)}). No prior context found.`;
@@ -8,7 +8,7 @@ import { readCustomHooks } from "./hooks.js";
8
8
  import { hookConfigPaths, hookConfigRoots } from "./provider-adapters.js";
9
9
  import { getAllSkills } from "./skill-registry.js";
10
10
  import { resolveTaskFilePath, readTasks, TASKS_FILENAME } from "./data-tasks.js";
11
- import { buildIndex, queryRows } from "./shared-index.js";
11
+ import { buildIndex, queryDocBySourceKey, queryRows } from "./shared-index.js";
12
12
  import { readProjectTopics, classifyTopicForText } from "./project-topics.js";
13
13
  import { entryScoreKey } from "./governance-scores.js";
14
14
  function extractGithubUrl(content) {
@@ -271,10 +271,9 @@ export async function buildGraph(phrenPath, profile, focusProject) {
271
271
  const rows = queryRows(db, `SELECT e.id, e.name, e.type, COUNT(DISTINCT el.source_doc) as ref_count
272
272
  FROM entities e JOIN entity_links el ON el.target_id = e.id WHERE e.type != 'document'
273
273
  GROUP BY e.id, e.name, e.type ORDER BY ref_count DESC LIMIT 500`, []);
274
- const refRows = queryRows(db, `SELECT e.id, el.source_doc, d.content, d.filename
274
+ const refRows = queryRows(db, `SELECT e.id, el.source_doc
275
275
  FROM entities e
276
276
  JOIN entity_links el ON el.target_id = e.id
277
- LEFT JOIN docs d ON d.source_key = el.source_doc
278
277
  WHERE e.type != 'document'`, []);
279
278
  const refsByEntity = new Map();
280
279
  const seenEntityDoc = new Set();
@@ -291,8 +290,9 @@ export async function buildGraph(phrenPath, profile, focusProject) {
291
290
  continue;
292
291
  seenEntityDoc.add(entityDocKey);
293
292
  const project = projectFromSourceDoc(doc);
294
- const content = typeof row[2] === "string" ? row[2] : "";
295
- const filename = typeof row[3] === "string" ? row[3] : "";
293
+ const docRow = queryDocBySourceKey(db, phrenPath, doc);
294
+ const content = docRow?.content ?? "";
295
+ const filename = docRow?.filename ?? "";
296
296
  const scoreKey = project && filename && content ? entryScoreKey(project, filename, content) : undefined;
297
297
  const refs = refsByEntity.get(entityId) ?? [];
298
298
  refs.push({ doc, project, scoreKey });
@@ -3,6 +3,6 @@ export { checkConsolidationNeeded, validateFindingsFormat, stripTaskDoneSection,
3
3
  export { filterTrustedFindings, filterTrustedFindingsDetailed, } from "./content-citation.js";
4
4
  export { scanForSecrets, resolveCoref, isDuplicateFinding, detectConflicts, extractDynamicEntities, checkSemanticDedup, checkSemanticConflicts, } from "./content-dedup.js";
5
5
  export { countActiveFindings, autoArchiveToReference, } from "./content-archive.js";
6
- export { upsertCanonical, addFindingToFile, addFindingsToFile, } from "./content-learning.js";
6
+ export { upsertCanonical, addFindingToFile, addFindingsToFile, autoDetectFindingType, } from "./content-learning.js";
7
7
  export { FINDING_LIFECYCLE_STATUSES, FINDING_TYPE_DECAY, extractFindingType, parseFindingLifecycle, buildLifecycleComments, isInactiveFindingLine, } from "./finding-lifecycle.js";
8
8
  export { METADATA_REGEX, parseStatus, parseStatusField, parseSupersession, parseSupersedesRef, parseContradiction, parseAllContradictions, parseFindingId, parseCreatedDate, isCitationLine, isArchiveStart, isArchiveEnd, stripLifecycleMetadata, stripRelationMetadata, stripAllMetadata, stripComments, addMetadata, } from "./content-metadata.js";
@@ -6,12 +6,13 @@ import { globSync } from "glob";
6
6
  import { debugLog, appendIndexEvent, getProjectDirs, collectNativeMemoryFiles, runtimeFile, homeDir, readRootManifest, } from "./shared.js";
7
7
  import { getIndexPolicy, withFileLock } from "./shared-governance.js";
8
8
  import { stripTaskDoneSection } from "./shared-content.js";
9
+ import { isInactiveFindingLine } from "./finding-lifecycle.js";
9
10
  import { invalidateDfCache } from "./shared-search-fallback.js";
10
11
  import { errorMessage } from "./utils.js";
11
12
  import { beginUserFragmentBuildCache, endUserFragmentBuildCache, extractAndLinkFragments, ensureGlobalEntitiesTable, } from "./shared-fragment-graph.js";
12
13
  import { bootstrapSqlJs } from "./shared-sqljs.js";
13
14
  import { getProjectOwnershipMode, getProjectSourcePath, readProjectConfig } from "./project-config.js";
14
- import { buildSourceDocKey, queryDocRows, queryRows, } from "./index-query.js";
15
+ import { buildSourceDocKey, queryDocBySourceKey, queryDocRows, } from "./index-query.js";
15
16
  import { classifyTopicForText, readProjectTopics, } from "./project-topics.js";
16
17
  export { porterStem } from "./shared-stemmer.js";
17
18
  export { cosineFallback } from "./shared-search-fallback.js";
@@ -101,6 +102,11 @@ export function classifyFile(filename, relPath) {
101
102
  }
102
103
  const IMPORT_RE = /^@import\s+(.+)$/gm;
103
104
  const MAX_IMPORT_DEPTH = 5;
105
/** Only files under this root may be pulled in via @import. */
const IMPORT_ROOT_PREFIX = "shared/";
/**
 * Lexically validate an @import target before resolving it on disk.
 *
 * Accepts only Markdown files under "shared/" and rejects any path whose
 * segments could escape that root: "..", ".", or empty segments produced
 * by doubled slashes. The previous version only checked prefix + ".md"
 * suffix, so "shared/../secrets.md" passed this gate; segment filtering
 * closes that hole as defense-in-depth ahead of the caller's
 * filesystem-level prefix check.
 *
 * @param {string} importPath - Raw path text from an @import directive.
 * @returns {boolean} true when the path is safe to resolve.
 */
function isAllowedImportPath(importPath) {
    const normalized = importPath.replace(/\\/g, "/");
    if (!normalized.startsWith(IMPORT_ROOT_PREFIX))
        return false;
    if (!normalized.toLowerCase().endsWith(".md"))
        return false;
    // Reject traversal and degenerate segments ("shared/../x.md", "shared//x.md").
    const segments = normalized.split("/");
    return !segments.some(seg => seg === "" || seg === "." || seg === "..");
}
104
110
  /**
105
111
  * Internal recursive helper for resolveImports. Tracks `seen` (cycle detection) and `depth` (runaway
106
112
  * recursion guard) — callers should never pass these; use the public `resolveImports` instead.
@@ -110,6 +116,9 @@ function _resolveImportsRecursive(content, phrenPath, seen, depth) {
110
116
  return content;
111
117
  return content.replace(IMPORT_RE, (_match, importPath) => {
112
118
  const trimmed = importPath.trim();
119
+ if (!isAllowedImportPath(trimmed)) {
120
+ return "<!-- @import blocked: only shared/*.md allowed -->";
121
+ }
113
122
  const globalRoot = path.resolve(phrenPath, "global");
114
123
  const resolved = path.join(globalRoot, trimmed);
115
124
  // Use lexical resolution first for the prefix check
@@ -464,6 +473,10 @@ export function normalizeIndexedContent(content, type, phrenPath, maxChars) {
464
473
  if (type === "task") {
465
474
  normalized = stripTaskDoneSection(normalized);
466
475
  }
476
+ if (type === "findings") {
477
+ const lines = normalized.split("\n");
478
+ normalized = lines.filter(line => !isInactiveFindingLine(line)).join("\n");
479
+ }
467
480
  if (typeof maxChars === "number" && maxChars >= 0) {
468
481
  normalized = normalized.slice(0, maxChars);
469
482
  }
@@ -782,8 +795,8 @@ function mergeManualLinks(db, phrenPath) {
782
795
  for (const link of manualLinks) {
783
796
  try {
784
797
  // Validate: skip manual links whose sourceDoc no longer exists in the index
785
- const docCheck = queryRows(db, "SELECT 1 FROM docs WHERE source_key = ? LIMIT 1", [link.sourceDoc]);
786
- if (!docCheck || docCheck.length === 0) {
798
+ const docCheck = queryDocBySourceKey(db, phrenPath, link.sourceDoc);
799
+ if (!docCheck) {
787
800
  if ((process.env.PHREN_DEBUG || process.env.PHREN_DEBUG))
788
801
  process.stderr.write(`[phren] manualLinks: pruning stale link to "${link.sourceDoc}"\n`);
789
802
  pruned = true;
@@ -3,7 +3,7 @@ import { getQualityMultiplier, entryScoreKey, } from "./shared-governance.js";
3
3
  import { queryDocRows, queryRows, cosineFallback, extractSnippet, getDocSourceKey, getEntityBoostDocs, decodeFiniteNumber, rowToDocWithRowid, } from "./shared-index.js";
4
4
  import { filterTrustedFindingsDetailed, } from "./shared-content.js";
5
5
  import { parseCitationComment } from "./content-citation.js";
6
- import { getHighImpactFindings } from "./finding-impact.js";
6
+ import { getHighImpactFindings, getImpactSurfaceCounts } from "./finding-impact.js";
7
7
  import { buildFtsQueryVariants, buildRelaxedFtsQuery, isFeatureEnabled, STOP_WORDS } from "./utils.js";
8
8
  import * as fs from "fs";
9
9
  import * as path from "path";
@@ -12,7 +12,6 @@ import { vectorFallback } from "./shared-search-fallback.js";
12
12
  import { getOllamaUrl, getCloudEmbeddingUrl } from "./shared-ollama.js";
13
13
  import { keywordFallbackSearch } from "./core-search.js";
14
14
  import { debugLog } from "./shared.js";
15
- import { getCorrelatedDocs } from "./query-correlation.js";
16
15
  // ── Scoring constants ─────────────────────────────────────────────────────────
17
16
  /** Number of docs sampled for token-overlap semantic fallback search. */
18
17
  const SEMANTIC_FALLBACK_SAMPLE_LIMIT = 100;
@@ -32,13 +31,13 @@ const LOW_FOCUS_SNIPPET_CHAR_FRACTION = 0.55;
32
31
  const TASK_RESCUE_MIN_OVERLAP = 0.3;
33
32
  const TASK_RESCUE_OVERLAP_MARGIN = 0.12;
34
33
  const TASK_RESCUE_SCORE_MARGIN = 0.6;
35
- /** Boost applied to docs that correlate with recurring query patterns. */
36
- const CORRELATION_BOOST = 1.5;
37
34
  /** Fraction of bullets that must be low-value before applying the low-value penalty. */
38
35
  const LOW_VALUE_BULLET_FRACTION = 0.5;
39
36
  // ── Intent and scoring helpers ───────────────────────────────────────────────
40
37
  export function detectTaskIntent(prompt) {
41
38
  const p = prompt.toLowerCase();
39
+ if (/\/\w+/.test(p) || /\b(skill|swarm|command|lineup|slash command)\b/.test(p))
40
+ return "skill";
42
41
  if (/(bug|error|fix|broken|regression|fail|stack trace)/.test(p))
43
42
  return "debug";
44
43
  if (/(review|audit|pr|pull request|nit|refactor)/.test(p))
@@ -50,6 +49,8 @@ export function detectTaskIntent(prompt) {
50
49
  return "general";
51
50
  }
52
51
  function intentBoost(intent, docType) {
52
+ if (intent === "skill" && docType === "skill")
53
+ return 4;
53
54
  if (intent === "debug" && (docType === "findings" || docType === "reference"))
54
55
  return 3;
55
56
  if (intent === "review" && (docType === "canonical" || docType === "changelog"))
@@ -346,10 +347,23 @@ export function searchDocuments(db, safeQuery, prompt, keywords, detectedProject
346
347
  if (ftsDocs.length === 0 && relaxedQuery && relaxedQuery !== safeQuery) {
347
348
  runScopedFtsQuery(relaxedQuery);
348
349
  }
350
+ // Tier 1.5: Fragment graph expansion
351
+ const fragmentExpansionDocs = [];
352
+ const queryLower = (prompt + " " + keywords).toLowerCase();
353
+ const fragmentBoostDocKeys = getEntityBoostDocs(db, queryLower);
354
+ for (const docKey of fragmentBoostDocKeys) {
355
+ if (ftsSeenKeys.has(docKey))
356
+ continue;
357
+ const rows = queryDocRows(db, "SELECT project, filename, type, content, path FROM docs WHERE path = ? LIMIT 1", [docKey]);
358
+ if (rows?.length) {
359
+ ftsSeenKeys.add(docKey);
360
+ fragmentExpansionDocs.push(rows[0]);
361
+ }
362
+ }
349
363
  // Tier 2: Token-overlap semantic — always run, scored independently
350
364
  const semanticDocs = semanticFallbackDocs(db, `${prompt}\n${keywords}`, detectedProject);
351
365
  // Merge with Reciprocal Rank Fusion so documents found by both tiers rank highest
352
- const merged = rrfMerge([ftsDocs, semanticDocs]);
366
+ const merged = rrfMerge([ftsDocs, fragmentExpansionDocs, semanticDocs]);
353
367
  if (merged.length === 0)
354
368
  return null;
355
369
  return merged.slice(0, 12);
@@ -501,6 +515,7 @@ export function applyTrustFilter(rows, ttlDays, minConfidence, decay, phrenPath)
501
515
  const queueItems = [];
502
516
  const auditEntries = [];
503
517
  const highImpactFindingIds = phrenPath ? getHighImpactFindings(phrenPath, 3) : undefined;
518
+ const impactCounts = phrenPath ? getImpactSurfaceCounts(phrenPath, 1) : undefined;
504
519
  const filtered = rows
505
520
  .map((doc) => {
506
521
  if (!TRUST_FILTERED_TYPES.has(doc.type))
@@ -511,6 +526,7 @@ export function applyTrustFilter(rows, ttlDays, minConfidence, decay, phrenPath)
511
526
  decay,
512
527
  project: doc.project,
513
528
  highImpactFindingIds,
529
+ impactCounts,
514
530
  });
515
531
  if (trust.issues.length > 0) {
516
532
  const stale = trust.issues.filter((i) => i.reason === "stale").map((i) => i.bullet);
@@ -608,15 +624,13 @@ export function rankResults(rows, intent, gitCtx, detectedProject, phrenPathLoca
608
624
  }
609
625
  }
610
626
  const getRecentDate = (doc) => recentDateCache.get(doc.path || `${doc.project}/${doc.filename}`) ?? "0000-00-00";
611
- // Query correlation: pre-warm docs that historically correlated with similar queries
612
- const correlatedDocKeys = query ? new Set(getCorrelatedDocs(phrenPathLocal, query, 5)) : new Set();
613
627
  // Precompute per-doc ranking metadata once — avoids recomputing inside sort comparator.
614
628
  const changedFiles = gitCtx?.changedFiles || new Set();
615
629
  const FILE_MATCH_BOOST = 1.5;
616
630
  const scored = ranked.map((doc) => {
617
631
  const globBoost = getProjectGlobBoost(phrenPathLocal, doc.project, cwd, gitCtx?.changedFiles);
618
632
  const key = entryScoreKey(doc.project, doc.filename, doc.content);
619
- const entity = entityBoostPaths.has(doc.path) ? 1.3 : 1;
633
+ const entity = entityBoostPaths.has(doc.path) ? 1.5 : 1;
620
634
  const date = getRecentDate(doc);
621
635
  const fileRel = fileRelevanceBoost(doc.path, changedFiles);
622
636
  const branchMat = branchMatchBoost(doc.content, gitCtx?.branch);
@@ -631,17 +645,19 @@ export function rankResults(rows, intent, gitCtx, detectedProject, phrenPathLoca
631
645
  && queryOverlap < WEAK_CROSS_PROJECT_OVERLAP_MAX
632
646
  ? WEAK_CROSS_PROJECT_OVERLAP_PENALTY
633
647
  : 0;
634
- const correlationKey = `${doc.project}/${doc.filename}`;
635
- const correlationBoost = correlatedDocKeys.has(correlationKey) ? CORRELATION_BOOST : 0;
648
+ // Boost skills whose filename matches a query token (e.g. "swarm" matches swarm.md)
649
+ const skillNameBoost = doc.type === "skill" && queryTokens.length > 0
650
+ ? queryTokens.some((t) => doc.filename.replace(/\.md$/i, "").toLowerCase() === t) ? 4 : 0
651
+ : 0;
636
652
  const score = Math.round((intentBoost(intent, doc.type) +
653
+ skillNameBoost +
637
654
  fileRel +
638
655
  branchMat +
639
656
  globBoost +
640
657
  qualityMult +
641
658
  entity +
642
659
  queryOverlap * queryOverlapWeight +
643
- recencyBoost(doc.type, date) +
644
- correlationBoost -
660
+ recencyBoost(doc.type, date) -
645
661
  weakCrossProjectPenalty -
646
662
  lowValuePenalty(doc.content, doc.type)) * crossProjectAgeMultiplier(doc, detectedProject, date) * 10000) / 10000;
647
663
  const fileMatch = fileRel > 0 || branchMat > 0;
@@ -706,24 +722,6 @@ export function rankResults(rows, intent, gitCtx, detectedProject, phrenPathLoca
706
722
  }
707
723
  return ranked;
708
724
  }
709
- /** Annotate snippet lines that carry contradiction metadata with visible markers. */
710
- export function annotateContradictions(snippet) {
711
- return snippet.split('\n').map(line => {
712
- const conflictMatch = line.match(/<!-- conflicts_with: "(.*?)" -->/);
713
- const contradictMatch = line.match(/<!-- phren:contradicts "(.*?)" -->/);
714
- const statusMatch = line.match(/phren:status "contradicted"/);
715
- if (conflictMatch) {
716
- return line.replace(conflictMatch[0], '') + ` [CONTRADICTED — conflicts with: "${conflictMatch[1]}"]`;
717
- }
718
- if (contradictMatch) {
719
- return line.replace(contradictMatch[0], '') + ` [CONTRADICTED — see: "${contradictMatch[1]}"]`;
720
- }
721
- if (statusMatch) {
722
- return line + ' [CONTRADICTED]';
723
- }
724
- return line;
725
- }).join('\n');
726
- }
727
725
  /** Mark snippet lines with stale citations (cited file missing or line content changed). */
728
726
  export function markStaleCitations(snippet) {
729
727
  const lines = snippet.split("\n");
@@ -773,10 +771,40 @@ export function markStaleCitations(snippet) {
773
771
  }
774
772
  return result.join("\n");
775
773
  }
774
+ function annotateContradictions(snippet) {
775
+ return snippet.split('\n').map(line => {
776
+ const conflictMatch = line.match(/<!-- conflicts_with: "(.*?)" -->/);
777
+ const contradictMatch = line.match(/<!-- phren:contradicts "(.*?)" -->/);
778
+ const statusMatch = line.match(/phren:status "contradicted"/);
779
+ if (conflictMatch) {
780
+ return line.replace(conflictMatch[0], '') + ` [CONTRADICTED — conflicts with: "${conflictMatch[1]}"]`;
781
+ }
782
+ if (contradictMatch) {
783
+ return line.replace(contradictMatch[0], '') + ` [CONTRADICTED — see: "${contradictMatch[1]}"]`;
784
+ }
785
+ if (statusMatch) {
786
+ return line + ' [CONTRADICTED]';
787
+ }
788
+ return line;
789
+ }).join('\n');
790
+ }
776
791
  export function selectSnippets(rows, keywords, tokenBudget, lineBudget, charBudget) {
777
792
  const selected = [];
778
793
  let usedTokens = 36;
779
794
  const queryTokens = tokenizeForOverlap(keywords);
795
+ const seenBullets = new Set();
796
+ // For each snippet being added, hash its bullet lines and skip duplicates
797
+ function dedupSnippetBullets(snippet) {
798
+ return snippet.split('\n').filter(line => {
799
+ if (!line.startsWith('- '))
800
+ return true; // Keep non-bullet lines (headers, etc)
801
+ const normalized = line.replace(/<!--.*?-->/g, '').trim().toLowerCase();
802
+ if (seenBullets.has(normalized))
803
+ return false;
804
+ seenBullets.add(normalized);
805
+ return true;
806
+ }).join('\n');
807
+ }
780
808
  for (const doc of rows) {
781
809
  let snippet = compactSnippet(extractSnippet(doc.content, keywords, 8), lineBudget, charBudget);
782
810
  if (!snippet.trim())
@@ -785,8 +813,10 @@ export function selectSnippets(rows, keywords, tokenBudget, lineBudget, charBudg
785
813
  if (TRUST_FILTERED_TYPES.has(doc.type)) {
786
814
  snippet = markStaleCitations(snippet);
787
815
  }
788
- // Surface contradiction metadata as visible annotations
789
816
  snippet = annotateContradictions(snippet);
817
+ snippet = dedupSnippetBullets(snippet);
818
+ if (!snippet.trim())
819
+ continue;
790
820
  let focusScore = queryTokens.length > 0
791
821
  ? overlapScore(queryTokens, `${doc.filename}\n${snippet}`)
792
822
  : 1;
@@ -1,10 +1,9 @@
1
1
  /**
2
2
  * Vitest globalSetup — runs once in the main process before any test workers spawn.
3
3
  *
4
- * Builds mcp/dist if it is missing or stale so every fork sees a complete,
5
- * consistent dist artifact. Running the build here (rather than inside each
6
- * fork via ensureCliBuilt) eliminates the race condition where one fork runs
7
- * `rm -rf mcp/dist` while another fork is checking fs.existsSync(CLI_PATH).
4
+ * Builds mcp/dist if it is missing so every fork sees a complete, consistent
5
+ * dist artifact before tests begin. Individual subprocess helpers can still
6
+ * repair a missing artifact later under a lock if some test mutates dist.
8
7
  *
9
8
  * `pretest` in package.json already calls `npm run build`, so in normal `npm test`
10
9
  * runs this is a fast no-op check. It is the safety net for:
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@phren/cli",
3
- "version": "0.0.5",
3
+ "version": "0.0.6",
4
4
  "description": "Knowledge layer for AI agents. Claude remembers you. Phren remembers your work.",
5
5
  "type": "module",
6
6
  "bin": {
@@ -34,7 +34,7 @@
34
34
  "vitest": "^4.0.18"
35
35
  },
36
36
  "scripts": {
37
- "build": "rm -rf mcp/dist && tsc -p mcp/tsconfig.json && chmod +x mcp/dist/index.js && cp mcp/src/synonyms*.json mcp/dist/",
37
+ "build": "node scripts/build.mjs",
38
38
  "dev": "tsx mcp/src/index.ts",
39
39
  "lint": "eslint mcp/src/ --ignore-pattern '*.test.ts'",
40
40
  "validate-docs": "bash scripts/validate-docs.sh",