@phren/cli 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +590 -0
- package/mcp/dist/capabilities/cli.js +61 -0
- package/mcp/dist/capabilities/index.js +15 -0
- package/mcp/dist/capabilities/mcp.js +61 -0
- package/mcp/dist/capabilities/types.js +57 -0
- package/mcp/dist/capabilities/vscode.js +61 -0
- package/mcp/dist/capabilities/web-ui.js +61 -0
- package/mcp/dist/cli-actions.js +302 -0
- package/mcp/dist/cli-config.js +580 -0
- package/mcp/dist/cli-extract.js +305 -0
- package/mcp/dist/cli-govern.js +371 -0
- package/mcp/dist/cli-graph.js +169 -0
- package/mcp/dist/cli-hooks-citations.js +44 -0
- package/mcp/dist/cli-hooks-context.js +56 -0
- package/mcp/dist/cli-hooks-globs.js +83 -0
- package/mcp/dist/cli-hooks-output.js +130 -0
- package/mcp/dist/cli-hooks-retrieval.js +2 -0
- package/mcp/dist/cli-hooks-session.js +1402 -0
- package/mcp/dist/cli-hooks.js +350 -0
- package/mcp/dist/cli-namespaces.js +989 -0
- package/mcp/dist/cli-ops.js +253 -0
- package/mcp/dist/cli-search.js +407 -0
- package/mcp/dist/cli.js +108 -0
- package/mcp/dist/content-archive.js +278 -0
- package/mcp/dist/content-citation.js +391 -0
- package/mcp/dist/content-dedup.js +622 -0
- package/mcp/dist/content-learning.js +472 -0
- package/mcp/dist/content-metadata.js +186 -0
- package/mcp/dist/content-validate.js +462 -0
- package/mcp/dist/core-finding.js +54 -0
- package/mcp/dist/core-project.js +36 -0
- package/mcp/dist/core-search.js +50 -0
- package/mcp/dist/data-access.js +400 -0
- package/mcp/dist/data-tasks.js +821 -0
- package/mcp/dist/embedding.js +344 -0
- package/mcp/dist/entrypoint.js +387 -0
- package/mcp/dist/finding-context.js +172 -0
- package/mcp/dist/finding-impact.js +181 -0
- package/mcp/dist/finding-journal.js +122 -0
- package/mcp/dist/finding-lifecycle.js +259 -0
- package/mcp/dist/governance-audit.js +22 -0
- package/mcp/dist/governance-locks.js +96 -0
- package/mcp/dist/governance-policy.js +648 -0
- package/mcp/dist/governance-scores.js +355 -0
- package/mcp/dist/hooks.js +449 -0
- package/mcp/dist/impact-scoring.js +22 -0
- package/mcp/dist/index-query.js +168 -0
- package/mcp/dist/index.js +205 -0
- package/mcp/dist/init-config.js +336 -0
- package/mcp/dist/init-preferences.js +62 -0
- package/mcp/dist/init-setup.js +1305 -0
- package/mcp/dist/init-shared.js +29 -0
- package/mcp/dist/init.js +1730 -0
- package/mcp/dist/link-checksums.js +62 -0
- package/mcp/dist/link-context.js +257 -0
- package/mcp/dist/link-doctor.js +591 -0
- package/mcp/dist/link-skills.js +212 -0
- package/mcp/dist/link.js +596 -0
- package/mcp/dist/logger.js +15 -0
- package/mcp/dist/machine-identity.js +38 -0
- package/mcp/dist/mcp-config.js +254 -0
- package/mcp/dist/mcp-data.js +315 -0
- package/mcp/dist/mcp-extract-facts.js +78 -0
- package/mcp/dist/mcp-extract.js +133 -0
- package/mcp/dist/mcp-finding.js +557 -0
- package/mcp/dist/mcp-graph.js +339 -0
- package/mcp/dist/mcp-hooks.js +256 -0
- package/mcp/dist/mcp-memory.js +58 -0
- package/mcp/dist/mcp-ops.js +328 -0
- package/mcp/dist/mcp-search.js +628 -0
- package/mcp/dist/mcp-session.js +651 -0
- package/mcp/dist/mcp-skills.js +189 -0
- package/mcp/dist/mcp-tasks.js +551 -0
- package/mcp/dist/mcp-types.js +7 -0
- package/mcp/dist/memory-ui-assets.js +6 -0
- package/mcp/dist/memory-ui-data.js +513 -0
- package/mcp/dist/memory-ui-graph.js +1910 -0
- package/mcp/dist/memory-ui-page.js +353 -0
- package/mcp/dist/memory-ui-scripts.js +1387 -0
- package/mcp/dist/memory-ui-server.js +1218 -0
- package/mcp/dist/memory-ui-styles.js +555 -0
- package/mcp/dist/memory-ui.js +9 -0
- package/mcp/dist/package-metadata.js +13 -0
- package/mcp/dist/phren-art.js +52 -0
- package/mcp/dist/phren-core.js +108 -0
- package/mcp/dist/phren-dotenv.js +67 -0
- package/mcp/dist/phren-paths.js +476 -0
- package/mcp/dist/proactivity.js +172 -0
- package/mcp/dist/profile-store.js +228 -0
- package/mcp/dist/project-config.js +85 -0
- package/mcp/dist/project-locator.js +25 -0
- package/mcp/dist/project-topics.js +1134 -0
- package/mcp/dist/provider-adapters.js +176 -0
- package/mcp/dist/runtime-profile.js +18 -0
- package/mcp/dist/session-checkpoints.js +131 -0
- package/mcp/dist/session-utils.js +68 -0
- package/mcp/dist/shared-content.js +8 -0
- package/mcp/dist/shared-embedding-cache.js +143 -0
- package/mcp/dist/shared-fragment-graph.js +456 -0
- package/mcp/dist/shared-governance.js +4 -0
- package/mcp/dist/shared-index.js +1334 -0
- package/mcp/dist/shared-ollama.js +192 -0
- package/mcp/dist/shared-paths.js +1 -0
- package/mcp/dist/shared-retrieval.js +796 -0
- package/mcp/dist/shared-search-fallback.js +375 -0
- package/mcp/dist/shared-sqljs.js +42 -0
- package/mcp/dist/shared-stemmer.js +171 -0
- package/mcp/dist/shared-vector-index.js +199 -0
- package/mcp/dist/shared.js +114 -0
- package/mcp/dist/shell-entry.js +209 -0
- package/mcp/dist/shell-input.js +943 -0
- package/mcp/dist/shell-palette.js +119 -0
- package/mcp/dist/shell-render.js +252 -0
- package/mcp/dist/shell-state-store.js +81 -0
- package/mcp/dist/shell-types.js +13 -0
- package/mcp/dist/shell-view-list.js +14 -0
- package/mcp/dist/shell-view.js +707 -0
- package/mcp/dist/shell.js +352 -0
- package/mcp/dist/skill-files.js +117 -0
- package/mcp/dist/skill-registry.js +279 -0
- package/mcp/dist/skill-state.js +28 -0
- package/mcp/dist/startup-embedding.js +57 -0
- package/mcp/dist/status.js +323 -0
- package/mcp/dist/synonyms.json +670 -0
- package/mcp/dist/task-hygiene.js +251 -0
- package/mcp/dist/task-lifecycle.js +347 -0
- package/mcp/dist/tasks-github.js +76 -0
- package/mcp/dist/telemetry.js +165 -0
- package/mcp/dist/test-global-setup.js +37 -0
- package/mcp/dist/tool-registry.js +104 -0
- package/mcp/dist/update.js +97 -0
- package/mcp/dist/utils.js +543 -0
- package/package.json +67 -0
- package/skills/README.md +7 -0
- package/skills/consolidate/SKILL.md +152 -0
- package/skills/discover/SKILL.md +175 -0
- package/skills/init/SKILL.md +216 -0
- package/skills/profiles/SKILL.md +121 -0
- package/skills/sync/SKILL.md +261 -0
- package/starter/README.md +74 -0
- package/starter/global/CLAUDE.md +89 -0
- package/starter/global/skills/humanize.md +30 -0
- package/starter/global/skills/pipeline.md +35 -0
- package/starter/global/skills/release.md +35 -0
- package/starter/machines.yaml +8 -0
- package/starter/my-api/.claude/skills/README.md +7 -0
- package/starter/my-api/CLAUDE.md +33 -0
- package/starter/my-api/FINDINGS.md +9 -0
- package/starter/my-api/summary.md +7 -0
- package/starter/my-api/tasks.md +7 -0
- package/starter/my-first-project/.claude/skills/README.md +7 -0
- package/starter/my-first-project/CLAUDE.md +49 -0
- package/starter/my-first-project/FINDINGS.md +24 -0
- package/starter/my-first-project/summary.md +11 -0
- package/starter/my-first-project/tasks.md +25 -0
- package/starter/my-frontend/.claude/skills/README.md +7 -0
- package/starter/my-frontend/CLAUDE.md +33 -0
- package/starter/my-frontend/FINDINGS.md +9 -0
- package/starter/my-frontend/summary.md +7 -0
- package/starter/my-frontend/tasks.md +7 -0
- package/starter/profiles/default.yaml +4 -0
- package/starter/profiles/personal.yaml +4 -0
- package/starter/profiles/work.yaml +4 -0
- package/starter/templates/README.md +7 -0
- package/starter/templates/frontend/CLAUDE.md +23 -0
- package/starter/templates/frontend/FINDINGS.md +7 -0
- package/starter/templates/frontend/reference/README.md +4 -0
- package/starter/templates/frontend/summary.md +7 -0
- package/starter/templates/frontend/tasks.md +11 -0
- package/starter/templates/library/CLAUDE.md +22 -0
- package/starter/templates/library/FINDINGS.md +7 -0
- package/starter/templates/library/reference/README.md +4 -0
- package/starter/templates/library/summary.md +7 -0
- package/starter/templates/library/tasks.md +11 -0
- package/starter/templates/monorepo/CLAUDE.md +21 -0
- package/starter/templates/monorepo/FINDINGS.md +7 -0
- package/starter/templates/monorepo/reference/README.md +4 -0
- package/starter/templates/monorepo/summary.md +7 -0
- package/starter/templates/monorepo/tasks.md +11 -0
- package/starter/templates/python-project/CLAUDE.md +21 -0
- package/starter/templates/python-project/FINDINGS.md +7 -0
- package/starter/templates/python-project/reference/README.md +4 -0
- package/starter/templates/python-project/summary.md +7 -0
- package/starter/templates/python-project/tasks.md +10 -0
|
@@ -0,0 +1,543 @@
|
|
|
1
|
+
import * as fs from "fs";
|
|
2
|
+
import * as path from "path";
|
|
3
|
+
import { execFileSync, spawnSync } from "child_process";
|
|
4
|
+
import * as yaml from "js-yaml";
|
|
5
|
+
import { fileURLToPath } from "url";
|
|
6
|
+
import { findPhrenPath } from "./phren-paths.js";
|
|
7
|
+
import { bootstrapPhrenDotEnv } from "./phren-dotenv.js";
|
|
8
|
+
// Directory containing this compiled module (ESM has no __dirname).
const _moduleDir = path.dirname(fileURLToPath(import.meta.url));
|
|
9
|
+
// Load a JSON data file shipped alongside the compiled module (e.g. the
// bundled synonyms map). Best-effort: returns the parsed object, or {} when
// the file is missing or unparseable, so module initialization never throws.
function loadSynonymsJson(fileName) {
    const filePath = path.join(_moduleDir, fileName);
    try {
        return JSON.parse(fs.readFileSync(filePath, "utf8"));
    }
    catch (err) {
        // Bug fix: the original condition tested PHREN_DEBUG twice
        // (`PHREN_DEBUG || PHREN_DEBUG`); a single check is sufficient.
        if (process.env.PHREN_DEBUG)
            process.stderr.write(`[phren] ${fileName} load failed: ${err instanceof Error ? err.message : String(err)}\n`);
        return {};
    }
}
|
|
20
|
+
// Bundled base synonym map, loaded once at module import time.
const _baseSynonymsJson = loadSynonymsJson("synonyms.json");
|
|
21
|
+
// ── Shared Git helper ────────────────────────────────────────────────────────
|
|
22
|
+
// Run a git command synchronously in `cwd` and return its stdout.
// Throws when the process cannot be spawned, times out, or exits non-zero;
// the thrown error includes trimmed stderr (or the killing signal) for context.
export function runGitOrThrow(cwd, args, timeoutMs) {
    const proc = spawnSync("git", args, {
        cwd,
        encoding: "utf8",
        stdio: ["ignore", "pipe", "pipe"],
        timeout: timeoutMs,
    });
    if (proc.error)
        throw proc.error;
    if (proc.status === 0)
        return proc.stdout ?? "";
    const errText = (proc.stderr ?? "").trim();
    let suffix = "";
    if (errText)
        suffix = `: ${errText}`;
    else if (proc.signal)
        suffix = ` (signal: ${proc.signal})`;
    throw new Error(`git ${args.join(" ")} exited with status ${proc.status ?? "unknown"}${suffix}`);
}
|
|
38
|
+
// Best-effort wrapper around runGitOrThrow: returns trimmed stdout on
// success, or null on any failure (optionally reporting via debugLogFn).
export function runGit(cwd, args, timeoutMs, debugLogFn) {
    try {
        const output = runGitOrThrow(cwd, args, timeoutMs);
        return output.trim();
    }
    catch (err) {
        const reason = errorMessage(err);
        debugLogFn?.(`runGit: git ${args[0]} failed in ${cwd}: ${reason}`);
        return null;
    }
}
|
|
49
|
+
// Windows .cmd/.bat launchers can only be executed through a command shell.
function needsCommandShell(cmd) {
    const fileName = path.basename(cmd);
    return /\.(cmd|bat)$/i.test(fileName);
}
|
|
52
|
+
// Decide how to exec `cmd` on the given platform. Non-Windows commands pass
// through unchanged. On Windows, explicit paths or names with extensions are
// kept as-is; bare names are resolved via the supplied `where` output. The
// `shell` flag is set for .cmd/.bat targets, which require a command shell.
export function normalizeExecCommand(cmd, platform = process.platform, whereOutput) {
    if (platform !== "win32")
        return { command: cmd, shell: false };
    const looksLikePath = cmd.includes("\\") || cmd.includes("/");
    const hasExtension = /\.[A-Za-z0-9]+$/i.test(path.basename(cmd));
    if (looksLikePath || hasExtension)
        return { command: cmd, shell: needsCommandShell(cmd) };
    // First non-empty line of `where` output wins; fall back to the bare name.
    let resolved = cmd;
    for (const line of (whereOutput || "").split(/\r?\n/)) {
        const trimmed = line.trim();
        if (trimmed) {
            resolved = trimmed;
            break;
        }
    }
    return { command: resolved, shell: needsCommandShell(resolved) };
}
|
|
65
|
+
// Resolve how to exec `cmd` on the current platform. On Windows this shells
// out to where.exe (5s cap) to locate the binary; failures fall back to the
// bare command name via normalizeExecCommand.
export function resolveExecCommand(cmd) {
    if (process.platform !== "win32")
        return { command: cmd, shell: false };
    let located = null;
    try {
        located = execFileSync("where.exe", [cmd], {
            encoding: "utf8",
            stdio: ["ignore", "pipe", "ignore"],
            timeout: 5000,
        });
    }
    catch {
        // where.exe unavailable or command not found — located stays null.
    }
    return normalizeExecCommand(cmd, process.platform, located);
}
|
|
80
|
+
// ── Error message extractor ─────────────────────────────────────────────────
|
|
81
|
+
// Extract a human-readable message from any thrown value.
export function errorMessage(err) {
    if (err instanceof Error)
        return err.message;
    return String(err);
}
|
|
84
|
+
// ── Feature flag and clamping helpers ────────────────────────────────────────
|
|
85
|
+
// Read a boolean feature flag from the environment (bootstrapping .env first).
// Unset/empty → defaultValue; "0", "false", "off", "no" (any case, trimmed)
// → false; any other value → true.
export function isFeatureEnabled(envName, defaultValue = true) {
    bootstrapPhrenDotEnv();
    const value = process.env[envName];
    if (!value)
        return defaultValue;
    const disabledValues = new Set(["0", "false", "off", "no"]);
    return !disabledValues.has(value.trim().toLowerCase());
}
|
|
92
|
+
// Parse `raw` as a base-10 integer and clamp it into [min, max].
// Non-numeric (or missing) input yields `fallback`, which is NOT clamped.
export function clampInt(raw, fallback, min, max) {
    const parsedValue = Number.parseInt(raw || "", 10);
    if (Number.isNaN(parsedValue))
        return fallback;
    if (parsedValue < min)
        return min;
    if (parsedValue > max)
        return max;
    return parsedValue;
}
|
|
98
|
+
// Base synonym map for fuzzy search expansion — source of truth is mcp/src/synonyms.json
const BASE_SYNONYMS = _baseSynonymsJson;
// Per-project file name under which learned synonyms are persisted as JSON.
const LEARNED_SYNONYMS_FILE = "learned-synonyms.json";
|
|
101
|
+
// Canonicalize a synonym term: lowercase, strip double quotes, trim whitespace.
function normalizeSynonymTerm(term) {
    const lowered = term.toLowerCase();
    return lowered.split('"').join("").trim();
}
|
|
104
|
+
// Normalize a list of synonym candidates: canonicalize each term, then drop
// empties, one-character terms, duplicates, and any term equal to baseTerm.
// Insertion order of first occurrences is preserved.
function normalizeSynonymValues(items, baseTerm) {
    const base = baseTerm ? normalizeSynonymTerm(baseTerm) : "";
    const accepted = new Set();
    for (const candidate of items) {
        const term = normalizeSynonymTerm(candidate);
        if (term.length > 1 && term !== base)
            accepted.add(term);
    }
    return [...accepted];
}
|
|
117
|
+
// Merge synonym maps left-to-right. Keys are normalized; values from later
// maps are appended to earlier ones and deduped via normalizeSynonymValues.
// Keys whose merged value list ends up empty are omitted.
function mergeSynonymMaps(...maps) {
    const combined = {};
    for (const source of maps) {
        for (const [rawKey, rawValues] of Object.entries(source)) {
            const key = normalizeSynonymTerm(rawKey);
            if (!key)
                continue;
            const incoming = Array.isArray(rawValues) ? rawValues : [];
            const current = combined[key] ?? [];
            const deduped = normalizeSynonymValues([...current, ...incoming], key);
            if (deduped.length > 0)
                combined[key] = deduped;
        }
    }
    return combined;
}
|
|
132
|
+
// Common English stop words to strip from prompts before searching
|
|
133
|
+
// Common English stop words to strip from prompts before searching.
// Stored as a single space-separated string and split once at module load.
const STOP_WORD_LIST =
    "the is a an and or but in on at to for " +
    "of with by from it this that are was were " +
    "be been being have has had do does did will " +
    "would could should may might can shall not no " +
    "so if then than too very just about up out " +
    "my me i you your we our they them their " +
    "he she his her its what which who when where " +
    "how why all each every some any few more most " +
    "other into over such only own same also back " +
    "get got make made take like well here there " +
    "use using used need want look help please";
export const STOP_WORDS = new Set(STOP_WORD_LIST.split(" "));
|
|
146
|
+
// Extract meaningful keywords from a prompt, including bigrams (2-word noun phrases).
|
|
147
|
+
// Bigrams capture intent better than isolated words (e.g., "rate limit" vs "rate" + "limit").
|
|
148
|
+
// Extract up to 10 meaningful keyword entries from a prompt: individual
// non-stop-words first, then adjacent-word bigrams (2-word phrases), which
// capture intent better than isolated words (e.g. "rate limit").
export function extractKeywordEntries(text) {
    const tokens = text
        .toLowerCase()
        .replace(/[^\w\s-]/g, " ")
        .split(/\s+/)
        .filter((token) => token.length > 1 && !STOP_WORDS.has(token));
    // Bigrams over adjacent surviving tokens (stop words already removed).
    const pairs = tokens
        .slice(0, -1)
        .map((token, i) => `${token} ${tokens[i + 1]}`);
    // Deduplicate, words before bigrams, capped at 10 entries total.
    const entries = [];
    const seenEntries = new Set();
    for (const candidate of [...tokens, ...pairs]) {
        if (seenEntries.has(candidate))
            continue;
        seenEntries.add(candidate);
        entries.push(candidate);
        if (entries.length >= 10)
            break;
    }
    return entries;
}
|
|
174
|
+
// Space-joined keyword string for the given prompt text.
export function extractKeywords(text) {
    const entries = extractKeywordEntries(text);
    return entries.join(" ");
}
|
|
177
|
+
// Validate a project name: lowercase letters/numbers with optional hyphen/underscore separators.
|
|
178
|
+
// Must not start with a hyphen (breaks CLI flags) or dot (hidden dirs). Max 100 chars.
|
|
179
|
+
// Internal keys like "native:-home" bypass this — they never go through user-facing validation.
|
|
180
|
+
// Explicitly rejects traversal sequences, null bytes, and path separators as defense-in-depth.
|
|
181
|
+
// Validate a user-facing project name: 1-100 chars, starting with a
// lowercase letter or digit, followed by lowercase alphanumerics, hyphens,
// or underscores. Null bytes, path separators, and ".." are rejected
// explicitly as defense-in-depth before the regex check.
export function isValidProjectName(name) {
    if (!name)
        return false;
    if (name.length === 0 || name.length > 100)
        return false;
    const forbidden = ["\0", "/", "\\", ".."];
    if (forbidden.some((seq) => name.includes(seq)))
        return false;
    return /^[a-z0-9][a-z0-9_-]*$/.test(name);
}
|
|
191
|
+
// Resolve a path inside the phren directory and reject anything that escapes it.
|
|
192
|
+
// Checks both lexical resolution and (when the path exists) real path after symlink
|
|
193
|
+
// resolution to prevent symlink-based traversal.
|
|
194
|
+
// Resolve `segments` under `base`, returning null for anything that would
// escape the base directory. Checks both the lexical resolution and — for
// the deepest existing ancestor — the real (symlink-resolved) path, so a
// symlink cannot smuggle the path outside `base` even when the final leaf
// does not exist yet. Returns the lexically-resolved absolute path.
export function safeProjectPath(base, ...segments) {
    // Null bytes are never legitimate in path segments.
    if (segments.some((segment) => segment.includes("\0")))
        return null;
    const absBase = path.resolve(base);
    const absTarget = path.resolve(base, ...segments);
    const inside = (child, parent) => child === parent || child.startsWith(parent + path.sep);
    if (!inside(absTarget, absBase))
        return null;
    try {
        // Walk up from the target to the deepest existing ancestor, then make
        // sure its real path still lives inside the real base.
        let probe = absTarget;
        while (probe !== absBase && !fs.existsSync(probe))
            probe = path.dirname(probe);
        if (fs.existsSync(probe)) {
            const realBase = fs.realpathSync.native(absBase);
            const realProbe = fs.realpathSync.native(probe);
            if (!inside(realProbe, realBase))
                return null;
        }
    }
    catch {
        // realpath failure (e.g. broken symlink) — reject to be safe.
        return null;
    }
    return absTarget;
}
|
|
225
|
+
const QUEUE_FILENAME = "MEMORY_QUEUE.md";
// Absolute path of a project's memory-queue file. Validates the project
// name and guards against path traversal; throws on either violation.
export function queueFilePath(phrenPath, project) {
    if (!isValidProjectName(project))
        throw new Error(`Invalid project name: ${project}`);
    const queuePath = safeProjectPath(phrenPath, project, QUEUE_FILENAME);
    if (!queuePath)
        throw new Error(`Path traversal detected for project: ${project}`);
    return queuePath;
}
|
|
236
|
+
// Sanitize user input before passing it to an FTS5 MATCH expression.
|
|
237
|
+
// Strips FTS5-specific syntax that could cause injection or parse errors.
|
|
238
|
+
// Sanitize user input for an FTS5 MATCH expression: cap at 500 chars,
// whitelist safe characters, collapse whitespace, and strip asterisks that
// are not valid prefix operators (FTS5 only accepts "*" glued to the end of
// a token, so bare or dangling asterisks would be a syntax error).
export function sanitizeFts5Query(raw) {
    if (!raw)
        return "";
    const capped = raw.length > 500 ? raw.slice(0, 500) : raw;
    const cleaned = capped
        // Whitelist: alphanumerics, space, hyphen, apostrophe, double quote, asterisk.
        .replace(/[^a-zA-Z0-9 \-'"*]/g, " ")
        .replace(/\s+/g, " ")
        .trim()
        // Q83: drop any "*" not immediately preceded by a word character.
        .replace(/(?<!\w)\*/g, "")
        // Handle a trailing "foo *" left after collapsing.
        .replace(/\s+\*$/g, "");
    return cleaned.trim();
}
|
|
257
|
+
// Parse a user synonyms.yaml file into { term: [synonyms...] }. Best-effort:
// missing or malformed files yield {}. Keys are lowercased/trimmed; values
// must be arrays of strings; quotes are stripped and 1-char terms dropped.
function parseSynonymsYaml(filePath) {
    if (!fs.existsSync(filePath))
        return {};
    try {
        // CORE_SCHEMA restricts parsing to plain YAML types (no custom tags).
        const parsed = yaml.load(fs.readFileSync(filePath, "utf8"), { schema: yaml.CORE_SCHEMA });
        if (!parsed || typeof parsed !== "object" || Array.isArray(parsed))
            return {};
        const loaded = {};
        for (const [rawKey, value] of Object.entries(parsed)) {
            const key = String(rawKey).trim().toLowerCase();
            if (!key || !Array.isArray(value))
                continue;
            const synonyms = value
                .filter((item) => typeof item === "string")
                .map((item) => item.replace(/"/g, "").trim())
                .filter((item) => item.length > 1);
            if (synonyms.length > 0)
                loaded[key] = synonyms;
        }
        return loaded;
    }
    catch (err) {
        // Bug fix: the original condition tested PHREN_DEBUG twice.
        if (process.env.PHREN_DEBUG)
            process.stderr.write(`[phren] synonyms.yaml parse failed (${filePath}): ${err instanceof Error ? err.message : String(err)}\n`);
        return {};
    }
}
|
|
284
|
+
// Load user-defined synonyms: the global synonyms.yaml, overlaid by the
// project's synonyms.yaml when a valid project name is supplied. Returns {}
// when the phren root cannot be located.
function loadUserSynonyms(project, phrenPath) {
    const root = phrenPath ?? findPhrenPath();
    if (!root)
        return {};
    const globalMap = parseSynonymsYaml(path.join(root, "global", "synonyms.yaml"));
    if (!project || !isValidProjectName(project))
        return globalMap;
    const projectMap = parseSynonymsYaml(path.join(root, project, "synonyms.yaml"));
    return { ...globalMap, ...projectMap };
}
|
|
297
|
+
// Parse a learned-synonyms JSON file into a normalized { term: [synonyms] }
// map. Best-effort: missing or malformed files yield {}. Keys and values are
// normalized via the shared synonym helpers; non-array values are skipped.
function parseLearnedSynonymsJson(filePath) {
    if (!fs.existsSync(filePath))
        return {};
    try {
        const parsed = JSON.parse(fs.readFileSync(filePath, "utf8"));
        if (!parsed || typeof parsed !== "object" || Array.isArray(parsed))
            return {};
        const loaded = {};
        for (const [rawKey, rawValue] of Object.entries(parsed)) {
            if (!Array.isArray(rawValue))
                continue;
            const key = normalizeSynonymTerm(rawKey);
            if (!key)
                continue;
            const synonyms = normalizeSynonymValues(rawValue.filter((v) => typeof v === "string"), key);
            if (synonyms.length > 0)
                loaded[key] = synonyms;
        }
        return loaded;
    }
    catch (err) {
        // Bug fix: the original condition tested PHREN_DEBUG twice.
        if (process.env.PHREN_DEBUG)
            process.stderr.write(`[phren] learned-synonyms parse failed (${filePath}): ${err instanceof Error ? err.message : String(err)}\n`);
        return {};
    }
}
|
|
323
|
+
// Safe path of a project's learned-synonyms file, or null when the project
// name is invalid or the resulting path would escape the phren directory.
export function learnedSynonymsPath(phrenPath, project) {
    return isValidProjectName(project)
        ? safeProjectPath(phrenPath, project, LEARNED_SYNONYMS_FILE)
        : null;
}
|
|
328
|
+
// Load the learned synonyms recorded for a project. Returns {} when the
// project is invalid, the phren root cannot be found, the path is unsafe,
// or the file is absent/broken.
export function loadLearnedSynonyms(project, phrenPath) {
    if (!project || !isValidProjectName(project))
        return {};
    const root = phrenPath ?? findPhrenPath();
    if (!root)
        return {};
    const filePath = learnedSynonymsPath(root, project);
    return filePath ? parseLearnedSynonymsJson(filePath) : {};
}
|
|
339
|
+
// Full synonym map for a project: built-in base synonyms, overlaid by
// user-configured synonyms, then by learned synonyms.
export function loadSynonymMap(project, phrenPath) {
    const userMap = loadUserSynonyms(project, phrenPath);
    const learnedMap = loadLearnedSynonyms(project, phrenPath);
    return mergeSynonymMaps(BASE_SYNONYMS, userMap, learnedMap);
}
|
|
342
|
+
// Persist newly-learned synonyms for `term` into the project's
// learned-synonyms file, merging with existing entries. Writes atomically
// (temp file + rename) and returns the updated map. Throws on an invalid
// project name, a traversal attempt, or an unusable term; a no-op addition
// simply returns the currently-stored map.
export function learnSynonym(phrenPath, project, term, synonyms) {
    if (!isValidProjectName(project))
        throw new Error(`Invalid project name: ${project}`);
    const targetPath = learnedSynonymsPath(phrenPath, project);
    if (!targetPath)
        throw new Error(`Path traversal detected for project: ${project}`);
    const normalizedTerm = normalizeSynonymTerm(term);
    if (!normalizedTerm || normalizedTerm.length <= 1)
        throw new Error("Invalid synonym term");
    const additions = normalizeSynonymValues(synonyms, normalizedTerm);
    if (additions.length === 0)
        return loadLearnedSynonyms(project, phrenPath);
    fs.mkdirSync(path.dirname(targetPath), { recursive: true });
    const merged = mergeSynonymMaps(parseLearnedSynonymsJson(targetPath), {
        [normalizedTerm]: additions,
    });
    const tmpPath = `${targetPath}.tmp-${Date.now()}`;
    fs.writeFileSync(tmpPath, JSON.stringify(merged, null, 2) + "\n", "utf8");
    fs.renameSync(tmpPath, targetPath);
    return merged;
}
|
|
364
|
+
// Remove learned synonyms for `term` from the project's learned-synonyms
// file. With no `synonyms` list the whole entry is dropped; otherwise only
// the listed values are removed (dropping the entry when it empties). The
// file itself is deleted when the map becomes empty, else written atomically.
// Returns the resulting map; throws on invalid project/term or traversal.
export function removeLearnedSynonym(phrenPath, project, term, synonyms) {
    if (!isValidProjectName(project))
        throw new Error(`Invalid project name: ${project}`);
    const targetPath = learnedSynonymsPath(phrenPath, project);
    if (!targetPath)
        throw new Error(`Path traversal detected for project: ${project}`);
    const normalizedTerm = normalizeSynonymTerm(term);
    if (!normalizedTerm || normalizedTerm.length <= 1)
        throw new Error("Invalid synonym term");
    const current = parseLearnedSynonymsJson(targetPath);
    if (!current[normalizedTerm])
        return current;
    if (synonyms && synonyms.length > 0) {
        const removals = new Set(normalizeSynonymValues(synonyms));
        const kept = (current[normalizedTerm] || []).filter((item) => !removals.has(item));
        if (kept.length > 0)
            current[normalizedTerm] = kept;
        else
            delete current[normalizedTerm];
    }
    else {
        delete current[normalizedTerm];
    }
    fs.mkdirSync(path.dirname(targetPath), { recursive: true });
    if (Object.keys(current).length === 0) {
        try {
            fs.unlinkSync(targetPath);
        }
        catch {
            // Best-effort cleanup — the file may not exist.
        }
        return {};
    }
    const tmpPath = `${targetPath}.tmp-${Date.now()}`;
    fs.writeFileSync(tmpPath, JSON.stringify(current, null, 2) + "\n", "utf8");
    fs.renameSync(tmpPath, targetPath);
    return current;
}
|
|
399
|
+
// Build the list of FTS5 clause strings for a raw user query. Each returned
// clause is either a quoted core term/phrase or a parenthesized OR-group of
// that term with its synonyms; callers join the clauses with AND (strict) or
// a relaxed combination. Returns [] when the input yields no usable terms.
function buildFtsClauses(raw, project, phrenPath) {
    const MAX_TOTAL_TERMS = 10;
    const MAX_SYNONYM_GROUPS = 3;
    // Step 1: Sanitize — strip FTS5 special chars, enforce length limits
    const safe = sanitizeFts5Query(raw);
    if (!safe)
        return [];
    // Step 2: Merge built-in and per-project synonym maps
    const synonymsMap = loadSynonymMap(project, phrenPath);
    // Step 3: Tokenize — split sanitized input into individual words (min length 2)
    const baseWords = safe.split(/\s+/).filter((t) => t.length > 1);
    if (baseWords.length === 0)
        return [];
    // Step 4: Filter stop words — remove common English words that add no search signal
    const filteredTerms = baseWords.filter((t) => !STOP_WORDS.has(t.toLowerCase()));
    // Step 5: Build bigrams — sliding window over adjacent filtered terms for phrase matching
    const bigrams = [];
    for (let i = 0; i < filteredTerms.length - 1; i++) {
        bigrams.push(`${filteredTerms[i]} ${filteredTerms[i + 1]}`);
    }
    // Step 6: Match bigrams against synonym keys — bigram matches are promoted to quoted
    // phrases and their constituent words are marked consumed (not repeated as singletons)
    // NOTE: bigram index i spans filteredTerms[i] and filteredTerms[i+1], so
    // consumedIndices doubles as a word-index set for Step 7.
    const consumedIndices = new Set();
    const matchedBigrams = [];
    for (let i = 0; i < bigrams.length; i++) {
        const bg = bigrams[i].toLowerCase();
        if (synonymsMap[bg]) {
            consumedIndices.add(i);
            consumedIndices.add(i + 1);
            matchedBigrams.push(bigrams[i]);
        }
    }
    // Step 7: Assemble and deduplicate core terms — matched bigrams (as quoted phrases)
    // first, then unconsumed individual words; duplicates removed via seenTerms
    const dedupedTerms = [];
    const seenTerms = new Set();
    for (const bg of matchedBigrams) {
        const clean = bg.replace(/"/g, "").trim().toLowerCase();
        if (!seenTerms.has(clean)) {
            seenTerms.add(clean);
            dedupedTerms.push(`"${bg.replace(/"/g, "").trim()}"`);
        }
    }
    for (let i = 0; i < filteredTerms.length; i++) {
        if (!consumedIndices.has(i)) {
            const w = filteredTerms[i].replace(/"/g, "").trim();
            const wLow = w.toLowerCase();
            if (w.length > 1 && !seenTerms.has(wLow)) {
                seenTerms.add(wLow);
                dedupedTerms.push(`"${w}"`);
            }
        }
    }
    if (dedupedTerms.length === 0)
        return [];
    // Step 8: Expand synonyms — for up to MAX_SYNONYM_GROUPS core terms, add OR alternatives
    // from the synonym map; total term count is capped at MAX_TOTAL_TERMS to keep queries sane
    let totalTermCount = dedupedTerms.length;
    let groupsExpanded = 0;
    const expandedClauses = [];
    for (const coreTerm of dedupedTerms) {
        const termText = coreTerm.slice(1, -1).toLowerCase(); // strip surrounding quotes
        const synonyms = [];
        if (groupsExpanded < MAX_SYNONYM_GROUPS && synonymsMap[termText]) {
            for (const syn of synonymsMap[termText]) {
                if (totalTermCount >= MAX_TOTAL_TERMS)
                    break;
                const cleanSyn = syn.replace(/"/g, "").trim();
                if (cleanSyn.length > 1) {
                    synonyms.push(`"${cleanSyn}"`);
                    totalTermCount++;
                }
            }
            groupsExpanded++;
        }
        if (synonyms.length > 0) {
            expandedClauses.push(`(${coreTerm} OR ${synonyms.join(" OR ")})`);
        }
        else {
            expandedClauses.push(coreTerm);
        }
    }
    // Step 9: Join all clauses with AND — every core term (with its OR synonyms) must match
    return expandedClauses;
}
|
|
484
|
+
// Heuristic salience score for a single FTS clause: the length of its
// longest token, plus 1.5 when the flattened clause has multiple tokens,
// plus 0.5 when the clause contains an OR synonym expansion.
function clauseSignalScore(clause) {
    const flattened = clause
        .replace(/[()"]/g, " ")
        .replace(/\bOR\b/gi, " ")
        .replace(/\s+/g, " ")
        .trim()
        .toLowerCase();
    if (!flattened)
        return 0;
    const tokens = flattened.split(" ").filter(Boolean);
    let longest = 0;
    for (const token of tokens)
        longest = Math.max(longest, token.length);
    let score = longest;
    if (tokens.length > 1)
        score += 1.5;
    if (/\bOR\b/i.test(clause))
        score += 0.5;
    return score;
}
|
|
499
|
+
// Build a defensive FTS5 MATCH query:
// - sanitizes user input
// - extracts bigrams and treats them as quoted phrases
// - expands known synonyms (capped at 10 total terms)
// - applies AND between core terms, with synonyms as OR alternatives
export function buildRobustFtsQuery(raw, project, phrenPath) {
    const clauses = buildFtsClauses(raw, project, phrenPath);
    // Every clause must match; an empty clause list yields an empty query.
    return clauses.length > 0 ? clauses.join(" AND ") : "";
}
|
|
510
|
+
// Build a relaxed lexical rescue query that matches any 2 of the most informative
// clauses. This is only intended as a fallback when the stricter AND query returns
// nothing; it trades precision for recall while staying in the FTS index.
export function buildRelaxedFtsQuery(raw, project, phrenPath) {
    const clauses = buildFtsClauses(raw, project, phrenPath);
    if (clauses.length < 3) {
        return "";
    }
    // Score every clause, keep the top four (ties broken by original
    // position), then restore source order so the output is stable.
    const ranked = clauses.map((clause, index) => ({
        clause,
        index,
        score: clauseSignalScore(clause),
    }));
    ranked.sort((a, b) => {
        const delta = b.score - a.score;
        return Math.abs(delta) > 0.01 ? delta : a.index - b.index;
    });
    const salient = ranked.slice(0, Math.min(4, clauses.length));
    salient.sort((a, b) => a.index - b.index);
    if (salient.length < 2) {
        return "";
    }
    // Emit every 2-of-N pairing: (A AND B) OR (A AND C) OR ...
    const pairs = [];
    for (let first = 0; first < salient.length - 1; first++) {
        for (let second = first + 1; second < salient.length; second++) {
            pairs.push(`(${salient[first].clause} AND ${salient[second].clause})`);
        }
    }
    return pairs.join(" OR ");
}
|
|
537
|
+
// Produce the ordered, de-duplicated list of FTS query variants to try:
// the strict AND query first, then the relaxed 2-of-N rescue query.
// Empty variants are dropped.
export function buildFtsQueryVariants(raw, project, phrenPath) {
    const unique = new Set();
    for (const query of [
        buildRobustFtsQuery(raw, project, phrenPath),
        buildRelaxedFtsQuery(raw, project, phrenPath),
    ]) {
        if (query) {
            unique.add(query);
        }
    }
    return [...unique];
}
|
package/package.json
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@phren/cli",
|
|
3
|
+
"version": "0.0.1",
|
|
4
|
+
"description": "Long-term memory for AI agents. Stored as markdown in a git repo you own.",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"bin": {
|
|
7
|
+
"phren": "mcp/dist/index.js"
|
|
8
|
+
},
|
|
9
|
+
"files": [
|
|
10
|
+
"mcp/dist",
|
|
11
|
+
"starter",
|
|
12
|
+
"skills"
|
|
13
|
+
],
|
|
14
|
+
"dependencies": {
|
|
15
|
+
"@modelcontextprotocol/sdk": "^1.27.1",
|
|
16
|
+
"chalk": "^5.6.2",
|
|
17
|
+
"glob": "^13.0.6",
|
|
18
|
+
"inquirer": "^12.10.0",
|
|
19
|
+
"js-yaml": "^4.1.1",
|
|
20
|
+
"sharp": "^0.34.5",
|
|
21
|
+
"sql.js-fts5": "^1.4.0",
|
|
22
|
+
"zod": "^4.3.6"
|
|
23
|
+
},
|
|
24
|
+
"devDependencies": {
|
|
25
|
+
"@playwright/test": "^1.58.2",
|
|
26
|
+
"@types/js-yaml": "^4.0.9",
|
|
27
|
+
"@types/node": "^25.3.5",
|
|
28
|
+
"@typescript-eslint/eslint-plugin": "^8.56.1",
|
|
29
|
+
"@typescript-eslint/parser": "^8.56.1",
|
|
30
|
+
"@vitest/coverage-v8": "^4.0.18",
|
|
31
|
+
"eslint": "^10.0.3",
|
|
32
|
+
"tsx": "^4.21.0",
|
|
33
|
+
"typescript": "^5.9.3",
|
|
34
|
+
"vitest": "^4.0.18"
|
|
35
|
+
},
|
|
36
|
+
"scripts": {
|
|
37
|
+
"build": "rm -rf mcp/dist && tsc -p mcp/tsconfig.json && chmod +x mcp/dist/index.js && cp mcp/src/synonyms*.json mcp/dist/",
|
|
38
|
+
"dev": "tsx mcp/src/index.ts",
|
|
39
|
+
"lint": "eslint mcp/src/ --ignore-pattern '*.test.ts'",
|
|
40
|
+
"validate-docs": "bash scripts/validate-docs.sh",
|
|
41
|
+
"pretest": "npm run build",
|
|
42
|
+
"test": "vitest run",
|
|
43
|
+
"test:e2e": "npm run build && playwright test",
|
|
44
|
+
"test:e2e:install": "playwright install chromium",
|
|
45
|
+
"bench": "tsx mcp/bench/locomo-runner.ts --sessions 3",
|
|
46
|
+
"bench:retrieval": "tsx scripts/bench-retrieval-modes.ts",
|
|
47
|
+
"bench:retrieval:synthetic": "tsx scripts/bench-retrieval-synthetic.ts",
|
|
48
|
+
"prepublishOnly": "npm run build && npm test"
|
|
49
|
+
},
|
|
50
|
+
"engines": {
|
|
51
|
+
"node": ">=20.0.0"
|
|
52
|
+
},
|
|
53
|
+
"keywords": [
|
|
54
|
+
"claude",
|
|
55
|
+
"claude-code",
|
|
56
|
+
"mcp",
|
|
57
|
+
"memory",
|
|
58
|
+
"knowledge"
|
|
59
|
+
],
|
|
60
|
+
"author": "Ala Arab",
|
|
61
|
+
"license": "MIT",
|
|
62
|
+
"repository": {
|
|
63
|
+
"type": "git",
|
|
64
|
+
"url": "git+https://github.com/alaarab/phren.git"
|
|
65
|
+
},
|
|
66
|
+
"homepage": "https://github.com/alaarab/phren#readme"
|
|
67
|
+
}
|
package/skills/README.md
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
# skills/
|
|
2
|
+
|
|
3
|
+
Phren slash commands that users invoke with `phren-<name>` (e.g. `phren-init`, `phren-sync`).
|
|
4
|
+
|
|
5
|
+
Each subdirectory contains a `SKILL.md` file with the full prompt that Claude executes when the skill is invoked. These are global skills shipped with the phren package — they work across all projects.
|
|
6
|
+
|
|
7
|
+
Users interact with this directory when adding custom global skills or reading what built-in skills do. For project-specific skills, use `~/.phren/<project>/skills/` instead.
|