@phren/cli 0.0.36 → 0.0.38
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/mcp/dist/cli-hooks-stop.js +28 -0
- package/mcp/dist/content/learning.js +2 -2
- package/mcp/dist/governance/locks.js +5 -34
- package/mcp/dist/governance/policy.js +2 -2
- package/mcp/dist/init/init-configure.js +338 -0
- package/mcp/dist/init/init-hooks-mode.js +57 -0
- package/mcp/dist/init/init-mcp-mode.js +80 -0
- package/mcp/dist/init/init-uninstall.js +493 -0
- package/mcp/dist/init/init-walkthrough.js +524 -0
- package/mcp/dist/init/init.js +18 -1447
- package/mcp/dist/init/setup.js +15 -5
- package/mcp/dist/init-uninstall.js +11 -2
- package/mcp/dist/phren-paths.js +20 -3
- package/mcp/dist/shared/index.js +8 -0
- package/mcp/dist/task/lifecycle.js +1 -1
- package/package.json +2 -1
|
@@ -0,0 +1,524 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Interactive walkthrough for first-time phren init.
|
|
3
|
+
* Prompts the user through storage, identity, MCP, hooks, and feature configuration.
|
|
4
|
+
*/
|
|
5
|
+
import * as path from "path";
|
|
6
|
+
import { PROJECT_OWNERSHIP_MODES } from "../project-config.js";
|
|
7
|
+
import { getMachineName } from "../machine-identity.js";
|
|
8
|
+
import { expandHomePath, homePath } from "../shared.js";
|
|
9
|
+
import { errorMessage } from "../utils.js";
|
|
10
|
+
import { logger } from "../logger.js";
|
|
11
|
+
import { log } from "./shared.js";
|
|
12
|
+
import { detectProjectDir, inferInitScaffoldFromRepo, } from "./setup.js";
|
|
13
|
+
/**
 * Wrap an optional style object so every color channel is callable.
 *
 * @param {object} [style] - Partial style with optional header/success/warning functions.
 * @returns {object} Style whose missing channels pass text through unchanged.
 */
export function withFallbackColors(style) {
    const identity = (text) => text;
    return {
        header: style?.header ?? identity,
        success: style?.success ?? identity,
        warning: style?.warning ?? identity,
    };
}
|
|
20
|
+
/**
 * Build the walkthrough color style, using chalk when it can be loaded and
 * falling back to plain pass-through text otherwise.
 *
 * @returns {Promise<object>} Style with header/success/warning functions.
 */
export async function createWalkthroughStyle() {
    try {
        // String("chalk") presumably keeps bundlers from statically resolving
        // this optional dependency — TODO confirm.
        const chalkModule = await import(String("chalk"));
        // chalk may expose itself as a default export, a named export, or the
        // module object itself depending on version/interop.
        const chalk = chalkModule.default ?? chalkModule.chalk ?? chalkModule;
        return withFallbackColors({
            header: (text) => chalk.bold.cyan(text),
            success: (text) => chalk.green(text),
            warning: (text) => chalk.yellow(text),
        });
    }
    catch {
        // chalk unavailable — uncolored output.
        return withFallbackColors();
    }
}
|
|
37
|
+
/**
 * Create the prompt interface ({ input, confirm, select }) used by the init
 * walkthrough.
 *
 * Resolution order:
 *   1. Modern inquirer named prompt functions (input/confirm/select).
 *   2. Legacy inquirer `prompt()` (default export or named export).
 *   3. Bare Node readline fallback requiring no third-party package.
 *
 * @returns {Promise<object>} Prompt helpers:
 *   - input(message, initialValue?): Promise<string> — trimmed answer
 *   - confirm(message, defaultValue = false): Promise<boolean>
 *   - select(message, choices, defaultValue?): Promise<choice value>
 */
export async function createWalkthroughPrompts() {
    try {
        // String("inquirer") presumably prevents bundlers from statically
        // resolving this optional dependency — TODO confirm.
        const inquirerModule = await import(String("inquirer"));
        const maybeFns = inquirerModule;
        // Preferred path: modern @inquirer-style named prompt functions.
        if (typeof maybeFns.input === "function"
            && typeof maybeFns.confirm === "function"
            && typeof maybeFns.select === "function") {
            return {
                input: async (message, initialValue) => (await maybeFns.input({ message, default: initialValue })).trim(),
                confirm: async (message, defaultValue = false) => Boolean(await maybeFns.confirm({ message, default: defaultValue })),
                select: async (message, choices, defaultValue) => maybeFns.select({
                    message,
                    // Modern select supports an optional per-choice description.
                    choices: choices.map((choice) => ({ value: choice.value, name: choice.name, description: choice.description })),
                    default: defaultValue,
                }),
            };
        }
        // Legacy path: inquirer's question-array prompt() API.
        const prompt = maybeFns.default?.prompt ?? maybeFns.prompt;
        if (typeof prompt === "function") {
            return {
                input: async (message, initialValue) => {
                    const answer = await prompt([{ type: "input", name: "value", message, default: initialValue }]);
                    return String(answer.value ?? "").trim();
                },
                confirm: async (message, defaultValue = false) => {
                    const answer = await prompt([{ type: "confirm", name: "value", message, default: defaultValue }]);
                    return Boolean(answer.value);
                },
                select: async (message, choices, defaultValue) => {
                    const answer = await prompt([{
                        type: "list",
                        name: "value",
                        message,
                        choices: choices.map((choice) => ({ value: choice.value, name: choice.name })),
                        default: defaultValue,
                    }]);
                    return String(answer.value);
                },
            };
        }
    }
    catch {
        // fallback below
    }
    // Last resort: stdlib readline. The interface is only closed at process
    // exit, so it stays open across all walkthrough questions.
    const readline = await import("readline");
    const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
    const ask = (message) => new Promise((resolve) => rl.question(message, resolve));
    process.once("exit", () => rl.close());
    return {
        input: async (message, initialValue) => {
            const prompt = initialValue ? `${message} (${initialValue}): ` : `${message}: `;
            const answer = (await ask(prompt)).trim();
            // Empty answer falls back to the suggested initial value.
            return answer || (initialValue ?? "");
        },
        confirm: async (message, defaultValue = false) => {
            const suffix = defaultValue ? "[Y/n]" : "[y/N]";
            const answer = (await ask(`${message} ${suffix}: `)).trim().toLowerCase();
            if (!answer)
                return defaultValue;
            return answer === "y" || answer === "yes";
        },
        select: async (message, choices, defaultValue) => {
            // Render a numbered menu and accept a 1-based index.
            log(`${message}`);
            for (const [index, choice] of choices.entries()) {
                log(` ${index + 1}. ${choice.name}`);
            }
            const selected = (await ask(`Select [1-${choices.length}]${defaultValue ? " (Enter for default)" : ""}: `)).trim();
            if (!selected && defaultValue)
                return defaultValue;
            const idx = Number.parseInt(selected, 10) - 1;
            if (!Number.isNaN(idx) && idx >= 0 && idx < choices.length)
                return choices[idx].value;
            // Out-of-range or non-numeric input: default, else first choice.
            return defaultValue ?? choices[0].value;
        },
    };
}
|
|
113
|
+
/**
 * Detect the repository root to use for per-project storage, anchored at the
 * current working directory.
 *
 * @param {string} phrenPath - Candidate phren storage path passed through to detection.
 * @returns {*} Repo root when detected; callers only truthiness-check the result,
 *   so the exact falsy value from detectProjectDir is not relied upon here.
 */
function detectRepoRootForStorage(phrenPath) {
    const cwd = process.cwd();
    return detectProjectDir(cwd, phrenPath);
}
|
|
116
|
+
/**
 * Run the interactive first-time init walkthrough.
 *
 * Prompts, in order, for: storage location, an optional existing-memory clone
 * URL (which short-circuits the rest with defaults), identity, project domain,
 * project ownership, MCP, hooks, semantic search, auto-capture, task
 * management, workflow guardrails, optional LLM-powered quality features,
 * finding sensitivity, GitHub sync, and bootstrapping the detected project.
 * Prints a configuration summary and returns the collected settings.
 *
 * @param {string} phrenPath - Current/default phren storage path; used as the
 *   custom-path default and for repo detection.
 * @returns {Promise<object>} Walkthrough configuration object consumed by init.
 */
export async function runWalkthrough(phrenPath) {
    const prompts = await createWalkthroughPrompts();
    const style = await createWalkthroughStyle();
    const divider = style.header("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━");
    // Print a titled banner delimiting each walkthrough section.
    const printSection = (title) => {
        log("");
        log(divider);
        log(style.header(title));
        log(divider);
    };
    const printSummary = (items) => {
        printSection("Configuration Summary");
        for (const item of items) {
            log(style.success(`✓ ${item}`));
        }
    };
    const { renderPhrenArt } = await import("../phren-art.js");
    log("");
    log(renderPhrenArt(" "));
    log("");
    printSection("Welcome");
    log("Let's set up persistent memory for your AI agents.");
    log("Every option can be changed later.\n");
    printSection("Storage Location");
    log("Where should phren store data?");
    const storageChoice = await prompts.select("Storage location", [
        {
            value: "global",
            name: "global (~/.phren/ - default, shared across projects)",
        },
        {
            value: "project",
            name: "per-project (<repo>/.phren/ - scoped to this repo, add to .gitignore)",
        },
        {
            value: "custom",
            name: "custom path",
        },
    ], "global");
    let storagePath = path.resolve(homePath(".phren"));
    let storageRepoRoot;
    if (storageChoice === "project") {
        const repoRoot = detectRepoRootForStorage(phrenPath);
        if (!repoRoot) {
            throw new Error("Per-project storage requires running init from a repository directory.");
        }
        storageRepoRoot = repoRoot;
        storagePath = path.join(repoRoot, ".phren");
    }
    else if (storageChoice === "custom") {
        const customInput = await prompts.input("Custom phren path", phrenPath);
        storagePath = path.resolve(expandHomePath(customInput || phrenPath));
    }
    printSection("Existing Phren");
    log("If you've already set up phren on another machine, paste the git clone URL.");
    log("Otherwise, leave blank.");
    const cloneAnswer = await prompts.input("Clone URL (leave blank to skip)");
    // A clone URL short-circuits the walkthrough: adopt defaults and reuse the
    // remote memory rather than asking the remaining questions.
    if (cloneAnswer) {
        const cloneConfig = {
            storageChoice,
            storagePath,
            storageRepoRoot,
            machine: getMachineName(),
            profile: "personal",
            mcp: "on",
            hooks: "on",
            projectOwnershipDefault: "phren-managed",
            findingsProactivity: "high",
            taskProactivity: "high",
            lowConfidenceThreshold: 0.7,
            riskySections: ["Stale", "Conflicts"],
            taskMode: "auto",
            bootstrapCurrentProject: false,
            ollamaEnabled: false,
            autoCaptureEnabled: false,
            semanticDedupEnabled: false,
            semanticConflictEnabled: false,
            findingSensitivity: "balanced",
            cloneUrl: cloneAnswer,
            domain: "software",
        };
        printSummary([
            `Storage: ${storageChoice} (${storagePath})`,
            `Existing memory clone: ${cloneAnswer}`,
            `Machine: ${cloneConfig.machine}`,
            `Profile: ${cloneConfig.profile}`,
            "MCP: enabled",
            "Hooks: enabled",
            "Project ownership default: phren-managed",
            "Task mode: auto",
            "Domain: software",
        ]);
        return cloneConfig;
    }
    const defaultMachine = getMachineName();
    printSection("Identity");
    const machine = await prompts.input("Machine name", defaultMachine);
    const profile = await prompts.input("Profile name", "personal");
    const repoForInference = detectProjectDir(process.cwd(), storagePath);
    const inferredScaffold = repoForInference
        ? inferInitScaffoldFromRepo(repoForInference)
        : null;
    const inferredDomain = inferredScaffold?.domain ?? "software";
    printSection("Project Domain");
    log("What kind of project is this?");
    if (repoForInference && inferredScaffold) {
        log(`Detected repo signals from ${repoForInference} (${inferredScaffold.reason}).`);
        if (inferredScaffold.referenceHints.length > 0) {
            log(`Reference hints: ${inferredScaffold.referenceHints.join(", ")}`);
        }
    }
    // Use inferred domain directly — adaptive init derives domain from repo content.
    // Only ask if inference was weak (fell back to default "software" with no signals).
    let domain = inferredDomain;
    if (inferredDomain === "software" && !inferredScaffold) {
        domain = await prompts.select("Project domain", [
            { value: "software", name: "software" },
            { value: "research", name: "research" },
            { value: "creative", name: "creative" },
            { value: "other", name: "other" },
        ], "software");
    }
    else {
        log(`Domain: ${inferredDomain} (inferred from project content)`);
    }
    printSection("Project Ownership");
    log("Choose who owns repo-facing instruction files for projects you add.");
    log(" phren-managed: Phren may mirror CLAUDE.md / AGENTS.md into the repo");
    log(" detached: Phren keeps its own docs but does not write into the repo");
    log(" repo-managed: keep the repo's existing CLAUDE/AGENTS files as canonical");
    log(" Change later: npx phren config project-ownership <mode>");
    const projectOwnershipDefault = await prompts.select("Default project ownership", [
        { value: "detached", name: "detached (default)" },
        { value: "phren-managed", name: "phren-managed" },
        { value: "repo-managed", name: "repo-managed" },
    ], "detached");
    printSection("MCP");
    log("MCP mode registers phren as a tool server so your AI agent can call it");
    log("directly: search memory, manage tasks, save findings, etc.");
    log(" Recommended for: Claude Code, Cursor, Copilot CLI, Codex");
    log(" Alternative: hooks-only mode (read-only context injection, any agent)");
    log(" Change later: npx phren mcp-mode on|off");
    const mcp = (await prompts.confirm("Enable MCP?", true)) ? "on" : "off";
    printSection("Hooks");
    log("Hooks run shell commands at session start, prompt submit, and session end.");
    log(" - SessionStart: git pull (keeps memory in sync across machines)");
    log(" - UserPromptSubmit: searches phren and injects relevant context");
    log(" - Stop: commits and pushes any new findings after each response");
    log(" What they touch: ~/.claude/settings.json (hooks section only)");
    log(" Change later: npx phren hooks-mode on|off");
    const hooks = (await prompts.confirm("Enable hooks?", true)) ? "on" : "off";
    printSection("Semantic Search (Optional)");
    log("Phren can use a local embedding model for semantic (fuzzy) search via Ollama.");
    log(" Best fit: paraphrase-heavy or weak-lexical queries.");
    log(" Skip it if you mostly search by filenames, symbols, commands, or exact phrases.");
    log(" - Model: nomic-embed-text (274 MB, one-time download)");
    log(" - Ollama runs locally, no cloud, no cost");
    log(" - Falls back to FTS5 keyword search if disabled or unavailable");
    log(" Change later: set PHREN_OLLAMA_URL=off to disable");
    let ollamaEnabled = false;
    // Tailor the semantic-search prompt to the detected Ollama state; any probe
    // failure is logged at debug level and leaves the feature disabled.
    try {
        const { checkOllamaStatus } = await import("../shared/ollama.js");
        const status = await checkOllamaStatus();
        if (status === "ready") {
            log(" Ollama detected with nomic-embed-text ready.");
            ollamaEnabled = await prompts.confirm("Enable semantic search for fuzzy/paraphrase recovery?", false);
        }
        else if (status === "no_model") {
            log(" Ollama detected, but nomic-embed-text is not pulled yet.");
            ollamaEnabled = await prompts.confirm("Enable semantic search for fuzzy/paraphrase recovery? (will pull nomic-embed-text)", false);
            if (ollamaEnabled) {
                log(" Run after init: ollama pull nomic-embed-text");
            }
        }
        else if (status === "not_running") {
            log(" Ollama not detected. Install it to enable semantic search:");
            log(" https://ollama.com → then: ollama pull nomic-embed-text");
            ollamaEnabled = await prompts.confirm("Enable semantic search (Ollama not installed yet)?", false);
            if (ollamaEnabled) {
                log(style.success(" Semantic search enabled — will activate once Ollama is running."));
                log(" To disable: set PHREN_OLLAMA_URL=off in your shell profile");
            }
        }
    }
    catch (err) {
        logger.debug("init", `init ollamaCheck: ${errorMessage(err)}`);
    }
    printSection("Auto-Capture (Optional)");
    log("After each session, phren scans the conversation for insight-signal phrases");
    log("(\"always\", \"never\", \"pitfall\", \"gotcha\", etc.) and saves them automatically.");
    log(" - Runs silently in the Stop hook; captured findings go to FINDINGS.md");
    log(" - You can review and remove any auto-captured entry at any time");
    log(" - Can be toggled: set PHREN_FEATURE_AUTO_CAPTURE=0 to disable");
    const autoCaptureEnabled = await prompts.confirm("Enable auto-capture?", true);
    let findingsProactivity = "high";
    if (autoCaptureEnabled) {
        log(" Findings capture level controls how eager phren is to save lessons automatically.");
        log(" Change later: npx phren config proactivity.findings <high|medium|low>");
        findingsProactivity = await prompts.select("Findings capture level", [
            { value: "high", name: "high (recommended)" },
            { value: "medium", name: "medium" },
            { value: "low", name: "low" },
        ], "high");
    }
    else {
        // Auto-capture off: drop findings proactivity to the floor.
        findingsProactivity = "low";
    }
    printSection("Task Management");
    log("Choose how phren handles tasks as you work.");
    log(" auto (recommended): captures tasks naturally as you work, links findings to tasks");
    log(" suggest: proposes tasks but waits for approval before writing");
    log(" manual: tasks are fully manual — you add them yourself");
    log(" off: never touch tasks automatically");
    log(" Change later: npx phren config workflow set --taskMode=<mode>");
    const taskMode = await prompts.select("Task mode", [
        { value: "auto", name: "auto (recommended)" },
        { value: "suggest", name: "suggest" },
        { value: "manual", name: "manual" },
        { value: "off", name: "off" },
    ], "auto");
    let taskProactivity = "high";
    // Proactivity is only meaningful when phren may write/suggest tasks itself.
    if (taskMode === "auto" || taskMode === "suggest") {
        log(" Task proactivity controls how much evidence phren needs before capturing tasks.");
        log(" high (recommended): captures tasks as they come up naturally");
        log(" medium: only when you explicitly mention a task");
        log(" low: minimal auto-capture");
        log(" Change later: npx phren config proactivity.tasks <high|medium|low>");
        taskProactivity = await prompts.select("Task proactivity", [
            { value: "high", name: "high (recommended)" },
            { value: "medium", name: "medium" },
            { value: "low", name: "low" },
        ], "high");
    }
    printSection("Workflow Guardrails");
    log("Choose how strict review gates should be for risky or low-confidence writes.");
    log(" lowConfidenceThreshold: confidence cutoff used to mark writes as risky");
    log(" riskySections: sections always treated as risky");
    log(" Change later: npx phren config workflow set --lowConfidenceThreshold=0.7 --riskySections=Stale,Conflicts");
    const thresholdAnswer = await prompts.input("Low-confidence threshold [0.0-1.0]", "0.7");
    const lowConfidenceThreshold = parseLowConfidenceThreshold(thresholdAnswer, 0.7);
    const riskySectionsAnswer = await prompts.input("Risky sections [Review,Stale,Conflicts]", "Stale,Conflicts");
    const riskySections = parseRiskySectionsAnswer(riskySectionsAnswer, ["Stale", "Conflicts"]);
    // Only offer semantic dedup/conflict when an LLM endpoint is explicitly configured.
    // These features call /chat/completions, not an embedding endpoint, so we gate on
    // PHREN_LLM_ENDPOINT (primary) or the presence of a known API key as a fallback.
    // PHREN_EMBEDDING_API_URL alone is NOT sufficient — it only enables embeddings,
    // not the LLM chat call that callLlm() makes.
    const hasLlmApi = Boolean((process.env.PHREN_LLM_ENDPOINT) ||
        process.env.ANTHROPIC_API_KEY ||
        process.env.OPENAI_API_KEY);
    let semanticDedupEnabled = false;
    let semanticConflictEnabled = false;
    if (hasLlmApi) {
        printSection("LLM-Powered Memory Quality (Optional)");
        log("Phren can use an LLM to catch near-duplicate or conflicting findings.");
        log(" Requires: PHREN_LLM_ENDPOINT or ANTHROPIC_API_KEY/OPENAI_API_KEY set");
        log("");
        log("Semantic dedup: before saving a finding, ask the LLM whether it means the");
        log("same thing as an existing one (catches same idea with different wording).");
        semanticDedupEnabled = await prompts.confirm("Enable LLM-powered duplicate detection?", false);
        log("");
        log("Conflict detection: after saving a finding, check whether it contradicts an");
        log("existing one (e.g. \"always use X\" vs \"never use X\"). Adds an inline annotation.");
        semanticConflictEnabled = await prompts.confirm("Enable LLM-powered conflict detection?", false);
        if (semanticDedupEnabled || semanticConflictEnabled) {
            const currentModel = (process.env.PHREN_LLM_MODEL) || "gpt-4o-mini / claude-haiku-4-5-20251001 (default)";
            log("");
            log(" Cost note: each semantic check is ~80 input + ~5 output tokens, cached 24h.");
            log(` Current model: ${currentModel}`);
            const llmModel = (process.env.PHREN_LLM_MODEL);
            // Warn when the configured model looks like a premium tier; the
            // negative lookahead keeps gpt-4o-mini out of the expensive bucket.
            const isExpensive = llmModel && /opus|sonnet|gpt-4(?!o-mini)/i.test(llmModel);
            if (isExpensive) {
                log(style.warning(` Warning: ${llmModel} is 20x more expensive than Haiku for yes/no checks.`));
                log(" Consider: PHREN_LLM_MODEL=claude-haiku-4-5-20251001");
            }
            else {
                log(" With Haiku: fractions of a cent/session. With Opus: ~$0.20/session for heavy use.");
                log(" Tip: set PHREN_LLM_MODEL=claude-haiku-4-5-20251001 to keep costs low.");
            }
        }
    }
    printSection("Finding Sensitivity");
    log("Controls how eagerly agents save findings to memory.");
    log(" minimal — only when you explicitly ask");
    log(" conservative — decisions and pitfalls only");
    log(" balanced — non-obvious patterns, decisions, pitfalls, bugs (recommended)");
    log(" aggressive — everything worth remembering, err on the side of capturing");
    log(" Change later: npx phren config finding-sensitivity <level>");
    const findingSensitivity = await prompts.select("Finding sensitivity", [
        { value: "balanced", name: "balanced (recommended)" },
        { value: "conservative", name: "conservative" },
        { value: "minimal", name: "minimal" },
        { value: "aggressive", name: "aggressive" },
    ], "balanced");
    printSection("GitHub Sync");
    log(`Phren stores memory as plain Markdown files in a git repo (${storagePath}).`);
    log("Push it to a private GitHub repo to sync memory across machines.");
    log(" Hooks will auto-commit + push after every session and pull on start.");
    log(" Skip this if you just want to try phren locally first.");
    const githubAnswer = await prompts.input("GitHub username (leave blank to skip)");
    const githubUsername = githubAnswer || undefined;
    let githubRepo;
    if (githubUsername) {
        const repoAnswer = await prompts.input("Repo name", "my-phren");
        githubRepo = repoAnswer || "my-phren";
    }
    let bootstrapCurrentProject = false;
    let bootstrapOwnership;
    const detectedProject = detectProjectDir(process.cwd(), storagePath);
    if (detectedProject) {
        const detectedProjectName = path.basename(detectedProject);
        printSection("Current Project");
        log(`Detected project: ${detectedProjectName}`);
        bootstrapCurrentProject = await prompts.confirm("Add this project to phren now?", true);
        if (!bootstrapCurrentProject) {
            // Already false on this branch; the assignment is a no-op kept as-is.
            bootstrapCurrentProject = false;
            log(style.warning(` Skipped. Later: cd ${detectedProject} && npx phren add`));
        }
        else {
            // Offer the configured default first, then the remaining modes.
            bootstrapOwnership = await prompts.select("Ownership for detected project", [
                { value: projectOwnershipDefault, name: `${projectOwnershipDefault} (default)` },
                ...PROJECT_OWNERSHIP_MODES
                    .filter((mode) => mode !== projectOwnershipDefault)
                    .map((mode) => ({ value: mode, name: mode })),
            ], projectOwnershipDefault);
        }
    }
    const summaryItems = [
        `Storage: ${storageChoice} (${storagePath})`,
        `Machine: ${machine}`,
        `Profile: ${profile}`,
        `Domain: ${domain}`,
        `Project ownership default: ${projectOwnershipDefault}`,
        `MCP: ${mcp === "on" ? "enabled" : "disabled"}`,
        `Hooks: ${hooks === "on" ? "enabled" : "disabled"}`,
        `Auto-capture: ${autoCaptureEnabled ? "enabled" : "disabled"}`,
        `Findings capture level: ${findingsProactivity}`,
        `Task mode: ${taskMode}`,
        `Task proactivity: ${taskProactivity}`,
        `Low-confidence threshold: ${lowConfidenceThreshold}`,
        `Risky sections: ${riskySections.join(", ")}`,
        `Finding sensitivity: ${findingSensitivity}`,
        `Semantic search: ${ollamaEnabled ? "enabled" : "disabled"}`,
        `Semantic dedup: ${semanticDedupEnabled ? "enabled" : "disabled"}`,
        `Semantic conflict detection: ${semanticConflictEnabled ? "enabled" : "disabled"}`,
        `GitHub sync: ${githubUsername ? `${githubUsername}/${githubRepo ?? "my-phren"}` : "skipped"}`,
        `Add detected project: ${bootstrapCurrentProject ? `yes (${bootstrapOwnership ?? projectOwnershipDefault})` : "no"}`,
    ];
    if (inferredScaffold) {
        summaryItems.push(`Inference: ${inferredScaffold.reason}`);
    }
    printSummary(summaryItems);
    return {
        storageChoice,
        storagePath,
        storageRepoRoot,
        machine,
        profile,
        mcp,
        hooks,
        projectOwnershipDefault,
        findingsProactivity,
        taskProactivity,
        lowConfidenceThreshold,
        riskySections,
        taskMode,
        bootstrapCurrentProject,
        bootstrapOwnership,
        ollamaEnabled,
        autoCaptureEnabled,
        semanticDedupEnabled,
        semanticConflictEnabled,
        findingSensitivity,
        githubUsername,
        githubRepo,
        domain,
        // When the user overrode the inferred domain, keep the scaffold but
        // retarget its domain and clear topics that no longer apply.
        inferredScaffold: inferredScaffold
            ? (domain === inferredScaffold.domain
                ? inferredScaffold
                : { ...inferredScaffold, domain, topics: [] })
            : undefined,
    };
}
|
|
500
|
+
/**
 * Parse a user-entered confidence threshold, accepting only finite values in
 * the inclusive range [0, 1].
 *
 * @param {string|undefined} raw - Raw answer text from the prompt.
 * @param {number} fallback - Value returned for blank or invalid input.
 * @returns {number} The parsed threshold, or the fallback.
 */
function parseLowConfidenceThreshold(raw, fallback) {
    if (!raw)
        return fallback;
    const parsed = Number.parseFloat(raw.trim());
    const inRange = Number.isFinite(parsed) && parsed >= 0 && parsed <= 1;
    return inRange ? parsed : fallback;
}
|
|
508
|
+
/**
 * Normalize a comma/whitespace-separated risky-sections answer into canonical
 * section names, case-insensitively, deduplicated in first-seen order.
 *
 * @param {string|undefined} raw - User-entered list (e.g. "stale, conflicts").
 * @param {string[]} fallback - Canonical defaults used when nothing parses.
 * @returns {string[]} Canonical section names, or a copy of fallback.
 */
function parseRiskySectionsAnswer(raw, fallback) {
    if (!raw)
        return [...fallback];
    const aliases = {
        review: "Review",
        stale: "Stale",
        conflict: "Conflicts",
        conflicts: "Conflicts",
    };
    const parsed = raw
        .split(/[,\s]+/)
        // Guard with Object.hasOwn: a bare aliases[key] lookup would also
        // resolve inherited Object.prototype keys ("toString", "constructor",
        // …) to truthy functions, letting junk tokens leak into the result.
        .map((token) => {
            const key = token.trim().toLowerCase();
            return Object.hasOwn(aliases, key) ? aliases[key] : undefined;
        })
        .filter((section) => Boolean(section));
    if (!parsed.length)
        return [...fallback];
    return Array.from(new Set(parsed));
}
|