@phren/agent 0.1.3 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/dist/agent-loop/index.js +214 -0
  2. package/dist/agent-loop/stream.js +124 -0
  3. package/dist/agent-loop/types.js +13 -0
  4. package/dist/agent-loop.js +7 -333
  5. package/dist/commands/info.js +146 -0
  6. package/dist/commands/memory.js +165 -0
  7. package/dist/commands/model.js +138 -0
  8. package/dist/commands/session.js +213 -0
  9. package/dist/commands.js +24 -643
  10. package/dist/index.js +9 -4
  11. package/dist/mcp-client.js +11 -7
  12. package/dist/multi/multi-commands.js +170 -0
  13. package/dist/multi/multi-events.js +81 -0
  14. package/dist/multi/multi-render.js +146 -0
  15. package/dist/multi/pane.js +28 -0
  16. package/dist/multi/tui-multi.js +39 -454
  17. package/dist/permissions/allowlist.js +2 -2
  18. package/dist/providers/anthropic.js +4 -2
  19. package/dist/providers/codex.js +9 -4
  20. package/dist/providers/openai-compat.js +6 -1
  21. package/dist/tools/glob.js +30 -6
  22. package/dist/tui/ansi.js +48 -0
  23. package/dist/tui/components/AgentMessage.js +5 -0
  24. package/dist/tui/components/App.js +68 -0
  25. package/dist/tui/components/Banner.js +44 -0
  26. package/dist/tui/components/ChatMessage.js +23 -0
  27. package/dist/tui/components/InputArea.js +23 -0
  28. package/dist/tui/components/Separator.js +7 -0
  29. package/dist/tui/components/StatusBar.js +25 -0
  30. package/dist/tui/components/SteerQueue.js +7 -0
  31. package/dist/tui/components/StreamingText.js +5 -0
  32. package/dist/tui/components/ThinkingIndicator.js +26 -0
  33. package/dist/tui/components/ToolCall.js +11 -0
  34. package/dist/tui/components/UserMessage.js +5 -0
  35. package/dist/tui/hooks/useKeyboardShortcuts.js +89 -0
  36. package/dist/tui/hooks/useSlashCommands.js +52 -0
  37. package/dist/tui/index.js +5 -0
  38. package/dist/tui/ink-entry.js +287 -0
  39. package/dist/tui/menu-mode.js +86 -0
  40. package/dist/tui/tool-render.js +43 -0
  41. package/dist/tui.js +149 -280
  42. package/package.json +9 -2
@@ -1,333 +1,7 @@
1
- import { createSpinner, formatTurnHeader, formatToolCall } from "./spinner.js";
2
- import { searchErrorRecovery } from "./memory/error-recovery.js";
3
- import { shouldPrune, pruneMessages } from "./context/pruner.js";
4
- import { estimateMessageTokens } from "./context/token-counter.js";
5
- import { withRetry } from "./providers/retry.js";
6
- import { createCaptureState, analyzeAndCapture } from "./memory/auto-capture.js";
7
- import { AntiPatternTracker } from "./memory/anti-patterns.js";
8
- import { createFlushConfig, checkFlushNeeded } from "./memory/context-flush.js";
9
- import { injectPlanPrompt, requestPlanApproval } from "./plan.js";
10
- import { detectLintCommand, detectTestCommand, runPostEditCheck } from "./tools/lint-test.js";
11
- import { createCheckpoint } from "./checkpoint.js";
12
- const MAX_TOOL_CONCURRENCY = 5;
13
- export function createSession(contextLimit) {
14
- return {
15
- messages: [],
16
- turns: 0,
17
- toolCalls: 0,
18
- captureState: createCaptureState(),
19
- antiPatterns: new AntiPatternTracker(),
20
- flushConfig: createFlushConfig(contextLimit ?? 200_000),
21
- };
22
- }
23
- /** Run tool blocks with concurrency limit. */
24
- async function runToolsConcurrently(blocks, registry) {
25
- const results = [];
26
- for (let i = 0; i < blocks.length; i += MAX_TOOL_CONCURRENCY) {
27
- const batch = blocks.slice(i, i + MAX_TOOL_CONCURRENCY);
28
- const batchResults = await Promise.all(batch.map(async (block) => {
29
- const TOOL_TIMEOUT_MS = 120_000;
30
- try {
31
- const result = await Promise.race([
32
- registry.execute(block.name, block.input),
33
- new Promise((_, reject) => setTimeout(() => reject(new Error(`Tool '${block.name}' timed out after ${TOOL_TIMEOUT_MS / 1000}s`)), TOOL_TIMEOUT_MS)),
34
- ]);
35
- return { block, output: result.output, is_error: !!result.is_error };
36
- }
37
- catch (err) {
38
- const msg = err instanceof Error ? err.message : String(err);
39
- return { block, output: msg, is_error: true };
40
- }
41
- }));
42
- results.push(...batchResults);
43
- }
44
- return results;
45
- }
46
- /** Consume a chatStream into ContentBlock[] + stop_reason, streaming text via callback. */
47
- async function consumeStream(stream, costTracker, onTextDelta) {
48
- const content = [];
49
- let stop_reason = "end_turn";
50
- let currentText = "";
51
- // Map block index -> tool state for Anthropic-style index-based IDs
52
- const toolsByIndex = new Map();
53
- for await (const delta of stream) {
54
- if (delta.type === "text_delta") {
55
- (onTextDelta ?? process.stdout.write.bind(process.stdout))(delta.text);
56
- currentText += delta.text;
57
- }
58
- else if (delta.type === "tool_use_start") {
59
- // Flush accumulated text
60
- if (currentText) {
61
- content.push({ type: "text", text: currentText });
62
- currentText = "";
63
- }
64
- toolsByIndex.set(delta.id, { id: delta.id, name: delta.name, jsonParts: [] });
65
- }
66
- else if (delta.type === "tool_use_delta") {
67
- const tool = toolsByIndex.get(delta.id);
68
- if (tool)
69
- tool.jsonParts.push(delta.json);
70
- }
71
- else if (delta.type === "tool_use_end") {
72
- const tool = toolsByIndex.get(delta.id);
73
- if (tool) {
74
- const jsonStr = tool.jsonParts.join("");
75
- let input = {};
76
- try {
77
- input = JSON.parse(jsonStr);
78
- }
79
- catch {
80
- process.stderr.write(`\x1b[33m[warning] Malformed tool_use JSON for ${tool.name} (${tool.id}), skipping block\x1b[0m\n`);
81
- continue;
82
- }
83
- content.push({ type: "tool_use", id: tool.id, name: tool.name, input });
84
- }
85
- }
86
- else if (delta.type === "done") {
87
- stop_reason = delta.stop_reason;
88
- if (costTracker && delta.usage) {
89
- costTracker.recordUsage(delta.usage.input_tokens, delta.usage.output_tokens);
90
- }
91
- }
92
- }
93
- // Flush remaining text
94
- if (currentText) {
95
- if (!currentText.endsWith("\n")) {
96
- (onTextDelta ?? process.stdout.write.bind(process.stdout))("\n");
97
- }
98
- content.push({ type: "text", text: currentText });
99
- }
100
- return { content, stop_reason };
101
- }
102
- export async function runTurn(userInput, session, config, hooks) {
103
- const { provider, registry, maxTurns, verbose, costTracker } = config;
104
- let systemPrompt = config.systemPrompt;
105
- const toolDefs = registry.getDefinitions();
106
- const spinner = createSpinner();
107
- const useStream = typeof provider.chatStream === "function";
108
- const status = hooks?.onStatus ?? ((msg) => process.stderr.write(msg));
109
- // Plan mode: modify system prompt for first turn
110
- let planPending = config.plan && session.turns === 0;
111
- if (planPending) {
112
- systemPrompt = injectPlanPrompt(systemPrompt);
113
- }
114
- // Append user message
115
- session.messages.push({ role: "user", content: userInput });
116
- let turnToolCalls = 0;
117
- const turnStart = session.turns;
118
- while (session.turns - turnStart < maxTurns) {
119
- // Budget check
120
- if (costTracker?.isOverBudget()) {
121
- status(`\x1b[33m[budget exceeded: ${costTracker.formatCost()}]\x1b[0m\n`);
122
- break;
123
- }
124
- if (verbose && session.turns > turnStart) {
125
- status(`\n${formatTurnHeader(session.turns + 1, turnToolCalls)}\n`);
126
- }
127
- // Check if context flush is needed (one-time per session) — must run before pruning
128
- const contextLimit = provider.contextWindow ?? 200_000;
129
- const flushPrompt = checkFlushNeeded(systemPrompt, session.messages, session.flushConfig);
130
- if (flushPrompt) {
131
- session.messages.push({ role: "user", content: flushPrompt });
132
- if (verbose)
133
- status("[context flush injected]\n");
134
- }
135
- // Prune context if approaching limit
136
- if (shouldPrune(systemPrompt, session.messages, { contextLimit })) {
137
- const preCount = session.messages.length;
138
- const preTokens = estimateMessageTokens(session.messages);
139
- session.messages = pruneMessages(session.messages, { contextLimit, keepRecentTurns: 6 });
140
- const postCount = session.messages.length;
141
- const postTokens = estimateMessageTokens(session.messages);
142
- const reduction = preTokens > 0 ? ((1 - postTokens / preTokens) * 100).toFixed(0) : "0";
143
- const fmtPre = preTokens >= 1000 ? `${(preTokens / 1000).toFixed(1)}k` : String(preTokens);
144
- const fmtPost = postTokens >= 1000 ? `${(postTokens / 1000).toFixed(1)}k` : String(postTokens);
145
- status(`\x1b[2m[context pruned: ${preCount} → ${postCount} messages, ~${fmtPre} → ~${fmtPost} tokens, ${reduction}% reduction]\x1b[0m\n`);
146
- }
147
- // For plan mode first turn, pass empty tools so LLM can't call any
148
- const turnTools = planPending ? [] : toolDefs;
149
- let assistantContent;
150
- let stopReason;
151
- if (useStream) {
152
- // Streaming path — retry the initial connection (before consuming deltas)
153
- const stream = await withRetry(async () => provider.chatStream(systemPrompt, session.messages, turnTools), undefined, verbose);
154
- const result = await consumeStream(stream, costTracker, hooks?.onTextDelta);
155
- assistantContent = result.content;
156
- stopReason = result.stop_reason;
157
- }
158
- else {
159
- // Batch path
160
- spinner.start("Thinking...");
161
- const response = await withRetry(() => provider.chat(systemPrompt, session.messages, turnTools), undefined, verbose);
162
- spinner.stop();
163
- assistantContent = response.content;
164
- stopReason = response.stop_reason;
165
- // Track cost from batch response
166
- if (costTracker && response.usage) {
167
- costTracker.recordUsage(response.usage.input_tokens, response.usage.output_tokens);
168
- }
169
- // Print text blocks (streaming already prints inline)
170
- for (const block of assistantContent) {
171
- if (block.type === "text" && block.text) {
172
- if (hooks?.onTextBlock) {
173
- hooks.onTextBlock(block.text);
174
- }
175
- else {
176
- process.stdout.write(block.text);
177
- if (!block.text.endsWith("\n"))
178
- process.stdout.write("\n");
179
- }
180
- }
181
- }
182
- }
183
- session.messages.push({ role: "assistant", content: assistantContent });
184
- session.turns++;
185
- // Show turn cost
186
- if (verbose && costTracker) {
187
- status(`\x1b[2m cost: ${costTracker.formatCost()}\x1b[0m\n`);
188
- }
189
- // Plan mode gate: after first response, ask for approval
190
- if (planPending) {
191
- planPending = false;
192
- const { approved, feedback } = await requestPlanApproval();
193
- if (!approved) {
194
- // Always restore original system prompt on rejection to prevent plan prompt leaking
195
- systemPrompt = config.systemPrompt;
196
- const msg = feedback
197
- ? `The user rejected the plan with feedback: ${feedback}\nPlease revise your plan.`
198
- : "The user rejected the plan. Task aborted.";
199
- if (feedback) {
200
- // Let the LLM revise — add feedback as user message and continue
201
- session.messages.push({ role: "user", content: msg });
202
- continue;
203
- }
204
- break;
205
- }
206
- // Approved — restore original system prompt and continue with tools enabled
207
- systemPrompt = config.systemPrompt;
208
- session.messages.push({ role: "user", content: "Plan approved. Proceed with execution." });
209
- continue;
210
- }
211
- // If max_tokens, warn user and inject continuation prompt
212
- if (stopReason === "max_tokens") {
213
- status("\x1b[33m[response truncated: max_tokens reached, requesting continuation]\x1b[0m\n");
214
- session.messages.push({ role: "user", content: "Your response was truncated due to length. Please continue where you left off." });
215
- continue;
216
- }
217
- // If no tool use, we're done
218
- if (stopReason !== "tool_use")
219
- break;
220
- // Execute tool calls with concurrency
221
- const toolUseBlocks = assistantContent.filter((b) => b.type === "tool_use");
222
- // Log all tool calls upfront
223
- if (hooks?.onToolStart) {
224
- for (const block of toolUseBlocks)
225
- hooks.onToolStart(block.name, block.input, toolUseBlocks.length);
226
- }
227
- else {
228
- for (const block of toolUseBlocks)
229
- status(formatToolCall(block.name, block.input) + "\n");
230
- }
231
- if (!hooks?.onToolStart)
232
- spinner.start(`Running ${toolUseBlocks.length} tool${toolUseBlocks.length > 1 ? "s" : ""}...`);
233
- const execResults = await runToolsConcurrently(toolUseBlocks, registry);
234
- if (!hooks?.onToolStart)
235
- spinner.stop();
236
- const toolResults = [];
237
- for (const { block, output, is_error } of execResults) {
238
- session.toolCalls++;
239
- turnToolCalls++;
240
- let finalOutput = output;
241
- // Record for anti-pattern tracking
242
- session.antiPatterns.recordAttempt(block.name, block.input, !is_error, output);
243
- // Append phren recovery context on tool errors
244
- if (is_error && config.phrenCtx) {
245
- try {
246
- const recovery = await searchErrorRecovery(config.phrenCtx, output);
247
- if (recovery)
248
- finalOutput += recovery;
249
- }
250
- catch { /* best effort */ }
251
- // Auto-capture error patterns
252
- try {
253
- await analyzeAndCapture(config.phrenCtx, output, session.captureState);
254
- }
255
- catch { /* best effort */ }
256
- }
257
- if (hooks?.onToolEnd) {
258
- hooks.onToolEnd(block.name, block.input, finalOutput, is_error, 0);
259
- }
260
- else if (verbose) {
261
- const preview = finalOutput.slice(0, 200);
262
- status(`\x1b[2m ← ${is_error ? "ERROR: " : ""}${preview}${finalOutput.length > 200 ? "..." : ""}\x1b[0m\n`);
263
- }
264
- toolResults.push({
265
- type: "tool_result",
266
- tool_use_id: block.id,
267
- content: finalOutput,
268
- is_error,
269
- });
270
- }
271
- // Post-edit lint/test check
272
- const mutatingTools = new Set(["edit_file", "write_file"]);
273
- const hasMutation = toolUseBlocks.some(b => mutatingTools.has(b.name));
274
- if (hasMutation && config.lintTestConfig) {
275
- const cwd = process.cwd();
276
- const lintCmd = config.lintTestConfig.lintCmd ?? detectLintCommand(cwd);
277
- const testCmd = config.lintTestConfig.testCmd ?? detectTestCommand(cwd);
278
- const lintFailures = [];
279
- for (const cmd of [lintCmd, testCmd].filter(Boolean)) {
280
- const check = runPostEditCheck(cmd, cwd);
281
- if (!check.passed) {
282
- if (verbose)
283
- status(`\x1b[33m[post-edit check failed: ${cmd}]\x1b[0m\n`);
284
- lintFailures.push(`Post-edit check failed (${cmd}):\n${check.output.slice(0, 2000)}`);
285
- }
286
- }
287
- if (lintFailures.length > 0) {
288
- // Inject as plain text in the tool results user message (not as a fabricated tool_result)
289
- toolResults.push({
290
- type: "text",
291
- text: lintFailures.join("\n\n"),
292
- });
293
- }
294
- }
295
- // Create checkpoint before mutating tool results are committed to conversation
296
- if (hasMutation) {
297
- createCheckpoint(process.cwd(), `turn-${session.turns}`);
298
- }
299
- // Add tool results as a user message
300
- session.messages.push({ role: "user", content: toolResults });
301
- // Steering input injection (TUI mid-turn input)
302
- const steer = hooks?.getSteeringInput?.();
303
- if (steer) {
304
- session.messages.push({ role: "user", content: steer });
305
- }
306
- }
307
- // Extract text from the last assistant message in this turn
308
- const lastAssistant = [...session.messages].reverse().find((m) => m.role === "assistant");
309
- let text = "";
310
- if (lastAssistant && Array.isArray(lastAssistant.content)) {
311
- text = lastAssistant.content
312
- .filter((b) => b.type === "text")
313
- .map((b) => b.text)
314
- .join("\n");
315
- }
316
- else if (lastAssistant && typeof lastAssistant.content === "string") {
317
- text = lastAssistant.content;
318
- }
319
- return { text, turns: session.turns - turnStart, toolCalls: turnToolCalls };
320
- }
321
- /** One-shot agent run — thin wrapper around createSession + runTurn. */
322
- export async function runAgent(task, config) {
323
- const contextLimit = config.provider.contextWindow ?? 200_000;
324
- const session = createSession(contextLimit);
325
- const result = await runTurn(task, session, config, config.hooks);
326
- return {
327
- finalText: result.text,
328
- turns: result.turns,
329
- toolCalls: result.toolCalls,
330
- totalCost: config.costTracker?.formatCost(),
331
- messages: session.messages,
332
- };
333
- }
1
+ /**
2
+ * Barrel re-export — the actual implementation lives in the agent-loop/ directory.
3
+ * This file exists so all existing `from "./agent-loop.js"` imports continue working.
4
+ */
5
+ export { createSession, } from "./agent-loop/types.js";
6
+ export { consumeStream, runToolsConcurrently } from "./agent-loop/stream.js";
7
+ export { runTurn, runAgent } from "./agent-loop/index.js";
@@ -0,0 +1,146 @@
1
+ import { estimateMessageTokens } from "../context/token-counter.js";
2
+ import { execSync } from "node:child_process";
3
+ const DIM = "\x1b[2m";
4
+ const BOLD = "\x1b[1m";
5
+ const CYAN = "\x1b[36m";
6
+ const GREEN = "\x1b[32m";
7
+ const RED = "\x1b[31m";
8
+ const YELLOW = "\x1b[33m";
9
+ const RESET = "\x1b[0m";
10
+ export function helpCommand(_parts, _ctx) {
11
+ process.stderr.write(`${DIM}Commands:
12
+ /help Show this help
13
+ /model Interactive model + reasoning picker
14
+ /model add <id> Add a custom model
15
+ /model remove <id> Remove a custom model
16
+ /provider Show configured providers + auth status
17
+ /turns Show turn and tool call counts
18
+ /clear Clear conversation history and terminal screen
19
+ /cwd Show current working directory
20
+ /files Quick file tree (max depth 2, first 30 files)
21
+ /cost Show token usage and estimated cost
22
+ /plan Show conversation plan (tool calls so far)
23
+ /undo Undo last user message and response
24
+ /history [n|full] Show last N messages (default 10) with rich formatting
25
+ /compact Compact conversation to save context space
26
+ /context Show context window usage and provider info
27
+ /mode Toggle input mode (steering ↔ queue)
28
+ /spawn <name> <task> Spawn a background agent
29
+ /agents List running agents
30
+ /session Show session info (id, duration, stats)
31
+ /session save Save conversation checkpoint
32
+ /session export Export conversation as JSON
33
+ /diff [--staged] Show git diff with syntax highlighting
34
+ /git <cmd> Run common git commands (status, log, stash, stash pop)
35
+ /ask <question> Quick LLM query (no tools, not added to session)
36
+ /mem search <query> Search phren memory directly
37
+ /mem findings [project] Show recent findings
38
+ /mem tasks [project] Show tasks
39
+ /mem add <finding> Quick-add a finding
40
+ /preset [name|save|delete|list] Config presets
41
+ /exit Exit the REPL${RESET}\n`);
42
+ return true;
43
+ }
44
+ export function turnsCommand(_parts, ctx) {
45
+ const tokens = estimateMessageTokens(ctx.session.messages);
46
+ const pct = ctx.contextLimit > 0 ? ((tokens / ctx.contextLimit) * 100).toFixed(1) : "?";
47
+ const costLine = ctx.costTracker ? ` Cost: $${ctx.costTracker.totalCost.toFixed(4)}` : "";
48
+ process.stderr.write(`${DIM}Turns: ${ctx.session.turns} Tool calls: ${ctx.session.toolCalls} ` +
49
+ `Messages: ${ctx.session.messages.length} Tokens: ~${tokens} (${pct}%)${costLine}${RESET}\n`);
50
+ return true;
51
+ }
52
+ export function clearCommand(_parts, ctx) {
53
+ ctx.session.messages.length = 0;
54
+ ctx.session.turns = 0;
55
+ ctx.session.toolCalls = 0;
56
+ ctx.undoStack.length = 0;
57
+ process.stdout.write("\x1b[2J\x1b[H");
58
+ process.stderr.write(`${DIM}Conversation cleared.${RESET}\n`);
59
+ return true;
60
+ }
61
+ export function cwdCommand(_parts, _ctx) {
62
+ process.stderr.write(`${DIM}${process.cwd()}${RESET}\n`);
63
+ return true;
64
+ }
65
+ export function filesCommand(_parts, _ctx) {
66
+ try {
67
+ const countRaw = execSync("find . -type f -not -path '*/node_modules/*' -not -path '*/.git/*' -not -path '*/dist/*' | wc -l", { encoding: "utf-8", timeout: 5_000, cwd: process.cwd() }).trim();
68
+ const total = parseInt(countRaw, 10) || 0;
69
+ const listRaw = execSync("find . -maxdepth 2 -type f -not -path '*/node_modules/*' -not -path '*/.git/*' -not -path '*/dist/*' | sort | head -30", { encoding: "utf-8", timeout: 5_000, cwd: process.cwd() }).trim();
70
+ if (!listRaw) {
71
+ process.stderr.write(`${DIM}No files found.${RESET}\n`);
72
+ }
73
+ else {
74
+ const lines = listRaw.split("\n");
75
+ const label = total > lines.length ? `${total} files (showing first ${lines.length})` : `${total} files`;
76
+ process.stderr.write(`${DIM}${label}\n${listRaw}${RESET}\n`);
77
+ }
78
+ }
79
+ catch (err) {
80
+ const e = err;
81
+ process.stderr.write(`${RED}${e.stderr || e.message || "find failed"}${RESET}\n`);
82
+ }
83
+ return true;
84
+ }
85
+ export function costCommand(_parts, ctx) {
86
+ const ct = ctx.costTracker;
87
+ if (ct) {
88
+ process.stderr.write(`${DIM}Tokens — input: ${ct.inputTokens} output: ${ct.outputTokens} est. cost: $${ct.totalCost.toFixed(4)}${RESET}\n`);
89
+ }
90
+ else {
91
+ process.stderr.write(`${DIM}Cost tracking not available.${RESET}\n`);
92
+ }
93
+ return true;
94
+ }
95
+ export function planCommand(_parts, ctx) {
96
+ const tools = [];
97
+ for (const msg of ctx.session.messages) {
98
+ if (typeof msg.content !== "string") {
99
+ for (const block of msg.content) {
100
+ if (block.type === "tool_use") {
101
+ tools.push(block.name);
102
+ }
103
+ }
104
+ }
105
+ }
106
+ if (tools.length === 0) {
107
+ process.stderr.write(`${DIM}No tool calls yet.${RESET}\n`);
108
+ }
109
+ else {
110
+ process.stderr.write(`${DIM}Tool calls (${tools.length}): ${tools.join(" → ")}${RESET}\n`);
111
+ }
112
+ return true;
113
+ }
114
+ export function undoCommand(_parts, ctx) {
115
+ if (ctx.session.messages.length < 2) {
116
+ process.stderr.write(`${DIM}Nothing to undo.${RESET}\n`);
117
+ return true;
118
+ }
119
+ let removed = 0;
120
+ while (ctx.session.messages.length > 0) {
121
+ const last = ctx.session.messages.pop();
122
+ removed++;
123
+ if (last?.role === "user" && typeof last.content === "string")
124
+ break;
125
+ }
126
+ process.stderr.write(`${DIM}Undid ${removed} messages.${RESET}\n`);
127
+ return true;
128
+ }
129
+ export function contextCommand(_parts, ctx) {
130
+ const ctxTokens = estimateMessageTokens(ctx.session.messages);
131
+ const ctxPct = ctx.contextLimit > 0 ? (ctxTokens / ctx.contextLimit) * 100 : 0;
132
+ const ctxPctStr = ctxPct.toFixed(1);
133
+ const ctxWindowK = ctx.contextLimit >= 1000 ? `${(ctx.contextLimit / 1000).toFixed(0)}k` : String(ctx.contextLimit);
134
+ const ctxTokensStr = ctxTokens >= 1000 ? `~${(ctxTokens / 1000).toFixed(1)}k` : `~${ctxTokens}`;
135
+ const filled = Math.round(ctxPct / 10);
136
+ const bar = "\u2588".repeat(Math.min(filled, 10)) + "\u2591".repeat(Math.max(10 - filled, 0));
137
+ const barColor = ctxPct > 80 ? RED : ctxPct > 50 ? YELLOW : GREEN;
138
+ const providerLabel = ctx.providerName ?? "unknown";
139
+ const modelLabel = ctx.currentModel ?? "default";
140
+ process.stderr.write(`${DIM} Messages: ${ctx.session.messages.length}\n` +
141
+ ` Tokens: ${ctxTokensStr} / ${ctxWindowK} (${ctxPctStr}%)\n` +
142
+ ` Provider: ${providerLabel} (${modelLabel})\n` +
143
+ ` Context window: ${ctxWindowK}\n` +
144
+ ` ${barColor}[${bar}]${RESET}${DIM} ${ctxPctStr}%${RESET}\n`);
145
+ return true;
146
+ }
@@ -0,0 +1,165 @@
1
+ import { renderMarkdown } from "../multi/markdown.js";
2
+ import { buildIndex } from "@phren/cli/shared";
3
+ import { searchKnowledgeRows, rankResults } from "@phren/cli/shared/retrieval";
4
+ import { readFindings } from "@phren/cli/data/access";
5
+ import { readTasks } from "@phren/cli/data/tasks";
6
+ import { addFinding } from "@phren/cli/core/finding";
7
+ const DIM = "\x1b[2m";
8
+ const BOLD = "\x1b[1m";
9
+ const CYAN = "\x1b[36m";
10
+ const GREEN = "\x1b[32m";
11
+ const RED = "\x1b[31m";
12
+ const RESET = "\x1b[0m";
13
+ export function memCommand(parts, ctx) {
14
+ const sub = parts[1]?.toLowerCase();
15
+ if (!ctx.phrenCtx) {
16
+ process.stderr.write(`${DIM}No phren context available.${RESET}\n`);
17
+ return true;
18
+ }
19
+ const pCtx = ctx.phrenCtx;
20
+ if (!sub || sub === "help") {
21
+ process.stderr.write(`${DIM}Usage:
22
+ /mem search <query> Search phren memory
23
+ /mem findings [project] Show recent findings
24
+ /mem tasks [project] Show tasks
25
+ /mem add <finding> Quick-add a finding${RESET}\n`);
26
+ return true;
27
+ }
28
+ if (sub === "search") {
29
+ const query = parts.slice(2).join(" ").trim();
30
+ if (!query) {
31
+ process.stderr.write(`${DIM}Usage: /mem search <query>${RESET}\n`);
32
+ return true;
33
+ }
34
+ return (async () => {
35
+ try {
36
+ const db = await buildIndex(pCtx.phrenPath, pCtx.profile);
37
+ const result = await searchKnowledgeRows(db, {
38
+ query,
39
+ maxResults: 10,
40
+ filterProject: pCtx.project || null,
41
+ filterType: null,
42
+ phrenPath: pCtx.phrenPath,
43
+ });
44
+ const ranked = rankResults(result.rows ?? [], query, null, pCtx.project || null, pCtx.phrenPath, db);
45
+ if (ranked.length === 0) {
46
+ process.stderr.write(`${DIM}No results found.${RESET}\n`);
47
+ }
48
+ else {
49
+ const lines = ranked.slice(0, 10).map((r, i) => {
50
+ const snippet = r.content?.slice(0, 200) ?? "";
51
+ return ` ${CYAN}${i + 1}.${RESET} ${DIM}[${r.project}/${r.filename}]${RESET} ${snippet}`;
52
+ });
53
+ process.stderr.write(lines.join("\n") + "\n");
54
+ }
55
+ }
56
+ catch (err) {
57
+ process.stderr.write(`${RED}Search failed: ${err instanceof Error ? err.message : String(err)}${RESET}\n`);
58
+ }
59
+ return true;
60
+ })();
61
+ }
62
+ if (sub === "findings") {
63
+ const project = parts[2] || pCtx.project;
64
+ if (!project) {
65
+ process.stderr.write(`${DIM}Usage: /mem findings <project>${RESET}\n`);
66
+ return true;
67
+ }
68
+ const result = readFindings(pCtx.phrenPath, project);
69
+ if (!result.ok) {
70
+ process.stderr.write(`${RED}${result.error}${RESET}\n`);
71
+ return true;
72
+ }
73
+ const items = result.data ?? [];
74
+ if (items.length === 0) {
75
+ process.stderr.write(`${DIM}No findings for ${project}.${RESET}\n`);
76
+ return true;
77
+ }
78
+ const recent = items.slice(-15);
79
+ const lines = recent.map((f) => ` ${DIM}${f.date}${RESET} ${f.text.slice(0, 120)}${f.text.length > 120 ? "..." : ""}`);
80
+ process.stderr.write(`${DIM}-- Findings (${items.length} total, showing last ${recent.length}) --${RESET}\n`);
81
+ process.stderr.write(lines.join("\n") + "\n");
82
+ return true;
83
+ }
84
+ if (sub === "tasks") {
85
+ const project = parts[2] || pCtx.project;
86
+ if (!project) {
87
+ process.stderr.write(`${DIM}Usage: /mem tasks <project>${RESET}\n`);
88
+ return true;
89
+ }
90
+ const result = readTasks(pCtx.phrenPath, project);
91
+ if (!result.ok) {
92
+ process.stderr.write(`${RED}${result.error}${RESET}\n`);
93
+ return true;
94
+ }
95
+ const sections = [];
96
+ for (const [section, items] of Object.entries(result.data.items)) {
97
+ if (section === "Done")
98
+ continue;
99
+ if (items.length === 0)
100
+ continue;
101
+ const lines = items.map((t) => {
102
+ const icon = t.checked ? `${GREEN}\u2713${RESET}` : `${DIM}\u25CB${RESET}`;
103
+ return ` ${icon} ${t.line}`;
104
+ });
105
+ sections.push(`${BOLD}${section}${RESET}\n${lines.join("\n")}`);
106
+ }
107
+ if (sections.length === 0) {
108
+ process.stderr.write(`${DIM}No active tasks for ${project}.${RESET}\n`);
109
+ }
110
+ else {
111
+ process.stderr.write(sections.join("\n") + "\n");
112
+ }
113
+ return true;
114
+ }
115
+ if (sub === "add") {
116
+ const finding = parts.slice(2).join(" ").trim();
117
+ if (!finding) {
118
+ process.stderr.write(`${DIM}Usage: /mem add <finding text>${RESET}\n`);
119
+ return true;
120
+ }
121
+ const project = pCtx.project;
122
+ if (!project) {
123
+ process.stderr.write(`${DIM}No project context. Cannot add finding without a project.${RESET}\n`);
124
+ return true;
125
+ }
126
+ const result = addFinding(pCtx.phrenPath, project, finding);
127
+ if (result.ok) {
128
+ process.stderr.write(`${GREEN}-> Finding saved to ${project}.${RESET}\n`);
129
+ }
130
+ else {
131
+ process.stderr.write(`${RED}${result.message ?? "Failed to save finding."}${RESET}\n`);
132
+ }
133
+ return true;
134
+ }
135
+ process.stderr.write(`${DIM}Unknown /mem subcommand: ${sub}. Try /mem help${RESET}\n`);
136
+ return true;
137
+ }
138
+ export function askCommand(parts, ctx) {
139
+ const question = parts.slice(1).join(" ").trim();
140
+ if (!question) {
141
+ process.stderr.write(`${DIM}Usage: /ask <question>${RESET}\n`);
142
+ return true;
143
+ }
144
+ if (!ctx.provider) {
145
+ process.stderr.write(`${DIM}Provider not available for /ask.${RESET}\n`);
146
+ return true;
147
+ }
148
+ const provider = ctx.provider;
149
+ const sysPrompt = ctx.systemPrompt ?? "You are a helpful assistant.";
150
+ return (async () => {
151
+ process.stderr.write(`${DIM}\u25C6 quick answer (no tools):${RESET}\n`);
152
+ try {
153
+ const response = await provider.chat(sysPrompt, [{ role: "user", content: question }], []);
154
+ for (const block of response.content) {
155
+ if (block.type === "text") {
156
+ process.stderr.write(renderMarkdown(block.text) + "\n");
157
+ }
158
+ }
159
+ }
160
+ catch (err) {
161
+ process.stderr.write(`${RED}${err instanceof Error ? err.message : String(err)}${RESET}\n`);
162
+ }
163
+ return true;
164
+ })();
165
+ }