@phren/agent 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/dist/agent-loop/index.js +214 -0
  2. package/dist/agent-loop/stream.js +124 -0
  3. package/dist/agent-loop/types.js +13 -0
  4. package/dist/agent-loop.js +7 -326
  5. package/dist/commands/info.js +146 -0
  6. package/dist/commands/memory.js +165 -0
  7. package/dist/commands/model.js +138 -0
  8. package/dist/commands/session.js +213 -0
  9. package/dist/commands.js +25 -297
  10. package/dist/config.js +6 -2
  11. package/dist/index.js +10 -4
  12. package/dist/mcp-client.js +11 -7
  13. package/dist/multi/multi-commands.js +170 -0
  14. package/dist/multi/multi-events.js +81 -0
  15. package/dist/multi/multi-render.js +146 -0
  16. package/dist/multi/pane.js +28 -0
  17. package/dist/multi/spawner.js +3 -2
  18. package/dist/multi/tui-multi.js +39 -454
  19. package/dist/permissions/allowlist.js +2 -2
  20. package/dist/permissions/shell-safety.js +8 -0
  21. package/dist/providers/anthropic.js +72 -33
  22. package/dist/providers/codex.js +121 -60
  23. package/dist/providers/openai-compat.js +6 -1
  24. package/dist/repl.js +2 -2
  25. package/dist/system-prompt.js +24 -26
  26. package/dist/tools/glob.js +30 -6
  27. package/dist/tools/shell.js +5 -2
  28. package/dist/tui/ansi.js +48 -0
  29. package/dist/tui/components/AgentMessage.js +5 -0
  30. package/dist/tui/components/App.js +70 -0
  31. package/dist/tui/components/Banner.js +44 -0
  32. package/dist/tui/components/ChatMessage.js +23 -0
  33. package/dist/tui/components/InputArea.js +23 -0
  34. package/dist/tui/components/Separator.js +7 -0
  35. package/dist/tui/components/StatusBar.js +25 -0
  36. package/dist/tui/components/SteerQueue.js +7 -0
  37. package/dist/tui/components/StreamingText.js +5 -0
  38. package/dist/tui/components/ThinkingIndicator.js +20 -0
  39. package/dist/tui/components/ToolCall.js +11 -0
  40. package/dist/tui/components/UserMessage.js +5 -0
  41. package/dist/tui/hooks/useKeyboardShortcuts.js +89 -0
  42. package/dist/tui/hooks/useSlashCommands.js +52 -0
  43. package/dist/tui/index.js +5 -0
  44. package/dist/tui/ink-entry.js +271 -0
  45. package/dist/tui/menu-mode.js +86 -0
  46. package/dist/tui/tool-render.js +43 -0
  47. package/dist/tui.js +378 -252
  48. package/package.json +9 -2
@@ -4,28 +4,15 @@ export class AnthropicProvider {
4
4
  maxOutputTokens;
5
5
  apiKey;
6
6
  model;
7
- constructor(apiKey, model, maxOutputTokens) {
7
+ cacheEnabled;
8
+ constructor(apiKey, model, maxOutputTokens, cacheEnabled = true) {
8
9
  this.apiKey = apiKey;
9
10
  this.model = model ?? "claude-sonnet-4-20250514";
10
11
  this.maxOutputTokens = maxOutputTokens ?? 8192;
12
+ this.cacheEnabled = cacheEnabled;
11
13
  }
12
14
  async chat(system, messages, tools) {
13
- const body = {
14
- model: this.model,
15
- system,
16
- messages: messages.map((m) => ({
17
- role: m.role,
18
- content: m.content,
19
- })),
20
- max_tokens: this.maxOutputTokens,
21
- };
22
- if (tools.length > 0) {
23
- body.tools = tools.map((t) => ({
24
- name: t.name,
25
- description: t.description,
26
- input_schema: t.input_schema,
27
- }));
28
- }
15
+ const body = this.buildRequestBody(system, messages, tools);
29
16
  const res = await fetch("https://api.anthropic.com/v1/messages", {
30
17
  method: "POST",
31
18
  headers: {
@@ -45,6 +32,7 @@ export class AnthropicProvider {
45
32
  : data.stop_reason === "max_tokens" ? "max_tokens"
46
33
  : "end_turn";
47
34
  const usage = data.usage;
35
+ logCacheUsage(usage);
48
36
  return {
49
37
  content,
50
38
  stop_reason: stop_reason,
@@ -52,20 +40,8 @@ export class AnthropicProvider {
52
40
  };
53
41
  }
54
42
  async *chatStream(system, messages, tools) {
55
- const body = {
56
- model: this.model,
57
- system,
58
- messages: messages.map((m) => ({ role: m.role, content: m.content })),
59
- max_tokens: this.maxOutputTokens,
60
- stream: true,
61
- };
62
- if (tools.length > 0) {
63
- body.tools = tools.map((t) => ({
64
- name: t.name,
65
- description: t.description,
66
- input_schema: t.input_schema,
67
- }));
68
- }
43
+ const body = this.buildRequestBody(system, messages, tools);
44
+ body.stream = true;
69
45
  const res = await fetch("https://api.anthropic.com/v1/messages", {
70
46
  method: "POST",
71
47
  headers: {
@@ -118,26 +94,89 @@ export class AnthropicProvider {
118
94
  stopReason = "tool_use";
119
95
  else if (delta.stop_reason === "max_tokens")
120
96
  stopReason = "max_tokens";
97
+ // message_delta carries output_tokens — merge with existing input_tokens from message_start
121
98
  const u = data.usage;
122
99
  if (u) {
123
100
  usage = {
124
- input_tokens: u.input_tokens ?? 0,
101
+ input_tokens: usage?.input_tokens ?? 0,
125
102
  output_tokens: u.output_tokens ?? 0,
126
103
  };
127
104
  }
128
105
  }
129
106
  else if (type === "message_start") {
107
+ // message_start carries input_tokens — initialize usage
130
108
  const u = data.message?.usage;
131
109
  if (u) {
110
+ logCacheUsage(u);
132
111
  usage = {
133
112
  input_tokens: u.input_tokens ?? 0,
134
- output_tokens: u.output_tokens ?? 0,
113
+ output_tokens: usage?.output_tokens ?? 0,
135
114
  };
136
115
  }
137
116
  }
138
117
  }
139
118
  yield { type: "done", stop_reason: stopReason, usage };
140
119
  }
120
+ /** Build the request body with optional prompt caching breakpoints. */
121
+ buildRequestBody(system, messages, tools) {
122
+ const cache = { cache_control: { type: "ephemeral" } };
123
+ // System prompt: use content array format with cache_control on the text block
124
+ const systemValue = this.cacheEnabled
125
+ ? [{ type: "text", text: system, ...cache }]
126
+ : system;
127
+ const mappedMessages = messages.map((m) => ({ role: m.role, content: m.content }));
128
+ // Mark the last 2 user messages with cache_control for recent-context caching
129
+ if (this.cacheEnabled) {
130
+ let marked = 0;
131
+ for (let i = mappedMessages.length - 1; i >= 0 && marked < 2; i--) {
132
+ if (mappedMessages[i].role !== "user")
133
+ continue;
134
+ const c = mappedMessages[i].content;
135
+ if (typeof c === "string") {
136
+ mappedMessages[i] = {
137
+ role: "user",
138
+ content: [{ type: "text", text: c, ...cache }],
139
+ };
140
+ }
141
+ else if (Array.isArray(c) && c.length > 0) {
142
+ // Add cache_control to the last block of the content array
143
+ const blocks = [...c];
144
+ blocks[blocks.length - 1] = { ...blocks[blocks.length - 1], ...cache };
145
+ mappedMessages[i] = { role: "user", content: blocks };
146
+ }
147
+ marked++;
148
+ }
149
+ }
150
+ const body = {
151
+ model: this.model,
152
+ system: systemValue,
153
+ messages: mappedMessages,
154
+ max_tokens: this.maxOutputTokens,
155
+ };
156
+ if (tools.length > 0) {
157
+ const mappedTools = tools.map((t) => ({
158
+ name: t.name,
159
+ description: t.description,
160
+ input_schema: t.input_schema,
161
+ }));
162
+ // Cache the last tool definition — Anthropic uses it as the breakpoint for the entire tools block
163
+ if (this.cacheEnabled) {
164
+ mappedTools[mappedTools.length - 1] = { ...mappedTools[mappedTools.length - 1], ...cache };
165
+ }
166
+ body.tools = mappedTools;
167
+ }
168
+ return body;
169
+ }
170
+ }
171
+ /** Log cache hit/creation stats to stderr (visible in verbose mode). */
172
+ function logCacheUsage(usage) {
173
+ if (!usage)
174
+ return;
175
+ const created = usage.cache_creation_input_tokens;
176
+ const read = usage.cache_read_input_tokens;
177
+ if (created || read) {
178
+ process.stderr.write(`[cache] created=${created ?? 0} read=${read ?? 0} input=${usage.input_tokens ?? 0}\n`);
179
+ }
141
180
  }
142
181
  /** Parse SSE stream from a fetch Response. */
143
182
  async function* parseSSE(res) {
@@ -10,7 +10,7 @@ function toResponsesTools(tools) {
10
10
  }));
11
11
  }
12
12
  /** Convert our messages to Responses API input format. */
13
- function toResponsesInput(system, messages) {
13
+ function toResponsesInput(messages) {
14
14
  const input = [];
15
15
  for (const msg of messages) {
16
16
  if (msg.role === "user") {
@@ -91,11 +91,16 @@ function parseResponsesOutput(data) {
91
91
  }
92
92
  else if (item.type === "function_call") {
93
93
  hasToolUse = true;
94
+ let input = {};
95
+ try {
96
+ input = JSON.parse(item.arguments);
97
+ }
98
+ catch { /* malformed arguments */ }
94
99
  content.push({
95
100
  type: "tool_use",
96
101
  id: item.call_id,
97
102
  name: item.name,
98
- input: JSON.parse(item.arguments),
103
+ input,
99
104
  });
100
105
  }
101
106
  }
@@ -125,7 +130,7 @@ export class CodexProvider {
125
130
  const body = {
126
131
  model: this.model,
127
132
  instructions: system,
128
- input: toResponsesInput(system, messages),
133
+ input: toResponsesInput(messages),
129
134
  store: false,
130
135
  stream: true,
131
136
  };
@@ -185,7 +190,7 @@ export class CodexProvider {
185
190
  const body = {
186
191
  model: this.model,
187
192
  instructions: system,
188
- input: toResponsesInput(system, messages),
193
+ input: toResponsesInput(messages),
189
194
  store: false,
190
195
  stream: true,
191
196
  include: ["reasoning.encrypted_content"],
@@ -194,73 +199,129 @@ export class CodexProvider {
194
199
  body.tools = toResponsesTools(tools);
195
200
  body.tool_choice = "auto";
196
201
  }
197
- const res = await fetch(CODEX_API, {
198
- method: "POST",
202
+ // Use WebSocket for true token-by-token streaming (matches Codex CLI behavior).
203
+ // The HTTP SSE endpoint batches the entire response before flushing.
204
+ yield* this.chatStreamWs(accessToken, body);
205
+ }
206
+ /** WebSocket streaming — sends request, yields deltas as they arrive. */
207
+ async *chatStreamWs(accessToken, body) {
208
+ const wsUrl = CODEX_API.replace(/^https:/, "wss:").replace(/^http:/, "ws:");
209
+ // Queue for events received from the WebSocket before the consumer pulls them
210
+ const queue = [];
211
+ let resolve = null;
212
+ let done = false;
213
+ const push = (item) => {
214
+ queue.push(item);
215
+ if (resolve) {
216
+ resolve();
217
+ resolve = null;
218
+ }
219
+ };
220
+ // Node.js (undici) WebSocket accepts headers in the second argument object,
221
+ // but the DOM typings only allow string | string[]. Cast to bypass.
222
+ const ws = new WebSocket(wsUrl, {
199
223
  headers: {
200
- "Content-Type": "application/json",
201
224
  Authorization: `Bearer ${accessToken}`,
202
225
  },
203
- body: JSON.stringify(body),
204
226
  });
205
- if (!res.ok) {
206
- const text = await res.text();
207
- throw new Error(`Codex API error ${res.status}: ${text}`);
208
- }
209
- // Parse SSE stream
210
- if (!res.body)
211
- throw new Error("Provider returned empty response body");
212
- const reader = res.body.getReader();
213
- const decoder = new TextDecoder();
214
- let buffer = "";
215
227
  let activeToolCallId = "";
216
- while (true) {
217
- const { done, value } = await reader.read();
218
- if (done)
219
- break;
220
- buffer += decoder.decode(value, { stream: true });
221
- const lines = buffer.split("\n");
222
- buffer = lines.pop();
223
- for (const line of lines) {
224
- if (!line.startsWith("data: "))
225
- continue;
226
- const data = line.slice(6).trim();
227
- if (data === "[DONE]")
228
- return;
229
- let event;
228
+ ws.addEventListener("open", () => {
229
+ // Wrap the request body in a response.create envelope (Codex WS protocol)
230
+ const wsRequest = { type: "response.create", ...body };
231
+ ws.send(JSON.stringify(wsRequest));
232
+ });
233
+ ws.addEventListener("message", (evt) => {
234
+ const data = typeof evt.data === "string" ? evt.data : String(evt.data);
235
+ let event;
236
+ try {
237
+ event = JSON.parse(data);
238
+ }
239
+ catch {
240
+ return;
241
+ }
242
+ const type = event.type;
243
+ // Handle server-side errors
244
+ if (type === "error") {
245
+ const err = event.error;
246
+ const msg = err?.message ?? "Codex WebSocket error";
247
+ const status = event.status;
248
+ push(new Error(`Codex WS error${status ? ` ${status}` : ""}: ${msg}`));
249
+ done = true;
230
250
  try {
231
- event = JSON.parse(data);
232
- }
233
- catch {
234
- continue;
235
- }
236
- const type = event.type;
237
- if (type === "response.output_text.delta") {
238
- yield { type: "text_delta", text: event.delta };
251
+ ws.close();
239
252
  }
240
- else if (type === "response.output_item.added") {
241
- if (event.item?.type === "function_call") {
242
- const item = event.item;
243
- activeToolCallId = item.call_id;
244
- yield { type: "tool_use_start", id: activeToolCallId, name: item.name };
245
- }
253
+ catch { /* ignore */ }
254
+ return;
255
+ }
256
+ if (type === "response.output_text.delta") {
257
+ const delta = event.delta;
258
+ if (delta)
259
+ push({ type: "text_delta", text: delta });
260
+ }
261
+ else if (type === "response.output_item.added") {
262
+ if (event.item?.type === "function_call") {
263
+ const item = event.item;
264
+ activeToolCallId = item.call_id;
265
+ push({ type: "tool_use_start", id: activeToolCallId, name: item.name });
246
266
  }
247
- else if (type === "response.function_call_arguments.delta") {
248
- yield { type: "tool_use_delta", id: activeToolCallId, json: event.delta };
267
+ }
268
+ else if (type === "response.function_call_arguments.delta") {
269
+ push({ type: "tool_use_delta", id: activeToolCallId, json: event.delta });
270
+ }
271
+ else if (type === "response.function_call_arguments.done") {
272
+ push({ type: "tool_use_end", id: activeToolCallId });
273
+ }
274
+ else if (type === "response.completed") {
275
+ const response = event.response;
276
+ const usage = response?.usage;
277
+ const output = response?.output;
278
+ const hasToolCalls = output?.some((o) => o.type === "function_call");
279
+ push({
280
+ type: "done",
281
+ stop_reason: hasToolCalls ? "tool_use" : "end_turn",
282
+ usage: usage ? { input_tokens: usage.input_tokens ?? 0, output_tokens: usage.output_tokens ?? 0 } : undefined,
283
+ });
284
+ done = true;
285
+ try {
286
+ ws.close();
249
287
  }
250
- else if (type === "response.function_call_arguments.done") {
251
- yield { type: "tool_use_end", id: activeToolCallId };
288
+ catch { /* ignore */ }
289
+ }
290
+ });
291
+ ws.addEventListener("error", () => {
292
+ if (!done) {
293
+ push(new Error("Codex WebSocket connection error"));
294
+ done = true;
295
+ }
296
+ });
297
+ ws.addEventListener("close", () => {
298
+ if (!done) {
299
+ push(new Error("Codex WebSocket closed before response.completed"));
300
+ done = true;
301
+ }
302
+ });
303
+ // Async iteration: drain the queue, wait for new events
304
+ try {
305
+ while (true) {
306
+ while (queue.length > 0) {
307
+ const item = queue.shift();
308
+ if (item instanceof Error)
309
+ throw item;
310
+ yield item;
311
+ if (item.type === "done")
312
+ return;
252
313
  }
253
- else if (type === "response.completed") {
254
- const response = event.response;
255
- const usage = response?.usage;
256
- const output = response?.output;
257
- const hasToolCalls = output?.some((o) => o.type === "function_call");
258
- yield {
259
- type: "done",
260
- stop_reason: hasToolCalls ? "tool_use" : "end_turn",
261
- usage: usage ? { input_tokens: usage.input_tokens ?? 0, output_tokens: usage.output_tokens ?? 0 } : undefined,
262
- };
314
+ if (done)
315
+ return;
316
+ await new Promise((r) => { resolve = r; });
317
+ }
318
+ }
319
+ finally {
320
+ if (ws.readyState === WebSocket.OPEN || ws.readyState === WebSocket.CONNECTING) {
321
+ try {
322
+ ws.close();
263
323
  }
324
+ catch { /* ignore */ }
264
325
  }
265
326
  }
266
327
  }
@@ -58,11 +58,16 @@ export function parseOpenAiResponse(data) {
58
58
  if (toolCalls) {
59
59
  for (const tc of toolCalls) {
60
60
  const fn = tc.function;
61
+ let input = {};
62
+ try {
63
+ input = JSON.parse(fn.arguments);
64
+ }
65
+ catch { /* malformed arguments */ }
61
66
  content.push({
62
67
  type: "tool_use",
63
68
  id: tc.id,
64
69
  name: fn.name,
65
- input: JSON.parse(fn.arguments),
70
+ input,
66
71
  });
67
72
  }
68
73
  }
package/dist/repl.js CHANGED
@@ -82,7 +82,7 @@ export async function startRepl(config) {
82
82
  rl.prompt();
83
83
  continue;
84
84
  }
85
- if (handleCommand(trimmed, { session, contextLimit, undoStack: [] })) {
85
+ if (handleCommand(trimmed, { session, contextLimit, undoStack: [], phrenCtx: config.phrenCtx })) {
86
86
  rl.prompt();
87
87
  continue;
88
88
  }
@@ -118,7 +118,7 @@ export async function startRepl(config) {
118
118
  process.stderr.write(`${YELLOW}Input mode: ${inputMode}${RESET}\n`);
119
119
  }
120
120
  else {
121
- handleCommand(queued, { session, contextLimit, undoStack: [] });
121
+ handleCommand(queued, { session, contextLimit, undoStack: [], phrenCtx: config.phrenCtx });
122
122
  }
123
123
  break;
124
124
  }
@@ -1,37 +1,35 @@
1
1
  export function buildSystemPrompt(phrenContext, priorSummary, providerInfo) {
2
2
  const modelNote = providerInfo ? ` You are running on ${providerInfo.name}${providerInfo.model ? ` (model: ${providerInfo.model})` : ""}.` : "";
3
3
  const parts = [
4
- `You are phren-agent, a coding assistant with persistent memory powered by phren.${modelNote} You retain knowledge across sessions — past decisions, discovered patterns, and project context are all searchable. Use this memory to avoid repeating mistakes and to build on prior work.`,
4
+ `You are phren-agent, an autonomous coding agent with persistent memory.${modelNote}`,
5
5
  "",
6
- "## Workflow",
7
- "1. **Orient** Before starting, search phren for relevant findings (`phren_search`) and check active tasks (`phren_get_tasks`). Past sessions may have context that saves time.",
8
- "2. **Read** — Read the relevant code before modifying it. Use `glob` to find files, `grep` to locate symbols, `read_file` to understand context.",
9
- "3. **Change** Make targeted edits. Use `edit_file` for surgical changes; reserve `write_file` for new files. Don't refactor code you weren't asked to touch.",
10
- "4. **Verify** — Run tests and linters via `shell` after edits. Check `git_diff` to review your changes.",
11
- "5. **Remember** — Save non-obvious discoveries with `phren_add_finding`: tricky bugs, architecture decisions, gotchas, workarounds. Skip obvious things — only save what would help a future session.",
12
- "6. **Report** — Explain what you did concisely. Mention files changed and why.",
6
+ "## Core Behavior",
7
+ "ACT IMMEDIATELY. When the user asks you to do something, DO IT. Don't describe what you're going to do just do it. Use your tools without asking permission. Read files, search code, make edits, run commands. Only ask clarifying questions when the request is genuinely ambiguous.",
8
+ "",
9
+ "You have persistent memory via phren. Past decisions, discovered patterns, and project context are searchable across sessions. Use this to avoid repeating mistakes.",
13
10
  "",
14
- "## Memory",
15
- "- `phren_search` finds past findings, reference docs, and project context. Search before asking the user for context they may have already provided.",
16
- "- `phren_add_finding` saves insights for future sessions. Good findings: non-obvious patterns, decisions with rationale, error resolutions, architecture constraints. Bad findings: narration of what you did, obvious facts, secrets.",
17
- "- `phren_get_tasks` shows tracked work items. Complete tasks with `phren_complete_task` when done.",
11
+ "## Workflow",
12
+ "1. **Search memory first** `phren_search` for relevant past findings before starting work.",
13
+ "2. **Read before writing** `glob` to find files, `grep` to locate symbols, `read_file` to understand code.",
14
+ "3. **Make changes** `edit_file` for surgical edits, `write_file` for new files only.",
15
+ "4. **Verify** — `shell` to run tests/linters, `git_diff` to review changes.",
16
+ "5. **Save learnings** — `phren_add_finding` for non-obvious discoveries (bugs, architecture decisions, gotchas). Skip obvious stuff.",
17
+ "6. **Report concisely** — what changed and why. No fluff.",
18
18
  "",
19
- "## Self-configuration",
20
- "You ARE phren-agent. You can configure phren itself via shell commands:",
21
- "- `phren init` set up phren (MCP server, hooks, profiles)",
22
- "- `phren add <path>` register a project directory",
23
- "- `phren config proactivity <level>` — set proactivity (high/medium/low)",
24
- "- `phren config policy set <key> <value>` — configure retention, TTL, decay",
25
- "- `phren hooks enable <tool>` — enable hooks for claude/copilot/cursor/codex",
26
- "- `phren doctor --fix` — diagnose and self-heal",
27
- "- `phren status` — check health",
28
- "If the user asks you to configure phren, set up a project, or fix their install, use the shell tool to run these commands.",
19
+ "## Tools You Have",
20
+ "- File I/O: `read_file`, `write_file`, `edit_file`",
21
+ "- Search: `glob`, `grep`, `web_search`, `web_fetch`",
22
+ "- System: `shell` (run commands, cd, build, test)",
23
+ "- Git: `git_status`, `git_diff`, `git_commit`",
24
+ "- Memory: `phren_search`, `phren_add_finding`, `phren_get_tasks`, `phren_complete_task`, `phren_add_task`",
29
25
  "",
30
- "## Rules",
26
+ "## Important",
27
+ "- Be direct and concise. Lead with the answer, not the reasoning.",
28
+ "- Call multiple tools in parallel when they're independent.",
29
+ "- NEVER ask 'should I read the file?' or 'would you like me to...' — just call the tool. If permission is needed, the system will prompt the user automatically. You don't handle permissions.",
30
+ "- Don't describe your plan unless asked. Execute immediately.",
31
31
  "- Never write secrets, API keys, or PII to files or findings.",
32
- "- Prefer `edit_file` over `write_file` for existing files.",
33
- "- Keep shell commands safe. No `rm -rf`, no `sudo`, no destructive operations.",
34
- "- If unsure, say so. Don't guess at behavior you can verify by reading code or running tests.",
32
+ "- You ARE phren-agent. You can run `phren` CLI commands via shell to configure yourself.",
35
33
  ];
36
34
  if (priorSummary) {
37
35
  parts.push("", `## Last session\n${priorSummary}`);
@@ -3,12 +3,36 @@ import * as path from "path";
3
3
  import { validatePath } from "../permissions/sandbox.js";
4
4
  /** Simple glob matching without external dependencies. Supports * and ** patterns. */
5
5
  function matchGlob(pattern, filePath) {
6
- const regex = pattern
7
- .replace(/[.+^${}()|[\]\\]/g, "\\$&")
8
- .replace(/\*\*/g, "{{GLOBSTAR}}")
9
- .replace(/\*/g, "[^/]*")
10
- .replace(/{{GLOBSTAR}}/g, ".*");
11
- return new RegExp(`^${regex}$`).test(filePath);
6
+ // Normalize path separators
7
+ const p = pattern.replace(/\\/g, "/");
8
+ const f = filePath.replace(/\\/g, "/");
9
+ // Build regex: escape special chars, then convert glob tokens
10
+ let regex = "";
11
+ let i = 0;
12
+ while (i < p.length) {
13
+ if (p[i] === "*" && p[i + 1] === "*") {
14
+ // ** matches any depth of directories
15
+ regex += ".*";
16
+ i += 2;
17
+ if (p[i] === "/")
18
+ i++; // skip trailing /
19
+ }
20
+ else if (p[i] === "*") {
21
+ // * matches anything except /
22
+ regex += "[^/]*";
23
+ i++;
24
+ }
25
+ else if (p[i] === "?") {
26
+ regex += "[^/]";
27
+ i++;
28
+ }
29
+ else {
30
+ // Escape regex special chars
31
+ regex += p[i].replace(/[.+^${}()|[\]\\]/g, "\\$&");
32
+ i++;
33
+ }
34
+ }
35
+ return new RegExp(`^${regex}$`).test(f);
12
36
  }
13
37
  function walkDir(dir, base, results, maxResults) {
14
38
  if (results.length >= maxResults)
@@ -5,7 +5,7 @@ const MAX_TIMEOUT_MS = 120_000;
5
5
  const MAX_OUTPUT_BYTES = 100_000;
6
6
  export const shellTool = {
7
7
  name: "shell",
8
- description: "Run a shell command via bash -c and return stdout + stderr. Use for: running tests, linters, build commands, git operations, and exploring the environment. Prefer specific tools (read_file, glob, grep) over shell equivalents when available.",
8
+ description: "Run a shell command and return stdout + stderr. Uses bash on Unix, cmd.exe on Windows. Use for: running tests, linters, build commands, git operations, and exploring the environment. Prefer specific tools (read_file, glob, grep) over shell equivalents when available.",
9
9
  input_schema: {
10
10
  type: "object",
11
11
  properties: {
@@ -24,8 +24,11 @@ export const shellTool = {
24
24
  if (!safety.safe && safety.severity === "block") {
25
25
  return { output: `Blocked: ${safety.reason}`, is_error: true };
26
26
  }
27
+ const isWindows = process.platform === "win32";
28
+ const shell = isWindows ? "cmd" : "bash";
29
+ const shellArgs = isWindows ? ["/c", command] : ["-c", command];
27
30
  try {
28
- const output = execFileSync("bash", ["-c", command], {
31
+ const output = execFileSync(shell, shellArgs, {
29
32
  cwd,
30
33
  encoding: "utf-8",
31
34
  timeout,
@@ -0,0 +1,48 @@
1
+ // ── ANSI helpers ─────────────────────────────────────────────────────────────
2
+ export const ESC = "\x1b[";
3
+ export const s = {
4
+ reset: `${ESC}0m`,
5
+ bold: (t) => `${ESC}1m${t}${ESC}0m`,
6
+ dim: (t) => `${ESC}2m${t}${ESC}0m`,
7
+ italic: (t) => `${ESC}3m${t}${ESC}0m`,
8
+ cyan: (t) => `${ESC}36m${t}${ESC}0m`,
9
+ green: (t) => `${ESC}32m${t}${ESC}0m`,
10
+ yellow: (t) => `${ESC}33m${t}${ESC}0m`,
11
+ red: (t) => `${ESC}31m${t}${ESC}0m`,
12
+ blue: (t) => `${ESC}34m${t}${ESC}0m`,
13
+ magenta: (t) => `${ESC}35m${t}${ESC}0m`,
14
+ gray: (t) => `${ESC}90m${t}${ESC}0m`,
15
+ invert: (t) => `${ESC}7m${t}${ESC}0m`,
16
+ // Gradient-style brand text
17
+ brand: (t) => `${ESC}1;35m${t}${ESC}0m`,
18
+ };
19
+ export function cols() {
20
+ return process.stdout.columns || 80;
21
+ }
22
+ export function stripAnsi(t) {
23
+ return t.replace(/\x1b\[[0-9;?]*[ -/]*[@-~]/g, "");
24
+ }
25
+ // ── Permission mode helpers ─────────────────────────────────────────────────
26
+ export const PERMISSION_MODES = ["suggest", "auto-confirm", "full-auto"];
27
+ export function nextPermissionMode(current) {
28
+ const idx = PERMISSION_MODES.indexOf(current);
29
+ return PERMISSION_MODES[(idx + 1) % PERMISSION_MODES.length];
30
+ }
31
+ export const PERMISSION_LABELS = {
32
+ "suggest": "suggest",
33
+ "auto-confirm": "auto",
34
+ "full-auto": "full-auto",
35
+ };
36
+ export const PERMISSION_ICONS = {
37
+ "suggest": "○",
38
+ "auto-confirm": "◐",
39
+ "full-auto": "●",
40
+ };
41
+ export const PERMISSION_COLORS = {
42
+ "suggest": s.cyan,
43
+ "auto-confirm": s.green,
44
+ "full-auto": s.yellow,
45
+ };
46
+ export function permTag(mode) {
47
+ return PERMISSION_COLORS[mode](`${PERMISSION_ICONS[mode]} ${mode}`);
48
+ }
@@ -0,0 +1,5 @@
1
+ import { jsx as _jsx, jsxs as _jsxs } from "react/jsx-runtime";
2
+ import { Box, Text } from "ink";
3
+ export function AgentMessage({ text }) {
4
+ return (_jsxs(Box, { flexDirection: "column", children: [_jsxs(Text, { color: "magenta", children: ["◆", " ", _jsx(Text, { children: text })] }), _jsx(Text, { children: "" })] }));
5
+ }