@phren/agent 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agent-loop.js +328 -0
- package/dist/bin.js +3 -0
- package/dist/checkpoint.js +103 -0
- package/dist/commands.js +292 -0
- package/dist/config.js +139 -0
- package/dist/context/pruner.js +62 -0
- package/dist/context/token-counter.js +28 -0
- package/dist/cost.js +71 -0
- package/dist/index.js +284 -0
- package/dist/mcp-client.js +168 -0
- package/dist/memory/anti-patterns.js +69 -0
- package/dist/memory/auto-capture.js +72 -0
- package/dist/memory/context-flush.js +24 -0
- package/dist/memory/context.js +170 -0
- package/dist/memory/error-recovery.js +58 -0
- package/dist/memory/project-context.js +77 -0
- package/dist/memory/session.js +100 -0
- package/dist/multi/agent-colors.js +41 -0
- package/dist/multi/child-entry.js +173 -0
- package/dist/multi/coordinator.js +263 -0
- package/dist/multi/diff-renderer.js +175 -0
- package/dist/multi/markdown.js +96 -0
- package/dist/multi/presets.js +107 -0
- package/dist/multi/progress.js +32 -0
- package/dist/multi/spawner.js +219 -0
- package/dist/multi/tui-multi.js +626 -0
- package/dist/multi/types.js +7 -0
- package/dist/permissions/allowlist.js +61 -0
- package/dist/permissions/checker.js +111 -0
- package/dist/permissions/prompt.js +190 -0
- package/dist/permissions/sandbox.js +95 -0
- package/dist/permissions/shell-safety.js +74 -0
- package/dist/permissions/types.js +2 -0
- package/dist/plan.js +38 -0
- package/dist/providers/anthropic.js +170 -0
- package/dist/providers/codex-auth.js +197 -0
- package/dist/providers/codex.js +265 -0
- package/dist/providers/ollama.js +142 -0
- package/dist/providers/openai-compat.js +163 -0
- package/dist/providers/openrouter.js +116 -0
- package/dist/providers/resolve.js +39 -0
- package/dist/providers/retry.js +55 -0
- package/dist/providers/types.js +2 -0
- package/dist/repl.js +180 -0
- package/dist/spinner.js +46 -0
- package/dist/system-prompt.js +31 -0
- package/dist/tools/edit-file.js +31 -0
- package/dist/tools/git.js +98 -0
- package/dist/tools/glob.js +65 -0
- package/dist/tools/grep.js +108 -0
- package/dist/tools/lint-test.js +76 -0
- package/dist/tools/phren-finding.js +35 -0
- package/dist/tools/phren-search.js +44 -0
- package/dist/tools/phren-tasks.js +71 -0
- package/dist/tools/read-file.js +44 -0
- package/dist/tools/registry.js +46 -0
- package/dist/tools/shell.js +48 -0
- package/dist/tools/types.js +2 -0
- package/dist/tools/write-file.js +27 -0
- package/dist/tui.js +451 -0
- package/package.json +39 -0
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
/**
 * Convert Anthropic tool definitions to the OpenAI function-calling format.
 * @param {Array<{name: string, description: string, input_schema: object}>} tools
 * @returns {Array<{type: "function", function: object}>}
 */
export function toOpenAiTools(tools) {
  const converted = [];
  for (const tool of tools) {
    converted.push({
      type: "function",
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.input_schema,
      },
    });
  }
  return converted;
}
|
|
8
|
+
/**
 * Convert Anthropic-style messages to OpenAI chat messages.
 * The system prompt becomes the first message; assistant text/tool_use blocks
 * map to content/tool_calls, and user tool_result blocks become "tool" messages.
 * @param {string} system - System prompt text.
 * @param {Array} messages - Anthropic-format conversation messages.
 * @returns {Array} OpenAI-format messages.
 */
export function toOpenAiMessages(system, messages) {
  const converted = [{ role: "system", content: system }];
  for (const msg of messages) {
    if (msg.role === "assistant") {
      if (typeof msg.content === "string") {
        converted.push({ role: "assistant", content: msg.content });
        continue;
      }
      // Split content blocks into text pieces and function tool calls.
      const texts = [];
      const calls = [];
      for (const block of msg.content) {
        if (block.type === "text") {
          texts.push(block.text);
        } else if (block.type === "tool_use") {
          calls.push({
            id: block.id,
            type: "function",
            function: { name: block.name, arguments: JSON.stringify(block.input) },
          });
        }
      }
      const entry = { role: "assistant" };
      if (texts.length > 0)
        entry.content = texts.join("\n");
      if (calls.length > 0)
        entry.tool_calls = calls;
      converted.push(entry);
    } else if (msg.role === "user") {
      if (typeof msg.content === "string") {
        converted.push({ role: "user", content: msg.content });
        continue;
      }
      for (const block of msg.content) {
        if (block.type === "tool_result") {
          converted.push({ role: "tool", tool_call_id: block.tool_use_id, content: block.content });
        } else if (block.type === "text") {
          converted.push({ role: "user", content: block.text });
        }
      }
    }
  }
  return converted;
}
|
|
49
|
+
/**
 * Parse a non-streaming OpenAI chat-completions response into Anthropic-style
 * content blocks plus a normalized stop_reason and usage.
 * @param {object} data - Parsed JSON body from /chat/completions.
 * @returns {{content: Array, stop_reason: string, usage?: {input_tokens: number, output_tokens: number}}}
 */
export function parseOpenAiResponse(data) {
  const choice = data.choices?.[0] ?? {};
  const message = choice.message;
  const content = [];
  if (message?.content && typeof message.content === "string") {
    content.push({ type: "text", text: message.content });
  }
  const toolCalls = message?.tool_calls;
  if (toolCalls) {
    for (const tc of toolCalls) {
      const fn = tc.function;
      // Some providers emit "" (or omit arguments) for zero-argument tools;
      // a bare JSON.parse would throw there, so fall back to an empty object.
      let input = {};
      if (fn?.arguments) {
        try {
          input = JSON.parse(fn.arguments);
        } catch {
          input = {};
        }
      }
      content.push({
        type: "tool_use",
        id: tc.id,
        name: fn?.name ?? "",
        input,
      });
    }
  }
  // Map OpenAI finish reasons onto Anthropic stop reasons.
  const finishReason = choice.finish_reason;
  const stop_reason = finishReason === "tool_calls" ? "tool_use"
    : finishReason === "length" ? "max_tokens"
      : "end_turn";
  const usage = data.usage;
  return {
    content,
    stop_reason,
    usage: usage ? { input_tokens: usage.prompt_tokens ?? 0, output_tokens: usage.completion_tokens ?? 0 } : undefined,
  };
}
|
|
80
|
+
/**
 * Parse an OpenAI-compatible SSE stream into StreamDelta events.
 * Yields text_delta / tool_use_start / tool_use_delta / tool_use_end events,
 * then a final "done" event carrying stop_reason and (if provided) usage.
 *
 * Fix over the previous version: the TextDecoder is flushed and any trailing
 * partial line left in the buffer is processed when the stream ends without a
 * final newline — previously that data was silently dropped.
 * @param {Response} res - fetch Response whose body is the SSE stream.
 */
export async function* parseOpenAiStream(res) {
  if (!res.body)
    throw new Error("Provider returned empty response body");
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let buf = "";
  // Track active tool calls by delta index -> tool_call id.
  const activeTools = new Map();
  let stopReason = "end_turn";
  let usage;
  for (;;) {
    const { done, value } = await reader.read();
    // Collect complete lines; on stream end, flush decoder + trailing line.
    const lines = [];
    if (done) {
      buf += decoder.decode();
      if (buf)
        lines.push(buf);
      buf = "";
    } else {
      buf += decoder.decode(value, { stream: true });
      const parts = buf.split("\n");
      buf = parts.pop() ?? "";
      lines.push(...parts);
    }
    for (const line of lines) {
      if (!line.startsWith("data: "))
        continue;
      const raw = line.slice(6).trim();
      if (raw === "[DONE]") {
        // Close out any active tool calls before signaling done.
        for (const [, toolId] of activeTools) {
          yield { type: "tool_use_end", id: toolId };
        }
        activeTools.clear();
        yield { type: "done", stop_reason: stopReason, usage };
        return;
      }
      let chunk;
      try {
        chunk = JSON.parse(raw);
      } catch {
        continue; // Malformed/keep-alive payload — skip.
      }
      // Usage arrives on the final chunk when stream_options.include_usage is set.
      const u = chunk.usage;
      if (u) {
        usage = { input_tokens: u.prompt_tokens ?? 0, output_tokens: u.completion_tokens ?? 0 };
      }
      const choice = chunk.choices?.[0];
      if (!choice)
        continue;
      const finishReason = choice.finish_reason;
      if (finishReason === "tool_calls")
        stopReason = "tool_use";
      else if (finishReason === "length")
        stopReason = "max_tokens";
      const delta = choice.delta;
      if (!delta)
        continue;
      // Text content.
      if (delta.content && typeof delta.content === "string") {
        yield { type: "text_delta", text: delta.content };
      }
      // Tool calls.
      const toolCalls = delta.tool_calls;
      if (toolCalls) {
        for (const tc of toolCalls) {
          const idx = tc.index;
          const fn = tc.function;
          // A new tool call starts when an id is present on the delta.
          if (tc.id && typeof tc.id === "string") {
            activeTools.set(idx, tc.id);
            yield { type: "tool_use_start", id: tc.id, name: fn?.name ?? "" };
          }
          // Argument (JSON fragment) deltas.
          if (fn?.arguments && typeof fn.arguments === "string") {
            const toolId = activeTools.get(idx) ?? String(idx);
            yield { type: "tool_use_delta", id: toolId, json: fn.arguments };
          }
        }
      }
    }
    if (done)
      break;
  }
  // Stream ended without [DONE]: emit tool_use_end for all active tools, then done.
  for (const [, toolId] of activeTools) {
    yield { type: "tool_use_end", id: toolId };
  }
  yield { type: "done", stop_reason: stopReason, usage };
}
|
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
import { toOpenAiTools, toOpenAiMessages, parseOpenAiResponse, parseOpenAiStream } from "./openai-compat.js";
|
|
2
|
+
/**
 * Chat provider backed by the OpenRouter /chat/completions API.
 * chat() returns a parsed Anthropic-style response; chatStream() yields
 * StreamDelta events. Request building and error handling are shared via
 * private helpers (previously duplicated across both methods).
 */
export class OpenRouterProvider {
  name = "openrouter";
  contextWindow = 200_000;
  apiKey;
  model;
  baseUrl;
  /**
   * @param {string} apiKey - OpenRouter API key (sent as a Bearer token).
   * @param {string} [model] - Model slug; defaults to Claude Sonnet.
   * @param {string} [baseUrl] - API root; defaults to the public endpoint.
   */
  constructor(apiKey, model, baseUrl) {
    this.apiKey = apiKey;
    this.model = model ?? "anthropic/claude-sonnet-4-20250514";
    this.baseUrl = baseUrl ?? "https://openrouter.ai/api/v1";
  }
  /** Build the chat-completions request body shared by chat/chatStream. */
  #buildBody(system, messages, tools, stream) {
    const body = {
      model: this.model,
      messages: toOpenAiMessages(system, messages),
      max_tokens: 8192,
    };
    if (stream) {
      body.stream = true;
      // Ask OpenRouter to include token usage on the final stream chunk.
      body.stream_options = { include_usage: true };
    }
    if (tools.length > 0)
      body.tools = toOpenAiTools(tools);
    return body;
  }
  /** POST to /chat/completions; throws with status + body text on failure. */
  async #post(body) {
    const res = await fetch(`${this.baseUrl}/chat/completions`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${this.apiKey}`,
        // App-attribution headers recognized by OpenRouter.
        "HTTP-Referer": "https://github.com/alaarab/phren",
        "X-Title": "phren-agent",
      },
      body: JSON.stringify(body),
    });
    if (!res.ok) {
      const text = await res.text();
      throw new Error(`OpenRouter API error ${res.status}: ${text}`);
    }
    return res;
  }
  /** One-shot completion; returns Anthropic-style content blocks. */
  async chat(system, messages, tools) {
    const res = await this.#post(this.#buildBody(system, messages, tools, false));
    return parseOpenAiResponse(await res.json());
  }
  /** Streaming completion; yields StreamDelta events. */
  async *chatStream(system, messages, tools) {
    const res = await this.#post(this.#buildBody(system, messages, tools, true));
    yield* parseOpenAiStream(res);
  }
}
|
|
64
|
+
/**
 * OpenAI-native provider (same wire protocol as OpenRouter, different base
 * URL and headers). Request building and error handling are shared via
 * private helpers (previously duplicated across chat/chatStream).
 */
export class OpenAiProvider {
  name = "openai";
  contextWindow = 128_000;
  apiKey;
  model;
  baseUrl;
  /**
   * @param {string} apiKey - OpenAI API key (sent as a Bearer token).
   * @param {string} [model] - Model name; defaults to "gpt-4o".
   * @param {string} [baseUrl] - API root; defaults to api.openai.com.
   */
  constructor(apiKey, model, baseUrl) {
    this.apiKey = apiKey;
    this.model = model ?? "gpt-4o";
    this.baseUrl = baseUrl ?? "https://api.openai.com/v1";
  }
  /** Build the chat-completions request body shared by chat/chatStream. */
  #buildBody(system, messages, tools, stream) {
    const body = {
      model: this.model,
      messages: toOpenAiMessages(system, messages),
      max_tokens: 8192,
    };
    if (stream) {
      body.stream = true;
      // Ask OpenAI to include token usage on the final stream chunk.
      body.stream_options = { include_usage: true };
    }
    if (tools.length > 0)
      body.tools = toOpenAiTools(tools);
    return body;
  }
  /** POST to /chat/completions; throws with status + body text on failure. */
  async #post(body) {
    const res = await fetch(`${this.baseUrl}/chat/completions`, {
      method: "POST",
      headers: { "Content-Type": "application/json", Authorization: `Bearer ${this.apiKey}` },
      body: JSON.stringify(body),
    });
    if (!res.ok) {
      const text = await res.text();
      throw new Error(`OpenAI API error ${res.status}: ${text}`);
    }
    return res;
  }
  /** One-shot completion; returns Anthropic-style content blocks. */
  async chat(system, messages, tools) {
    const res = await this.#post(this.#buildBody(system, messages, tools, false));
    return parseOpenAiResponse(await res.json());
  }
  /** Streaming completion; yields StreamDelta events. */
  async *chatStream(system, messages, tools) {
    const res = await this.#post(this.#buildBody(system, messages, tools, true));
    yield* parseOpenAiStream(res);
  }
}
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import { OpenRouterProvider, OpenAiProvider } from "./openrouter.js";
|
|
2
|
+
import { AnthropicProvider } from "./anthropic.js";
|
|
3
|
+
import { OllamaProvider } from "./ollama.js";
|
|
4
|
+
import { CodexProvider } from "./codex.js";
|
|
5
|
+
import { hasCodexToken } from "./codex-auth.js";
|
|
6
|
+
/**
 * Resolve which LLM provider to use.
 * Priority: explicit override (argument or PHREN_AGENT_PROVIDER), then
 * environment probing — OpenRouter, Anthropic, OpenAI keys, a Codex token,
 * a configured Ollama URL — and finally Ollama at its default URL.
 * @param {string} [overrideProvider] - Provider name forced by the caller.
 * @param {string} [overrideModel] - Model override passed to the provider.
 * @throws {Error} when an explicit provider's key is missing or the name is unknown.
 */
export function resolveProvider(overrideProvider, overrideModel) {
  const explicit = overrideProvider ?? process.env.PHREN_AGENT_PROVIDER;
  // Read a required env var, or fail with a provider-specific message.
  const requireKey = (envVar, label) => {
    const key = process.env[envVar];
    if (!key)
      throw new Error(`${envVar} is required for ${label} provider.`);
    return key;
  };
  if (explicit === "openrouter" || (!explicit && process.env.OPENROUTER_API_KEY))
    return new OpenRouterProvider(requireKey("OPENROUTER_API_KEY", "OpenRouter"), overrideModel);
  if (explicit === "anthropic" || (!explicit && process.env.ANTHROPIC_API_KEY))
    return new AnthropicProvider(requireKey("ANTHROPIC_API_KEY", "Anthropic"), overrideModel);
  if (explicit === "openai" || (!explicit && process.env.OPENAI_API_KEY))
    return new OpenAiProvider(requireKey("OPENAI_API_KEY", "OpenAI"), overrideModel);
  // Codex: uses your ChatGPT subscription directly — no API key, no middleman
  if (explicit === "codex" || (!explicit && hasCodexToken()))
    return new CodexProvider(overrideModel);
  const ollamaUrl = process.env.PHREN_OLLAMA_URL;
  if (explicit === "ollama" || (!explicit && ollamaUrl && ollamaUrl !== "off"))
    return new OllamaProvider(overrideModel, ollamaUrl);
  // Last resort: try Ollama at default URL
  if (!explicit)
    return new OllamaProvider(overrideModel);
  throw new Error(`Unknown provider "${explicit}". Supported: openrouter, anthropic, openai, codex, ollama.\n` +
    "Set one of: OPENROUTER_API_KEY, ANTHROPIC_API_KEY, OPENAI_API_KEY, or run 'phren-agent auth login' for Codex.");
}
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
/** Retry policy defaults: 3 retries, exponential backoff 1s..60s with jitter. */
const DEFAULT_CONFIG = {
  maxRetries: 3,
  baseDelayMs: 1000,
  maxDelayMs: 60_000,
  // 429 rate limit, 5xx server errors, 529 (Anthropic "overloaded").
  retryableStatuses: new Set([429, 500, 502, 503, 529]),
};
const RETRYABLE_NETWORK_CODES = new Set(["ECONNRESET", "ECONNREFUSED", "ETIMEDOUT", "EPIPE"]);
/** Extract HTTP status from error message like "API error 429: ..." */
function extractStatus(error) {
  const msg = error instanceof Error ? error.message : String(error);
  const match = msg.match(/\berror\s+(\d{3})\b/i);
  return match ? parseInt(match[1], 10) : null;
}
/** Check if the error is a retryable network error by code or message. */
function isNetworkError(error) {
  if (error instanceof Error && "code" in error) {
    const code = error.code;
    if (code && RETRYABLE_NETWORK_CODES.has(code))
      return true;
  }
  // Fall back to scanning the message text for a known code.
  const msg = error instanceof Error ? error.message : String(error);
  return Array.from(RETRYABLE_NETWORK_CODES).some((c) => msg.includes(c));
}
/** Extract a Retry-After hint (seconds) from the error message; returns ms. */
function extractRetryAfter(error) {
  const msg = error instanceof Error ? error.message : String(error);
  const match = msg.match(/retry[- ]?after[:\s]+(\d+)/i);
  return match ? parseInt(match[1], 10) * 1000 : null;
}
/** Promise-based delay. */
function sleep(ms) {
  return new Promise((resolve) => setTimeout(resolve, ms));
}
/**
 * Wrap an async function with exponential-backoff retry.
 * Retries on retryable HTTP statuses (parsed from the error message) and on
 * known network error codes; honors a Retry-After hint when present.
 * Fix: the Retry-After delay is now capped at maxDelayMs — previously a large
 * or bogus header value could stall the agent far beyond the configured cap.
 * @param {() => Promise<*>} fn - Operation to attempt.
 * @param {object} [config] - Partial override of DEFAULT_CONFIG.
 * @param {boolean} [verbose] - Log retry attempts to stderr.
 */
export async function withRetry(fn, config, verbose) {
  const cfg = { ...DEFAULT_CONFIG, ...config };
  for (let attempt = 0;; attempt++) {
    try {
      return await fn();
    }
    catch (error) {
      const status = extractStatus(error);
      const isRetryable = (status !== null && cfg.retryableStatuses.has(status)) || isNetworkError(error);
      if (!isRetryable || attempt >= cfg.maxRetries) {
        throw error;
      }
      const retryAfterMs = extractRetryAfter(error);
      // Full-jitter exponential backoff, capped at maxDelayMs.
      const backoff = Math.min(cfg.baseDelayMs * Math.pow(2, attempt) + Math.random() * 1000, cfg.maxDelayMs);
      // Server hint wins over backoff, but never exceeds the cap.
      const delayMs = Math.min(retryAfterMs ?? backoff, cfg.maxDelayMs);
      if (verbose) {
        process.stderr.write(`Retry ${attempt + 1}/${cfg.maxRetries} after ${Math.round(delayMs)}ms (status ${status})\n`);
      }
      await sleep(delayMs);
    }
  }
}
|
package/dist/repl.js
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
/** Interactive REPL for the phren agent with steering/queue input modes. */
|
|
2
|
+
import * as readline from "node:readline/promises";
|
|
3
|
+
import * as fs from "node:fs";
|
|
4
|
+
import * as path from "node:path";
|
|
5
|
+
import * as os from "node:os";
|
|
6
|
+
import { createSession, runTurn } from "./agent-loop.js";
|
|
7
|
+
import { handleCommand } from "./commands.js";
|
|
8
|
+
// Persistent REPL state (history + settings) lives under ~/.phren-agent.
const HISTORY_DIR = path.join(os.homedir(), ".phren-agent");
// Newline-delimited prompt history, most recent entry last.
const HISTORY_FILE = path.join(HISTORY_DIR, "repl-history.txt");
// JSON settings file (inputMode, permissionMode) shared across sessions.
const SETTINGS_FILE = path.join(HISTORY_DIR, "settings.json");
// Cap on history entries kept in memory and on disk.
const MAX_HISTORY = 500;
// ANSI escape sequences used for terminal output.
const CYAN = "\x1b[36m";
const RED = "\x1b[31m";
const DIM = "\x1b[2m";
const YELLOW = "\x1b[33m";
const RESET = "\x1b[0m";
|
|
17
|
+
/** Load persisted prompt history; returns [] when the file is unreadable. */
function loadHistory() {
  let raw;
  try {
    raw = fs.readFileSync(HISTORY_FILE, "utf-8");
  }
  catch {
    // Missing or unreadable history file: start fresh.
    return [];
  }
  const entries = raw.split("\n").filter(Boolean);
  return entries.slice(-MAX_HISTORY);
}
|
|
26
|
+
/** Persist prompt history to disk, capped at MAX_HISTORY lines (best-effort). */
function saveHistory(lines) {
  const kept = lines.slice(-MAX_HISTORY);
  try {
    fs.mkdirSync(HISTORY_DIR, { recursive: true });
    fs.writeFileSync(HISTORY_FILE, `${kept.join("\n")}\n`);
  }
  catch {
    // History loss is acceptable; never crash the REPL over it.
  }
}
|
|
33
|
+
/** Read the saved input mode from settings; defaults to "steering". */
function loadInputMode() {
  try {
    const settings = JSON.parse(fs.readFileSync(SETTINGS_FILE, "utf-8"));
    if (settings.inputMode === "queue")
      return "queue";
  }
  catch {
    // Missing/corrupt settings file: fall through to the default.
  }
  return "steering";
}
|
|
42
|
+
/** Persist the input mode into settings.json, preserving other keys (best-effort). */
function saveInputMode(mode) {
  try {
    fs.mkdirSync(HISTORY_DIR, { recursive: true });
    let settings;
    try {
      settings = JSON.parse(fs.readFileSync(SETTINGS_FILE, "utf-8"));
    }
    catch {
      // No settings yet (or unparseable): start from an empty object.
      settings = {};
    }
    settings.inputMode = mode;
    fs.writeFileSync(SETTINGS_FILE, `${JSON.stringify(settings, null, 2)}\n`);
  }
  catch {
    // Settings are a convenience; ignore write failures.
  }
}
|
|
55
|
+
/** Persist the permission mode into settings.json, preserving other keys (best-effort). */
function savePermissionMode(mode) {
  try {
    fs.mkdirSync(HISTORY_DIR, { recursive: true });
    let settings;
    try {
      settings = JSON.parse(fs.readFileSync(SETTINGS_FILE, "utf-8"));
    }
    catch {
      // No settings yet (or unparseable): start from an empty object.
      settings = {};
    }
    settings.permissionMode = mode;
    fs.writeFileSync(SETTINGS_FILE, `${JSON.stringify(settings, null, 2)}\n`);
  }
  catch {
    // Settings are a convenience; ignore write failures.
  }
}
|
|
68
|
+
/**
 * Run the interactive REPL loop until EOF (Ctrl+D).
 * Reads lines from stdin, dispatches slash commands, and runs agent turns.
 * While a turn is in flight, new input is buffered in `pendingInput` and
 * replayed afterwards (as a new turn in both "steering" and "queue" modes;
 * steering mode only differs in what is printed).
 * @param {object} config - Must provide `provider` (with contextWindow) and
 *   `registry` (with permissionConfig/setPermissions); passed to runTurn.
 * @returns {Promise<object>} the session, after EOF.
 */
export async function startRepl(config) {
  const contextLimit = config.provider.contextWindow ?? 200_000;
  const session = createSession(contextLimit);
  const history = loadHistory();
  let inputMode = loadInputMode();
  // Queued/steering input buffer — collects input typed while agent is running.
  // NOTE(review): this holds only the LAST line typed; earlier buffered lines
  // are overwritten — confirm that is the intended queue semantics.
  let pendingInput = null;
  let agentRunning = false;
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stderr,
    prompt: `${CYAN}phren>${RESET} `,
    terminal: process.stdin.isTTY ?? false,
    history,
    historySize: MAX_HISTORY,
  });
  const modeLabel = inputMode === "steering" ? "steering" : "queue";
  process.stderr.write(`${DIM}phren-agent interactive mode (${modeLabel}). Type /help for commands, Ctrl+D to exit.${RESET}\n`);
  rl.prompt();
  // Accumulates every accepted line for persistence at exit.
  const allHistory = [...history];
  for await (const line of rl) {
    const trimmed = line.trim();
    if (!trimmed) {
      rl.prompt();
      continue;
    }
    allHistory.push(trimmed);
    // Handle slash commands.
    // /mode toggles between steering and queue input modes.
    if (trimmed === "/mode") {
      const newMode = inputMode === "steering" ? "queue" : "steering";
      inputMode = newMode;
      saveInputMode(newMode);
      process.stderr.write(`${YELLOW}Input mode: ${newMode}${RESET}\n`);
      rl.prompt();
      continue;
    }
    // /permissions [mode] shows or sets the tool-permission mode.
    if (trimmed.startsWith("/permissions")) {
      const VALID_MODES = ["suggest", "auto-confirm", "full-auto"];
      const arg = trimmed.split(/\s+/)[1];
      if (!arg || !VALID_MODES.includes(arg)) {
        // No/invalid argument: report current mode and usage.
        const current = config.registry.permissionConfig.mode;
        process.stderr.write(`${DIM}Permission mode: ${current}${RESET}\n`);
        process.stderr.write(`${DIM}Usage: /permissions <suggest|auto-confirm|full-auto>${RESET}\n`);
      }
      else {
        config.registry.setPermissions({ ...config.registry.permissionConfig, mode: arg });
        savePermissionMode(arg);
        process.stderr.write(`${YELLOW}Permission mode: ${arg}${RESET}\n`);
      }
      rl.prompt();
      continue;
    }
    // Other slash commands are delegated; handleCommand returns truthy when it
    // consumed the line. NOTE(review): a fresh undoStack is passed every call,
    // so undo state does not persist across commands — confirm intended.
    if (handleCommand(trimmed, { session, contextLimit, undoStack: [] })) {
      rl.prompt();
      continue;
    }
    // If agent is already running, buffer the input for after the turn.
    if (agentRunning) {
      pendingInput = trimmed;
      if (inputMode === "steering") {
        process.stderr.write(`${DIM}↳ steering: "${trimmed.slice(0, 60)}${trimmed.length > 60 ? "..." : ""}" will be injected${RESET}\n`);
      }
      else {
        process.stderr.write(`${DIM}↳ queued: "${trimmed.slice(0, 60)}${trimmed.length > 60 ? "..." : ""}"${RESET}\n`);
      }
      continue;
    }
    // Run the turn; errors are reported but never kill the REPL.
    agentRunning = true;
    try {
      await runTurn(trimmed, session, config);
    }
    catch (err) {
      const msg = err instanceof Error ? err.message : String(err);
      process.stderr.write(`${RED}Error: ${msg}${RESET}\n`);
    }
    agentRunning = false;
    // Process any input that came in while the agent was working.
    while (pendingInput !== null) {
      const queued = pendingInput;
      pendingInput = null;
      allHistory.push(queued);
      if (queued.startsWith("/")) {
        // Buffered slash commands: handle /mode inline, delegate the rest,
        // then stop draining (break) and return to the prompt.
        if (queued === "/mode") {
          inputMode = inputMode === "steering" ? "queue" : "steering";
          saveInputMode(inputMode);
          process.stderr.write(`${YELLOW}Input mode: ${inputMode}${RESET}\n`);
        }
        else {
          handleCommand(queued, { session, contextLimit, undoStack: [] });
        }
        break;
      }
      agentRunning = true;
      try {
        if (inputMode === "steering") {
          // Steering: inject as a correction/redirect (announced, then run as a turn).
          process.stderr.write(`${YELLOW}↳ steering with: ${queued.slice(0, 80)}${RESET}\n`);
        }
        await runTurn(queued, session, config);
      }
      catch (err) {
        const msg = err instanceof Error ? err.message : String(err);
        process.stderr.write(`${RED}Error: ${msg}${RESET}\n`);
      }
      agentRunning = false;
    }
    rl.prompt();
  }
  // EOF (Ctrl+D) — clean exit: persist history and print a session summary.
  saveHistory(allHistory);
  process.stderr.write(`\n${DIM}Session ended. ${session.turns} turns, ${session.toolCalls} tool calls.${RESET}\n`);
  return session;
}
|
package/dist/spinner.js
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
/** TTY spinner + formatting helpers for the agent REPL. */
const FRAMES = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"];
const INTERVAL = 80;
/**
 * Create a stderr spinner. All methods become no-ops when stderr is not a TTY,
 * so callers never need to guard on terminal capability.
 */
export function createSpinner() {
  const isTTY = process.stderr.isTTY;
  let timer = null;
  let frameIndex = 0;
  let label = "";
  // Redraw in place: carriage return + clear-line, then dim frame + label.
  const draw = () => {
    process.stderr.write(`\r\x1b[2K\x1b[90m${FRAMES[frameIndex]} ${label}\x1b[0m`);
    frameIndex = (frameIndex + 1) % FRAMES.length;
  };
  return {
    /** Begin animating with the given label; restarts if already running. */
    start(t) {
      if (!isTTY)
        return;
      label = t;
      frameIndex = 0;
      if (timer)
        clearInterval(timer);
      draw();
      timer = setInterval(draw, INTERVAL);
    },
    /** Swap the label; the next frame tick picks it up. */
    update(t) {
      label = t;
    },
    /** Stop animating and clear the spinner line. */
    stop() {
      if (timer) {
        clearInterval(timer);
        timer = null;
      }
      if (isTTY)
        process.stderr.write("\r\x1b[2K");
    },
  };
}
|
|
37
|
+
/** Format a dim "--- turn N (M tool calls) ---" header line for REPL output. */
export function formatTurnHeader(turn, toolCalls) {
  const plural = toolCalls !== 1 ? "s" : "";
  return `\x1b[90m--- turn ${turn} (${toolCalls} tool call${plural}) ---\x1b[0m`;
}
|
|
41
|
+
/** Format a tool call for display: dim name plus input preview capped at 100 chars. */
export function formatToolCall(name, input) {
  const serialized = JSON.stringify(input);
  const preview = serialized.length > 100 ? `${serialized.slice(0, 100)}...` : serialized;
  return `\x1b[2m  ${name}(${preview})\x1b[0m`;
}
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
/**
 * Assemble the agent's system prompt: identity, workflow, memory guidance,
 * rules, then (optionally) the prior session summary and phren context.
 * @param {string} [phrenContext] - Project context block appended last.
 * @param {string} [priorSummary] - Summary of the previous session, if any.
 * @returns {string} Newline-joined prompt text.
 */
export function buildSystemPrompt(phrenContext, priorSummary) {
  const sections = [
    `You are phren-agent, a coding assistant with persistent memory powered by phren. You retain knowledge across sessions — past decisions, discovered patterns, and project context are all searchable. Use this memory to avoid repeating mistakes and to build on prior work.`,
  ];
  sections.push(
    "",
    "## Workflow",
    "1. **Orient** — Before starting, search phren for relevant findings (`phren_search`) and check active tasks (`phren_get_tasks`). Past sessions may have context that saves time.",
    "2. **Read** — Read the relevant code before modifying it. Use `glob` to find files, `grep` to locate symbols, `read_file` to understand context.",
    "3. **Change** — Make targeted edits. Use `edit_file` for surgical changes; reserve `write_file` for new files. Don't refactor code you weren't asked to touch.",
    "4. **Verify** — Run tests and linters via `shell` after edits. Check `git_diff` to review your changes.",
    "5. **Remember** — Save non-obvious discoveries with `phren_add_finding`: tricky bugs, architecture decisions, gotchas, workarounds. Skip obvious things — only save what would help a future session.",
    "6. **Report** — Explain what you did concisely. Mention files changed and why.",
  );
  sections.push(
    "",
    "## Memory",
    "- `phren_search` finds past findings, reference docs, and project context. Search before asking the user for context they may have already provided.",
    "- `phren_add_finding` saves insights for future sessions. Good findings: non-obvious patterns, decisions with rationale, error resolutions, architecture constraints. Bad findings: narration of what you did, obvious facts, secrets.",
    "- `phren_get_tasks` shows tracked work items. Complete tasks with `phren_complete_task` when done.",
  );
  sections.push(
    "",
    "## Rules",
    "- Never write secrets, API keys, or PII to files or findings.",
    "- Prefer `edit_file` over `write_file` for existing files.",
    "- Keep shell commands safe. No `rm -rf`, no `sudo`, no destructive operations.",
    "- If unsure, say so. Don't guess at behavior you can verify by reading code or running tests.",
  );
  if (priorSummary) {
    sections.push("", `## Last session\n${priorSummary}`);
  }
  if (phrenContext) {
    sections.push("", phrenContext);
  }
  return sections.join("\n");
}
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
import * as fs from "fs";
|
|
2
|
+
import { encodeDiffPayload } from "../multi/diff-renderer.js";
|
|
3
|
+
/**
 * Tool: replace one exact occurrence of old_string with new_string in a file.
 * execute() returns { output, is_error? }; errors for a missing file, zero
 * matches, or multiple matches.
 */
export const editFileTool = {
  name: "edit_file",
  description: "Edit a file by replacing an exact string match. Preferred over write_file for modifying existing files — only changes what's needed. The old_string must appear exactly once; include surrounding lines for uniqueness if needed.",
  input_schema: {
    type: "object",
    properties: {
      path: { type: "string", description: "File path to edit." },
      old_string: { type: "string", description: "Exact string to find and replace." },
      new_string: { type: "string", description: "Replacement string." },
    },
    required: ["path", "old_string", "new_string"],
  },
  async execute(input) {
    const filePath = input.path;
    const oldStr = input.old_string;
    const newStr = input.new_string;
    if (!fs.existsSync(filePath))
      return { output: `File not found: ${filePath}`, is_error: true };
    const oldContent = fs.readFileSync(filePath, "utf-8");
    // Count occurrences without regex (old_string may contain metacharacters).
    const count = oldContent.split(oldStr).length - 1;
    if (count === 0)
      return { output: "old_string not found in file.", is_error: true };
    if (count > 1)
      return { output: `old_string found ${count} times — must be unique. Provide more context.`, is_error: true };
    // Splice by index instead of String.replace: replace() with a string
    // pattern still interprets "$&", "$'", "$1"... in the replacement text,
    // so a new_string containing "$" sequences would corrupt the edit.
    const at = oldContent.indexOf(oldStr);
    const newContent = oldContent.slice(0, at) + newStr + oldContent.slice(at + oldStr.length);
    fs.writeFileSync(filePath, newContent);
    return { output: `Edited ${filePath}${encodeDiffPayload(filePath, oldContent, newContent)}` };
  },
};
|