@strayl/agent 0.1.3 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/package.json +5 -1
  2. package/skills/api-creation/SKILL.md +631 -0
  3. package/skills/authentication/SKILL.md +294 -0
  4. package/skills/frontend-design/SKILL.md +108 -0
  5. package/skills/landing-creation/SKILL.md +125 -0
  6. package/skills/reference/SKILL.md +149 -0
  7. package/skills/web-application-creation/SKILL.md +231 -0
  8. package/src/agent.ts +0 -465
  9. package/src/checkpoints/manager.ts +0 -112
  10. package/src/context/manager.ts +0 -185
  11. package/src/context/summarizer.ts +0 -104
  12. package/src/context/trim.ts +0 -55
  13. package/src/emitter.ts +0 -14
  14. package/src/hitl/manager.ts +0 -77
  15. package/src/hitl/transport.ts +0 -13
  16. package/src/index.ts +0 -116
  17. package/src/llm/client.ts +0 -276
  18. package/src/llm/gemini-native.ts +0 -307
  19. package/src/llm/models.ts +0 -64
  20. package/src/middleware/compose.ts +0 -24
  21. package/src/middleware/credential-scrubbing.ts +0 -31
  22. package/src/middleware/forbidden-packages.ts +0 -107
  23. package/src/middleware/plan-mode.ts +0 -143
  24. package/src/middleware/prompt-caching.ts +0 -21
  25. package/src/middleware/tool-compression.ts +0 -25
  26. package/src/middleware/tool-filter.ts +0 -13
  27. package/src/prompts/implementation-mode.md +0 -16
  28. package/src/prompts/plan-mode.md +0 -51
  29. package/src/prompts/system.ts +0 -173
  30. package/src/skills/loader.ts +0 -53
  31. package/src/stdin-listener.ts +0 -61
  32. package/src/subagents/definitions.ts +0 -72
  33. package/src/subagents/manager.ts +0 -161
  34. package/src/todos/manager.ts +0 -61
  35. package/src/tools/builtin/delete.ts +0 -29
  36. package/src/tools/builtin/edit.ts +0 -74
  37. package/src/tools/builtin/exec.ts +0 -216
  38. package/src/tools/builtin/glob.ts +0 -104
  39. package/src/tools/builtin/grep.ts +0 -115
  40. package/src/tools/builtin/ls.ts +0 -54
  41. package/src/tools/builtin/move.ts +0 -31
  42. package/src/tools/builtin/read.ts +0 -69
  43. package/src/tools/builtin/write.ts +0 -42
  44. package/src/tools/executor.ts +0 -51
  45. package/src/tools/external/database.ts +0 -285
  46. package/src/tools/external/enter-plan-mode.ts +0 -34
  47. package/src/tools/external/generate-image.ts +0 -110
  48. package/src/tools/external/hitl-tools.ts +0 -118
  49. package/src/tools/external/preview.ts +0 -28
  50. package/src/tools/external/proxy-fetch.ts +0 -51
  51. package/src/tools/external/task.ts +0 -38
  52. package/src/tools/external/wait.ts +0 -20
  53. package/src/tools/external/web-fetch.ts +0 -57
  54. package/src/tools/external/web-search.ts +0 -61
  55. package/src/tools/registry.ts +0 -36
  56. package/src/tools/zod-to-json-schema.ts +0 -86
  57. package/src/types.ts +0 -151
package/src/checkpoints/manager.ts DELETED
@@ -1,112 +0,0 @@
1
- import fs from "node:fs/promises";
2
- import path from "node:path";
3
- import type { Message, TodoItem, Usage, CheckpointData } from "../types.js";
4
- import type { Emitter } from "../emitter.js";
5
-
6
- export type Checkpoint = CheckpointData;
7
-
8
- export class CheckpointManager {
9
- private dir: string;
10
- private checkpoints: Map<string, Checkpoint> = new Map();
11
-
12
- constructor(workDir: string, sessionId: string) {
13
- this.dir = path.join(workDir, ".strayl", "checkpoints", sessionId);
14
- }
15
-
16
- async init(): Promise<void> {
17
- await fs.mkdir(this.dir, { recursive: true });
18
-
19
- // Load existing checkpoints from disk
20
- try {
21
- const files = await fs.readdir(this.dir);
22
- for (const file of files) {
23
- if (!file.endsWith(".json")) continue;
24
- try {
25
- const content = await fs.readFile(path.join(this.dir, file), "utf-8");
26
- const cp = JSON.parse(content) as Checkpoint;
27
- this.checkpoints.set(cp.id, cp);
28
- } catch {
29
- // Corrupted checkpoint — skip
30
- }
31
- }
32
- } catch {
33
- // No checkpoints dir yet — fine
34
- }
35
- }
36
-
37
- async save(
38
- iteration: number,
39
- messages: Message[],
40
- todos: TodoItem[],
41
- usage: Usage,
42
- emitter: Emitter,
43
- ): Promise<string> {
44
- const id = `cp_${iteration}_${Date.now()}`;
45
- const checkpoint: Checkpoint = {
46
- id,
47
- iteration,
48
- timestamp: Date.now(),
49
- messages: structuredClone(messages),
50
- todos: structuredClone(todos),
51
- usage: { ...usage },
52
- };
53
-
54
- this.checkpoints.set(id, checkpoint);
55
-
56
- // Write atomically: tmp → rename
57
- const tmpPath = path.join(this.dir, `${id}.tmp`);
58
- const finalPath = path.join(this.dir, `${id}.json`);
59
- await fs.writeFile(tmpPath, JSON.stringify(checkpoint));
60
- await fs.rename(tmpPath, finalPath);
61
-
62
- emitter.emit({ type: "checkpoint-saved", id, iteration, checkpoint });
63
-
64
- // Keep only last 50 checkpoints on disk
65
- await this.pruneOld(50);
66
-
67
- return id;
68
- }
69
-
70
- get(id: string): Checkpoint | undefined {
71
- return this.checkpoints.get(id);
72
- }
73
-
74
- /** Get the most recent checkpoint at or before the given iteration */
75
- getByIteration(iteration: number): Checkpoint | undefined {
76
- let best: Checkpoint | undefined;
77
- for (const cp of this.checkpoints.values()) {
78
- if (cp.iteration <= iteration) {
79
- if (!best || cp.iteration > best.iteration) {
80
- best = cp;
81
- }
82
- }
83
- }
84
- return best;
85
- }
86
-
87
- /** Get the latest checkpoint */
88
- latest(): Checkpoint | undefined {
89
- let best: Checkpoint | undefined;
90
- for (const cp of this.checkpoints.values()) {
91
- if (!best || cp.timestamp > best.timestamp) {
92
- best = cp;
93
- }
94
- }
95
- return best;
96
- }
97
-
98
- list(): Checkpoint[] {
99
- return [...this.checkpoints.values()].sort((a, b) => a.iteration - b.iteration);
100
- }
101
-
102
- private async pruneOld(keep: number): Promise<void> {
103
- const sorted = this.list();
104
- if (sorted.length <= keep) return;
105
-
106
- const toRemove = sorted.slice(0, sorted.length - keep);
107
- for (const cp of toRemove) {
108
- this.checkpoints.delete(cp.id);
109
- await fs.unlink(path.join(this.dir, `${cp.id}.json`)).catch(() => {});
110
- }
111
- }
112
- }
package/src/context/manager.ts DELETED
@@ -1,185 +0,0 @@
1
- import type { Message, Usage, ContentPart } from "../types.js";
2
- import type { Emitter } from "../emitter.js";
3
- import type { LLMClient } from "../llm/client.js";
4
- import { summarizeMessages } from "./summarizer.js";
5
- import { trimContext } from "./trim.js";
6
-
7
- export class ContextManager {
8
- private msgs: Message[] = [];
9
- private totalUsageData: Usage = { input_tokens: 0, output_tokens: 0, cost: 0 };
10
- private peakInput = 0;
11
- private existingSummary: string | null;
12
- private pendingSummary: Promise<string> | null = null;
13
- readonly maxInputTokens: number;
14
- private summarizationThreshold: number;
15
- private preSummarizationRatio: number;
16
-
17
- constructor(config: {
18
- maxInputTokens: number;
19
- summarizationThreshold?: number;
20
- preSummarizationRatio?: number;
21
- previousSummary?: string;
22
- }) {
23
- this.maxInputTokens = config.maxInputTokens;
24
- // Pre-summarize at 65% (background, non-blocking — best UX)
25
- this.preSummarizationRatio = config.preSummarizationRatio ?? 0.65;
26
- // Hard summarize at 83% (Claude Code threshold — leaves room for completion)
27
- this.summarizationThreshold = config.summarizationThreshold ?? Math.floor(config.maxInputTokens * 0.83);
28
- this.existingSummary = config.previousSummary ?? null;
29
- }
30
-
31
- addSystem(content: string): void {
32
- this.msgs.push({ role: "system", content });
33
- }
34
-
35
- addUser(content: string, images?: Array<{ base64: string; contentType: string }>): void {
36
- if (images && images.length > 0) {
37
- const parts: ContentPart[] = [{ type: "text", text: content }];
38
- for (const img of images) {
39
- parts.push({
40
- type: "image_url",
41
- image_url: { url: `data:${img.contentType};base64,${img.base64}` },
42
- });
43
- }
44
- this.msgs.push({ role: "user", content: parts });
45
- } else {
46
- this.msgs.push({ role: "user", content });
47
- }
48
- }
49
-
50
- addAssistant(text: string, toolCalls?: import("../types.js").ToolCall[]): void {
51
- const msg: Message = { role: "assistant", content: text || null };
52
- if (toolCalls && toolCalls.length > 0) {
53
- msg.tool_calls = toolCalls;
54
- }
55
- this.msgs.push(msg);
56
- }
57
-
58
- addToolResult(toolCallId: string, toolName: string, result: string): void {
59
- this.msgs.push({
60
- role: "tool",
61
- content: result,
62
- tool_call_id: toolCallId,
63
- name: toolName,
64
- });
65
- }
66
-
67
- messages(): Message[] {
68
- return [...this.msgs];
69
- }
70
-
71
- /** Replace messages (used by checkpoint restore) */
72
- restoreMessages(msgs: Message[]): void {
73
- this.msgs = structuredClone(msgs);
74
- this.pendingSummary = null;
75
- }
76
-
77
- recordUsage(usage: { input_tokens: number; output_tokens: number; cost?: number }): void {
78
- this.totalUsageData.input_tokens += usage.input_tokens;
79
- this.totalUsageData.output_tokens += usage.output_tokens;
80
- if (usage.cost) this.totalUsageData.cost = (this.totalUsageData.cost ?? 0) + usage.cost;
81
- if (usage.input_tokens > this.peakInput) this.peakInput = usage.input_tokens;
82
- }
83
-
84
- totalUsage(): Usage {
85
- return { ...this.totalUsageData };
86
- }
87
-
88
- peakInputTokens(): number {
89
- return this.peakInput;
90
- }
91
-
92
- // ~4 chars per token estimation
93
- estimateTokens(): number {
94
- let total = 0;
95
- for (const msg of this.msgs) {
96
- const content = typeof msg.content === "string"
97
- ? msg.content
98
- : JSON.stringify(msg.content ?? "");
99
- total += Math.ceil(content.length / 4);
100
- if (msg.tool_calls) total += Math.ceil(JSON.stringify(msg.tool_calls).length / 4);
101
- }
102
- return total;
103
- }
104
-
105
- shouldPreSummarize(): boolean {
106
- return this.estimateTokens() > this.maxInputTokens * this.preSummarizationRatio;
107
- }
108
-
109
- shouldSummarize(): boolean {
110
- return this.estimateTokens() > this.summarizationThreshold;
111
- }
112
-
113
- maybeTriggerPreSummarization(client: LLMClient, emitter: Emitter): void {
114
- if (!this.shouldPreSummarize() || this.pendingSummary) return;
115
- this.pendingSummary = this.createSummary(client).catch(() => "");
116
- }
117
-
118
- async applyPendingSummary(emitter: Emitter): Promise<boolean> {
119
- if (!this.pendingSummary) return false;
120
- try {
121
- const summary = await this.pendingSummary;
122
- this.pendingSummary = null;
123
- if (summary) {
124
- this.replaceSummarizedMessages(summary, emitter);
125
- return true;
126
- }
127
- return false;
128
- } catch {
129
- this.pendingSummary = null;
130
- return false;
131
- }
132
- }
133
-
134
- async summarize(client: LLMClient, emitter: Emitter): Promise<void> {
135
- try {
136
- const summary = await this.createSummary(client);
137
- if (summary) {
138
- this.replaceSummarizedMessages(summary, emitter);
139
- }
140
- } catch {
141
- // Summarization is optimization, not critical
142
- }
143
- }
144
-
145
- private async createSummary(client: LLMClient): Promise<string> {
146
- return summarizeMessages(this.msgs, this.existingSummary, client);
147
- }
148
-
149
- private replaceSummarizedMessages(summary: string, emitter: Emitter): void {
150
- const KEEP = 8;
151
- if (this.msgs.length <= KEEP + 2) return; // Not enough to summarize
152
-
153
- // Keep system messages (first 1-2) and last KEEP messages
154
- const systemMsgs = this.msgs.filter(m => m.role === "system");
155
- const recentMsgs = this.msgs.slice(-KEEP);
156
-
157
- // Don't break tool call pairs
158
- let startIdx = this.msgs.length - KEEP;
159
- while (startIdx > systemMsgs.length && this.msgs[startIdx]?.role === "tool") {
160
- startIdx--;
161
- }
162
- const keptRecent = this.msgs.slice(startIdx);
163
-
164
- const removedCount = this.msgs.length - systemMsgs.length - keptRecent.length;
165
-
166
- this.existingSummary = summary;
167
- this.msgs = [
168
- ...systemMsgs,
169
- { role: "user", content: `[Previous conversation summary]\n${summary}` },
170
- { role: "assistant", content: "I understand the context from the summary. Let me continue." },
171
- ...keptRecent,
172
- ];
173
-
174
- emitter.emit({
175
- type: "summarized",
176
- removed_msgs: removedCount,
177
- kept_msgs: keptRecent.length,
178
- summary_chars: summary.length,
179
- });
180
- }
181
-
182
- applyTrim(): void {
183
- this.msgs = trimContext(this.msgs, this.maxInputTokens);
184
- }
185
- }
package/src/context/summarizer.ts DELETED
@@ -1,104 +0,0 @@
1
- import type { Message } from "../types.js";
2
- import type { LLMClient } from "../llm/client.js";
3
- import { SUMMARIZATION_MODEL } from "../llm/models.js";
4
- import { LLMClient as LLMClientClass } from "../llm/client.js";
5
-
6
- const SUMMARIZATION_KEEP_MESSAGES = 20;
7
- const SUMMARIZATION_MAX_CHARS = 200_000;
8
- const MSG_TRUNCATE_THRESHOLD = 4000;
9
- const MSG_TRUNCATE_HEAD = 2000;
10
- const MSG_TRUNCATE_TAIL = 1500;
11
-
12
- const FRESH_SUMMARY_PROMPT = `You are a conversation summarizer for a coding agent.
13
- Extract the essential context so the agent can continue working without the original messages.
14
-
15
- You MUST include:
16
- - **Task**: What is being built/fixed and why
17
- - **Decisions**: Key architectural and implementation decisions with rationale
18
- - **Files changed**: Every file created, modified, or deleted (full relative paths)
19
- - **Current state**: What is done, what is in progress, what remains
20
- - **Unresolved issues**: Bugs, errors, or blockers not yet fixed
21
- - **Environment**: Packages installed, APIs configured, env vars set
22
-
23
- Be concise but maximize recall — losing a detail here means the agent loses it forever.
24
- Use bullet points. Do NOT include raw command output or tool call arguments.`;
25
-
26
- const INCREMENTAL_SUMMARY_PROMPT = `You are updating an existing conversation summary for a coding agent.
27
-
28
- Existing summary:
29
- {existing_summary}
30
-
31
- Incorporate the new messages below:
32
- - Add new files changed (full relative paths), decisions, and progress
33
- - Update current state to reflect the latest status
34
- - Remove information that has been superseded
35
- - Keep unresolved issues until they are explicitly fixed
36
- - Be concise but maximize recall — lost details cannot be recovered
37
-
38
- Output the complete updated summary, not a diff.`;
39
-
40
- export async function summarizeMessages(
41
- messages: Message[],
42
- existingSummary: string | null,
43
- _parentClient: LLMClient,
44
- ): Promise<string> {
45
- // Create a dedicated client for summarization using a cheap model
46
- const sumClient = new LLMClientClass({
47
- modelTier: SUMMARIZATION_MODEL,
48
- env: process.env as Record<string, string>,
49
- });
50
-
51
- // Format messages for summarization
52
- const formatted = formatMessagesForSummary(messages);
53
-
54
- const systemPrompt = existingSummary
55
- ? INCREMENTAL_SUMMARY_PROMPT.replace("{existing_summary}", existingSummary)
56
- : FRESH_SUMMARY_PROMPT;
57
-
58
- const sumMessages: Message[] = [
59
- { role: "system", content: systemPrompt },
60
- { role: "user", content: formatted },
61
- ];
62
-
63
- // Collect response (no streaming needed)
64
- let result = "";
65
- for await (const chunk of sumClient.stream(sumMessages, [])) {
66
- if (chunk.type === "text") result += chunk.text;
67
- }
68
-
69
- return result;
70
- }
71
-
72
- function formatMessagesForSummary(messages: Message[]): string {
73
- // Skip system messages and previous summary messages
74
- const relevant = messages.filter(
75
- m => m.role !== "system" && !(typeof m.content === "string" && m.content.startsWith("[Previous conversation summary]")),
76
- );
77
-
78
- // Take last N messages
79
- const toSummarize = relevant.slice(-SUMMARIZATION_KEEP_MESSAGES);
80
-
81
- let totalChars = 0;
82
- const parts: string[] = [];
83
-
84
- for (const msg of toSummarize) {
85
- let content = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content ?? "");
86
-
87
- // Truncate long messages
88
- if (content.length > MSG_TRUNCATE_THRESHOLD) {
89
- content = content.slice(0, MSG_TRUNCATE_HEAD) +
90
- `\n[... ${content.length - MSG_TRUNCATE_HEAD - MSG_TRUNCATE_TAIL} chars truncated ...]\n` +
91
- content.slice(-MSG_TRUNCATE_TAIL);
92
- }
93
-
94
- const label = msg.role === "tool" ? `tool(${msg.name})` : msg.role;
95
- const line = `[${label}]: ${content}`;
96
-
97
- totalChars += line.length;
98
- if (totalChars > SUMMARIZATION_MAX_CHARS) break;
99
-
100
- parts.push(line);
101
- }
102
-
103
- return parts.join("\n\n");
104
- }
package/src/context/trim.ts DELETED
@@ -1,55 +0,0 @@
1
- import type { Message } from "../types.js";
2
-
3
- export function trimContext(messages: Message[], maxInputTokens: number): Message[] {
4
- const estimateTokens = (msgs: Message[]): number => {
5
- let total = 0;
6
- for (const msg of msgs) {
7
- const content = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content ?? "");
8
- total += Math.ceil(content.length / 4);
9
- if (msg.tool_calls) total += Math.ceil(JSON.stringify(msg.tool_calls).length / 4);
10
- }
11
- return total;
12
- };
13
-
14
- const currentTokens = estimateTokens(messages);
15
- if (currentTokens <= maxInputTokens) return messages;
16
-
17
- // Keep first 2 messages (system prompt)
18
- const kept: Message[] = messages.slice(0, 2);
19
- const budget = maxInputTokens - estimateTokens(kept) - 100; // 100 token margin for trim notice
20
-
21
- // Walk backwards from end, adding messages until budget is exhausted
22
- const recent: Message[] = [];
23
- let recentTokens = 0;
24
-
25
- for (let i = messages.length - 1; i >= 2; i--) {
26
- const msg = messages[i];
27
- const msgTokens = Math.ceil(
28
- (typeof msg.content === "string" ? msg.content.length : JSON.stringify(msg.content ?? "").length) / 4,
29
- ) + (msg.tool_calls ? Math.ceil(JSON.stringify(msg.tool_calls).length / 4) : 0);
30
-
31
- if (recentTokens + msgTokens > budget) break;
32
- recent.unshift(msg);
33
- recentTokens += msgTokens;
34
- }
35
-
36
- // Don't start with a tool message (breaks assistant↔tool pairing)
37
- while (recent.length > 0 && recent[0].role === "tool") {
38
- recent.shift();
39
- }
40
-
41
- const removedCount = messages.length - kept.length - recent.length;
42
-
43
- if (removedCount > 0) {
44
- kept.push({
45
- role: "user",
46
- content: `[Context trimmed: ${removedCount} older messages removed to fit within context window]`,
47
- });
48
- kept.push({
49
- role: "assistant",
50
- content: "Understood, I'll continue with the available context.",
51
- });
52
- }
53
-
54
- return [...kept, ...recent];
55
- }
package/src/emitter.ts DELETED
@@ -1,14 +0,0 @@
1
- import type { AgentEvent } from "./types.js";
2
-
3
- export class Emitter {
4
- private seq = 0;
5
-
6
- emit(event: AgentEvent): void {
7
- this.seq++;
8
- process.stdout.write(JSON.stringify({ ...event, _seq: this.seq }) + "\n");
9
- }
10
-
11
- currentSeq(): number {
12
- return this.seq;
13
- }
14
- }
package/src/hitl/manager.ts DELETED
@@ -1,77 +0,0 @@
1
- import fs from "node:fs/promises";
2
- import path from "node:path";
3
- import crypto from "node:crypto";
4
-
5
- /** Convert arbitrary tool call ID into a filesystem-safe filename */
6
- function safeFileName(id: string): string {
7
- // If ID is short and clean, use it directly
8
- if (/^[a-zA-Z0-9_-]{1,100}$/.test(id)) return id;
9
- // Otherwise hash it — Gemini IDs contain | and base64 chars that break shells/filesystems
10
- return crypto.createHash("sha256").update(id).digest("hex").slice(0, 32);
11
- }
12
-
13
- export class HITLManager {
14
- private dir: string;
15
- // Map original ID → safe filename (so proxy can look up by either)
16
- private idMap = new Map<string, string>();
17
-
18
- constructor(hitlDir?: string) {
19
- this.dir = hitlDir ?? "/tmp/hitl";
20
- }
21
-
22
- async init(): Promise<void> {
23
- await fs.mkdir(this.dir, { recursive: true });
24
- }
25
-
26
- /** Get the safe filename for a tool call ID */
27
- safeId(id: string): string {
28
- const cached = this.idMap.get(id);
29
- if (cached) return cached;
30
- const safe = safeFileName(id);
31
- this.idMap.set(id, safe);
32
- return safe;
33
- }
34
-
35
- async waitForResponse(id: string): Promise<{ decision: string; data?: unknown }> {
36
- const safe = this.safeId(id);
37
- const filePath = path.join(this.dir, `${safe}.json`);
38
- const maxWait = 30 * 60 * 1000; // 30 minutes
39
- const start = Date.now();
40
-
41
- while (Date.now() - start < maxWait) {
42
- if (await this.isCancelled()) {
43
- return { decision: "reject" };
44
- }
45
-
46
- try {
47
- const content = await fs.readFile(filePath, "utf-8");
48
- const response = JSON.parse(content);
49
- await fs.unlink(filePath).catch(() => {});
50
- return response;
51
- } catch {
52
- // File doesn't exist yet
53
- }
54
-
55
- await new Promise(r => setTimeout(r, 500));
56
- }
57
-
58
- return { decision: "reject" }; // Timeout
59
- }
60
-
61
- async writeResponse(id: string, response: { decision: string; data?: unknown }): Promise<void> {
62
- const safe = this.safeId(id);
63
- const tmpPath = path.join(this.dir, `${safe}.tmp`);
64
- const finalPath = path.join(this.dir, `${safe}.json`);
65
- await fs.writeFile(tmpPath, JSON.stringify(response));
66
- await fs.rename(tmpPath, finalPath);
67
- }
68
-
69
- async isCancelled(): Promise<boolean> {
70
- try {
71
- await fs.access(path.join(this.dir, "_cancel"));
72
- return true;
73
- } catch {
74
- return false;
75
- }
76
- }
77
- }
package/src/hitl/transport.ts DELETED
@@ -1,13 +0,0 @@
1
- import fs from "node:fs/promises";
2
- import path from "node:path";
3
-
4
- export async function writeHITLResponse(
5
- dir: string,
6
- id: string,
7
- response: unknown,
8
- ): Promise<void> {
9
- const tmpPath = path.join(dir, `${id}.tmp`);
10
- const finalPath = path.join(dir, `${id}.json`);
11
- await fs.writeFile(tmpPath, JSON.stringify(response));
12
- await fs.rename(tmpPath, finalPath);
13
- }
package/src/index.ts DELETED
@@ -1,116 +0,0 @@
1
- #!/usr/bin/env node
2
- import { parseArgs } from "node:util";
3
- import fs from "node:fs/promises";
4
- import path from "node:path";
5
- import { runAgent } from "./agent.js";
6
-
7
- const { values } = parseArgs({
8
- options: {
9
- model: { type: "string", default: "pro" },
10
- mode: { type: "string", default: "normal" },
11
- "session-id": { type: "string" },
12
- prompt: { type: "string" },
13
- image: { type: "string", multiple: true },
14
- "blocked-tools": { type: "string", default: "" },
15
- "extra-prompt-file": { type: "string" },
16
- "hitl-dir": { type: "string", default: "/tmp/hitl" },
17
- "skills-dir": { type: "string", default: "./skills" },
18
- "previous-summary": { type: "string" },
19
- "max-iterations": { type: "string", default: "200" },
20
- "work-dir": { type: "string" },
21
- "restore-checkpoint": { type: "string" },
22
- },
23
- });
24
-
25
- // Session ID
26
- const sessionId = values["session-id"] ?? crypto.randomUUID();
27
-
28
- // Read user message from --prompt only (stdin is reserved for live commands)
29
- const userMessage = values.prompt;
30
-
31
- if (!userMessage) {
32
- process.stderr.write("No user message provided. Use --prompt flag.\n");
33
- process.exit(1);
34
- }
35
-
36
- // Load images if specified
37
- const MIME_TYPES: Record<string, string> = {
38
- ".png": "image/png",
39
- ".jpg": "image/jpeg",
40
- ".jpeg": "image/jpeg",
41
- ".gif": "image/gif",
42
- ".webp": "image/webp",
43
- ".svg": "image/svg+xml",
44
- ".bmp": "image/bmp",
45
- };
46
-
47
- const images: Array<{ base64: string; contentType: string }> = [];
48
- if (values.image) {
49
- for (const imgPath of values.image) {
50
- try {
51
- const resolved = path.resolve(imgPath);
52
- const buffer = await fs.readFile(resolved);
53
- const ext = path.extname(resolved).toLowerCase();
54
- const contentType = MIME_TYPES[ext] || "image/png";
55
- images.push({ base64: buffer.toString("base64"), contentType });
56
- } catch {
57
- process.stderr.write(`Failed to read image: ${imgPath}\n`);
58
- process.exit(1);
59
- }
60
- }
61
- }
62
-
63
- // Load checkpoint if specified
64
- let restoreCheckpoint: import("./types.js").CheckpointData | undefined;
65
- if (values["restore-checkpoint"]) {
66
- try {
67
- const cpContent = await fs.readFile(values["restore-checkpoint"], "utf-8");
68
- restoreCheckpoint = JSON.parse(cpContent);
69
- } catch {
70
- process.stderr.write(`Failed to read checkpoint file: ${values["restore-checkpoint"]}\n`);
71
- process.exit(1);
72
- }
73
- }
74
-
75
- // Load extra prompt file if specified
76
- let systemPromptExtra: string | undefined;
77
- if (values["extra-prompt-file"]) {
78
- try {
79
- systemPromptExtra = await fs.readFile(values["extra-prompt-file"], "utf-8");
80
- } catch (e) {
81
- process.stderr.write(`Failed to read extra prompt file: ${values["extra-prompt-file"]}\n`);
82
- process.exit(1);
83
- }
84
- }
85
-
86
- // Validate mode
87
- const mode = values.mode as "normal" | "plan" | "implement";
88
- if (!["normal", "plan", "implement"].includes(mode)) {
89
- process.stderr.write(`Invalid mode: ${values.mode}. Use normal, plan, or implement.\n`);
90
- process.exit(1);
91
- }
92
-
93
- try {
94
- await runAgent({
95
- model: values.model!,
96
- mode,
97
- userMessage,
98
- sessionId,
99
- workDir: values["work-dir"] ?? process.cwd(),
100
- env: process.env as Record<string, string>,
101
- systemPromptExtra,
102
- blockedTools: values["blocked-tools"]?.split(",").filter(Boolean),
103
- maxIterations: parseInt(values["max-iterations"]!),
104
- hitlDir: values["hitl-dir"],
105
- skillsDir: values["skills-dir"],
106
- previousSummary: values["previous-summary"],
107
- restoreCheckpoint,
108
- images: images.length > 0 ? images : undefined,
109
- });
110
- } catch (e) {
111
- const msg = e instanceof Error ? e.message : String(e);
112
- process.stderr.write(`Agent error: ${msg}\n`);
113
- process.exit(1);
114
- }
115
-
116
- process.exit(0);