@mindstudio-ai/remy 0.1.34 → 0.1.36

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/dist/headless.js +998 -586
  2. package/dist/index.js +1086 -584
  3. package/dist/prompt/compiled/media-cdn.md +1 -1
  4. package/dist/prompt/sources/llms.txt +1618 -0
  5. package/dist/prompt/static/instructions.md +1 -1
  6. package/dist/prompt/static/team.md +1 -1
  7. package/dist/subagents/.notes-background-agents.md +60 -48
  8. package/dist/subagents/browserAutomation/prompt.md +14 -11
  9. package/dist/subagents/designExpert/data/sources/dev/index.html +901 -0
  10. package/dist/subagents/designExpert/data/sources/dev/serve.mjs +244 -0
  11. package/dist/subagents/designExpert/data/sources/dev/specimens-fonts.html +126 -0
  12. package/dist/subagents/designExpert/data/sources/dev/specimens-pairings.html +114 -0
  13. package/dist/subagents/designExpert/data/{fonts.json → sources/fonts.json} +0 -97
  14. package/dist/subagents/designExpert/data/sources/inspiration.json +392 -0
  15. package/dist/subagents/designExpert/prompt.md +36 -12
  16. package/dist/subagents/designExpert/prompts/animation.md +14 -6
  17. package/dist/subagents/designExpert/prompts/color.md +25 -5
  18. package/dist/subagents/designExpert/prompts/{icons.md → components.md} +17 -5
  19. package/dist/subagents/designExpert/prompts/frontend-design-notes.md +17 -122
  20. package/dist/subagents/designExpert/prompts/identity.md +15 -61
  21. package/dist/subagents/designExpert/prompts/images.md +35 -10
  22. package/dist/subagents/designExpert/prompts/layout.md +14 -9
  23. package/dist/subagents/designExpert/prompts/typography.md +39 -0
  24. package/package.json +2 -2
  25. package/dist/actions/buildFromInitialSpec.md +0 -15
  26. package/dist/actions/publish.md +0 -12
  27. package/dist/actions/sync.md +0 -19
  28. package/dist/compiled/README.md +0 -100
  29. package/dist/compiled/auth.md +0 -77
  30. package/dist/compiled/design.md +0 -251
  31. package/dist/compiled/dev-and-deploy.md +0 -69
  32. package/dist/compiled/interfaces.md +0 -238
  33. package/dist/compiled/manifest.md +0 -107
  34. package/dist/compiled/media-cdn.md +0 -51
  35. package/dist/compiled/methods.md +0 -225
  36. package/dist/compiled/msfm.md +0 -222
  37. package/dist/compiled/platform.md +0 -105
  38. package/dist/compiled/scenarios.md +0 -103
  39. package/dist/compiled/sdk-actions.md +0 -146
  40. package/dist/compiled/tables.md +0 -263
  41. package/dist/static/authoring.md +0 -101
  42. package/dist/static/coding.md +0 -29
  43. package/dist/static/identity.md +0 -1
  44. package/dist/static/instructions.md +0 -31
  45. package/dist/static/intake.md +0 -44
  46. package/dist/static/lsp.md +0 -4
  47. package/dist/static/projectContext.ts +0 -160
  48. package/dist/static/team.md +0 -39
  49. package/dist/subagents/designExpert/data/inspiration.json +0 -392
  50. package/dist/subagents/designExpert/prompts/instructions.md +0 -18
  51. /package/dist/subagents/designExpert/data/{compile-font-descriptions.sh → sources/compile-font-descriptions.sh} +0 -0
  52. /package/dist/subagents/designExpert/data/{compile-inspiration.sh → sources/compile-inspiration.sh} +0 -0
  53. /package/dist/subagents/designExpert/data/{inspiration.raw.json → sources/inspiration.raw.json} +0 -0
  54. /package/dist/subagents/designExpert/{prompts/tool-prompts → data/sources/prompts}/design-analysis.md +0 -0
  55. /package/dist/subagents/designExpert/{prompts/tool-prompts → data/sources/prompts}/font-analysis.md +0 -0
package/dist/headless.js CHANGED
@@ -1,15 +1,56 @@
1
+ var __defProp = Object.defineProperty;
2
+ var __export = (target, all) => {
3
+ for (var name in all)
4
+ __defProp(target, name, { get: all[name], enumerable: true });
5
+ };
6
+
1
7
  // src/headless.ts
2
8
  import { createInterface } from "readline";
3
- import fs21 from "fs";
4
- import path14 from "path";
5
9
 
6
- // src/config.ts
7
- import fs2 from "fs";
10
+ // src/assets.ts
11
+ import fs from "fs";
8
12
  import path from "path";
13
+ var ROOT = findRoot(
14
+ import.meta.dirname ?? path.dirname(new URL(import.meta.url).pathname)
15
+ );
16
+ function findRoot(start) {
17
+ let dir = start;
18
+ while (dir !== path.dirname(dir)) {
19
+ if (fs.existsSync(path.join(dir, "package.json"))) {
20
+ return dir;
21
+ }
22
+ dir = path.dirname(dir);
23
+ }
24
+ return start;
25
+ }
26
+ var ASSETS_BASE = fs.existsSync(path.join(ROOT, "dist", "prompt")) ? path.join(ROOT, "dist") : path.join(ROOT, "src");
27
+ function assetPath(...segments) {
28
+ return path.join(ASSETS_BASE, ...segments);
29
+ }
30
+ function readAsset(...segments) {
31
+ const full = assetPath(...segments);
32
+ try {
33
+ return fs.readFileSync(full, "utf-8").trim();
34
+ } catch {
35
+ throw new Error(`Required asset missing: ${full}`);
36
+ }
37
+ }
38
+ function readJsonAsset(fallback, ...segments) {
39
+ const full = assetPath(...segments);
40
+ try {
41
+ return JSON.parse(fs.readFileSync(full, "utf-8"));
42
+ } catch {
43
+ return fallback;
44
+ }
45
+ }
46
+
47
+ // src/config.ts
48
+ import fs3 from "fs";
49
+ import path2 from "path";
9
50
  import os from "os";
10
51
 
11
52
  // src/logger.ts
12
- import fs from "fs";
53
+ import fs2 from "fs";
13
54
  var LEVELS = {
14
55
  error: 0,
15
56
  warn: 1,
@@ -62,7 +103,7 @@ var log = {
62
103
  };
63
104
 
64
105
  // src/config.ts
65
- var CONFIG_PATH = path.join(
106
+ var CONFIG_PATH = path2.join(
66
107
  os.homedir(),
67
108
  ".mindstudio-local-tunnel",
68
109
  "config.json"
@@ -70,7 +111,7 @@ var CONFIG_PATH = path.join(
70
111
  var DEFAULT_BASE_URL = "https://api.mindstudio.ai";
71
112
  function loadConfigFile() {
72
113
  try {
73
- const raw = fs2.readFileSync(CONFIG_PATH, "utf-8");
114
+ const raw = fs3.readFileSync(CONFIG_PATH, "utf-8");
74
115
  log.debug("Loaded config file", { path: CONFIG_PATH });
75
116
  return JSON.parse(raw);
76
117
  } catch (err) {
@@ -102,10 +143,6 @@ function resolveConfig(flags) {
102
143
  return { apiKey, baseUrl: baseUrl2 };
103
144
  }
104
145
 
105
- // src/prompt/index.ts
106
- import fs4 from "fs";
107
- import path3 from "path";
108
-
109
146
  // src/tools/_helpers/sidecar.ts
110
147
  var baseUrl = null;
111
148
  function setSidecarBaseUrl(url) {
@@ -150,8 +187,8 @@ async function lspRequest(endpoint, body) {
150
187
  }
151
188
 
152
189
  // src/prompt/static/projectContext.ts
153
- import fs3 from "fs";
154
- import path2 from "path";
190
+ import fs4 from "fs";
191
+ import path3 from "path";
155
192
  var AGENT_INSTRUCTION_FILES = [
156
193
  "CLAUDE.md",
157
194
  "claude.md",
@@ -171,7 +208,7 @@ var AGENT_INSTRUCTION_FILES = [
171
208
  function loadProjectInstructions() {
172
209
  for (const file of AGENT_INSTRUCTION_FILES) {
173
210
  try {
174
- const content = fs3.readFileSync(file, "utf-8").trim();
211
+ const content = fs4.readFileSync(file, "utf-8").trim();
175
212
  if (content) {
176
213
  return `
177
214
  ## Project Instructions (${file})
@@ -184,7 +221,7 @@ ${content}`;
184
221
  }
185
222
  function loadProjectManifest() {
186
223
  try {
187
- const manifest = fs3.readFileSync("mindstudio.json", "utf-8");
224
+ const manifest = fs4.readFileSync("mindstudio.json", "utf-8");
188
225
  return `
189
226
  ## Project Manifest (mindstudio.json)
190
227
  \`\`\`json
@@ -225,9 +262,9 @@ ${entries.join("\n")}`;
225
262
  function walkMdFiles(dir) {
226
263
  const results = [];
227
264
  try {
228
- const entries = fs3.readdirSync(dir, { withFileTypes: true });
265
+ const entries = fs4.readdirSync(dir, { withFileTypes: true });
229
266
  for (const entry of entries) {
230
- const full = path2.join(dir, entry.name);
267
+ const full = path3.join(dir, entry.name);
231
268
  if (entry.isDirectory()) {
232
269
  results.push(...walkMdFiles(full));
233
270
  } else if (entry.name.endsWith(".md")) {
@@ -240,7 +277,7 @@ function walkMdFiles(dir) {
240
277
  }
241
278
  function parseFrontmatter(filePath) {
242
279
  try {
243
- const content = fs3.readFileSync(filePath, "utf-8");
280
+ const content = fs4.readFileSync(filePath, "utf-8");
244
281
  const match = content.match(/^---\n([\s\S]*?)\n---/);
245
282
  if (!match) {
246
283
  return { name: "", description: "", type: "" };
@@ -256,7 +293,7 @@ function parseFrontmatter(filePath) {
256
293
  }
257
294
  function loadProjectFileListing() {
258
295
  try {
259
- const entries = fs3.readdirSync(".", { withFileTypes: true });
296
+ const entries = fs4.readdirSync(".", { withFileTypes: true });
260
297
  const listing = entries.filter((e) => e.name !== ".git" && e.name !== "node_modules").sort((a, b) => {
261
298
  if (a.isDirectory() && !b.isDirectory()) {
262
299
  return -1;
@@ -277,19 +314,10 @@ ${listing}
277
314
  }
278
315
 
279
316
  // src/prompt/index.ts
280
- var PROMPT_DIR = import.meta.dirname ?? path3.dirname(new URL(import.meta.url).pathname);
281
- function requireFile(filePath) {
282
- const full = path3.join(PROMPT_DIR, filePath);
283
- try {
284
- return fs4.readFileSync(full, "utf-8").trim();
285
- } catch {
286
- throw new Error(`Required prompt file missing: ${full}`);
287
- }
288
- }
289
317
  function resolveIncludes(template) {
290
318
  const result = template.replace(
291
319
  /\{\{([^}]+)\}\}/g,
292
- (_, filePath) => requireFile(filePath.trim())
320
+ (_, filePath) => readAsset("prompt", filePath.trim())
293
321
  );
294
322
  return result.replace(/\n{3,}/g, "\n\n").trim();
295
323
  }
@@ -1284,7 +1312,12 @@ function runCli(cmd, options) {
1284
1312
  const entry = JSON.parse(trimmed);
1285
1313
  if (entry.type === "log" && entry.value) {
1286
1314
  const prefix = entry.tag ? `[${entry.tag}]` : "[log]";
1287
- logs.push(`${prefix} ${entry.value}`);
1315
+ const formatted = `${prefix} ${entry.value}`;
1316
+ if (options?.onLog) {
1317
+ options.onLog(formatted);
1318
+ } else {
1319
+ logs.push(formatted);
1320
+ }
1288
1321
  }
1289
1322
  } catch {
1290
1323
  }
@@ -1296,7 +1329,7 @@ function runCli(cmd, options) {
1296
1329
  }, timeout);
1297
1330
  child.on("close", (code) => {
1298
1331
  clearTimeout(timer);
1299
- const logBlock = logs.length > 0 ? logs.join("\n") + "\n\n" : "";
1332
+ const logBlock = !options?.onLog && logs.length > 0 ? logs.join("\n") + "\n\n" : "";
1300
1333
  const out = stdout.trim();
1301
1334
  if (out) {
1302
1335
  resolve(logBlock + out);
@@ -1328,11 +1361,12 @@ var askMindStudioSdkTool = {
1328
1361
  required: ["query"]
1329
1362
  }
1330
1363
  },
1331
- async execute(input) {
1364
+ async execute(input, context) {
1332
1365
  const query = input.query;
1333
1366
  return runCli(`mindstudio ask ${JSON.stringify(query)}`, {
1334
1367
  timeout: 2e5,
1335
- maxBuffer: 512 * 1024
1368
+ maxBuffer: 512 * 1024,
1369
+ onLog: context?.onLog
1336
1370
  });
1337
1371
  }
1338
1372
  };
@@ -1357,7 +1391,7 @@ var fetchUrlTool = {
1357
1391
  required: ["url"]
1358
1392
  }
1359
1393
  },
1360
- async execute(input) {
1394
+ async execute(input, context) {
1361
1395
  const url = input.url;
1362
1396
  const screenshot = input.screenshot;
1363
1397
  const pageOptions = { onlyMainContent: true };
@@ -1365,7 +1399,8 @@ var fetchUrlTool = {
1365
1399
  pageOptions.screenshot = true;
1366
1400
  }
1367
1401
  return runCli(
1368
- `mindstudio scrape-url --url ${JSON.stringify(url)} --page-options ${JSON.stringify(JSON.stringify(pageOptions))} --no-meta`
1402
+ `mindstudio scrape-url --url ${JSON.stringify(url)} --page-options ${JSON.stringify(JSON.stringify(pageOptions))} --no-meta`,
1403
+ { onLog: context?.onLog }
1369
1404
  );
1370
1405
  }
1371
1406
  };
@@ -1386,11 +1421,11 @@ var searchGoogleTool = {
1386
1421
  required: ["query"]
1387
1422
  }
1388
1423
  },
1389
- async execute(input) {
1424
+ async execute(input, context) {
1390
1425
  const query = input.query;
1391
1426
  return runCli(
1392
1427
  `mindstudio search-google --query ${JSON.stringify(query)} --export-type json --output-key results --no-meta`,
1393
- { maxBuffer: 512 * 1024 }
1428
+ { maxBuffer: 512 * 1024, onLog: context?.onLog }
1394
1429
  );
1395
1430
  }
1396
1431
  };
@@ -1420,9 +1455,9 @@ var setProjectNameTool = {
1420
1455
  import fs9 from "fs/promises";
1421
1456
  var DEFAULT_MAX_LINES2 = 500;
1422
1457
  function isBinary(buffer) {
1423
- const sample2 = buffer.subarray(0, 8192);
1424
- for (let i = 0; i < sample2.length; i++) {
1425
- if (sample2[i] === 0) {
1458
+ const sample3 = buffer.subarray(0, 8192);
1459
+ for (let i = 0; i < sample3.length; i++) {
1460
+ if (sample3[i] === 0) {
1426
1461
  return true;
1427
1462
  }
1428
1463
  }
@@ -2074,21 +2109,19 @@ var runMethodTool = {
2074
2109
  };
2075
2110
 
2076
2111
  // src/tools/_helpers/screenshot.ts
2077
- var SCREENSHOT_ANALYSIS_PROMPT = "Describe everything visible on screen from top to bottom \u2014 every element, its position, its size relative to the viewport, its colors, its content. Be thorough and spatial. After the inventory, note anything that looks visually broken (overlapping elements, clipped text, misaligned components).";
2112
+ var SCREENSHOT_ANALYSIS_PROMPT = "Describe everything visible on screen from top to bottom \u2014 every element, its position, its size relative to the viewport, its colors, its content. Be comprehensive, thorough, and spatial. After the inventory, note anything that looks visually broken (overlapping elements, clipped text, misaligned components). Respond only with your analysis as Markdown and absolutely no other text. Do not use emojis - use unicode if you need symbols.";
2078
2113
  async function captureAndAnalyzeScreenshot(promptOrOptions) {
2079
2114
  let prompt;
2080
- let fullPage = false;
2115
+ let onLog;
2081
2116
  if (typeof promptOrOptions === "object" && promptOrOptions !== null) {
2082
2117
  prompt = promptOrOptions.prompt;
2083
- fullPage = promptOrOptions.fullPage ?? false;
2118
+ onLog = promptOrOptions.onLog;
2084
2119
  } else {
2085
2120
  prompt = promptOrOptions;
2086
2121
  }
2087
- const ssResult = await sidecarRequest(
2088
- "/screenshot",
2089
- { fullPage },
2090
- { timeout: 12e4 }
2091
- );
2122
+ const ssResult = await sidecarRequest("/screenshot-full-page", void 0, {
2123
+ timeout: 12e4
2124
+ });
2092
2125
  log.debug("Screenshot response", { ssResult });
2093
2126
  const url = ssResult?.url || ssResult?.screenshotUrl;
2094
2127
  if (!url) {
@@ -2102,7 +2135,7 @@ async function captureAndAnalyzeScreenshot(promptOrOptions) {
2102
2135
  const analysisPrompt = prompt || SCREENSHOT_ANALYSIS_PROMPT;
2103
2136
  const analysis = await runCli(
2104
2137
  `mindstudio analyze-image --prompt ${JSON.stringify(analysisPrompt)} --image-url ${JSON.stringify(url)} --output-key analysis --no-meta`,
2105
- { timeout: 2e5 }
2138
+ { timeout: 2e5, onLog }
2106
2139
  );
2107
2140
  return JSON.stringify({ url, analysis });
2108
2141
  }
@@ -2111,26 +2144,22 @@ async function captureAndAnalyzeScreenshot(promptOrOptions) {
2111
2144
  var screenshotTool = {
2112
2145
  definition: {
2113
2146
  name: "screenshot",
2114
- description: "Capture a screenshot of the app preview and get a description of what's on screen. Optionally provide a specific question about what you're looking for. By default captures the viewport (what the user sees). Set fullPage to capture the entire scrollable page.",
2147
+ description: "Capture a full-height screenshot of the app preview and get a description of what's on screen. Optionally provide a specific question about what you're looking for.",
2115
2148
  inputSchema: {
2116
2149
  type: "object",
2117
2150
  properties: {
2118
2151
  prompt: {
2119
2152
  type: "string",
2120
2153
  description: "Optional question about the screenshot. If omitted, returns a general description of what's visible."
2121
- },
2122
- fullPage: {
2123
- type: "boolean",
2124
- description: "Capture the full scrollable page instead of just the viewport. Use when you need to see below-the-fold content."
2125
2154
  }
2126
2155
  }
2127
2156
  }
2128
2157
  },
2129
- async execute(input) {
2158
+ async execute(input, context) {
2130
2159
  try {
2131
2160
  return await captureAndAnalyzeScreenshot({
2132
2161
  prompt: input.prompt,
2133
- fullPage: input.fullPage
2162
+ onLog: context?.onLog
2134
2163
  });
2135
2164
  } catch (err) {
2136
2165
  return `Error taking screenshot: ${err.message}`;
@@ -2138,6 +2167,85 @@ var screenshotTool = {
2138
2167
  }
2139
2168
  };
2140
2169
 
2170
+ // src/statusWatcher.ts
2171
+ function startStatusWatcher(config) {
2172
+ const { apiConfig, getContext, onStatus, interval = 3e3, signal } = config;
2173
+ let lastLabel = "";
2174
+ let inflight = false;
2175
+ let stopped = false;
2176
+ const url = `${apiConfig.baseUrl}/_internal/v2/agent/remy/generate-status`;
2177
+ async function tick() {
2178
+ if (stopped || signal?.aborted || inflight) {
2179
+ return;
2180
+ }
2181
+ inflight = true;
2182
+ try {
2183
+ const ctx = getContext();
2184
+ if (!ctx.assistantText && !ctx.lastToolName) {
2185
+ log.debug("Status watcher: no context, skipping");
2186
+ return;
2187
+ }
2188
+ log.debug("Status watcher: requesting label", {
2189
+ textLength: ctx.assistantText.length,
2190
+ lastToolName: ctx.lastToolName
2191
+ });
2192
+ const res = await fetch(url, {
2193
+ method: "POST",
2194
+ headers: {
2195
+ "Content-Type": "application/json",
2196
+ Authorization: `Bearer ${apiConfig.apiKey}`
2197
+ },
2198
+ body: JSON.stringify({
2199
+ assistantText: ctx.assistantText.slice(-500),
2200
+ lastToolName: ctx.lastToolName,
2201
+ lastToolResult: ctx.lastToolResult?.slice(-200),
2202
+ onboardingState: ctx.onboardingState,
2203
+ userMessage: ctx.userMessage?.slice(-200)
2204
+ }),
2205
+ signal
2206
+ });
2207
+ if (!res.ok) {
2208
+ log.debug("Status watcher: endpoint returned non-ok", {
2209
+ status: res.status
2210
+ });
2211
+ return;
2212
+ }
2213
+ const data = await res.json();
2214
+ if (!data.label) {
2215
+ log.debug("Status watcher: no label in response");
2216
+ return;
2217
+ }
2218
+ if (data.label === lastLabel) {
2219
+ log.debug("Status watcher: duplicate label, skipping", {
2220
+ label: data.label
2221
+ });
2222
+ return;
2223
+ }
2224
+ lastLabel = data.label;
2225
+ if (stopped) {
2226
+ return;
2227
+ }
2228
+ log.debug("Status watcher: emitting", { label: data.label });
2229
+ onStatus(data.label);
2230
+ } catch (err) {
2231
+ log.debug("Status watcher: error", { error: err?.message ?? "unknown" });
2232
+ } finally {
2233
+ inflight = false;
2234
+ }
2235
+ }
2236
+ const timer = setInterval(tick, interval);
2237
+ tick().catch(() => {
2238
+ });
2239
+ log.debug("Status watcher started", { interval });
2240
+ return {
2241
+ stop() {
2242
+ stopped = true;
2243
+ clearInterval(timer);
2244
+ log.debug("Status watcher stopped");
2245
+ }
2246
+ };
2247
+ }
2248
+
2141
2249
  // src/subagents/common/cleanMessages.ts
2142
2250
  function cleanMessagesForApi(messages) {
2143
2251
  return messages.map((msg) => {
@@ -2172,7 +2280,7 @@ async function runSubAgent(config) {
2172
2280
  const {
2173
2281
  system,
2174
2282
  task,
2175
- tools,
2283
+ tools: tools2,
2176
2284
  externalTools,
2177
2285
  executeTool: executeTool2,
2178
2286
  apiConfig,
@@ -2181,19 +2289,47 @@ async function runSubAgent(config) {
2181
2289
  signal,
2182
2290
  parentToolId,
2183
2291
  onEvent,
2184
- resolveExternalTool
2292
+ resolveExternalTool,
2293
+ toolRegistry
2185
2294
  } = config;
2186
2295
  const emit2 = (e) => {
2187
2296
  onEvent({ ...e, parentToolId });
2188
2297
  };
2189
2298
  const messages = [{ role: "user", content: task }];
2299
+ function getPartialText(blocks) {
2300
+ return blocks.filter((b) => b.type === "text").map((b) => b.text).join("");
2301
+ }
2302
+ function abortResult(blocks) {
2303
+ if (signal?.reason === "graceful") {
2304
+ const partial = getPartialText(blocks);
2305
+ return {
2306
+ text: partial ? `[INTERRUPTED]
2307
+
2308
+ ${partial}` : "[INTERRUPTED] Sub-agent was interrupted before producing output.",
2309
+ messages
2310
+ };
2311
+ }
2312
+ return { text: "Error: cancelled", messages };
2313
+ }
2314
+ let lastToolResult = "";
2190
2315
  while (true) {
2191
2316
  if (signal?.aborted) {
2192
- return { text: "Error: cancelled", messages };
2317
+ return abortResult([]);
2193
2318
  }
2194
2319
  const contentBlocks = [];
2195
2320
  let thinkingStartedAt = 0;
2196
2321
  let stopReason = "end_turn";
2322
+ let currentToolNames = "";
2323
+ const statusWatcher = startStatusWatcher({
2324
+ apiConfig,
2325
+ getContext: () => ({
2326
+ assistantText: getPartialText(contentBlocks),
2327
+ lastToolName: currentToolNames || void 0,
2328
+ lastToolResult: lastToolResult || void 0
2329
+ }),
2330
+ onStatus: (label) => emit2({ type: "status", message: label }),
2331
+ signal
2332
+ });
2197
2333
  const fullSystem = `${system}
2198
2334
 
2199
2335
  Current date/time: ${(/* @__PURE__ */ new Date()).toISOString().replace("T", " ").replace(/\.\d+Z$/, " UTC")}`;
@@ -2204,7 +2340,7 @@ Current date/time: ${(/* @__PURE__ */ new Date()).toISOString().replace("T", " "
2204
2340
  subAgentId,
2205
2341
  system: fullSystem,
2206
2342
  messages: cleanMessagesForApi(messages),
2207
- tools,
2343
+ tools: tools2,
2208
2344
  signal
2209
2345
  })) {
2210
2346
  if (signal?.aborted) {
@@ -2269,7 +2405,8 @@ Current date/time: ${(/* @__PURE__ */ new Date()).toISOString().replace("T", " "
2269
2405
  }
2270
2406
  }
2271
2407
  if (signal?.aborted) {
2272
- return { text: "Error: cancelled", messages };
2408
+ statusWatcher.stop();
2409
+ return abortResult(contentBlocks);
2273
2410
  }
2274
2411
  messages.push({
2275
2412
  role: "assistant",
@@ -2279,6 +2416,7 @@ Current date/time: ${(/* @__PURE__ */ new Date()).toISOString().replace("T", " "
2279
2416
  (b) => b.type === "tool"
2280
2417
  );
2281
2418
  if (stopReason !== "tool_use" || toolCalls.length === 0) {
2419
+ statusWatcher.stop();
2282
2420
  const text = contentBlocks.filter((b) => b.type === "text").map((b) => b.text).join("");
2283
2421
  return { text, messages };
2284
2422
  }
@@ -2287,40 +2425,82 @@ Current date/time: ${(/* @__PURE__ */ new Date()).toISOString().replace("T", " "
2287
2425
  count: toolCalls.length,
2288
2426
  tools: toolCalls.map((tc) => tc.name)
2289
2427
  });
2428
+ currentToolNames = toolCalls.map((tc) => tc.name).join(", ");
2290
2429
  const results = await Promise.all(
2291
2430
  toolCalls.map(async (tc) => {
2292
2431
  if (signal?.aborted) {
2293
2432
  return { id: tc.id, result: "Error: cancelled", isError: true };
2294
2433
  }
2295
- try {
2296
- let result;
2297
- if (externalTools.has(tc.name) && resolveExternalTool) {
2298
- result = await resolveExternalTool(tc.id, tc.name, tc.input);
2299
- } else {
2300
- result = await executeTool2(tc.name, tc.input, tc.id);
2434
+ let settle;
2435
+ const resultPromise = new Promise((res) => {
2436
+ settle = (result, isError) => res({ id: tc.id, result, isError });
2437
+ });
2438
+ let toolAbort = new AbortController();
2439
+ const cascadeAbort = () => toolAbort.abort();
2440
+ signal?.addEventListener("abort", cascadeAbort, { once: true });
2441
+ let settled = false;
2442
+ const safeSettle = (result, isError) => {
2443
+ if (settled) {
2444
+ return;
2301
2445
  }
2302
- const isError = result.startsWith("Error");
2303
- emit2({
2304
- type: "tool_done",
2305
- id: tc.id,
2306
- name: tc.name,
2307
- result,
2308
- isError
2309
- });
2310
- return { id: tc.id, result, isError };
2311
- } catch (err) {
2312
- const errorMsg = `Error: ${err.message}`;
2313
- emit2({
2314
- type: "tool_done",
2315
- id: tc.id,
2316
- name: tc.name,
2317
- result: errorMsg,
2318
- isError: true
2319
- });
2320
- return { id: tc.id, result: errorMsg, isError: true };
2321
- }
2446
+ settled = true;
2447
+ signal?.removeEventListener("abort", cascadeAbort);
2448
+ settle(result, isError);
2449
+ };
2450
+ const run = async (input) => {
2451
+ try {
2452
+ let result;
2453
+ if (externalTools.has(tc.name) && resolveExternalTool) {
2454
+ result = await resolveExternalTool(tc.id, tc.name, input);
2455
+ } else {
2456
+ const onLog = (line) => emit2({
2457
+ type: "tool_input_delta",
2458
+ id: tc.id,
2459
+ name: tc.name,
2460
+ result: line
2461
+ });
2462
+ result = await executeTool2(tc.name, input, tc.id, onLog);
2463
+ }
2464
+ safeSettle(result, result.startsWith("Error"));
2465
+ } catch (err) {
2466
+ safeSettle(`Error: ${err.message}`, true);
2467
+ }
2468
+ };
2469
+ const entry = {
2470
+ id: tc.id,
2471
+ name: tc.name,
2472
+ input: tc.input,
2473
+ parentToolId,
2474
+ abortController: toolAbort,
2475
+ startedAt: Date.now(),
2476
+ settle: safeSettle,
2477
+ rerun: (newInput) => {
2478
+ settled = false;
2479
+ toolAbort = new AbortController();
2480
+ signal?.addEventListener("abort", () => toolAbort.abort(), {
2481
+ once: true
2482
+ });
2483
+ entry.abortController = toolAbort;
2484
+ entry.input = newInput;
2485
+ run(newInput);
2486
+ }
2487
+ };
2488
+ toolRegistry?.register(entry);
2489
+ run(tc.input);
2490
+ const r = await resultPromise;
2491
+ toolRegistry?.unregister(tc.id);
2492
+ emit2({
2493
+ type: "tool_done",
2494
+ id: tc.id,
2495
+ name: tc.name,
2496
+ result: r.result,
2497
+ isError: r.isError
2498
+ });
2499
+ return r;
2322
2500
  })
2323
2501
  );
2502
+ statusWatcher.stop();
2503
+ lastToolResult = results.at(-1)?.result ?? "";
2324
2504
  for (const r of results) {
2325
2505
  const block = contentBlocks.find(
2326
2506
  (b) => b.type === "tool" && b.id === r.id
@@ -2328,6 +2508,7 @@ Current date/time: ${(/* @__PURE__ */ new Date()).toISOString().replace("T", " "
2328
2508
  if (block?.type === "tool") {
2329
2509
  block.result = r.result;
2330
2510
  block.isError = r.isError;
2511
+ block.completedAt = Date.now();
2331
2512
  }
2332
2513
  messages.push({
2333
2514
  role: "user",
@@ -2365,7 +2546,7 @@ var BROWSER_TOOLS = [
2365
2546
  "styles",
2366
2547
  "screenshot"
2367
2548
  ],
2368
- description: "snapshot: accessibility tree of the page (waits for network to settle). click: click an element (animated cursor, full event sequence). type: type text into input (one char at a time, works with React/Vue/Svelte). select: select a dropdown option by text. wait: wait for an element to appear (polls 100ms, waits for network). navigate: navigate to a URL within the app (waits for load, subsequent steps run on new page). evaluate: run JS in the page. styles: read computed CSS styles from elements (pass properties array with camelCase names, or omit for defaults). screenshot: full-page viewport-stitched screenshot (returns base64 JPEG with dimensions)."
2549
+ description: "snapshot: accessibility tree of the page (waits for network to settle). click: click an element (animated cursor, full event sequence). type: type text into input (one char at a time, works with React/Vue/Svelte). select: select a dropdown option by text. wait: wait for an element to appear (polls 100ms, waits for network). navigate: navigate to a URL within the app (waits for load, subsequent steps run on new page). evaluate: run JS in the page. styles: read computed CSS styles from elements (pass properties array with camelCase names, or omit for defaults). screenshot: full-page viewport-stitched screenshot (returns CDN url with dimensions)."
2369
2550
  },
2370
2551
  ref: {
2371
2552
  type: "string",
@@ -2421,8 +2602,8 @@ var BROWSER_TOOLS = [
2421
2602
  }
2422
2603
  },
2423
2604
  {
2424
- name: "screenshot",
2425
- description: "Capture a screenshot of the current page. Returns a CDN URL with dimensions.",
2605
+ name: "screenshotFullPage",
2606
+ description: "Capture a full-height screenshot of the current page. Returns a CDN URL with full text analysis and description.",
2426
2607
  inputSchema: {
2427
2608
  type: "object",
2428
2609
  properties: {}
@@ -2441,11 +2622,7 @@ var BROWSER_EXTERNAL_TOOLS = /* @__PURE__ */ new Set(["browserCommand"]);
2441
2622
 
2442
2623
  // src/subagents/browserAutomation/prompt.ts
2443
2624
  import fs13 from "fs";
2444
- import path7 from "path";
2445
- var base = import.meta.dirname ?? path7.dirname(new URL(import.meta.url).pathname);
2446
- var local = path7.join(base, "prompt.md");
2447
- var PROMPT_PATH = fs13.existsSync(local) ? local : path7.join(base, "subagents", "browserAutomation", "prompt.md");
2448
- var BASE_PROMPT = fs13.readFileSync(PROMPT_PATH, "utf-8").trim();
2625
+ var BASE_PROMPT = readAsset("subagents/browserAutomation", "prompt.md");
2449
2626
  function getBrowserAutomationPrompt() {
2450
2627
  try {
2451
2628
  const appSpec = fs13.readFileSync("src/app.md", "utf-8").trim();
@@ -2463,7 +2640,7 @@ ${appSpec}
2463
2640
  var browserAutomationTool = {
2464
2641
  definition: {
2465
2642
  name: "runAutomatedBrowserTest",
2466
- description: "Run an automated browser test against the live preview. Describe what to test \u2014 the agent figures out how. Use after writing or modifying frontend code, to reproduce user-reported issues, or to test end-to-end flows.",
2643
+ description: "Run an automated browser test against the live preview. Describe what to test \u2014 the agent figures out how. Use after meaningful changes to frontend code, to reproduce user-reported issues, or to test end-to-end flows.",
2467
2644
  inputSchema: {
2468
2645
  type: "object",
2469
2646
  properties: {
@@ -2496,10 +2673,10 @@ var browserAutomationTool = {
2496
2673
  task: input.task,
2497
2674
  tools: BROWSER_TOOLS,
2498
2675
  externalTools: BROWSER_EXTERNAL_TOOLS,
2499
- executeTool: async (name) => {
2500
- if (name === "screenshot") {
2676
+ executeTool: async (name, _input, _toolCallId, onLog) => {
2677
+ if (name === "screenshotFullPage") {
2501
2678
  try {
2502
- return await captureAndAnalyzeScreenshot();
2679
+ return await captureAndAnalyzeScreenshot({ onLog });
2503
2680
  } catch (err) {
2504
2681
  return `Error taking screenshot: ${err.message}`;
2505
2682
  }
@@ -2529,7 +2706,7 @@ var browserAutomationTool = {
2529
2706
  try {
2530
2707
  const parsed = JSON.parse(result2);
2531
2708
  const screenshotSteps = (parsed.steps || []).filter(
2532
- (s) => s.command === "screenshot" && s.result?.url
2709
+ (s) => s.command === "screenshotViewport" && s.result?.url
2533
2710
  );
2534
2711
  if (screenshotSteps.length > 0) {
2535
2712
  const batchInput = screenshotSteps.map((s) => ({
@@ -2547,7 +2724,7 @@ var browserAutomationTool = {
2547
2724
  const analyses = JSON.parse(batchResult);
2548
2725
  let ai = 0;
2549
2726
  for (const step of parsed.steps) {
2550
- if (step.command === "screenshot" && step.result?.url && ai < analyses.length) {
2727
+ if (step.command === "screenshotViewport" && step.result?.url && ai < analyses.length) {
2551
2728
  step.result.analysis = analyses[ai]?.output?.analysis || analyses[ai]?.output || "";
2552
2729
  ai++;
2553
2730
  }
@@ -2563,264 +2740,415 @@ var browserAutomationTool = {
2563
2740
  }
2564
2741
  }
2565
2742
  return result2;
2566
- }
2743
+ },
2744
+ toolRegistry: context.toolRegistry
2567
2745
  });
2568
2746
  context.subAgentMessages?.set(context.toolCallId, result.messages);
2569
2747
  return result.text;
2570
2748
  }
2571
2749
  };
2572
2750
 
2573
- // src/subagents/designExpert/tools.ts
2574
- import fs14 from "fs";
2575
- import path8 from "path";
2576
- var base2 = import.meta.dirname ?? path8.dirname(new URL(import.meta.url).pathname);
2577
- function resolvePath(filename) {
2578
- const local4 = path8.join(base2, filename);
2579
- return fs14.existsSync(local4) ? local4 : path8.join(base2, "subagents", "designExpert", filename);
2751
+ // src/subagents/designExpert/tools/searchGoogle.ts
2752
+ var searchGoogle_exports = {};
2753
+ __export(searchGoogle_exports, {
2754
+ definition: () => definition,
2755
+ execute: () => execute
2756
+ });
2757
+ var definition = {
2758
+ name: "searchGoogle",
2759
+ description: 'Search Google for web results. Reserch modern design trends in industries or verticals, "best [domain] apps 2026", ui patterns, or find something specific if the the user has an explicit reference. Prioritize authoritative sources like Figma and other design leaders, avoid random blog spam. Pick one or more URLs from the results and then use `fetchUrl` to get their text content.',
2760
+ inputSchema: {
2761
+ type: "object",
2762
+ properties: {
2763
+ query: {
2764
+ type: "string",
2765
+ description: "The search query."
2766
+ }
2767
+ },
2768
+ required: ["query"]
2769
+ }
2770
+ };
2771
+ async function execute(input, onLog) {
2772
+ return runCli(
2773
+ `mindstudio search-google --query ${JSON.stringify(input.query)} --export-type json --output-key results --no-meta`,
2774
+ { onLog }
2775
+ );
2580
2776
  }
2581
- var DESIGN_REFERENCE_PROMPT = fs14.readFileSync(resolvePath("prompts/tool-prompts/design-analysis.md"), "utf-8").trim();
2582
- var DESIGN_EXPERT_TOOLS = [
2583
- {
2584
- name: "searchGoogle",
2585
- description: 'Search Google for web results. Reserch modern design trends in industries or verticals, "best [domain] apps 2026", ui patterns, etc. Prioritize authoritative sources like Figma and other design leaders, avoid random blog spam. Pick one or more URLs from the results and then use `fetchUrl` to get their text content.',
2586
- inputSchema: {
2587
- type: "object",
2588
- properties: {
2589
- query: {
2590
- type: "string",
2591
- description: "The search query."
2592
- }
2593
- },
2594
- required: ["query"]
2595
- }
2596
- },
2597
- {
2598
- name: "fetchUrl",
2599
- description: "Fetch the content of a web page as markdown. Optionally capture a screenshot to see the visual design. Use when reading sites from search results or specific things the user wants to incorporate.",
2600
- inputSchema: {
2601
- type: "object",
2602
- properties: {
2603
- url: {
2604
- type: "string",
2605
- description: "The URL to fetch."
2606
- },
2607
- screenshot: {
2608
- type: "boolean",
2609
- description: "Capture a screenshot of the page. Use when you need to see the visual design, not just the text."
2610
- }
2777
+
2778
+ // src/subagents/designExpert/tools/fetchUrl.ts
2779
+ var fetchUrl_exports = {};
2780
+ __export(fetchUrl_exports, {
2781
+ definition: () => definition2,
2782
+ execute: () => execute2
2783
+ });
2784
+ var definition2 = {
2785
+ name: "fetchUrl",
2786
+ description: "Fetch the content of a web page as markdown. Use when reading sites from search results or specific things the user wants to incorporate.",
2787
+ inputSchema: {
2788
+ type: "object",
2789
+ properties: {
2790
+ url: {
2791
+ type: "string",
2792
+ description: "The URL to fetch."
2793
+ }
2794
+ },
2795
+ required: ["url"]
2796
+ }
2797
+ };
2798
+ async function execute2(input, onLog) {
2799
+ const pageOptions = { onlyMainContent: true };
2800
+ if (input.screenshot) {
2801
+ pageOptions.screenshot = true;
2802
+ }
2803
+ return runCli(
2804
+ `mindstudio scrape-url --url ${JSON.stringify(input.url)} --page-options ${JSON.stringify(JSON.stringify(pageOptions))} --no-meta`,
2805
+ { onLog }
2806
+ );
2807
+ }
2808
+
2809
+ // src/subagents/designExpert/tools/analyzeDesign.ts
2810
+ var analyzeDesign_exports = {};
2811
+ __export(analyzeDesign_exports, {
2812
+ definition: () => definition3,
2813
+ execute: () => execute3
2814
+ });
2815
+ var DESIGN_REFERENCE_PROMPT = `
2816
+ You are analyzing a screenshot of a real website or app for a designer's personal technique/inspiration reference notes.
2817
+
2818
+ Analyze the image and think about what makes the site or app special and unique. What is it doing that is unique, different, original, and creative? What makes it special? What isn't working? What doesn't look or feel good?
2819
+
2820
+ Then, provide the following analysis:
2821
+
2822
+ ## Context
2823
+ What is this page, and what does it look like? Very briefly note the industry/vertical and purpose, then describe the composition with enough context to frame the analysis that follows \u2014 what's on the page, where things are positioned, what does the viewport look and feel like. Give enough detail that someone who can't see the image could understand the spatial references in the techniques section. Do not mention specific brand names. Keep it concise.
2824
+
2825
+ ## Colors
2826
+ List the palette as hex values with short labels. Just the swatches \u2014 no "strategy" paragraph.
2827
+
2828
+ ## Typography
2829
+ Brief description of the types used on the page. If you can identify the actual typeface name, provide it, otherwise provide a concrete description (e.g., "ultra-condensed grotesque, ~900 weight, tracked tight at maybe -0.03em, all-caps"). Include size relationships if notable (e.g., "hero text is viewport-width, body is 14px").
2830
+
2831
+ ## Techniques
2832
+ Identify the specific design moves that make this page interesting and unique, described in terms of how a designer with a technical background would write them down as notes in their notebook for inspiration. Focus only on the non-obvious, hard-to-think-of techniques \u2014 the things that make this page gallery-worthy. Skip basics like "high contrast CTA" or "generous whitespace" that any competent designer already knows.
2833
+
2834
+ Respond only with your analysis as Markdown and absolutely no other text. Do not use emojis - use unicode if you need symbols.
2835
+ `;
2836
+ var definition3 = {
2837
+ name: "analyzeDesign",
2838
+ description: "Analyze the visual design of a website or image URL. Websites are automatically screenshotted first. If no prompt is provided, performs a full design reference analysis (mood, color, typography, layout, distinctiveness). Provide a custom prompt to ask a specific design question instead.",
2839
+ inputSchema: {
2840
+ type: "object",
2841
+ properties: {
2842
+ url: {
2843
+ type: "string",
2844
+ description: "URL to analyze. Can be an image URL or a website URL (will be screenshotted)."
2611
2845
  },
2612
- required: ["url"]
2846
+ prompt: {
2847
+ type: "string",
2848
+ description: "Optional custom analysis prompt. If omitted, performs the standard design reference analysis."
2849
+ }
2850
+ },
2851
+ required: ["url"]
2852
+ }
2853
+ };
2854
+ async function execute3(input, onLog) {
2855
+ const url = input.url;
2856
+ const analysisPrompt = input.prompt || DESIGN_REFERENCE_PROMPT;
2857
+ const isImageUrl = /\.(png|jpe?g|webp|gif|svg|avif)(\?|$)/i.test(url);
2858
+ let imageUrl = url;
2859
+ if (!isImageUrl) {
2860
+ const ssUrl = await runCli(
2861
+ `mindstudio screenshot-url --url ${JSON.stringify(url)} --mode viewport --width 1440 --delay 2000 --output-key screenshotUrl --no-meta`,
2862
+ { timeout: 12e4, onLog }
2863
+ );
2864
+ if (ssUrl.startsWith("Error")) {
2865
+ return `Could not screenshot ${url}: ${ssUrl}`;
2613
2866
  }
2614
- },
2615
- {
2616
- name: "analyzeReferenceImageOrUrl",
2617
- description: "Analyze any visual \u2014 pass an image URL or a website URL. Websites are automatically screenshotted first. If no prompt is provided, performs a full design reference analysis (mood, color, typography, layout, distinctiveness). Provide a custom prompt to ask a specific question instead.",
2618
- inputSchema: {
2619
- type: "object",
2620
- properties: {
2621
- url: {
2622
- type: "string",
2623
- description: "URL to analyze. Can be an image URL or a website URL (will be screenshotted)."
2624
- },
2625
- prompt: {
2626
- type: "string",
2627
- description: "Optional custom analysis prompt. If omitted, performs the standard design reference analysis."
2628
- }
2867
+ imageUrl = ssUrl;
2868
+ }
2869
+ const analysis = await runCli(
2870
+ `mindstudio analyze-image --prompt ${JSON.stringify(analysisPrompt)} --image-url ${JSON.stringify(imageUrl)} --output-key analysis --no-meta`,
2871
+ { timeout: 2e5, onLog }
2872
+ );
2873
+ return isImageUrl ? analysis : `Screenshot: ${imageUrl}
2874
+
2875
+ ${analysis}`;
2876
+ }
2877
+
2878
+ // src/subagents/designExpert/tools/analyzeImage.ts
2879
+ var analyzeImage_exports = {};
2880
+ __export(analyzeImage_exports, {
2881
+ definition: () => definition4,
2882
+ execute: () => execute4
2883
+ });
2884
+ var DEFAULT_PROMPT = "Describe everything visible in this image \u2014 every element, its position, its size relative to the frame, its colors, its content. Be comprhensive, thorough and spatial. After the inventory, note anything that looks visually broken (overlapping elements, clipped text, misaligned components). Respond only with your analysis as Markdown and absolutely no other text. Do not use emojis - use unicode if you need symbols.";
2885
+ var definition4 = {
2886
+ name: "analyzeImage",
2887
+ description: "Analyze an image by URL. Returns a detailed description of everything visible. Provide a custom prompt to ask a specific question instead of the default full description.",
2888
+ inputSchema: {
2889
+ type: "object",
2890
+ properties: {
2891
+ imageUrl: {
2892
+ type: "string",
2893
+ description: "The image URL to analyze."
2629
2894
  },
2630
- required: ["url"]
2631
- }
2632
- },
2633
- {
2634
- name: "screenshot",
2635
- description: "Capture a screenshot of the app preview. Returns a CDN URL with visual analysis. Use to review the current state of the UI being built. By default captures the viewport. Set fullPage to capture the entire scrollable page.",
2636
- inputSchema: {
2637
- type: "object",
2638
- properties: {
2639
- prompt: {
2640
- type: "string",
2641
- description: "Optional specific question about the screenshot."
2642
- },
2643
- fullPage: {
2644
- type: "boolean",
2645
- description: "Capture the full scrollable page instead of just the viewport. Use when you need to see below-the-fold content."
2646
- }
2895
+ prompt: {
2896
+ type: "string",
2897
+ description: "Optional custom analysis prompt. If omitted, describes everything visible in the image."
2647
2898
  }
2648
- }
2649
- },
2650
- {
2651
- name: "runBrowserTest",
2652
- description: "Run an automated browser test against the live app preview. Use to verify implementation details via getComputedStyle: font-family names, exact colors, spacing, borders, shadows, font sizes, transforms. Only use this to evaluate computed CSS properties that can't be deduced from sceenshots.",
2653
- inputSchema: {
2654
- type: "object",
2655
- properties: {
2656
- task: {
2657
- type: "string",
2658
- description: 'What to verify, in natural language. Focus on measurable properties: "Check the hero cards have border-radius: 24px and box-shadow" or "Verify the background color of the CTA section is #C4FF0D".'
2659
- }
2660
- },
2661
- required: ["task"]
2662
- }
2663
- },
2664
- {
2665
- name: "generateImages",
2666
- description: "Generate images using AI (Seedream). Returns CDN URLs with a quality analysis for each image. Produces high-quality results for both photorealistic images and abstract/creative visuals. Pass multiple prompts to generate in parallel. No need to analyze images separately after generating \u2014 the analysis is included.",
2667
- inputSchema: {
2668
- type: "object",
2669
- properties: {
2670
- prompts: {
2671
- type: "array",
2672
- items: {
2673
- type: "string"
2674
- },
2675
- description: "One or more image generation prompts. Be detailed: describe style, mood, composition, colors. Multiple prompts run in parallel."
2676
- },
2677
- width: {
2678
- type: "number",
2679
- description: "Image width in pixels. Default 2048. Range: 2048-4096."
2680
- },
2681
- height: {
2682
- type: "number",
2683
- description: "Image height in pixels. Default 2048. Range: 2048-4096."
2684
- }
2685
- },
2686
- required: ["prompts"]
2687
- }
2899
+ },
2900
+ required: ["imageUrl"]
2688
2901
  }
2689
- ];
2690
- async function executeDesignExpertTool(name, input, context, toolCallId) {
2691
- switch (name) {
2692
- case "screenshot": {
2693
- try {
2694
- return await captureAndAnalyzeScreenshot({
2695
- prompt: input.prompt,
2696
- fullPage: input.fullPage
2697
- });
2698
- } catch (err) {
2699
- return `Error taking screenshot: ${err.message}`;
2902
+ };
2903
+ async function execute4(input, onLog) {
2904
+ const imageUrl = input.imageUrl;
2905
+ const prompt = input.prompt || DEFAULT_PROMPT;
2906
+ const analysis = await runCli(
2907
+ `mindstudio analyze-image --prompt ${JSON.stringify(prompt)} --image-url ${JSON.stringify(imageUrl)} --output-key analysis --no-meta`,
2908
+ { timeout: 2e5, onLog }
2909
+ );
2910
+ return JSON.stringify({ url: imageUrl, analysis });
2911
+ }
2912
+
2913
+ // src/subagents/designExpert/tools/screenshot.ts
2914
+ var screenshot_exports = {};
2915
+ __export(screenshot_exports, {
2916
+ definition: () => definition5,
2917
+ execute: () => execute5
2918
+ });
2919
+ var definition5 = {
2920
+ name: "screenshot",
2921
+ description: "Capture a full-height screenshot of the current app preview. Returns a CDN URL along with visual analysis. Use to review the current state of the UI being built. Remember, the screenshot analysis is not overly precise - for example, it cannot reliably identify specific fonts by name \u2014 it can only describe what letterforms look like.",
2922
+ inputSchema: {
2923
+ type: "object",
2924
+ properties: {
2925
+ prompt: {
2926
+ type: "string",
2927
+ description: "Optional specific question about the screenshot."
2700
2928
  }
2701
2929
  }
2702
- case "searchGoogle":
2703
- return runCli(
2704
- `mindstudio search-google --query ${JSON.stringify(input.query)} --export-type json --output-key results --no-meta`
2705
- );
2706
- case "fetchUrl": {
2707
- const pageOptions = { onlyMainContent: true };
2708
- if (input.screenshot) {
2709
- pageOptions.screenshot = true;
2930
+ }
2931
+ };
2932
+ async function execute5(input, onLog) {
2933
+ try {
2934
+ return await captureAndAnalyzeScreenshot({
2935
+ prompt: input.prompt,
2936
+ onLog
2937
+ });
2938
+ } catch (err) {
2939
+ return `Error taking screenshot: ${err.message}`;
2940
+ }
2941
+ }
2942
+
2943
+ // src/subagents/designExpert/tools/generateImages.ts
2944
+ var generateImages_exports = {};
2945
+ __export(generateImages_exports, {
2946
+ definition: () => definition6,
2947
+ execute: () => execute6
2948
+ });
2949
+
2950
+ // src/subagents/designExpert/tools/_seedream.ts
2951
+ var ANALYZE_PROMPT = "You are reviewing this image for a visual designer sourcing assets for a project. Describe: what the image depicts, the mood and color palette, how the lighting and composition work, whether there are any issues (unwanted text, artifacts, distortions), and how it could be used in a layout (hero background, feature section, card texture, etc). Be concise and practical. Respond only with your analysis as Markdown and absolutely no other text. Do not use emojis - use unicode if you need symbols.";
2952
+ async function seedreamGenerate(opts) {
2953
+ const { prompts, sourceImages, transparentBackground, onLog } = opts;
2954
+ const width = opts.width || 2048;
2955
+ const height = opts.height || 2048;
2956
+ const config = { width, height };
2957
+ if (sourceImages?.length) {
2958
+ config.images = sourceImages;
2959
+ }
2960
+ let imageUrls;
2961
+ if (prompts.length === 1) {
2962
+ const step = JSON.stringify({
2963
+ prompt: prompts[0],
2964
+ imageModelOverride: {
2965
+ model: "seedream-4.5",
2966
+ config
2967
+ }
2968
+ });
2969
+ const url = await runCli(
2970
+ `mindstudio generate-image ${JSON.stringify(step)} --output-key imageUrl --no-meta`,
2971
+ { jsonLogs: true, timeout: 2e5, onLog }
2972
+ );
2973
+ imageUrls = [url];
2974
+ } else {
2975
+ const steps = prompts.map((prompt) => ({
2976
+ stepType: "generateImage",
2977
+ step: {
2978
+ prompt,
2979
+ imageModelOverride: {
2980
+ model: "seedream-4.5",
2981
+ config
2982
+ }
2710
2983
  }
2711
- return runCli(
2712
- `mindstudio scrape-url --url ${JSON.stringify(input.url)} --page-options ${JSON.stringify(JSON.stringify(pageOptions))} --no-meta`
2984
+ }));
2985
+ const batchResult = await runCli(
2986
+ `mindstudio batch ${JSON.stringify(JSON.stringify(steps))} --no-meta`,
2987
+ { jsonLogs: true, timeout: 2e5, onLog }
2988
+ );
2989
+ try {
2990
+ const parsed = JSON.parse(batchResult);
2991
+ imageUrls = parsed.map(
2992
+ (r) => r.output?.imageUrl ?? `Error: ${r.error}`
2713
2993
  );
2994
+ } catch {
2995
+ return batchResult;
2714
2996
  }
2715
- case "analyzeReferenceImageOrUrl": {
2716
- const url = input.url;
2717
- const analysisPrompt = input.prompt || DESIGN_REFERENCE_PROMPT;
2718
- const isImageUrl = /\.(png|jpe?g|webp|gif|svg|avif)(\?|$)/i.test(url);
2719
- let imageUrl = url;
2720
- if (!isImageUrl) {
2721
- const ssUrl = await runCli(
2722
- `mindstudio screenshot-url --url ${JSON.stringify(url)} --mode viewport --width 1440 --delay 2000 --output-key screenshotUrl --no-meta`,
2723
- { timeout: 12e4 }
2724
- );
2725
- if (ssUrl.startsWith("Error")) {
2726
- return `Could not screenshot ${url}: ${ssUrl}`;
2997
+ }
2998
+ if (transparentBackground) {
2999
+ imageUrls = await Promise.all(
3000
+ imageUrls.map(async (url) => {
3001
+ if (url.startsWith("Error")) {
3002
+ return url;
2727
3003
  }
2728
- imageUrl = ssUrl;
3004
+ const result = await runCli(
3005
+ `mindstudio remove-background-from-image --image-url ${JSON.stringify(url)} --output-key imageUrl --no-meta`,
3006
+ { timeout: 2e5, onLog }
3007
+ );
3008
+ return result.startsWith("Error") ? url : result;
3009
+ })
3010
+ );
3011
+ }
3012
+ const images = await Promise.all(
3013
+ imageUrls.map(async (url, i) => {
3014
+ if (url.startsWith("Error")) {
3015
+ return { prompt: prompts[i], error: url };
2729
3016
  }
2730
3017
  const analysis = await runCli(
2731
- `mindstudio analyze-image --prompt ${JSON.stringify(analysisPrompt)} --image-url ${JSON.stringify(imageUrl)} --output-key analysis --no-meta`,
2732
- { timeout: 2e5 }
3018
+ `mindstudio analyze-image --prompt ${JSON.stringify(ANALYZE_PROMPT)} --image-url ${JSON.stringify(url)} --output-key analysis --no-meta`,
3019
+ { timeout: 2e5, onLog }
2733
3020
  );
2734
- return isImageUrl ? analysis : `Screenshot: ${imageUrl}
3021
+ return { url, prompt: prompts[i], analysis, width, height };
3022
+ })
3023
+ );
3024
+ return JSON.stringify({ images });
3025
+ }
2735
3026
 
2736
- ${analysis}`;
2737
- }
2738
- case "generateImages": {
2739
- const prompts = input.prompts;
2740
- const width = input.width || 2048;
2741
- const height = input.height || 2048;
2742
- const ANALYZE_PROMPT = "You are reviewing this image for a visual designer sourcing assets for a project. Describe: what the image depicts, the mood and color palette, how the lighting and composition work, whether there are any issues (unwanted text, artifacts, distortions), and how it could be used in a layout (hero background, feature section, card texture, etc). Be concise and practical.";
2743
- let imageUrls;
2744
- if (prompts.length === 1) {
2745
- const step = JSON.stringify({
2746
- prompt: prompts[0],
2747
- imageModelOverride: {
2748
- model: "seedream-4.5",
2749
- config: { width, height }
2750
- }
2751
- });
2752
- const url = await runCli(
2753
- `mindstudio generate-image '${step}' --output-key imageUrl --no-meta`,
2754
- { jsonLogs: true, timeout: 2e5 }
2755
- );
2756
- imageUrls = [url];
2757
- } else {
2758
- const steps = prompts.map((prompt) => ({
2759
- stepType: "generateImage",
2760
- step: {
2761
- prompt,
2762
- imageModelOverride: {
2763
- model: "seedream-4.5",
2764
- config: { width, height }
2765
- }
2766
- }
2767
- }));
2768
- const batchResult = await runCli(
2769
- `mindstudio batch '${JSON.stringify(steps)}' --no-meta`,
2770
- { jsonLogs: true, timeout: 2e5 }
2771
- );
2772
- try {
2773
- const parsed = JSON.parse(batchResult);
2774
- imageUrls = parsed.results.map(
2775
- (r) => r.output?.imageUrl ?? `Error: ${r.error}`
2776
- );
2777
- } catch {
2778
- return batchResult;
2779
- }
3027
+ // src/subagents/designExpert/tools/generateImages.ts
3028
+ var definition6 = {
3029
+ name: "generateImages",
3030
+ description: "Generate images using AI. Returns CDN URLs with a quality analysis for each image. Produces high-quality results for everything from photorealistic images and abstract/creative visuals. Pass multiple prompts to generate in parallel. No need to analyze images separately after generating \u2014 the analysis is included.",
3031
+ inputSchema: {
3032
+ type: "object",
3033
+ properties: {
3034
+ prompts: {
3035
+ type: "array",
3036
+ items: {
3037
+ type: "string"
3038
+ },
3039
+ description: "One or more image generation prompts. Be detailed: describe style, mood, composition, colors. Multiple prompts run in parallel."
3040
+ },
3041
+ width: {
3042
+ type: "number",
3043
+ description: "Image width in pixels. Default 2048. Range: 2048-4096."
3044
+ },
3045
+ height: {
3046
+ type: "number",
3047
+ description: "Image height in pixels. Default 2048. Range: 2048-4096."
3048
+ },
3049
+ transparentBackground: {
3050
+ type: "boolean",
3051
+ description: "Remove the background from generated images, producing transparent PNGs. Useful for icons, logos, product shots, and assets that need to be composited onto other backgrounds."
2780
3052
  }
2781
- const images = await Promise.all(
2782
- imageUrls.map(async (url, i) => {
2783
- if (url.startsWith("Error")) {
2784
- return { prompt: prompts[i], error: url };
2785
- }
2786
- const analysis = await runCli(
2787
- `mindstudio analyze-image --prompt ${JSON.stringify(ANALYZE_PROMPT)} --image-url ${JSON.stringify(url)} --output-key analysis --no-meta`,
2788
- { timeout: 2e5 }
2789
- );
2790
- return { url, prompt: prompts[i], analysis, width, height };
2791
- })
2792
- );
2793
- return `%%JSON%%${JSON.stringify({ images })}`;
2794
- }
2795
- case "runBrowserTest": {
2796
- if (!context) {
2797
- return "Error: browser testing requires execution context (only available in headless mode)";
3053
+ },
3054
+ required: ["prompts"]
3055
+ }
3056
+ };
3057
+ async function execute6(input, onLog) {
3058
+ return seedreamGenerate({
3059
+ prompts: input.prompts,
3060
+ width: input.width,
3061
+ height: input.height,
3062
+ transparentBackground: input.transparentBackground,
3063
+ onLog
3064
+ });
3065
+ }
3066
+
3067
+ // src/subagents/designExpert/tools/editImages.ts
3068
+ var editImages_exports = {};
3069
+ __export(editImages_exports, {
3070
+ definition: () => definition7,
3071
+ execute: () => execute7
3072
+ });
3073
+ var definition7 = {
3074
+ name: "editImages",
3075
+ description: "Edit or transform existing images using AI. Provide one or more source image URLs as reference and a prompt describing the desired edit. Use for compositing, style transfer, subject transformation, blending multiple references, or incorporating one or more ferences into something new. Returns CDN URLs with analysis.",
3076
+ inputSchema: {
3077
+ type: "object",
3078
+ properties: {
3079
+ prompts: {
3080
+ type: "array",
3081
+ items: {
3082
+ type: "string"
3083
+ },
3084
+ description: "One or more edit prompts describing how to transform the source images. Multiple prompts run in parallel, each using the same source images."
3085
+ },
3086
+ sourceImages: {
3087
+ type: "array",
3088
+ items: {
3089
+ type: "string"
3090
+ },
3091
+ description: "One or more source/reference image URLs. These are used as the basis for the edit \u2014 the AI will use them as reference for style, subject, or composition."
3092
+ },
3093
+ width: {
3094
+ type: "number",
3095
+ description: "Output width in pixels. Default 2048. Range: 2048-4096."
3096
+ },
3097
+ height: {
3098
+ type: "number",
3099
+ description: "Output height in pixels. Default 2048. Range: 2048-4096."
3100
+ },
3101
+ transparentBackground: {
3102
+ type: "boolean",
3103
+ description: "Remove the background from output images, producing transparent PNGs."
2798
3104
  }
2799
- return browserAutomationTool.execute(
2800
- { task: input.task },
2801
- {
2802
- ...context,
2803
- toolCallId: toolCallId || context.toolCallId
2804
- }
2805
- );
2806
- }
2807
- default:
2808
- return `Error: unknown tool "${name}"`;
3105
+ },
3106
+ required: ["prompts", "sourceImages"]
2809
3107
  }
3108
+ };
3109
+ async function execute7(input, onLog) {
3110
+ return seedreamGenerate({
3111
+ prompts: input.prompts,
3112
+ sourceImages: input.sourceImages,
3113
+ width: input.width,
3114
+ height: input.height,
3115
+ transparentBackground: input.transparentBackground,
3116
+ onLog
3117
+ });
3118
+ }
3119
+
3120
+ // src/subagents/designExpert/tools/index.ts
3121
+ var tools = {
3122
+ searchGoogle: searchGoogle_exports,
3123
+ fetchUrl: fetchUrl_exports,
3124
+ analyzeDesign: analyzeDesign_exports,
3125
+ analyzeImage: analyzeImage_exports,
3126
+ screenshot: screenshot_exports,
3127
+ generateImages: generateImages_exports,
3128
+ editImages: editImages_exports
3129
+ };
3130
+ var DESIGN_EXPERT_TOOLS = Object.values(tools).map(
3131
+ (t) => t.definition
3132
+ );
3133
+ async function executeDesignExpertTool(name, input, context, toolCallId, onLog) {
3134
+ const tool = tools[name];
3135
+ if (!tool) {
3136
+ return `Error: unknown tool "${name}"`;
3137
+ }
3138
+ return tool.execute(input, onLog);
2810
3139
  }
2811
3140
 
2812
3141
  // src/subagents/designExpert/prompt.ts
2813
- import fs16 from "fs";
2814
- import path10 from "path";
3142
+ import fs15 from "fs";
2815
3143
 
2816
3144
  // src/subagents/common/context.ts
2817
- import fs15 from "fs";
2818
- import path9 from "path";
3145
+ import fs14 from "fs";
3146
+ import path7 from "path";
2819
3147
  function walkMdFiles2(dir, skip) {
2820
3148
  const files = [];
2821
3149
  try {
2822
- for (const entry of fs15.readdirSync(dir, { withFileTypes: true })) {
2823
- const full = path9.join(dir, entry.name);
3150
+ for (const entry of fs14.readdirSync(dir, { withFileTypes: true })) {
3151
+ const full = path7.join(dir, entry.name);
2824
3152
  if (entry.isDirectory()) {
2825
3153
  if (!skip?.has(entry.name)) {
2826
3154
  files.push(...walkMdFiles2(full, skip));
@@ -2840,7 +3168,7 @@ function loadFilesAsXml(dir, tag, skip) {
2840
3168
  }
2841
3169
  const sections = files.map((f) => {
2842
3170
  try {
2843
- const content = fs15.readFileSync(f, "utf-8").trim();
3171
+ const content = fs14.readFileSync(f, "utf-8").trim();
2844
3172
  return `<file path="${f}">
2845
3173
  ${content}
2846
3174
  </file>`;
@@ -2909,38 +3237,11 @@ The first-party SDK (@mindstudio-ai/agent) provides access to 200+ AI models (Op
2909
3237
  </platform_brief>`;
2910
3238
  }
2911
3239
 
2912
- // src/subagents/designExpert/prompt.ts
2913
- var base3 = import.meta.dirname ?? path10.dirname(new URL(import.meta.url).pathname);
2914
- function resolvePath2(filename) {
2915
- const local4 = path10.join(base3, filename);
2916
- return fs16.existsSync(local4) ? local4 : path10.join(base3, "subagents", "designExpert", filename);
2917
- }
2918
- function readFile(filename) {
2919
- return fs16.readFileSync(resolvePath2(filename), "utf-8").trim();
2920
- }
2921
- function readJson(filename, fallback) {
2922
- try {
2923
- return JSON.parse(fs16.readFileSync(resolvePath2(filename), "utf-8"));
2924
- } catch {
2925
- return fallback;
2926
- }
2927
- }
2928
- var RUNTIME_PLACEHOLDERS = /* @__PURE__ */ new Set([
2929
- "fonts_to_consider",
2930
- "inspiration_images"
2931
- ]);
2932
- var PROMPT_TEMPLATE = readFile("prompt.md").replace(/\{\{([^}]+)\}\}/g, (match, key) => {
2933
- const k = key.trim();
2934
- return RUNTIME_PLACEHOLDERS.has(k) ? match : readFile(k);
2935
- }).replace(/\n{3,}/g, "\n\n");
2936
- var fontData = readJson("data/fonts.json", {
2937
- cssUrlPattern: "",
2938
- fonts: [],
2939
- pairings: []
2940
- });
2941
- var inspirationImages = readJson("data/inspiration.json", {
2942
- images: []
2943
- }).images;
3240
+ // src/subagents/designExpert/data/getFontLibrarySample.ts
3241
+ var fontData = readJsonAsset(
3242
+ { cssUrlPattern: "", fonts: [], pairings: [] },
3243
+ "subagents/designExpert/data/sources/fonts.json"
3244
+ );
2944
3245
  function sample(arr, n) {
2945
3246
  if (arr.length <= n) {
2946
3247
  return [...arr];
@@ -2952,10 +3253,12 @@ function sample(arr, n) {
2952
3253
  }
2953
3254
  return copy.slice(0, n);
2954
3255
  }
2955
- function getDesignExpertPrompt() {
2956
- const fonts = sample(fontData.fonts, 30);
2957
- const pairings = sample(fontData.pairings, 20);
2958
- const images = sample(inspirationImages, 15);
3256
+ function getFontLibrarySample() {
3257
+ const fonts = sample(fontData.fonts, 60);
3258
+ const pairings = sample(fontData.pairings, 30);
3259
+ if (!fonts.length) {
3260
+ return "";
3261
+ }
2959
3262
  const fontList = fonts.map((f) => {
2960
3263
  let cssInfo = "";
2961
3264
  if (f.source === "fontshare") {
@@ -2971,35 +3274,72 @@ function getDesignExpertPrompt() {
2971
3274
  const pairingList = pairings.map(
2972
3275
  (p) => `- **${p.heading.font}** (${p.heading.weight}) heading + **${p.body.font}** (${p.body.weight}) body`
2973
3276
  ).join("\n");
2974
- const fontsSection = fonts.length ? `<fonts_to_consider>
2975
- ## Fonts to consider
3277
+ return `
3278
+ ## Font Library
3279
+
3280
+ A random sample from a curated font library. Use these as starting points for font selection.
2976
3281
 
2977
- A random sample from Fontshare, Open Foundry, and Google Fonts. Use these as starting points for font selection.
2978
- CSS URL pattern: ${fontData.cssUrlPattern}
3282
+ ### Fonts
2979
3283
 
2980
3284
  ${fontList}
2981
3285
 
2982
- ### Suggested pairings
3286
+ ### Pairings
2983
3287
 
2984
- ${pairingList}
2985
- </fonts_to_consider>` : "";
2986
- const imageList = images.map((img) => `- ${img.analysis}`).join("\n\n");
2987
- const inspirationSection = images.length ? `<design_inspiration>
2988
- ## Design inspiration
3288
+ ${pairingList}`.trim();
3289
+ }
3290
+
3291
+ // src/subagents/designExpert/data/getDesignReferencesSample.ts
3292
+ var inspirationImages = readJsonAsset(
3293
+ { images: [] },
3294
+ "subagents/designExpert/data/sources/inspiration.json"
3295
+ ).images;
3296
+ function sample2(arr, n) {
3297
+ if (arr.length <= n) {
3298
+ return [...arr];
3299
+ }
3300
+ const copy = [...arr];
3301
+ for (let i = copy.length - 1; i > 0; i--) {
3302
+ const j = Math.floor(Math.random() * (i + 1));
3303
+ [copy[i], copy[j]] = [copy[j], copy[i]];
3304
+ }
3305
+ return copy.slice(0, n);
3306
+ }
3307
+ function getDesignReferencesSample() {
3308
+ const images = sample2(inspirationImages, 30);
3309
+ if (!images.length) {
3310
+ return "";
3311
+ }
3312
+ const imageList = images.map((img, i) => `### Reference ${i + 1}
3313
+ ${img.analysis}`).join("\n\n");
3314
+ return `
3315
+ ## Design References
2989
3316
 
2990
3317
  This is what the bar looks like. These are real sites that made it onto curated design galleries because they did something bold, intentional, and memorable. Use them as inspiration and let the takeaways guide your work. Your designs should feel like they belong in this company.
2991
3318
 
2992
- ${imageList}
2993
- </design_inspiration>` : "";
3319
+ ${imageList}`.trim();
3320
+ }
3321
+
3322
+ // src/subagents/designExpert/prompt.ts
3323
+ var SUBAGENT = "subagents/designExpert";
3324
+ var RUNTIME_PLACEHOLDERS = /* @__PURE__ */ new Set(["font_library", "design_references"]);
3325
+ var PROMPT_TEMPLATE = readAsset(SUBAGENT, "prompt.md").replace(/\{\{([^}]+)\}\}/g, (match, key) => {
3326
+ const k = key.trim();
3327
+ return RUNTIME_PLACEHOLDERS.has(k) ? match : readAsset(SUBAGENT, k);
3328
+ }).replace(/\n{3,}/g, "\n\n");
3329
+ function getDesignExpertPrompt() {
2994
3330
  const specContext = loadSpecContext();
2995
3331
  let prompt = PROMPT_TEMPLATE.replace(
2996
- "{{fonts_to_consider}}",
2997
- fontsSection
2998
- ).replace("{{inspiration_images}}", inspirationSection);
3332
+ "{{font_library}}",
3333
+ getFontLibrarySample()
3334
+ ).replace("{{design_references}}", getDesignReferencesSample());
2999
3335
  if (specContext) {
3000
3336
  prompt += `
3001
3337
 
3002
3338
  ${specContext}`;
3339
+ }
3340
+ try {
3341
+ fs15.writeFileSync(`.design-prompt.md`, prompt);
3342
+ } catch {
3003
3343
  }
3004
3344
  return prompt;
3005
3345
  }
@@ -3017,7 +3357,7 @@ var designExpertTool = {
3017
3357
  properties: {
3018
3358
  task: {
3019
3359
  type: "string",
3020
- description: "What you need, in natural language. Include context about the app when relevant."
3360
+ description: "What you need, in natural language. Include context about the project when relevant."
3021
3361
  }
3022
3362
  },
3023
3363
  required: ["task"]
@@ -3032,14 +3372,15 @@ var designExpertTool = {
3032
3372
  task: input.task,
3033
3373
  tools: DESIGN_EXPERT_TOOLS,
3034
3374
  externalTools: /* @__PURE__ */ new Set(),
3035
- executeTool: (name, input2, toolCallId) => executeDesignExpertTool(name, input2, context, toolCallId),
3375
+ executeTool: (name, input2, toolCallId, onLog) => executeDesignExpertTool(name, input2, context, toolCallId, onLog),
3036
3376
  apiConfig: context.apiConfig,
3037
3377
  model: context.model,
3038
3378
  subAgentId: "visualDesignExpert",
3039
3379
  signal: context.signal,
3040
3380
  parentToolId: context.toolCallId,
3041
3381
  onEvent: context.onEvent,
3042
- resolveExternalTool: context.resolveExternalTool
3382
+ resolveExternalTool: context.resolveExternalTool,
3383
+ toolRegistry: context.toolRegistry
3043
3384
  });
3044
3385
  context.subAgentMessages?.set(context.toolCallId, result.messages);
3045
3386
  return result.text;
@@ -3145,8 +3486,8 @@ var VISION_TOOLS = [
3145
3486
  ];
3146
3487
 
3147
3488
  // src/subagents/productVision/executor.ts
3148
- import fs17 from "fs";
3149
- import path11 from "path";
3489
+ import fs16 from "fs";
3490
+ import path8 from "path";
3150
3491
  var ROADMAP_DIR = "src/roadmap";
3151
3492
  function formatRequires(requires) {
3152
3493
  return requires.length === 0 ? "[]" : `[${requires.map((r) => `"${r}"`).join(", ")}]`;
@@ -3162,10 +3503,10 @@ async function executeVisionTool(name, input) {
3162
3503
  requires,
3163
3504
  body
3164
3505
  } = input;
3165
- const filePath = path11.join(ROADMAP_DIR, `${slug}.md`);
3506
+ const filePath = path8.join(ROADMAP_DIR, `${slug}.md`);
3166
3507
  try {
3167
- fs17.mkdirSync(ROADMAP_DIR, { recursive: true });
3168
- const oldContent = fs17.existsSync(filePath) ? fs17.readFileSync(filePath, "utf-8") : "";
3508
+ fs16.mkdirSync(ROADMAP_DIR, { recursive: true });
3509
+ const oldContent = fs16.existsSync(filePath) ? fs16.readFileSync(filePath, "utf-8") : "";
3169
3510
  const content = `---
3170
3511
  name: ${itemName}
3171
3512
  type: roadmap
@@ -3177,7 +3518,7 @@ requires: ${formatRequires(requires)}
3177
3518
 
3178
3519
  ${body}
3179
3520
  `;
3180
- fs17.writeFileSync(filePath, content, "utf-8");
3521
+ fs16.writeFileSync(filePath, content, "utf-8");
3181
3522
  const lineCount = content.split("\n").length;
3182
3523
  const label = oldContent ? "Updated" : "Wrote";
3183
3524
  return `${label} ${filePath} (${lineCount} lines)
@@ -3188,12 +3529,12 @@ ${unifiedDiff(filePath, oldContent, content)}`;
3188
3529
  }
3189
3530
  case "updateRoadmapItem": {
3190
3531
  const { slug } = input;
3191
- const filePath = path11.join(ROADMAP_DIR, `${slug}.md`);
3532
+ const filePath = path8.join(ROADMAP_DIR, `${slug}.md`);
3192
3533
  try {
3193
- if (!fs17.existsSync(filePath)) {
3534
+ if (!fs16.existsSync(filePath)) {
3194
3535
  return `Error: ${filePath} does not exist`;
3195
3536
  }
3196
- const oldContent = fs17.readFileSync(filePath, "utf-8");
3537
+ const oldContent = fs16.readFileSync(filePath, "utf-8");
3197
3538
  let content = oldContent;
3198
3539
  if (input.status) {
3199
3540
  content = content.replace(
@@ -3246,7 +3587,7 @@ ${input.appendHistory}
3246
3587
  `;
3247
3588
  }
3248
3589
  }
3249
- fs17.writeFileSync(filePath, content, "utf-8");
3590
+ fs16.writeFileSync(filePath, content, "utf-8");
3250
3591
  const lineCount = content.split("\n").length;
3251
3592
  return `Updated ${filePath} (${lineCount} lines)
3252
3593
  ${unifiedDiff(filePath, oldContent, content)}`;
@@ -3256,13 +3597,13 @@ ${unifiedDiff(filePath, oldContent, content)}`;
3256
3597
  }
3257
3598
  case "deleteRoadmapItem": {
3258
3599
  const { slug } = input;
3259
- const filePath = path11.join(ROADMAP_DIR, `${slug}.md`);
3600
+ const filePath = path8.join(ROADMAP_DIR, `${slug}.md`);
3260
3601
  try {
3261
- if (!fs17.existsSync(filePath)) {
3602
+ if (!fs16.existsSync(filePath)) {
3262
3603
  return `Error: ${filePath} does not exist`;
3263
3604
  }
3264
- const oldContent = fs17.readFileSync(filePath, "utf-8");
3265
- fs17.unlinkSync(filePath);
3605
+ const oldContent = fs16.readFileSync(filePath, "utf-8");
3606
+ fs16.unlinkSync(filePath);
3266
3607
  return `Deleted ${filePath}
3267
3608
  ${unifiedDiff(filePath, oldContent, "")}`;
3268
3609
  } catch (err) {
@@ -3275,12 +3616,7 @@ ${unifiedDiff(filePath, oldContent, "")}`;
3275
3616
  }
3276
3617
 
3277
3618
  // src/subagents/productVision/prompt.ts
3278
- import fs18 from "fs";
3279
- import path12 from "path";
3280
- var base4 = import.meta.dirname ?? path12.dirname(new URL(import.meta.url).pathname);
3281
- var local2 = path12.join(base4, "prompt.md");
3282
- var PROMPT_PATH2 = fs18.existsSync(local2) ? local2 : path12.join(base4, "subagents", "productVision", "prompt.md");
3283
- var BASE_PROMPT2 = fs18.readFileSync(PROMPT_PATH2, "utf-8").trim();
3619
+ var BASE_PROMPT2 = readAsset("subagents/productVision", "prompt.md");
3284
3620
  function getProductVisionPrompt() {
3285
3621
  const specContext = loadSpecContext();
3286
3622
  const roadmapContext = loadRoadmapContext();
@@ -3326,17 +3662,14 @@ var productVisionTool = {
3326
3662
  signal: context.signal,
3327
3663
  parentToolId: context.toolCallId,
3328
3664
  onEvent: context.onEvent,
3329
- resolveExternalTool: context.resolveExternalTool
3665
+ resolveExternalTool: context.resolveExternalTool,
3666
+ toolRegistry: context.toolRegistry
3330
3667
  });
3331
3668
  context.subAgentMessages?.set(context.toolCallId, result.messages);
3332
3669
  return result.text;
3333
3670
  }
3334
3671
  };
3335
3672
 
3336
- // src/subagents/codeSanityCheck/index.ts
3337
- import fs19 from "fs";
3338
- import path13 from "path";
3339
-
3340
3673
  // src/subagents/codeSanityCheck/tools.ts
3341
3674
  var SANITY_CHECK_TOOLS = [
3342
3675
  {
@@ -3429,10 +3762,7 @@ var SANITY_CHECK_TOOLS = [
3429
3762
  ];
3430
3763
 
3431
3764
  // src/subagents/codeSanityCheck/index.ts
3432
- var base5 = import.meta.dirname ?? path13.dirname(new URL(import.meta.url).pathname);
3433
- var local3 = path13.join(base5, "prompt.md");
3434
- var PROMPT_PATH3 = fs19.existsSync(local3) ? local3 : path13.join(base5, "subagents", "codeSanityCheck", "prompt.md");
3435
- var BASE_PROMPT3 = fs19.readFileSync(PROMPT_PATH3, "utf-8").trim();
3765
+ var BASE_PROMPT3 = readAsset("subagents/codeSanityCheck", "prompt.md");
3436
3766
  var codeSanityCheckTool = {
3437
3767
  definition: {
3438
3768
  name: "codeSanityCheck",
@@ -3470,7 +3800,8 @@ var codeSanityCheckTool = {
3470
3800
  signal: context.signal,
3471
3801
  parentToolId: context.toolCallId,
3472
3802
  onEvent: context.onEvent,
3473
- resolveExternalTool: context.resolveExternalTool
3803
+ resolveExternalTool: context.resolveExternalTool,
3804
+ toolRegistry: context.toolRegistry
3474
3805
  });
3475
3806
  context.subAgentMessages?.set(context.toolCallId, result.messages);
3476
3807
  return result.text;
@@ -3482,7 +3813,7 @@ function getSpecTools() {
3482
3813
  return [readSpecTool, writeSpecTool, editSpecTool, listSpecFilesTool];
3483
3814
  }
3484
3815
  function getCodeTools() {
3485
- const tools = [
3816
+ const tools2 = [
3486
3817
  readFileTool,
3487
3818
  writeFileTool,
3488
3819
  editFileTool,
@@ -3497,9 +3828,9 @@ function getCodeTools() {
3497
3828
  browserAutomationTool
3498
3829
  ];
3499
3830
  if (isLspConfigured()) {
3500
- tools.push(lspDiagnosticsTool, restartProcessTool);
3831
+ tools2.push(lspDiagnosticsTool, restartProcessTool);
3501
3832
  }
3502
- return tools;
3833
+ return tools2;
3503
3834
  }
3504
3835
  function getCommonTools() {
3505
3836
  return [
@@ -3559,11 +3890,11 @@ function executeTool(name, input, context) {
3559
3890
  }
3560
3891
 
3561
3892
  // src/session.ts
3562
- import fs20 from "fs";
3893
+ import fs17 from "fs";
3563
3894
  var SESSION_FILE = ".remy-session.json";
3564
3895
  function loadSession(state) {
3565
3896
  try {
3566
- const raw = fs20.readFileSync(SESSION_FILE, "utf-8");
3897
+ const raw = fs17.readFileSync(SESSION_FILE, "utf-8");
3567
3898
  const data = JSON.parse(raw);
3568
3899
  if (Array.isArray(data.messages) && data.messages.length > 0) {
3569
3900
  state.messages = sanitizeMessages(data.messages);
@@ -3611,7 +3942,7 @@ function sanitizeMessages(messages) {
3611
3942
  }
3612
3943
  function saveSession(state) {
3613
3944
  try {
3614
- fs20.writeFileSync(
3945
+ fs17.writeFileSync(
3615
3946
  SESSION_FILE,
3616
3947
  JSON.stringify({ messages: state.messages }, null, 2),
3617
3948
  "utf-8"
@@ -3622,7 +3953,7 @@ function saveSession(state) {
3622
3953
  function clearSession(state) {
3623
3954
  state.messages = [];
3624
3955
  try {
3625
- fs20.unlinkSync(SESSION_FILE);
3956
+ fs17.unlinkSync(SESSION_FILE);
3626
3957
  } catch {
3627
3958
  }
3628
3959
  }
@@ -3790,85 +4121,6 @@ function parsePartialJson(jsonString) {
3790
4121
  return parseAny();
3791
4122
  }
3792
4123
 
3793
- // src/statusWatcher.ts
3794
- function startStatusWatcher(config) {
3795
- const { apiConfig, getContext, onStatus, interval = 3e3, signal } = config;
3796
- let lastLabel = "";
3797
- let inflight = false;
3798
- let stopped = false;
3799
- const url = `${apiConfig.baseUrl}/_internal/v2/agent/remy/generate-status`;
3800
- async function tick() {
3801
- if (stopped || signal?.aborted || inflight) {
3802
- return;
3803
- }
3804
- inflight = true;
3805
- try {
3806
- const ctx = getContext();
3807
- if (!ctx.assistantText && !ctx.lastToolName) {
3808
- log.debug("Status watcher: no context, skipping");
3809
- return;
3810
- }
3811
- log.debug("Status watcher: requesting label", {
3812
- textLength: ctx.assistantText.length,
3813
- lastToolName: ctx.lastToolName
3814
- });
3815
- const res = await fetch(url, {
3816
- method: "POST",
3817
- headers: {
3818
- "Content-Type": "application/json",
3819
- Authorization: `Bearer ${apiConfig.apiKey}`
3820
- },
3821
- body: JSON.stringify({
3822
- assistantText: ctx.assistantText.slice(-500),
3823
- lastToolName: ctx.lastToolName,
3824
- lastToolResult: ctx.lastToolResult?.slice(-200),
3825
- onboardingState: ctx.onboardingState,
3826
- userMessage: ctx.userMessage?.slice(-200)
3827
- }),
3828
- signal
3829
- });
3830
- if (!res.ok) {
3831
- log.debug("Status watcher: endpoint returned non-ok", {
3832
- status: res.status
3833
- });
3834
- return;
3835
- }
3836
- const data = await res.json();
3837
- if (!data.label) {
3838
- log.debug("Status watcher: no label in response");
3839
- return;
3840
- }
3841
- if (data.label === lastLabel) {
3842
- log.debug("Status watcher: duplicate label, skipping", {
3843
- label: data.label
3844
- });
3845
- return;
3846
- }
3847
- lastLabel = data.label;
3848
- if (stopped) {
3849
- return;
3850
- }
3851
- log.debug("Status watcher: emitting", { label: data.label });
3852
- onStatus(data.label);
3853
- } catch (err) {
3854
- log.debug("Status watcher: error", { error: err?.message ?? "unknown" });
3855
- } finally {
3856
- inflight = false;
3857
- }
3858
- }
3859
- const timer = setInterval(tick, interval);
3860
- tick().catch(() => {
3861
- });
3862
- log.debug("Status watcher started", { interval });
3863
- return {
3864
- stop() {
3865
- stopped = true;
3866
- clearInterval(timer);
3867
- log.debug("Status watcher stopped");
3868
- }
3869
- };
3870
- }
3871
-
3872
4124
  // src/errors.ts
3873
4125
  var patterns = [
3874
4126
  [
@@ -3932,13 +4184,14 @@ async function runTurn(params) {
3932
4184
  signal,
3933
4185
  onEvent,
3934
4186
  resolveExternalTool,
3935
- hidden
4187
+ hidden,
4188
+ toolRegistry
3936
4189
  } = params;
3937
- const tools = getToolDefinitions(onboardingState);
4190
+ const tools2 = getToolDefinitions(onboardingState);
3938
4191
  log.info("Turn started", {
3939
4192
  messageLength: userMessage.length,
3940
- toolCount: tools.length,
3941
- tools: tools.map((t) => t.name),
4193
+ toolCount: tools2.length,
4194
+ tools: tools2.map((t) => t.name),
3942
4195
  ...attachments && attachments.length > 0 && {
3943
4196
  attachmentCount: attachments.length,
3944
4197
  attachmentUrls: attachments.map((a) => a.url)
@@ -3985,6 +4238,20 @@ async function runTurn(params) {
3985
4238
  let thinkingStartedAt = 0;
3986
4239
  const toolInputAccumulators = /* @__PURE__ */ new Map();
3987
4240
  let stopReason = "end_turn";
4241
+ let subAgentText = "";
4242
+ let currentToolNames = "";
4243
+ const statusWatcher = startStatusWatcher({
4244
+ apiConfig,
4245
+ getContext: () => ({
4246
+ assistantText: subAgentText || getTextContent(contentBlocks).slice(-500),
4247
+ lastToolName: currentToolNames || getToolCalls(contentBlocks).filter((tc) => !STATUS_EXCLUDED_TOOLS.has(tc.name)).at(-1)?.name || lastCompletedTools || void 0,
4248
+ lastToolResult: lastCompletedResult || void 0,
4249
+ onboardingState,
4250
+ userMessage
4251
+ }),
4252
+ onStatus: (label) => onEvent({ type: "status", message: label }),
4253
+ signal
4254
+ });
3988
4255
  async function handlePartialInput(acc, id, name, partial) {
3989
4256
  const tool = getToolByName(name);
3990
4257
  if (!tool?.streaming) {
@@ -4048,18 +4315,6 @@ async function runTurn(params) {
4048
4315
  onEvent({ type: "tool_input_delta", id, name, result: content });
4049
4316
  }
4050
4317
  }
4051
- const statusWatcher = startStatusWatcher({
4052
- apiConfig,
4053
- getContext: () => ({
4054
- assistantText: getTextContent(contentBlocks).slice(-500),
4055
- lastToolName: getToolCalls(contentBlocks).filter((tc) => !STATUS_EXCLUDED_TOOLS.has(tc.name)).at(-1)?.name || lastCompletedTools || void 0,
4056
- lastToolResult: lastCompletedResult || void 0,
4057
- onboardingState,
4058
- userMessage
4059
- }),
4060
- onStatus: (label) => onEvent({ type: "status", message: label }),
4061
- signal
4062
- });
4063
4318
  try {
4064
4319
  for await (const event of streamChatWithRetry(
4065
4320
  {
@@ -4067,7 +4322,7 @@ async function runTurn(params) {
4067
4322
  model,
4068
4323
  system,
4069
4324
  messages: cleanMessagesForApi(state.messages),
4070
- tools,
4325
+ tools: tools2,
4071
4326
  signal
4072
4327
  },
4073
4328
  {
@@ -4180,10 +4435,9 @@ async function runTurn(params) {
4180
4435
  } else {
4181
4436
  throw err;
4182
4437
  }
4183
- } finally {
4184
- statusWatcher.stop();
4185
4438
  }
4186
4439
  if (signal?.aborted) {
4440
+ statusWatcher.stop();
4187
4441
  if (contentBlocks.length > 0) {
4188
4442
  contentBlocks.push({
4189
4443
  type: "text",
@@ -4205,6 +4459,7 @@ async function runTurn(params) {
4205
4459
  });
4206
4460
  const toolCalls = getToolCalls(contentBlocks);
4207
4461
  if (stopReason !== "tool_use" || toolCalls.length === 0) {
4462
+ statusWatcher.stop();
4208
4463
  saveSession(state);
4209
4464
  onEvent({ type: "turn_done" });
4210
4465
  return;
@@ -4213,8 +4468,7 @@ async function runTurn(params) {
4213
4468
  count: toolCalls.length,
4214
4469
  tools: toolCalls.map((tc) => tc.name)
4215
4470
  });
4216
- let subAgentText = "";
4217
- const origOnEvent = onEvent;
4471
+ currentToolNames = toolCalls.filter((tc) => !STATUS_EXCLUDED_TOOLS.has(tc.name)).map((tc) => tc.name).join(", ");
4218
4472
  const wrappedOnEvent = (e) => {
4219
4473
  if ("parentToolId" in e && e.parentToolId) {
4220
4474
  if (e.type === "text") {
@@ -4223,80 +4477,103 @@ async function runTurn(params) {
4223
4477
  subAgentText = `Using ${e.name}`;
4224
4478
  }
4225
4479
  }
4226
- origOnEvent(e);
4480
+ onEvent(e);
4227
4481
  };
4228
- const toolStatusWatcher = startStatusWatcher({
4229
- apiConfig,
4230
- getContext: () => ({
4231
- assistantText: subAgentText || getTextContent(contentBlocks).slice(-500),
4232
- lastToolName: toolCalls.filter((tc) => !STATUS_EXCLUDED_TOOLS.has(tc.name)).map((tc) => tc.name).join(", ") || void 0,
4233
- lastToolResult: lastCompletedResult || void 0,
4234
- onboardingState,
4235
- userMessage
4236
- }),
4237
- onStatus: (label) => origOnEvent({ type: "status", message: label }),
4238
- signal
4239
- });
4240
4482
  const subAgentMessages = /* @__PURE__ */ new Map();
4241
4483
  const results = await Promise.all(
4242
4484
  toolCalls.map(async (tc) => {
4243
4485
  if (signal?.aborted) {
4244
- return {
4245
- id: tc.id,
4246
- result: "Error: cancelled",
4247
- isError: true
4248
- };
4486
+ return { id: tc.id, result: "Error: cancelled", isError: true };
4249
4487
  }
4250
4488
  const toolStart = Date.now();
4251
- try {
4252
- let result;
4253
- if (EXTERNAL_TOOLS.has(tc.name) && resolveExternalTool) {
4254
- saveSession(state);
4255
- log.info("Waiting for external tool result", {
4256
- name: tc.name,
4257
- id: tc.id
4258
- });
4259
- result = await resolveExternalTool(tc.id, tc.name, tc.input);
4260
- } else {
4261
- result = await executeTool(tc.name, tc.input, {
4262
- apiConfig,
4263
- model,
4264
- signal,
4265
- onEvent: wrappedOnEvent,
4266
- resolveExternalTool,
4267
- toolCallId: tc.id,
4268
- subAgentMessages
4489
+ let settle;
4490
+ const resultPromise = new Promise((res) => {
4491
+ settle = (result, isError) => res({ id: tc.id, result, isError });
4492
+ });
4493
+ let toolAbort = new AbortController();
4494
+ const cascadeAbort = () => toolAbort.abort();
4495
+ signal?.addEventListener("abort", cascadeAbort, { once: true });
4496
+ let settled = false;
4497
+ const safeSettle = (result, isError) => {
4498
+ if (settled) {
4499
+ return;
4500
+ }
4501
+ settled = true;
4502
+ signal?.removeEventListener("abort", cascadeAbort);
4503
+ settle(result, isError);
4504
+ };
4505
+ const run = async (input) => {
4506
+ try {
4507
+ let result;
4508
+ if (EXTERNAL_TOOLS.has(tc.name) && resolveExternalTool) {
4509
+ saveSession(state);
4510
+ log.info("Waiting for external tool result", {
4511
+ name: tc.name,
4512
+ id: tc.id
4513
+ });
4514
+ result = await resolveExternalTool(tc.id, tc.name, input);
4515
+ } else {
4516
+ result = await executeTool(tc.name, input, {
4517
+ apiConfig,
4518
+ model,
4519
+ signal: toolAbort.signal,
4520
+ onEvent: wrappedOnEvent,
4521
+ resolveExternalTool,
4522
+ toolCallId: tc.id,
4523
+ subAgentMessages,
4524
+ toolRegistry,
4525
+ onLog: (line) => wrappedOnEvent({
4526
+ type: "tool_input_delta",
4527
+ id: tc.id,
4528
+ name: tc.name,
4529
+ result: line
4530
+ })
4531
+ });
4532
+ }
4533
+ safeSettle(result, result.startsWith("Error"));
4534
+ } catch (err) {
4535
+ safeSettle(`Error: ${err.message}`, true);
4536
+ }
4537
+ };
4538
+ const entry = {
4539
+ id: tc.id,
4540
+ name: tc.name,
4541
+ input: tc.input,
4542
+ abortController: toolAbort,
4543
+ startedAt: toolStart,
4544
+ settle: safeSettle,
4545
+ rerun: (newInput) => {
4546
+ settled = false;
4547
+ toolAbort = new AbortController();
4548
+ signal?.addEventListener("abort", () => toolAbort.abort(), {
4549
+ once: true
4269
4550
  });
4551
+ entry.abortController = toolAbort;
4552
+ entry.input = newInput;
4553
+ run(newInput);
4270
4554
  }
4271
- const isError = result.startsWith("Error");
4272
- log.info("Tool completed", {
4273
- name: tc.name,
4274
- elapsed: `${Date.now() - toolStart}ms`,
4275
- isError,
4276
- resultLength: result.length
4277
- });
4278
- onEvent({
4279
- type: "tool_done",
4280
- id: tc.id,
4281
- name: tc.name,
4282
- result,
4283
- isError
4284
- });
4285
- return { id: tc.id, result, isError };
4286
- } catch (err) {
4287
- const errorMsg = `Error: ${err.message}`;
4288
- onEvent({
4289
- type: "tool_done",
4290
- id: tc.id,
4291
- name: tc.name,
4292
- result: errorMsg,
4293
- isError: true
4294
- });
4295
- return { id: tc.id, result: errorMsg, isError: true };
4296
- }
4555
+ };
4556
+ toolRegistry?.register(entry);
4557
+ run(tc.input);
4558
+ const r = await resultPromise;
4559
+ toolRegistry?.unregister(tc.id);
4560
+ log.info("Tool completed", {
4561
+ name: tc.name,
4562
+ elapsed: `${Date.now() - toolStart}ms`,
4563
+ isError: r.isError,
4564
+ resultLength: r.result.length
4565
+ });
4566
+ onEvent({
4567
+ type: "tool_done",
4568
+ id: tc.id,
4569
+ name: tc.name,
4570
+ result: r.result,
4571
+ isError: r.isError
4572
+ });
4573
+ return r;
4297
4574
  })
4298
4575
  );
4299
- toolStatusWatcher.stop();
4576
+ statusWatcher.stop();
4300
4577
  for (const r of results) {
4301
4578
  const block = contentBlocks.find(
4302
4579
  (b) => b.type === "tool" && b.id === r.id
@@ -4304,6 +4581,7 @@ async function runTurn(params) {
4304
4581
  if (block?.type === "tool") {
4305
4582
  block.result = r.result;
4306
4583
  block.isError = r.isError;
4584
+ block.completedAt = Date.now();
4307
4585
  const msgs = subAgentMessages.get(r.id);
4308
4586
  if (msgs) {
4309
4587
  block.subAgentMessages = msgs;
@@ -4328,11 +4606,81 @@ async function runTurn(params) {
4328
4606
  }
4329
4607
  }
4330
4608
 
4609
+ // src/toolRegistry.ts
4610
+ var ToolRegistry = class {
4611
+ entries = /* @__PURE__ */ new Map();
4612
+ onEvent;
4613
+ register(entry) {
4614
+ this.entries.set(entry.id, entry);
4615
+ }
4616
+ unregister(id) {
4617
+ this.entries.delete(id);
4618
+ }
4619
+ get(id) {
4620
+ return this.entries.get(id);
4621
+ }
4622
+ /**
4623
+ * Stop a running tool.
4624
+ *
4625
+ * - graceful: abort and settle with [INTERRUPTED] + partial result
4626
+ * - hard: abort and settle with a generic error
4627
+ *
4628
+ * Returns true if the tool was found and stopped.
4629
+ */
4630
+ stop(id, mode) {
4631
+ const entry = this.entries.get(id);
4632
+ if (!entry) {
4633
+ return false;
4634
+ }
4635
+ entry.abortController.abort(mode);
4636
+ if (mode === "graceful") {
4637
+ const partial = entry.getPartialResult?.() ?? "";
4638
+ const result = partial ? `[INTERRUPTED]
4639
+
4640
+ ${partial}` : "[INTERRUPTED] Tool execution was stopped.";
4641
+ entry.settle(result, false);
4642
+ } else {
4643
+ entry.settle("Error: tool was cancelled", true);
4644
+ }
4645
+ this.onEvent?.({
4646
+ type: "tool_stopped",
4647
+ id: entry.id,
4648
+ name: entry.name,
4649
+ mode,
4650
+ ...entry.parentToolId && { parentToolId: entry.parentToolId }
4651
+ });
4652
+ this.entries.delete(id);
4653
+ return true;
4654
+ }
4655
+ /**
4656
+ * Restart a running tool with the same or patched input.
4657
+ * The original controllable promise stays pending and settles
4658
+ * when the new execution finishes.
4659
+ *
4660
+ * Returns true if the tool was found and restarted.
4661
+ */
4662
+ restart(id, patchedInput) {
4663
+ const entry = this.entries.get(id);
4664
+ if (!entry) {
4665
+ return false;
4666
+ }
4667
+ entry.abortController.abort("restart");
4668
+ const newInput = patchedInput ? { ...entry.input, ...patchedInput } : entry.input;
4669
+ this.onEvent?.({
4670
+ type: "tool_restarted",
4671
+ id: entry.id,
4672
+ name: entry.name,
4673
+ input: newInput,
4674
+ ...entry.parentToolId && { parentToolId: entry.parentToolId }
4675
+ });
4676
+ entry.rerun(newInput);
4677
+ return true;
4678
+ }
4679
+ };
4680
+
4331
4681
  // src/headless.ts
4332
- var BASE_DIR = import.meta.dirname ?? path14.dirname(new URL(import.meta.url).pathname);
4333
- var ACTIONS_DIR = path14.join(BASE_DIR, "actions");
4334
4682
  function loadActionPrompt(name) {
4335
- return fs21.readFileSync(path14.join(ACTIONS_DIR, `${name}.md`), "utf-8").trim();
4683
+ return readAsset("prompt", "actions", `${name}.md`);
4336
4684
  }
4337
4685
  function emit(event, data, requestId) {
4338
4686
  const payload = { event, ...data };
@@ -4396,6 +4744,7 @@ async function startHeadless(opts = {}) {
4396
4744
  const EXTERNAL_TOOL_TIMEOUT_MS = 3e5;
4397
4745
  const pendingTools = /* @__PURE__ */ new Map();
4398
4746
  const earlyResults = /* @__PURE__ */ new Map();
4747
+ const toolRegistry = new ToolRegistry();
4399
4748
  const USER_FACING_TOOLS = /* @__PURE__ */ new Set([
4400
4749
  "promptUser",
4401
4750
  "confirmDestructiveAction",
@@ -4500,14 +4849,46 @@ async function startHeadless(opts = {}) {
4500
4849
  rid
4501
4850
  );
4502
4851
  return;
4852
+ case "tool_stopped":
4853
+ emit(
4854
+ "tool_stopped",
4855
+ {
4856
+ id: e.id,
4857
+ name: e.name,
4858
+ mode: e.mode,
4859
+ ...e.parentToolId && { parentToolId: e.parentToolId }
4860
+ },
4861
+ rid
4862
+ );
4863
+ return;
4864
+ case "tool_restarted":
4865
+ emit(
4866
+ "tool_restarted",
4867
+ {
4868
+ id: e.id,
4869
+ name: e.name,
4870
+ input: e.input,
4871
+ ...e.parentToolId && { parentToolId: e.parentToolId }
4872
+ },
4873
+ rid
4874
+ );
4875
+ return;
4503
4876
  case "status":
4504
- emit("status", { message: e.message }, rid);
4877
+ emit(
4878
+ "status",
4879
+ {
4880
+ message: e.message,
4881
+ ...e.parentToolId && { parentToolId: e.parentToolId }
4882
+ },
4883
+ rid
4884
+ );
4505
4885
  return;
4506
4886
  case "error":
4507
4887
  emit("error", { error: e.error }, rid);
4508
4888
  return;
4509
4889
  }
4510
4890
  }
4891
+ toolRegistry.onEvent = onEvent;
4511
4892
  async function handleMessage(parsed, requestId) {
4512
4893
  if (running) {
4513
4894
  emit(
@@ -4559,7 +4940,8 @@ async function startHeadless(opts = {}) {
4559
4940
  signal: currentAbort.signal,
4560
4941
  onEvent,
4561
4942
  resolveExternalTool,
4562
- hidden: isCommand
4943
+ hidden: isCommand,
4944
+ toolRegistry
4563
4945
  });
4564
4946
  if (!completedEmitted) {
4565
4947
  emit(
@@ -4613,6 +4995,36 @@ async function startHeadless(opts = {}) {
4613
4995
  emit("completed", { success: true }, requestId);
4614
4996
  return;
4615
4997
  }
4998
+ if (action === "stop_tool") {
4999
+ const id = parsed.id;
5000
+ const mode = parsed.mode ?? "hard";
5001
+ const found = toolRegistry.stop(id, mode);
5002
+ if (found) {
5003
+ emit("completed", { success: true }, requestId);
5004
+ } else {
5005
+ emit(
5006
+ "completed",
5007
+ { success: false, error: "Tool not found" },
5008
+ requestId
5009
+ );
5010
+ }
5011
+ return;
5012
+ }
5013
+ if (action === "restart_tool") {
5014
+ const id = parsed.id;
5015
+ const patchedInput = parsed.input;
5016
+ const found = toolRegistry.restart(id, patchedInput);
5017
+ if (found) {
5018
+ emit("completed", { success: true }, requestId);
5019
+ } else {
5020
+ emit(
5021
+ "completed",
5022
+ { success: false, error: "Tool not found" },
5023
+ requestId
5024
+ );
5025
+ }
5026
+ return;
5027
+ }
4616
5028
  if (action === "message") {
4617
5029
  await handleMessage(parsed, requestId);
4618
5030
  return;