@mindstudio-ai/remy 0.1.39 → 0.1.41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26) hide show
  1. package/dist/headless.js +274 -191
  2. package/dist/index.js +324 -222
  3. package/dist/prompt/compiled/design.md +65 -216
  4. package/dist/prompt/compiled/interfaces.md +3 -1
  5. package/dist/prompt/compiled/msfm.md +2 -2
  6. package/dist/prompt/static/team.md +1 -1
  7. package/dist/subagents/designExpert/data/sources/compile-ui-inspiration.sh +132 -0
  8. package/dist/subagents/designExpert/data/sources/dev/index.html +105 -0
  9. package/dist/subagents/designExpert/data/sources/dev/serve.mjs +45 -0
  10. package/dist/subagents/designExpert/data/sources/fonts.json +1 -153
  11. package/dist/subagents/designExpert/data/sources/prompts/ui-analysis.md +25 -0
  12. package/dist/subagents/designExpert/data/sources/ui_inspiration.json +83 -0
  13. package/dist/subagents/designExpert/data/sources/ui_inspiration_compiled.json +328 -0
  14. package/dist/subagents/designExpert/prompt.md +13 -14
  15. package/dist/subagents/designExpert/prompts/color.md +1 -1
  16. package/dist/subagents/designExpert/prompts/components.md +0 -7
  17. package/dist/subagents/designExpert/prompts/frontend-design-notes.md +1 -0
  18. package/dist/subagents/designExpert/prompts/identity.md +3 -10
  19. package/dist/subagents/designExpert/prompts/images.md +2 -8
  20. package/dist/subagents/designExpert/prompts/instructions.md +22 -0
  21. package/dist/subagents/designExpert/prompts/layout.md +1 -1
  22. package/dist/subagents/designExpert/prompts/ui-patterns.md +7 -0
  23. package/dist/subagents/designExpert/tools/images/enhance-image-prompt.md +48 -0
  24. package/package.json +1 -1
  25. package/dist/prompt/sources/frontend-design-notes.md +0 -153
  26. package/dist/prompt/sources/media-cdn.md +0 -46
package/dist/headless.js CHANGED
@@ -44,11 +44,6 @@ function readJsonAsset(fallback, ...segments) {
44
44
  }
45
45
  }
46
46
 
47
- // src/config.ts
48
- import fs3 from "fs";
49
- import path2 from "path";
50
- import os from "os";
51
-
52
47
  // src/logger.ts
53
48
  import fs2 from "fs";
54
49
  var LEVELS = {
@@ -60,9 +55,6 @@ var LEVELS = {
60
55
  var currentLevel = LEVELS.error;
61
56
  var writeFn = () => {
62
57
  };
63
- function timestamp() {
64
- return (/* @__PURE__ */ new Date()).toISOString();
65
- }
66
58
  var MAX_VALUE_LENGTH = 200;
67
59
  function truncateValues(obj) {
68
60
  const result = {};
@@ -77,32 +69,35 @@ function truncateValues(obj) {
77
69
  }
78
70
  return result;
79
71
  }
80
- function write(level, msg, data) {
72
+ function write(level, module, msg, data) {
81
73
  if (LEVELS[level] > currentLevel) {
82
74
  return;
83
75
  }
84
- const parts = [`[${timestamp()}]`, level.toUpperCase().padEnd(5), msg];
76
+ const entry = {
77
+ ts: Date.now(),
78
+ level,
79
+ module,
80
+ msg
81
+ };
85
82
  if (data) {
86
- parts.push(JSON.stringify(truncateValues(data)));
83
+ Object.assign(entry, truncateValues(data));
87
84
  }
88
- writeFn(parts.join(" "));
85
+ writeFn(JSON.stringify(entry));
86
+ }
87
+ function createLogger(module) {
88
+ return {
89
+ error: (msg, data) => write("error", module, msg, data),
90
+ warn: (msg, data) => write("warn", module, msg, data),
91
+ info: (msg, data) => write("info", module, msg, data),
92
+ debug: (msg, data) => write("debug", module, msg, data)
93
+ };
89
94
  }
90
- var log = {
91
- error(msg, data) {
92
- write("error", msg, data);
93
- },
94
- warn(msg, data) {
95
- write("warn", msg, data);
96
- },
97
- info(msg, data) {
98
- write("info", msg, data);
99
- },
100
- debug(msg, data) {
101
- write("debug", msg, data);
102
- }
103
- };
104
95
 
105
96
  // src/config.ts
97
+ import fs3 from "fs";
98
+ import path2 from "path";
99
+ import os from "os";
100
+ var log = createLogger("config");
106
101
  var CONFIG_PATH = path2.join(
107
102
  os.homedir(),
108
103
  ".mindstudio-local-tunnel",
@@ -144,10 +139,11 @@ function resolveConfig(flags) {
144
139
  }
145
140
 
146
141
  // src/tools/_helpers/sidecar.ts
142
+ var log2 = createLogger("sidecar");
147
143
  var baseUrl = null;
148
144
  function setSidecarBaseUrl(url) {
149
145
  baseUrl = url;
150
- log.info("Sidecar configured", { url });
146
+ log2.info("Configured", { url });
151
147
  }
152
148
  function isSidecarConfigured() {
153
149
  return baseUrl !== null;
@@ -157,7 +153,6 @@ async function sidecarRequest(endpoint, body = {}, options) {
157
153
  throw new Error("Sidecar not available");
158
154
  }
159
155
  const url = `${baseUrl}${endpoint}`;
160
- log.debug("Sidecar request", { endpoint, body });
161
156
  try {
162
157
  const res = await fetch(url, {
163
158
  method: "POST",
@@ -166,7 +161,7 @@ async function sidecarRequest(endpoint, body = {}, options) {
166
161
  signal: options?.timeout ? AbortSignal.timeout(options.timeout) : void 0
167
162
  });
168
163
  if (!res.ok) {
169
- log.error("Sidecar error", { endpoint, status: res.status });
164
+ log2.error("Sidecar error", { endpoint, status: res.status });
170
165
  throw new Error(`Sidecar error: ${res.status}`);
171
166
  }
172
167
  return res.json();
@@ -174,7 +169,7 @@ async function sidecarRequest(endpoint, body = {}, options) {
174
169
  if (err.message.startsWith("Sidecar error")) {
175
170
  throw err;
176
171
  }
177
- log.error("Sidecar connection error", { endpoint, error: err.message });
172
+ log2.error("Sidecar connection error", { endpoint, error: err.message });
178
173
  throw new Error(`Sidecar connection error: ${err.message}`);
179
174
  }
180
175
  }
@@ -429,25 +424,19 @@ ${viewContext?.activeFile ? `Active file: ${viewContext.activeFile}` : ""}
429
424
  }
430
425
 
431
426
  // src/api.ts
427
+ var log3 = createLogger("api");
432
428
  async function* streamChat(params) {
433
- const { baseUrl: baseUrl2, apiKey, signal, ...body } = params;
429
+ const { baseUrl: baseUrl2, apiKey, signal, requestId, ...body } = params;
434
430
  const url = `${baseUrl2}/_internal/v2/agent/remy/chat`;
435
431
  const startTime = Date.now();
436
432
  const messagesWithAttachments = body.messages.filter(
437
433
  (m) => m.attachments && m.attachments.length > 0
438
434
  );
439
- log.info("POST agent/chat", {
440
- url,
435
+ log3.info("API request", {
436
+ requestId,
441
437
  model: body.model,
442
438
  messageCount: body.messages.length,
443
- toolCount: body.tools.length,
444
- ...messagesWithAttachments.length > 0 && {
445
- attachments: messagesWithAttachments.map((m) => ({
446
- role: m.role,
447
- attachmentCount: m.attachments.length,
448
- urls: m.attachments.map((a) => a.url)
449
- }))
450
- }
439
+ toolCount: body.tools.length
451
440
  });
452
441
  let res;
453
442
  try {
@@ -462,15 +451,15 @@ async function* streamChat(params) {
462
451
  });
463
452
  } catch (err) {
464
453
  if (signal?.aborted) {
465
- log.info("Request aborted by signal");
454
+ log3.warn("Request aborted", { requestId });
466
455
  throw err;
467
456
  }
468
- log.error("Network error", { error: err.message });
457
+ log3.error("Network error", { requestId, error: err.message });
469
458
  yield { type: "error", error: `Network error: ${err.message}` };
470
459
  return;
471
460
  }
472
461
  const ttfb = Date.now() - startTime;
473
- log.info(`Response ${res.status}`, { ttfb: `${ttfb}ms` });
462
+ log3.info("API response", { requestId, status: res.status, ttfbMs: ttfb });
474
463
  if (!res.ok) {
475
464
  let errorMessage = `HTTP ${res.status}`;
476
465
  try {
@@ -483,7 +472,11 @@ async function* streamChat(params) {
483
472
  }
484
473
  } catch {
485
474
  }
486
- log.error("API error", { status: res.status, error: errorMessage });
475
+ log3.error("API error", {
476
+ requestId,
477
+ status: res.status,
478
+ error: errorMessage
479
+ });
487
480
  yield { type: "error", error: errorMessage };
488
481
  return;
489
482
  }
@@ -508,7 +501,10 @@ async function* streamChat(params) {
508
501
  } catch {
509
502
  clearTimeout(stallTimer);
510
503
  await reader.cancel();
511
- log.error("Stream stalled", { elapsed: `${Date.now() - startTime}ms` });
504
+ log3.error("Stream stalled", {
505
+ requestId,
506
+ durationMs: Date.now() - startTime
507
+ });
512
508
  yield {
513
509
  type: "error",
514
510
  error: "Stream stalled \u2014 no data received for 5 minutes"
@@ -530,8 +526,9 @@ async function* streamChat(params) {
530
526
  const event = JSON.parse(line.slice(6));
531
527
  if (event.type === "done") {
532
528
  const elapsed = Date.now() - startTime;
533
- log.info("Stream complete", {
534
- elapsed: `${elapsed}ms`,
529
+ log3.info("Stream complete", {
530
+ requestId,
531
+ durationMs: elapsed,
535
532
  stopReason: event.stopReason,
536
533
  inputTokens: event.usage.inputTokens,
537
534
  outputTokens: event.usage.outputTokens
@@ -564,11 +561,6 @@ async function* streamChatWithRetry(params, options) {
564
561
  for await (const event of streamChat(params)) {
565
562
  if (event.type === "error") {
566
563
  if (isRetryableError(event.error) && attempt < MAX_RETRIES - 1) {
567
- log.warn("Retryable error, will retry", {
568
- attempt: attempt + 1,
569
- maxRetries: MAX_RETRIES,
570
- error: event.error
571
- });
572
564
  options?.onRetry?.(attempt, event.error);
573
565
  retryableFailure = true;
574
566
  break;
@@ -583,7 +575,12 @@ async function* streamChatWithRetry(params, options) {
583
575
  return;
584
576
  }
585
577
  const backoff = INITIAL_BACKOFF_MS * 2 ** attempt;
586
- log.info("Retrying after backoff", { backoffMs: backoff });
578
+ log3.warn("Retrying", {
579
+ requestId: params.requestId,
580
+ attempt: attempt + 1,
581
+ maxRetries: MAX_RETRIES,
582
+ backoffMs: backoff
583
+ });
587
584
  await sleep(backoff);
588
585
  continue;
589
586
  }
@@ -1491,9 +1488,9 @@ var setProjectMetadataTool = {
1491
1488
  import fs9 from "fs/promises";
1492
1489
  var DEFAULT_MAX_LINES2 = 500;
1493
1490
  function isBinary(buffer) {
1494
- const sample3 = buffer.subarray(0, 8192);
1495
- for (let i = 0; i < sample3.length; i++) {
1496
- if (sample3[i] === 0) {
1491
+ const sample4 = buffer.subarray(0, 8192);
1492
+ for (let i = 0; i < sample4.length; i++) {
1493
+ if (sample4[i] === 0) {
1497
1494
  return true;
1498
1495
  }
1499
1496
  }
@@ -2158,7 +2155,6 @@ async function captureAndAnalyzeScreenshot(promptOrOptions) {
2158
2155
  const ssResult = await sidecarRequest("/screenshot-full-page", void 0, {
2159
2156
  timeout: 12e4
2160
2157
  });
2161
- log.debug("Screenshot response", { ssResult });
2162
2158
  const url = ssResult?.url || ssResult?.screenshotUrl;
2163
2159
  if (!url) {
2164
2160
  throw new Error(
@@ -2218,13 +2214,8 @@ function startStatusWatcher(config) {
2218
2214
  try {
2219
2215
  const ctx = getContext();
2220
2216
  if (!ctx.assistantText && !ctx.lastToolName) {
2221
- log.debug("Status watcher: no context, skipping");
2222
2217
  return;
2223
2218
  }
2224
- log.debug("Status watcher: requesting label", {
2225
- textLength: ctx.assistantText.length,
2226
- lastToolName: ctx.lastToolName
2227
- });
2228
2219
  const res = await fetch(url, {
2229
2220
  method: "POST",
2230
2221
  headers: {
@@ -2241,30 +2232,18 @@ function startStatusWatcher(config) {
2241
2232
  signal
2242
2233
  });
2243
2234
  if (!res.ok) {
2244
- log.debug("Status watcher: endpoint returned non-ok", {
2245
- status: res.status
2246
- });
2247
2235
  return;
2248
2236
  }
2249
2237
  const data = await res.json();
2250
- if (!data.label) {
2251
- log.debug("Status watcher: no label in response");
2252
- return;
2253
- }
2254
- if (data.label === lastLabel) {
2255
- log.debug("Status watcher: duplicate label, skipping", {
2256
- label: data.label
2257
- });
2238
+ if (!data.label || data.label === lastLabel) {
2258
2239
  return;
2259
2240
  }
2260
2241
  lastLabel = data.label;
2261
2242
  if (stopped) {
2262
2243
  return;
2263
2244
  }
2264
- log.debug("Status watcher: emitting", { label: data.label });
2265
2245
  onStatus(data.label);
2266
- } catch (err) {
2267
- log.debug("Status watcher: error", { error: err?.message ?? "unknown" });
2246
+ } catch {
2268
2247
  } finally {
2269
2248
  inflight = false;
2270
2249
  }
@@ -2272,12 +2251,10 @@ function startStatusWatcher(config) {
2272
2251
  const timer = setInterval(tick, interval);
2273
2252
  tick().catch(() => {
2274
2253
  });
2275
- log.debug("Status watcher started", { interval });
2276
2254
  return {
2277
2255
  stop() {
2278
2256
  stopped = true;
2279
2257
  clearInterval(timer);
2280
- log.debug("Status watcher stopped");
2281
2258
  }
2282
2259
  };
2283
2260
  }
@@ -2318,6 +2295,7 @@ function cleanMessagesForApi(messages) {
2318
2295
  }
2319
2296
 
2320
2297
  // src/subagents/runner.ts
2298
+ var log4 = createLogger("sub-agent");
2321
2299
  async function runSubAgent(config) {
2322
2300
  const {
2323
2301
  system,
@@ -2333,14 +2311,19 @@ async function runSubAgent(config) {
2333
2311
  onEvent,
2334
2312
  resolveExternalTool,
2335
2313
  toolRegistry,
2314
+ requestId,
2336
2315
  background,
2337
2316
  onBackgroundComplete
2338
2317
  } = config;
2339
2318
  const bgAbort = background ? new AbortController() : null;
2340
2319
  const signal = background ? bgAbort.signal : parentSignal;
2320
+ const agentName = subAgentId || "sub-agent";
2321
+ const runStart = Date.now();
2322
+ log4.info("Sub-agent started", { requestId, parentToolId, agentName });
2341
2323
  const emit2 = (e) => {
2342
2324
  onEvent({ ...e, parentToolId });
2343
2325
  };
2326
+ let turns = 0;
2344
2327
  const run = async () => {
2345
2328
  const messages = [{ role: "user", content: task }];
2346
2329
  function getPartialText(blocks) {
@@ -2360,6 +2343,7 @@ ${partial}` : "[INTERRUPTED] Agent was interrupted before producing output.",
2360
2343
  }
2361
2344
  let lastToolResult = "";
2362
2345
  while (true) {
2346
+ turns++;
2363
2347
  if (signal?.aborted) {
2364
2348
  return abortResult([]);
2365
2349
  }
@@ -2385,6 +2369,7 @@ Current date/time: ${(/* @__PURE__ */ new Date()).toISOString().replace("T", " "
2385
2369
  {
2386
2370
  ...apiConfig,
2387
2371
  model,
2372
+ requestId,
2388
2373
  subAgentId,
2389
2374
  system: fullSystem,
2390
2375
  messages: cleanMessagesForApi(messages),
@@ -2475,7 +2460,8 @@ Current date/time: ${(/* @__PURE__ */ new Date()).toISOString().replace("T", " "
2475
2460
  const text = getPartialText(contentBlocks);
2476
2461
  return { text, messages };
2477
2462
  }
2478
- log.info("Sub-agent executing tools", {
2463
+ log4.info("Tools executing", {
2464
+ requestId,
2479
2465
  parentToolId,
2480
2466
  count: toolCalls.length,
2481
2467
  tools: toolCalls.map((tc) => tc.name)
@@ -2574,15 +2560,37 @@ Current date/time: ${(/* @__PURE__ */ new Date()).toISOString().replace("T", " "
2574
2560
  }
2575
2561
  }
2576
2562
  };
2563
+ const wrapRun = async () => {
2564
+ try {
2565
+ const result = await run();
2566
+ log4.info("Sub-agent complete", {
2567
+ requestId,
2568
+ parentToolId,
2569
+ agentName,
2570
+ durationMs: Date.now() - runStart,
2571
+ turns
2572
+ });
2573
+ return result;
2574
+ } catch (err) {
2575
+ log4.warn("Sub-agent error", {
2576
+ requestId,
2577
+ parentToolId,
2578
+ agentName,
2579
+ error: err.message
2580
+ });
2581
+ throw err;
2582
+ }
2583
+ };
2577
2584
  if (!background) {
2578
- return run();
2585
+ return wrapRun();
2579
2586
  }
2587
+ log4.info("Sub-agent backgrounded", { requestId, parentToolId, agentName });
2580
2588
  const ack = await generateBackgroundAck({
2581
2589
  apiConfig,
2582
2590
  agentName: subAgentId || "agent",
2583
2591
  task
2584
2592
  });
2585
- run().then((finalResult) => onBackgroundComplete?.(finalResult)).catch(
2593
+ wrapRun().then((finalResult) => onBackgroundComplete?.(finalResult)).catch(
2586
2594
  (err) => onBackgroundComplete?.({ text: `Error: ${err.message}`, messages: [] })
2587
2595
  );
2588
2596
  return { text: ack, messages: [], backgrounded: true };
@@ -2705,6 +2713,7 @@ ${appSpec}
2705
2713
  }
2706
2714
 
2707
2715
  // src/subagents/browserAutomation/index.ts
2716
+ var log5 = createLogger("browser-automation");
2708
2717
  var browserAutomationTool = {
2709
2718
  definition: {
2710
2719
  name: "runAutomatedBrowserTest",
@@ -2764,6 +2773,7 @@ var browserAutomationTool = {
2764
2773
  subAgentId: "browserAutomation",
2765
2774
  signal: context.signal,
2766
2775
  parentToolId: context.toolCallId,
2776
+ requestId: context.requestId,
2767
2777
  onEvent: context.onEvent,
2768
2778
  resolveExternalTool: async (id, name, input2) => {
2769
2779
  if (!context.resolveExternalTool) {
@@ -2798,7 +2808,7 @@ var browserAutomationTool = {
2798
2808
  }
2799
2809
  }
2800
2810
  } catch {
2801
- log.debug("Failed to parse batch analysis result", {
2811
+ log5.debug("Failed to parse batch analysis result", {
2802
2812
  batchResult
2803
2813
  });
2804
2814
  }
@@ -2885,6 +2895,8 @@ You are analyzing a screenshot of a real website or app for a designer's persona
2885
2895
 
2886
2896
  Analyze the image and think about what makes the site or app special and unique. What is it doing that is unique, different, original, and creative? What makes it special? What isn't working? What doesn't look or feel good?
2887
2897
 
2898
+ Important: First, look at the screenshot and use your judgement to identify what the user wants notes about. If the screenshot is of a website for design case studies, or a blog post writeup about a new product or design, assume that the user is interested in a reference for the site/app/product being talked about - it is unlikely they are interested in a design audit of dribbble.com, for example.
2899
+
2888
2900
  Then, provide the following analysis:
2889
2901
 
2890
2902
  ## Context
@@ -3006,29 +3018,68 @@ async function execute5(input, onLog) {
3006
3018
  }
3007
3019
  }
3008
3020
 
3009
- // src/subagents/designExpert/tools/generateImages.ts
3021
+ // src/subagents/designExpert/tools/images/generateImages.ts
3010
3022
  var generateImages_exports = {};
3011
3023
  __export(generateImages_exports, {
3012
3024
  definition: () => definition6,
3013
3025
  execute: () => execute6
3014
3026
  });
3015
3027
 
3016
- // src/subagents/designExpert/tools/_seedream.ts
3017
- var ANALYZE_PROMPT = "You are reviewing this image for a visual designer sourcing assets for a project. Describe: what the image depicts, the mood and color palette, how the lighting and composition work, whether there are any issues (unwanted text, artifacts, distortions), and how it could be used in a layout (hero background, feature section, card texture, etc). Be concise and practical. Respond only with your analysis as Markdown and absolutely no other text. Do not use emojis - use unicode if you need symbols.";
3018
- async function seedreamGenerate(opts) {
3019
- const { prompts, sourceImages, transparentBackground, onLog } = opts;
3020
- const width = opts.width || 2048;
3021
- const height = opts.height || 2048;
3022
- const config = { width, height };
3023
- if (sourceImages?.length) {
3024
- config.images = sourceImages;
3028
+ // src/subagents/designExpert/tools/images/enhancePrompt.ts
3029
+ var SYSTEM_PROMPT = readAsset(
3030
+ "subagents/designExpert/tools/images/enhance-image-prompt.md"
3031
+ );
3032
+ async function enhanceImagePrompt(params) {
3033
+ const { brief, aspectRatio, transparentBackground, onLog } = params;
3034
+ const orientation = aspectRatio === "1:1" ? "square" : ["16:9", "4:3", "3:2"].includes(aspectRatio) ? "landscape" : "portrait";
3035
+ const contextParts = [
3036
+ `Aspect ratio: ${aspectRatio} (${orientation})`
3037
+ ];
3038
+ if (transparentBackground) {
3039
+ contextParts.push(
3040
+ "Transparent background: yes \u2014 the background will be removed. Focus on the subject as an isolated element."
3041
+ );
3025
3042
  }
3043
+ const message = `<context>
3044
+ ${contextParts.join("\n")}
3045
+ </context>
3046
+
3047
+ <brief>
3048
+ ${brief}
3049
+ </brief>`;
3050
+ const enhanced = await runCli(
3051
+ `mindstudio generate-text --prompt ${JSON.stringify(SYSTEM_PROMPT)} --message ${JSON.stringify(message)} --output-key enhanced --no-meta`,
3052
+ { timeout: 6e4, onLog }
3053
+ );
3054
+ return enhanced.trim();
3055
+ }
3056
+
3057
+ // src/subagents/designExpert/tools/images/imageGenerator.ts
3058
+ var ANALYZE_PROMPT = "You are reviewing this image for a visual designer sourcing assets for a project. Describe: what the image depicts, the mood and color palette, how the lighting and composition work, any text present in the image, whether there are any issues (artifacts, distortions), and how it could be used in a layout for an app or website. Be concise and practical. Respond only with your analysis as Markdown and absolutely no other text. Do not use emojis - use unicode if you need symbols.";
3059
+ async function generateImageAssets(opts) {
3060
+ const { prompts, sourceImages, transparentBackground, onLog } = opts;
3061
+ const aspectRatio = opts.aspectRatio || "1:1";
3062
+ const config = {
3063
+ aspect_ratio: aspectRatio,
3064
+ ...sourceImages?.length && { source_images: sourceImages }
3065
+ };
3066
+ const isEdit = !!sourceImages?.length;
3067
+ const enhancedPrompts = isEdit ? prompts : await Promise.all(
3068
+ prompts.map(
3069
+ (brief) => enhanceImagePrompt({
3070
+ brief,
3071
+ aspectRatio,
3072
+ transparentBackground,
3073
+ onLog
3074
+ })
3075
+ )
3076
+ );
3026
3077
  let imageUrls;
3027
- if (prompts.length === 1) {
3078
+ if (enhancedPrompts.length === 1) {
3028
3079
  const step = JSON.stringify({
3029
- prompt: prompts[0],
3080
+ prompt: enhancedPrompts[0],
3030
3081
  imageModelOverride: {
3031
- model: "seedream-4.5",
3082
+ model: "gemini-3.1-flash-image",
3032
3083
  config
3033
3084
  }
3034
3085
  });
@@ -3038,12 +3089,12 @@ async function seedreamGenerate(opts) {
3038
3089
  );
3039
3090
  imageUrls = [url];
3040
3091
  } else {
3041
- const steps = prompts.map((prompt) => ({
3092
+ const steps = enhancedPrompts.map((prompt) => ({
3042
3093
  stepType: "generateImage",
3043
3094
  step: {
3044
3095
  prompt,
3045
3096
  imageModelOverride: {
3046
- model: "seedream-4.5",
3097
+ model: "gemini-3.1-flash-image",
3047
3098
  config
3048
3099
  }
3049
3100
  }
@@ -3080,22 +3131,32 @@ async function seedreamGenerate(opts) {
3080
3131
  const images = await Promise.all(
3081
3132
  imageUrls.map(async (url, i) => {
3082
3133
  if (url.startsWith("Error")) {
3083
- return { prompt: prompts[i], error: url };
3134
+ return {
3135
+ prompt: prompts[i],
3136
+ ...!isEdit && { enhancedPrompt: enhancedPrompts[i] },
3137
+ error: url
3138
+ };
3084
3139
  }
3085
3140
  const analysis = await runCli(
3086
3141
  `mindstudio analyze-image --prompt ${JSON.stringify(ANALYZE_PROMPT)} --image-url ${JSON.stringify(url)} --output-key analysis --no-meta`,
3087
3142
  { timeout: 2e5, onLog }
3088
3143
  );
3089
- return { url, prompt: prompts[i], analysis, width, height };
3144
+ return {
3145
+ url,
3146
+ prompt: prompts[i],
3147
+ ...!isEdit && { enhancedPrompt: enhancedPrompts[i] },
3148
+ analysis,
3149
+ aspectRatio
3150
+ };
3090
3151
  })
3091
3152
  );
3092
3153
  return JSON.stringify({ images });
3093
3154
  }
3094
3155
 
3095
- // src/subagents/designExpert/tools/generateImages.ts
3156
+ // src/subagents/designExpert/tools/images/generateImages.ts
3096
3157
  var definition6 = {
3097
3158
  name: "generateImages",
3098
- description: "Generate images using AI. Returns CDN URLs with a quality analysis for each image. Produces high-quality results for everything from photorealistic images and abstract/creative visuals. Pass multiple prompts to generate in parallel. No need to analyze images separately after generating \u2014 the analysis is included.",
3159
+ description: "Generate images. Returns CDN URLs with a quality analysis for each image. Produces high-quality results for everything from photorealistic images and abstract/creative visuals. Pass multiple prompts to generate in parallel. No need to analyze images separately after generating \u2014 the analysis is included.",
3099
3160
  inputSchema: {
3100
3161
  type: "object",
3101
3162
  properties: {
@@ -3104,15 +3165,12 @@ var definition6 = {
3104
3165
  items: {
3105
3166
  type: "string"
3106
3167
  },
3107
- description: "One or more image generation prompts. Be detailed: describe style, mood, composition, colors. Multiple prompts run in parallel."
3108
- },
3109
- width: {
3110
- type: "number",
3111
- description: "Image width in pixels. Default 2048. Range: 2048-4096."
3168
+ description: "One or more image briefs describing what you want. Focus on subject, mood, style, and intended use \u2014 the tool optimizes your brief into a model-ready prompt automatically. Multiple briefs run in parallel."
3112
3169
  },
3113
- height: {
3114
- type: "number",
3115
- description: "Image height in pixels. Default 2048. Range: 2048-4096."
3170
+ aspectRatio: {
3171
+ type: "string",
3172
+ enum: ["1:1", "16:9", "9:16", "3:4", "4:3", "2:3", "3:2"],
3173
+ description: "Aspect ratio. Default 1:1."
3116
3174
  },
3117
3175
  transparentBackground: {
3118
3176
  type: "boolean",
@@ -3123,16 +3181,15 @@ var definition6 = {
3123
3181
  }
3124
3182
  };
3125
3183
  async function execute6(input, onLog) {
3126
- return seedreamGenerate({
3184
+ return generateImageAssets({
3127
3185
  prompts: input.prompts,
3128
- width: input.width,
3129
- height: input.height,
3186
+ aspectRatio: input.aspectRatio,
3130
3187
  transparentBackground: input.transparentBackground,
3131
3188
  onLog
3132
3189
  });
3133
3190
  }
3134
3191
 
3135
- // src/subagents/designExpert/tools/editImages.ts
3192
+ // src/subagents/designExpert/tools/images/editImages.ts
3136
3193
  var editImages_exports = {};
3137
3194
  __export(editImages_exports, {
3138
3195
  definition: () => definition7,
@@ -3140,7 +3197,7 @@ __export(editImages_exports, {
3140
3197
  });
3141
3198
  var definition7 = {
3142
3199
  name: "editImages",
3143
- description: "Edit or transform existing images using AI. Provide one or more source image URLs as reference and a prompt describing the desired edit. Use for compositing, style transfer, subject transformation, blending multiple references, or incorporating one or more ferences into something new. Returns CDN URLs with analysis.",
3200
+ description: "Edit or transform existing images. Provide one or more source image URLs as reference and a prompt describing the desired edit. Use for compositing, style transfer, subject transformation, blending multiple references, or incorporating one or more references into something new. Returns CDN URLs with analysis.",
3144
3201
  inputSchema: {
3145
3202
  type: "object",
3146
3203
  properties: {
@@ -3149,7 +3206,7 @@ var definition7 = {
3149
3206
  items: {
3150
3207
  type: "string"
3151
3208
  },
3152
- description: "One or more edit prompts describing how to transform the source images. Multiple prompts run in parallel, each using the same source images."
3209
+ description: "One or more edit briefs describing the desired transformation. Focus on what to change relative to the source material. Multiple briefs run in parallel, each using the same source images."
3153
3210
  },
3154
3211
  sourceImages: {
3155
3212
  type: "array",
@@ -3158,13 +3215,10 @@ var definition7 = {
3158
3215
  },
3159
3216
  description: "One or more source/reference image URLs. These are used as the basis for the edit \u2014 the AI will use them as reference for style, subject, or composition."
3160
3217
  },
3161
- width: {
3162
- type: "number",
3163
- description: "Output width in pixels. Default 2048. Range: 2048-4096."
3164
- },
3165
- height: {
3166
- type: "number",
3167
- description: "Output height in pixels. Default 2048. Range: 2048-4096."
3218
+ aspectRatio: {
3219
+ type: "string",
3220
+ enum: ["1:1", "16:9", "9:16", "3:4", "4:3", "2:3", "3:2"],
3221
+ description: "Output aspect ratio. Default 1:1."
3168
3222
  },
3169
3223
  transparentBackground: {
3170
3224
  type: "boolean",
@@ -3175,11 +3229,10 @@ var definition7 = {
3175
3229
  }
3176
3230
  };
3177
3231
  async function execute7(input, onLog) {
3178
- return seedreamGenerate({
3232
+ return generateImageAssets({
3179
3233
  prompts: input.prompts,
3180
3234
  sourceImages: input.sourceImages,
3181
- width: input.width,
3182
- height: input.height,
3235
+ aspectRatio: input.aspectRatio,
3183
3236
  transparentBackground: input.transparentBackground,
3184
3237
  onLog
3185
3238
  });
@@ -3345,15 +3398,11 @@ function getFontLibrarySample() {
3345
3398
  return `
3346
3399
  ## Font Library
3347
3400
 
3348
- A random sample from a curated font library. Use these as starting points for font selection.
3401
+ This is your personal library of fonts you love. Use it as a starting point when thinking about anything related to typography.
3349
3402
 
3350
3403
  ### Fonts
3351
3404
 
3352
- ${fontList}
3353
-
3354
- ### Pairings
3355
-
3356
- ${pairingList}`.trim();
3405
+ ${fontList}`.trim();
3357
3406
  }
3358
3407
 
3359
3408
  // src/subagents/designExpert/data/getDesignReferencesSample.ts
@@ -3373,23 +3422,58 @@ function sample2(arr, n) {
3373
3422
  return copy.slice(0, n);
3374
3423
  }
3375
3424
  function getDesignReferencesSample() {
3376
- const images = sample2(inspirationImages, 30);
3425
+ const images = sample2(inspirationImages, 25);
3377
3426
  if (!images.length) {
3378
3427
  return "";
3379
3428
  }
3380
3429
  const imageList = images.map((img, i) => `### Reference ${i + 1}
3381
3430
  ${img.analysis}`).join("\n\n");
3382
3431
  return `
3383
- ## Design References
3432
+ ## Visual Design References
3384
3433
 
3385
- This is what the bar looks like. These are real sites that made it onto curated design galleries because they did something bold, intentional, and memorable. Use them as inspiration and let the takeaways guide your work. Your designs should feel like they belong in this company.
3434
+ This is your personal reference library of visual design you love. The apps and sites featured within made it into your library because they did something bold, intentional, and memorable. Use them as reference, inspiration, and let the takeaways guide your work.
3386
3435
 
3387
3436
  ${imageList}`.trim();
3388
3437
  }
3389
3438
 
3439
+ // src/subagents/designExpert/data/getUiInspirationSample.ts
3440
+ var uiScreens = readJsonAsset(
3441
+ { screens: [] },
3442
+ "subagents/designExpert/data/sources/ui_inspiration_compiled.json"
3443
+ ).screens;
3444
+ function sample3(arr, n) {
3445
+ if (arr.length <= n) {
3446
+ return [...arr];
3447
+ }
3448
+ const copy = [...arr];
3449
+ for (let i = copy.length - 1; i > 0; i--) {
3450
+ const j = Math.floor(Math.random() * (i + 1));
3451
+ [copy[i], copy[j]] = [copy[j], copy[i]];
3452
+ }
3453
+ return copy.slice(0, n);
3454
+ }
3455
+ function getUiInspirationSample() {
3456
+ const screens = sample3(uiScreens, 25);
3457
+ if (!screens.length) {
3458
+ return "";
3459
+ }
3460
+ const screenList = screens.map((s, i) => `### Screen ${i + 1}
3461
+ ${s.analysis}`).join("\n\n");
3462
+ return `
3463
+ ## UI Case Studies
3464
+
3465
+ These are your personal notes, collected over the years, about UI patterns you've encountered in the wild that you love. You re-use aspects of them liberally in your work, reference them as ground truths, as well as use them to synthesize new ideas and refine your sense of what good UI feels and looks like. The work you do must always feel like it belongs in this company.
3466
+
3467
+ ${screenList}`.trim();
3468
+ }
3469
+
3390
3470
  // src/subagents/designExpert/prompt.ts
3391
3471
  var SUBAGENT = "subagents/designExpert";
3392
- var RUNTIME_PLACEHOLDERS = /* @__PURE__ */ new Set(["font_library", "design_references"]);
3472
+ var RUNTIME_PLACEHOLDERS = /* @__PURE__ */ new Set([
3473
+ "font_library",
3474
+ "visual_design_references",
3475
+ "ui_case_studies"
3476
+ ]);
3393
3477
  var PROMPT_TEMPLATE = readAsset(SUBAGENT, "prompt.md").replace(/\{\{([^}]+)\}\}/g, (match, key) => {
3394
3478
  const k = key.trim();
3395
3479
  return RUNTIME_PLACEHOLDERS.has(k) ? match : readAsset(SUBAGENT, k);
@@ -3399,7 +3483,7 @@ function getDesignExpertPrompt() {
3399
3483
  let prompt = PROMPT_TEMPLATE.replace(
3400
3484
  "{{font_library}}",
3401
3485
  getFontLibrarySample()
3402
- ).replace("{{design_references}}", getDesignReferencesSample());
3486
+ ).replace("{{visual_design_references}}", getDesignReferencesSample()).replace("{{ui_case_studies}}", getUiInspirationSample());
3403
3487
  if (specContext) {
3404
3488
  prompt += `
3405
3489
 
@@ -3414,7 +3498,7 @@ ${specContext}`;
3414
3498
 
3415
3499
  // src/subagents/designExpert/index.ts
3416
3500
  var DESCRIPTION = `
3417
- Visual design expert. Describe the situation and what you need \u2014 the agent decides what to deliver. It reads the spec files automatically. Include relevant user requirements and context it can't get from the spec, but do not list specific deliverables or tell it how to do its job.
3501
+ Visual design expert. Describe the situation and what you need \u2014 the agent decides what to deliver. It reads the spec files automatically. Include relevant user requirements and context it can't get from the spec, but do not list specific deliverables or tell it how to do its job. Do not suggest implementation details or ideas - only relay what is needed.
3418
3502
  `.trim();
3419
3503
  var designExpertTool = {
3420
3504
  definition: {
@@ -3450,6 +3534,7 @@ var designExpertTool = {
3450
3534
  subAgentId: "visualDesignExpert",
3451
3535
  signal: context.signal,
3452
3536
  parentToolId: context.toolCallId,
3537
+ requestId: context.requestId,
3453
3538
  onEvent: context.onEvent,
3454
3539
  resolveExternalTool: context.resolveExternalTool,
3455
3540
  toolRegistry: context.toolRegistry,
@@ -3746,6 +3831,7 @@ var productVisionTool = {
3746
3831
  subAgentId: "productVision",
3747
3832
  signal: context.signal,
3748
3833
  parentToolId: context.toolCallId,
3834
+ requestId: context.requestId,
3749
3835
  onEvent: context.onEvent,
3750
3836
  resolveExternalTool: context.resolveExternalTool,
3751
3837
  toolRegistry: context.toolRegistry,
@@ -3893,6 +3979,7 @@ var codeSanityCheckTool = {
3893
3979
  subAgentId: "codeSanityCheck",
3894
3980
  signal: context.signal,
3895
3981
  parentToolId: context.toolCallId,
3982
+ requestId: context.requestId,
3896
3983
  onEvent: context.onEvent,
3897
3984
  resolveExternalTool: context.resolveExternalTool,
3898
3985
  toolRegistry: context.toolRegistry
@@ -3985,6 +4072,7 @@ function executeTool(name, input, context) {
3985
4072
 
3986
4073
  // src/session.ts
3987
4074
  import fs17 from "fs";
4075
+ var log6 = createLogger("session");
3988
4076
  var SESSION_FILE = ".remy-session.json";
3989
4077
  function loadSession(state) {
3990
4078
  try {
@@ -3992,6 +4080,7 @@ function loadSession(state) {
3992
4080
  const data = JSON.parse(raw);
3993
4081
  if (Array.isArray(data.messages) && data.messages.length > 0) {
3994
4082
  state.messages = sanitizeMessages(data.messages);
4083
+ log6.info("Session loaded", { messageCount: state.messages.length });
3995
4084
  return true;
3996
4085
  }
3997
4086
  } catch {
@@ -4041,7 +4130,9 @@ function saveSession(state) {
4041
4130
  JSON.stringify({ messages: state.messages }, null, 2),
4042
4131
  "utf-8"
4043
4132
  );
4044
- } catch {
4133
+ log6.info("Session saved", { messageCount: state.messages.length });
4134
+ } catch (err) {
4135
+ log6.warn("Session save failed", { error: err.message });
4045
4136
  }
4046
4137
  }
4047
4138
  function clearSession(state) {
@@ -4242,6 +4333,7 @@ function friendlyError(raw) {
4242
4333
  }
4243
4334
 
4244
4335
  // src/agent.ts
4336
+ var log7 = createLogger("agent");
4245
4337
  function getTextContent(blocks) {
4246
4338
  return blocks.filter((b) => b.type === "text").map((b) => b.text).join("");
4247
4339
  }
@@ -4279,17 +4371,17 @@ async function runTurn(params) {
4279
4371
  onEvent,
4280
4372
  resolveExternalTool,
4281
4373
  hidden,
4374
+ requestId,
4282
4375
  toolRegistry,
4283
4376
  onBackgroundComplete
4284
4377
  } = params;
4285
4378
  const tools2 = getToolDefinitions(onboardingState);
4286
- log.info("Turn started", {
4287
- messageLength: userMessage.length,
4379
+ log7.info("Turn started", {
4380
+ requestId,
4381
+ model,
4288
4382
  toolCount: tools2.length,
4289
- tools: tools2.map((t) => t.name),
4290
4383
  ...attachments && attachments.length > 0 && {
4291
- attachmentCount: attachments.length,
4292
- attachmentUrls: attachments.map((a) => a.url)
4384
+ attachmentCount: attachments.length
4293
4385
  }
4294
4386
  });
4295
4387
  onEvent({ type: "turn_started" });
@@ -4299,10 +4391,6 @@ async function runTurn(params) {
4299
4391
  }
4300
4392
  if (attachments && attachments.length > 0) {
4301
4393
  userMsg.attachments = attachments;
4302
- log.debug("Attachments added to user message", {
4303
- count: attachments.length,
4304
- urls: attachments.map((a) => a.url)
4305
- });
4306
4394
  }
4307
4395
  state.messages.push(userMsg);
4308
4396
  const STATUS_EXCLUDED_TOOLS = /* @__PURE__ */ new Set([
@@ -4363,11 +4451,6 @@ async function runTurn(params) {
4363
4451
  }
4364
4452
  acc.lastEmittedCount = result.emittedCount;
4365
4453
  acc.started = true;
4366
- log.debug("Streaming partial tool_start", {
4367
- id,
4368
- name,
4369
- emittedCount: result.emittedCount
4370
- });
4371
4454
  onEvent({
4372
4455
  type: "tool_start",
4373
4456
  id,
@@ -4383,10 +4466,6 @@ async function runTurn(params) {
4383
4466
  }
4384
4467
  if (!acc.started) {
4385
4468
  acc.started = true;
4386
- log.debug("Streaming content tool: emitting early tool_start", {
4387
- id,
4388
- name
4389
- });
4390
4469
  onEvent({ type: "tool_start", id, name, input: partial });
4391
4470
  }
4392
4471
  if (transform) {
@@ -4394,18 +4473,8 @@ async function runTurn(params) {
4394
4473
  if (result === null) {
4395
4474
  return;
4396
4475
  }
4397
- log.debug("Streaming content tool: emitting tool_input_delta", {
4398
- id,
4399
- name,
4400
- resultLength: result.length
4401
- });
4402
4476
  onEvent({ type: "tool_input_delta", id, name, result });
4403
4477
  } else {
4404
- log.debug("Streaming content tool: emitting tool_input_delta", {
4405
- id,
4406
- name,
4407
- contentLength: content.length
4408
- });
4409
4478
  onEvent({ type: "tool_input_delta", id, name, result: content });
4410
4479
  }
4411
4480
  }
@@ -4414,6 +4483,7 @@ async function runTurn(params) {
4414
4483
  {
4415
4484
  ...apiConfig,
4416
4485
  model,
4486
+ requestId,
4417
4487
  system,
4418
4488
  messages: cleanMessagesForApi(state.messages),
4419
4489
  tools: tools2,
@@ -4465,12 +4535,6 @@ async function runTurn(params) {
4465
4535
  case "tool_input_delta": {
4466
4536
  const acc = getOrCreateAccumulator2(event.id, event.name);
4467
4537
  acc.json += event.delta;
4468
- log.debug("Received tool_input_delta", {
4469
- id: event.id,
4470
- name: event.name,
4471
- deltaLength: event.delta.length,
4472
- accumulatedLength: acc.json.length
4473
- });
4474
4538
  try {
4475
4539
  const partial = parsePartialJson(acc.json);
4476
4540
  await handlePartialInput(acc, event.id, event.name, partial);
@@ -4480,11 +4544,6 @@ async function runTurn(params) {
4480
4544
  }
4481
4545
  case "tool_input_args": {
4482
4546
  const acc = getOrCreateAccumulator2(event.id, event.name);
4483
- log.debug("Received tool_input_args", {
4484
- id: event.id,
4485
- name: event.name,
4486
- keys: Object.keys(event.args)
4487
- });
4488
4547
  await handlePartialInput(acc, event.id, event.name, event.args);
4489
4548
  break;
4490
4549
  }
@@ -4501,11 +4560,10 @@ async function runTurn(params) {
4501
4560
  const tool = getToolByName(event.name);
4502
4561
  const wasStreamed = acc?.started ?? false;
4503
4562
  const isInputStreaming = !!tool?.streaming?.partialInput;
4504
- log.info("Tool call received", {
4505
- id: event.id,
4506
- name: event.name,
4507
- wasStreamed,
4508
- isInputStreaming
4563
+ log7.info("Tool received", {
4564
+ requestId,
4565
+ toolCallId: event.id,
4566
+ name: event.name
4509
4567
  });
4510
4568
  if (!wasStreamed || isInputStreaming) {
4511
4569
  onEvent({
@@ -4559,7 +4617,8 @@ async function runTurn(params) {
4559
4617
  onEvent({ type: "turn_done" });
4560
4618
  return;
4561
4619
  }
4562
- log.info("Executing tools", {
4620
+ log7.info("Tools executing", {
4621
+ requestId,
4563
4622
  count: toolCalls.length,
4564
4623
  tools: toolCalls.map((tc) => tc.name)
4565
4624
  });
@@ -4602,9 +4661,10 @@ async function runTurn(params) {
4602
4661
  let result;
4603
4662
  if (EXTERNAL_TOOLS.has(tc.name) && resolveExternalTool) {
4604
4663
  saveSession(state);
4605
- log.info("Waiting for external tool result", {
4606
- name: tc.name,
4607
- id: tc.id
4664
+ log7.info("Waiting for external tool result", {
4665
+ requestId,
4666
+ toolCallId: tc.id,
4667
+ name: tc.name
4608
4668
  });
4609
4669
  result = await resolveExternalTool(tc.id, tc.name, input);
4610
4670
  } else {
@@ -4615,6 +4675,7 @@ async function runTurn(params) {
4615
4675
  onEvent: wrappedOnEvent,
4616
4676
  resolveExternalTool,
4617
4677
  toolCallId: tc.id,
4678
+ requestId,
4618
4679
  subAgentMessages,
4619
4680
  toolRegistry,
4620
4681
  onBackgroundComplete,
@@ -4653,11 +4714,12 @@ async function runTurn(params) {
4653
4714
  run(tc.input);
4654
4715
  const r = await resultPromise;
4655
4716
  toolRegistry?.unregister(tc.id);
4656
- log.info("Tool completed", {
4717
+ log7.info("Tool completed", {
4718
+ requestId,
4719
+ toolCallId: tc.id,
4657
4720
  name: tc.name,
4658
- elapsed: `${Date.now() - toolStart}ms`,
4659
- isError: r.isError,
4660
- resultLength: r.result.length
4721
+ durationMs: Date.now() - toolStart,
4722
+ isError: r.isError
4661
4723
  });
4662
4724
  onEvent({
4663
4725
  type: "tool_done",
@@ -4703,6 +4765,7 @@ async function runTurn(params) {
4703
4765
  }
4704
4766
 
4705
4767
  // src/toolRegistry.ts
4768
+ var log8 = createLogger("tool-registry");
4706
4769
  var ToolRegistry = class {
4707
4770
  entries = /* @__PURE__ */ new Map();
4708
4771
  onEvent;
@@ -4728,6 +4791,7 @@ var ToolRegistry = class {
4728
4791
  if (!entry) {
4729
4792
  return false;
4730
4793
  }
4794
+ log8.info("Tool stopped", { toolCallId: id, name: entry.name, mode });
4731
4795
  entry.abortController.abort(mode);
4732
4796
  if (mode === "graceful") {
4733
4797
  const partial = entry.getPartialResult?.() ?? "";
@@ -4760,6 +4824,7 @@ ${partial}` : "[INTERRUPTED] Tool execution was stopped.";
4760
4824
  if (!entry) {
4761
4825
  return false;
4762
4826
  }
4827
+ log8.info("Tool restarted", { toolCallId: id, name: entry.name });
4763
4828
  entry.abortController.abort("restart");
4764
4829
  const newInput = patchedInput ? { ...entry.input, ...patchedInput } : entry.input;
4765
4830
  this.onEvent?.({
@@ -4775,6 +4840,7 @@ ${partial}` : "[INTERRUPTED] Tool execution was stopped.";
4775
4840
  };
4776
4841
 
4777
4842
  // src/headless.ts
4843
+ var log9 = createLogger("headless");
4778
4844
  function loadActionPrompt(name) {
4779
4845
  return readAsset("prompt", "actions", `${name}.md`);
4780
4846
  }
@@ -4873,6 +4939,11 @@ ${xmlParts}
4873
4939
  }
4874
4940
  }
4875
4941
  }
4942
+ log9.info("Background complete", {
4943
+ toolCallId,
4944
+ name,
4945
+ requestId: currentRequestId
4946
+ });
4876
4947
  onEvent({
4877
4948
  type: "tool_background_complete",
4878
4949
  id: toolCallId,
@@ -5065,6 +5136,7 @@ ${xmlParts}
5065
5136
  currentRequestId = requestId;
5066
5137
  currentAbort = new AbortController();
5067
5138
  completedEmitted = false;
5139
+ const turnStart = Date.now();
5068
5140
  const attachments = parsed.attachments;
5069
5141
  if (attachments?.length) {
5070
5142
  console.warn(
@@ -5096,6 +5168,7 @@ ${xmlParts}
5096
5168
  system,
5097
5169
  model: opts.model,
5098
5170
  onboardingState,
5171
+ requestId,
5099
5172
  signal: currentAbort.signal,
5100
5173
  onEvent,
5101
5174
  resolveExternalTool,
@@ -5110,11 +5183,20 @@ ${xmlParts}
5110
5183
  requestId
5111
5184
  );
5112
5185
  }
5186
+ log9.info("Turn complete", {
5187
+ requestId,
5188
+ durationMs: Date.now() - turnStart
5189
+ });
5113
5190
  } catch (err) {
5114
5191
  if (!completedEmitted) {
5115
5192
  emit("error", { error: err.message }, requestId);
5116
5193
  emit("completed", { success: false, error: err.message }, requestId);
5117
5194
  }
5195
+ log9.warn("Command failed", {
5196
+ action: "message",
5197
+ requestId,
5198
+ error: err.message
5199
+ });
5118
5200
  }
5119
5201
  currentAbort = null;
5120
5202
  currentRequestId = void 0;
@@ -5130,6 +5212,7 @@ ${xmlParts}
5130
5212
  return;
5131
5213
  }
5132
5214
  const { action, requestId } = parsed;
5215
+ log9.info("Command received", { action, requestId });
5133
5216
  if (action === "tool_result" && parsed.id) {
5134
5217
  const id = parsed.id;
5135
5218
  const result = parsed.result ?? "";