@mindstudio-ai/remy 0.1.43 → 0.1.45

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/headless.js CHANGED
@@ -429,11 +429,10 @@ async function* streamChat(params) {
429
429
  const { baseUrl: baseUrl2, apiKey, signal, requestId, ...body } = params;
430
430
  const url = `${baseUrl2}/_internal/v2/agent/remy/chat`;
431
431
  const startTime = Date.now();
432
- const messagesWithAttachments = body.messages.filter(
433
- (m) => m.attachments && m.attachments.length > 0
434
- );
432
+ const subAgentId = body.subAgentId;
435
433
  log3.info("API request", {
436
434
  requestId,
435
+ ...subAgentId && { subAgentId },
437
436
  model: body.model,
438
437
  messageCount: body.messages.length,
439
438
  toolCount: body.tools.length
@@ -451,15 +450,27 @@ async function* streamChat(params) {
451
450
  });
452
451
  } catch (err) {
453
452
  if (signal?.aborted) {
454
- log3.warn("Request aborted", { requestId });
453
+ log3.warn("Request aborted", {
454
+ requestId,
455
+ ...subAgentId && { subAgentId }
456
+ });
455
457
  throw err;
456
458
  }
457
- log3.error("Network error", { requestId, error: err.message });
459
+ log3.error("Network error", {
460
+ requestId,
461
+ ...subAgentId && { subAgentId },
462
+ error: err.message
463
+ });
458
464
  yield { type: "error", error: `Network error: ${err.message}` };
459
465
  return;
460
466
  }
461
467
  const ttfb = Date.now() - startTime;
462
- log3.info("API response", { requestId, status: res.status, ttfbMs: ttfb });
468
+ log3.info("API response", {
469
+ requestId,
470
+ ...subAgentId && { subAgentId },
471
+ status: res.status,
472
+ ttfbMs: ttfb
473
+ });
463
474
  if (!res.ok) {
464
475
  let errorMessage = `HTTP ${res.status}`;
465
476
  try {
@@ -474,6 +485,7 @@ async function* streamChat(params) {
474
485
  }
475
486
  log3.error("API error", {
476
487
  requestId,
488
+ ...subAgentId && { subAgentId },
477
489
  status: res.status,
478
490
  error: errorMessage
479
491
  });
@@ -503,6 +515,7 @@ async function* streamChat(params) {
503
515
  await reader.cancel();
504
516
  log3.error("Stream stalled", {
505
517
  requestId,
518
+ ...subAgentId && { subAgentId },
506
519
  durationMs: Date.now() - startTime
507
520
  });
508
521
  yield {
@@ -528,6 +541,7 @@ async function* streamChat(params) {
528
541
  const elapsed = Date.now() - startTime;
529
542
  log3.info("Stream complete", {
530
543
  requestId,
544
+ ...subAgentId && { subAgentId },
531
545
  durationMs: elapsed,
532
546
  stopReason: event.stopReason,
533
547
  inputTokens: event.usage.inputTokens,
@@ -2225,7 +2239,7 @@ function startStatusWatcher(config) {
2225
2239
  inflight = true;
2226
2240
  try {
2227
2241
  const ctx = getContext();
2228
- if (!ctx.assistantText && !ctx.lastToolName) {
2242
+ if (!ctx.assistantText && !ctx.lastToolName && !ctx.userMessage) {
2229
2243
  return;
2230
2244
  }
2231
2245
  const res = await fetch(url, {
@@ -2368,7 +2382,8 @@ ${partial}` : "[INTERRUPTED] Agent was interrupted before producing output.",
2368
2382
  getContext: () => ({
2369
2383
  assistantText: getPartialText(contentBlocks),
2370
2384
  lastToolName: currentToolNames || void 0,
2371
- lastToolResult: lastToolResult || void 0
2385
+ lastToolResult: lastToolResult || void 0,
2386
+ userMessage: task
2372
2387
  }),
2373
2388
  onStatus: (label) => emit2({ type: "status", message: label }),
2374
2389
  signal
@@ -2539,9 +2554,18 @@ Current date/time: ${(/* @__PURE__ */ new Date()).toISOString().replace("T", " "
2539
2554
  }
2540
2555
  };
2541
2556
  toolRegistry?.register(entry);
2557
+ const toolStart = Date.now();
2542
2558
  run2(tc.input);
2543
2559
  const r = await resultPromise;
2544
2560
  toolRegistry?.unregister(tc.id);
2561
+ log4.info("Tool completed", {
2562
+ requestId,
2563
+ parentToolId,
2564
+ toolCallId: tc.id,
2565
+ name: tc.name,
2566
+ durationMs: Date.now() - toolStart,
2567
+ isError: r.isError
2568
+ });
2545
2569
  emit2({
2546
2570
  type: "tool_done",
2547
2571
  id: tc.id,
@@ -2597,14 +2621,30 @@ Current date/time: ${(/* @__PURE__ */ new Date()).toISOString().replace("T", " "
2597
2621
  return wrapRun();
2598
2622
  }
2599
2623
  log4.info("Sub-agent backgrounded", { requestId, parentToolId, agentName });
2624
+ toolRegistry?.register({
2625
+ id: parentToolId,
2626
+ name: agentName,
2627
+ input: { task },
2628
+ abortController: bgAbort,
2629
+ startedAt: Date.now(),
2630
+ settle: () => {
2631
+ },
2632
+ rerun: () => {
2633
+ },
2634
+ getPartialResult: () => ""
2635
+ });
2600
2636
  const ack = await generateBackgroundAck({
2601
2637
  apiConfig,
2602
2638
  agentName: subAgentId || "agent",
2603
2639
  task
2604
2640
  });
2605
- wrapRun().then((finalResult) => onBackgroundComplete?.(finalResult)).catch(
2606
- (err) => onBackgroundComplete?.({ text: `Error: ${err.message}`, messages: [] })
2607
- );
2641
+ wrapRun().then((finalResult) => {
2642
+ toolRegistry?.unregister(parentToolId);
2643
+ onBackgroundComplete?.(finalResult);
2644
+ }).catch((err) => {
2645
+ toolRegistry?.unregister(parentToolId);
2646
+ onBackgroundComplete?.({ text: `Error: ${err.message}`, messages: [] });
2647
+ });
2608
2648
  return { text: ack, messages: [], backgrounded: true };
2609
2649
  }
2610
2650
 
@@ -3053,15 +3093,18 @@ async function enhanceImagePrompt(params) {
3053
3093
  "Transparent background: yes \u2014 the background will be removed. Focus on the subject as an isolated element."
3054
3094
  );
3055
3095
  }
3056
- const message = `<context>
3096
+ const context = `<context>
3057
3097
  ${contextParts.join("\n")}
3058
- </context>
3098
+ </context>`;
3099
+ const message = `${SYSTEM_PROMPT}
3100
+
3101
+ ${context}
3059
3102
 
3060
3103
  <brief>
3061
3104
  ${brief}
3062
3105
  </brief>`;
3063
3106
  const enhanced = await runCli(
3064
- `mindstudio generate-text --prompt ${JSON.stringify(SYSTEM_PROMPT)} --message ${JSON.stringify(message)} --output-key enhanced --no-meta`,
3107
+ `mindstudio generate-text --message ${JSON.stringify(message)} --output-key content --no-meta`,
3065
3108
  { timeout: 6e4, onLog }
3066
3109
  );
3067
3110
  return enhanced.trim();
@@ -4738,7 +4781,9 @@ async function runTurn(params) {
4738
4781
  toolRegistry?.register(entry);
4739
4782
  run(tc.input);
4740
4783
  const r = await resultPromise;
4741
- toolRegistry?.unregister(tc.id);
4784
+ if (!tc.input.background) {
4785
+ toolRegistry?.unregister(tc.id);
4786
+ }
4742
4787
  log7.info("Tool completed", {
4743
4788
  requestId,
4744
4789
  toolCallId: tc.id,
package/dist/index.js CHANGED
@@ -89,11 +89,10 @@ async function* streamChat(params) {
89
89
  const { baseUrl: baseUrl2, apiKey, signal, requestId, ...body } = params;
90
90
  const url = `${baseUrl2}/_internal/v2/agent/remy/chat`;
91
91
  const startTime = Date.now();
92
- const messagesWithAttachments = body.messages.filter(
93
- (m) => m.attachments && m.attachments.length > 0
94
- );
92
+ const subAgentId = body.subAgentId;
95
93
  log.info("API request", {
96
94
  requestId,
95
+ ...subAgentId && { subAgentId },
97
96
  model: body.model,
98
97
  messageCount: body.messages.length,
99
98
  toolCount: body.tools.length
@@ -111,15 +110,27 @@ async function* streamChat(params) {
111
110
  });
112
111
  } catch (err) {
113
112
  if (signal?.aborted) {
114
- log.warn("Request aborted", { requestId });
113
+ log.warn("Request aborted", {
114
+ requestId,
115
+ ...subAgentId && { subAgentId }
116
+ });
115
117
  throw err;
116
118
  }
117
- log.error("Network error", { requestId, error: err.message });
119
+ log.error("Network error", {
120
+ requestId,
121
+ ...subAgentId && { subAgentId },
122
+ error: err.message
123
+ });
118
124
  yield { type: "error", error: `Network error: ${err.message}` };
119
125
  return;
120
126
  }
121
127
  const ttfb = Date.now() - startTime;
122
- log.info("API response", { requestId, status: res.status, ttfbMs: ttfb });
128
+ log.info("API response", {
129
+ requestId,
130
+ ...subAgentId && { subAgentId },
131
+ status: res.status,
132
+ ttfbMs: ttfb
133
+ });
123
134
  if (!res.ok) {
124
135
  let errorMessage = `HTTP ${res.status}`;
125
136
  try {
@@ -134,6 +145,7 @@ async function* streamChat(params) {
134
145
  }
135
146
  log.error("API error", {
136
147
  requestId,
148
+ ...subAgentId && { subAgentId },
137
149
  status: res.status,
138
150
  error: errorMessage
139
151
  });
@@ -163,6 +175,7 @@ async function* streamChat(params) {
163
175
  await reader.cancel();
164
176
  log.error("Stream stalled", {
165
177
  requestId,
178
+ ...subAgentId && { subAgentId },
166
179
  durationMs: Date.now() - startTime
167
180
  });
168
181
  yield {
@@ -188,6 +201,7 @@ async function* streamChat(params) {
188
201
  const elapsed = Date.now() - startTime;
189
202
  log.info("Stream complete", {
190
203
  requestId,
204
+ ...subAgentId && { subAgentId },
191
205
  durationMs: elapsed,
192
206
  stopReason: event.stopReason,
193
207
  inputTokens: event.usage.inputTokens,
@@ -2163,7 +2177,7 @@ function startStatusWatcher(config) {
2163
2177
  inflight = true;
2164
2178
  try {
2165
2179
  const ctx = getContext();
2166
- if (!ctx.assistantText && !ctx.lastToolName) {
2180
+ if (!ctx.assistantText && !ctx.lastToolName && !ctx.userMessage) {
2167
2181
  return;
2168
2182
  }
2169
2183
  const res = await fetch(url, {
@@ -2315,7 +2329,8 @@ ${partial}` : "[INTERRUPTED] Agent was interrupted before producing output.",
2315
2329
  getContext: () => ({
2316
2330
  assistantText: getPartialText(contentBlocks),
2317
2331
  lastToolName: currentToolNames || void 0,
2318
- lastToolResult: lastToolResult || void 0
2332
+ lastToolResult: lastToolResult || void 0,
2333
+ userMessage: task
2319
2334
  }),
2320
2335
  onStatus: (label) => emit2({ type: "status", message: label }),
2321
2336
  signal
@@ -2486,9 +2501,18 @@ Current date/time: ${(/* @__PURE__ */ new Date()).toISOString().replace("T", " "
2486
2501
  }
2487
2502
  };
2488
2503
  toolRegistry?.register(entry);
2504
+ const toolStart = Date.now();
2489
2505
  run2(tc.input);
2490
2506
  const r = await resultPromise;
2491
2507
  toolRegistry?.unregister(tc.id);
2508
+ log3.info("Tool completed", {
2509
+ requestId,
2510
+ parentToolId,
2511
+ toolCallId: tc.id,
2512
+ name: tc.name,
2513
+ durationMs: Date.now() - toolStart,
2514
+ isError: r.isError
2515
+ });
2492
2516
  emit2({
2493
2517
  type: "tool_done",
2494
2518
  id: tc.id,
@@ -2544,14 +2568,30 @@ Current date/time: ${(/* @__PURE__ */ new Date()).toISOString().replace("T", " "
2544
2568
  return wrapRun();
2545
2569
  }
2546
2570
  log3.info("Sub-agent backgrounded", { requestId, parentToolId, agentName });
2571
+ toolRegistry?.register({
2572
+ id: parentToolId,
2573
+ name: agentName,
2574
+ input: { task },
2575
+ abortController: bgAbort,
2576
+ startedAt: Date.now(),
2577
+ settle: () => {
2578
+ },
2579
+ rerun: () => {
2580
+ },
2581
+ getPartialResult: () => ""
2582
+ });
2547
2583
  const ack = await generateBackgroundAck({
2548
2584
  apiConfig,
2549
2585
  agentName: subAgentId || "agent",
2550
2586
  task
2551
2587
  });
2552
- wrapRun().then((finalResult) => onBackgroundComplete?.(finalResult)).catch(
2553
- (err) => onBackgroundComplete?.({ text: `Error: ${err.message}`, messages: [] })
2554
- );
2588
+ wrapRun().then((finalResult) => {
2589
+ toolRegistry?.unregister(parentToolId);
2590
+ onBackgroundComplete?.(finalResult);
2591
+ }).catch((err) => {
2592
+ toolRegistry?.unregister(parentToolId);
2593
+ onBackgroundComplete?.({ text: `Error: ${err.message}`, messages: [] });
2594
+ });
2555
2595
  return { text: ack, messages: [], backgrounded: true };
2556
2596
  }
2557
2597
  var log3;
@@ -3106,15 +3146,18 @@ async function enhanceImagePrompt(params) {
3106
3146
  "Transparent background: yes \u2014 the background will be removed. Focus on the subject as an isolated element."
3107
3147
  );
3108
3148
  }
3109
- const message = `<context>
3149
+ const context = `<context>
3110
3150
  ${contextParts.join("\n")}
3111
- </context>
3151
+ </context>`;
3152
+ const message = `${SYSTEM_PROMPT}
3153
+
3154
+ ${context}
3112
3155
 
3113
3156
  <brief>
3114
3157
  ${brief}
3115
3158
  </brief>`;
3116
3159
  const enhanced = await runCli(
3117
- `mindstudio generate-text --prompt ${JSON.stringify(SYSTEM_PROMPT)} --message ${JSON.stringify(message)} --output-key enhanced --no-meta`,
3160
+ `mindstudio generate-text --message ${JSON.stringify(message)} --output-key content --no-meta`,
3118
3161
  { timeout: 6e4, onLog }
3119
3162
  );
3120
3163
  return enhanced.trim();
@@ -4980,7 +5023,9 @@ async function runTurn(params) {
4980
5023
  toolRegistry?.register(entry);
4981
5024
  run(tc.input);
4982
5025
  const r = await resultPromise;
4983
- toolRegistry?.unregister(tc.id);
5026
+ if (!tc.input.background) {
5027
+ toolRegistry?.unregister(tc.id);
5028
+ }
4984
5029
  log6.info("Tool completed", {
4985
5030
  requestId,
4986
5031
  toolCallId: tc.id,
@@ -55,6 +55,7 @@ Forms should feel like interactions, not paperwork.
55
55
  - Inline validation — show errors as the user types, not after submit. Validation must never introduce layout shift.
56
56
  - Loading states after submission. Always indicate that something is happening.
57
57
  - Disabled states should be visually distinct but not jarring.
58
+ - Media uploads should optimistically load in a local preview of an image or video and show upload progress
58
59
  - Even data entry can be beautiful. Pay attention to alignment, padding, and spacing. Consistency is key.
59
60
 
60
61
  #### Form Elements
@@ -54,9 +54,21 @@ const api = createClient<{
54
54
  const { vendorId } = await api.submitVendorRequest({ name: 'Acme' });
55
55
  const { vendors } = await api.listVendors();
56
56
 
57
- // File operations
58
- const { url } = await platform.requestFile({ type: 'image' });
59
- const cdnUrl = await platform.uploadFile(file);
57
+ // File upload (returns CDN URL)
58
+ const url = await platform.uploadFile(file);
59
+
60
+ // With progress tracking
61
+ const url = await platform.uploadFile(file, {
62
+ onProgress: (fraction) => setProgress(fraction), // 0 to 1
63
+ });
64
+
65
+ // With abort support
66
+ const controller = new AbortController();
67
+ const url = await platform.uploadFile(file, {
68
+ signal: controller.signal,
69
+ onProgress: (f) => setProgress(f),
70
+ });
71
+ controller.abort(); // cancels the upload
60
72
 
61
73
  // Current user (display only)
62
74
  auth.userId;
@@ -16,11 +16,14 @@ After editing code, check your work with `lspDiagnostics` or by reading the file
16
16
  Aim for confidence that the core happy paths work. If the 80% case is solid, the remaining edge cases are likely fine and the user can surface them in chat. Don't screenshot every page, test every permutation, or verify every secondary flow. One or two runtime checks that confirm the app loads and data flows through is enough.
17
17
 
18
18
  ### Process Logs
19
- Process logs are available at `.logs/` for debugging:
20
- - `.logs/tunnel.log`: method execution, schema sync, session lifecycle, platform connection
21
- - `.logs/devServer.log`: frontend build errors, HMR, module resolution failures
22
- - `.logs/requests.ndjson`: structured NDJSON log of every method and scenario execution with full input, output, errors (including stack traces), console output, and duration. Use `tail -5 .logs/requests.ndjson | jq .` or `grep '"success":false' .logs/requests.ndjson | jq .` to inspect.
23
- - `.logs/browser.ndjson`: browser-side events captured from the web preview. Includes console output, uncaught JS errors with stack traces, failed network requests, and user interactions (clicks). Use `grep '"type":"error"' .logs/browser.ndjson | jq .` to find frontend errors.
19
+
20
+ Process logs are available at .logs/ in NDJSON format (one JSON object per line) for debugging. Each line has at minimum ts (unix millis) and msg fields, plus structured context like level, module, requestId, toolCallId where available. You can use `jq` to examine logs and debug failures. Tools like run method or run scenario execute synchronously, so log data will be available by the time those tools return their results to you; there is no need to `sleep` before querying logfiles.
21
+ - `.logs/tunnel.ndjson`: method execution, schema sync, session lifecycle, platform connection
22
+ - `.logs/devServer.ndjson`: frontend build errors, HMR, module resolution failures
23
+ - `.logs/system.ndjson`: sandbox server logs agent lifecycle, tool dispatch, file watching, process management
24
+ - `.logs/agent.ndjson`: coding agent protocol events and errors
25
+ - `.logs/requests.ndjson`: structured log of every method and scenario execution with full input, output, errors (including stack traces), console output, and duration
26
+ - `.logs/browser.ndjson`: browser-side events from the web preview — console output, uncaught JS errors with stack traces, failed network requests, user interactions
24
27
 
25
28
  ### MindStudio SDK
26
29
  For any work involving AI models, external actions (web scraping, email, SMS), or third-party API/OAuth connections, prefer the `@mindstudio-ai/agent` SDK. It removes the need to research API methods, configure keys and tokens, or require the user to set up developer accounts.
@@ -10,9 +10,9 @@ Note: when you talk about the team to the user, refer to them by their name or a
10
10
 
11
11
  Your designer. Consult for any visual decision — choosing a color, picking fonts, proposing a layout, sourcing images, reviewing whether something looks good. Not just during intake or big design moments. If you're about to write CSS and you're not sure about a color, ask. If you just built a page and want a gut check, ask the designer to take a quick look. If the user says "I don't like how this looks," ask the design expert what to change rather than guessing yourself, or if they say "I want a different image," that's the designer's problem, not yours. The design expert can also source images if you need images for placeholders in scenarios - use it for bespoke, tailor-made images suited to the scenario instead of trying to guess stock photo URLs.
12
12
 
13
- The design expert cannot see your conversation with the user, so include all relevant context and requirements in your task. It also can not see its own conversation history, so if you want an audit you need to provide the exact values to check, or any other necessary context for it to do its job. It can take screenshots of the app preview on its own — just ask it to review what's been built.
13
+ The design expert cannot see your conversation with the user, so include all relevant context and requirements in your task. It also can not see its own conversation history, so if you want an audit you need to provide the exact values to check, or any other necessary context for it to do its job. It can take screenshots of the app preview on its own — just ask it to review what's been built. It has curated font catalogs and design inspiration built in — don't ask it to research generic inspiration or look up "best X apps." Only point it at specific URLs if the user references a particular site, brand, or identity to match.
14
14
 
15
- Returns concrete resources: hex values, font names with CSS URLs, image URLs, layout descriptions. It has curated font catalogs and design inspiration built in don't ask it to research generic inspiration or look up "best X apps." Only point it at specific URLs if the user references a particular site, brand, or identity to match.
15
+ The designer will return concrete resources: hex values, font names with CSS URLs, image URLs, layout descriptions, as well as specific techniques, CSS properties, and other values. Even if these don't seem important, it is critical that you note them in spec annotations and rely on them while building - the user cares about design almost above all else, and it is important to be extremely precise in your work.
16
16
 
17
17
  When delegating, describe the design problem — where the asset will be used, what it needs to communicate, what the brand feels like. Do not specify technical details like image formats, pixel dimensions, generation techniques, or workarounds. The design expert makes those decisions.
18
18
 
@@ -59,6 +59,8 @@ When you receive background results:
59
59
 
60
60
  #### When to Background
61
61
 
62
- Only background the following tasks, unless the user specifically asks you to do work in the background:
62
+ You can only background the following two tasks, unless the user specifically asks you to do work in the background:
63
63
  - `productVision` seeding the initial roadmap after writing the spec for the first time. This task takes a while and we can allow the user to continue building while it happens in the background
64
64
  - After writing the spec, once you have finalized the shape of the app, ask `visualDesignExpert` to create an "iphone app store" style icon for the app, then set it with `setProjectMetadata({ iconUrl: ... })`
65
+
66
+ Do not background any other tasks.
@@ -22,6 +22,7 @@ These are things we already know about and have decided to accept:
22
22
  - swr
23
23
  - framer-motion
24
24
  - styled-components
25
+ - @tabler/icons-react
25
26
  - Preferences:
26
27
  - use [wouter](https://github.com/molefrog/wouter) for React routing instead of reaching for react-router
27
28
 
@@ -1,6 +1,10 @@
1
1
  ## Initial Design
2
2
 
3
- Rendering the initial design for a new app is your chance to do amazing work and truly impress the user, because after that it's going to be all refinement and working within constraints. Truly greenfield design work is rare, so don't take these moments for granted. Be creative and inspired, and spend time thinking about your references. What can you draw upon from <visual_design_references> and <ui_case_studies>, even if it might be from an unrelated domain or vertical (the best designs oftne come from surprising places!). What fonts and colors should form the base of the brand's identity? They're going to appear in other things beyond just this app - marketing materials, swag, etc - so make them compelling.
3
+ Rendering the initial design for a new app is your chance to do amazing work and truly impress the user, because after that it's going to be all refinement and working within constraints. Truly greenfield design work is rare, so don't take these moments for granted.
4
+
5
+ The user has engaged you specifically to elevate their design - they have the seed of an idea and it is your job to help them fully realize its potential. This can be tricky, as sometimes people use design language to describe what they need in a way that *sounds* precise, but in actuality they don't know what they really want and are simply aping language they have heard elsewhere. Your job is to not only listen to their request but to really get to the core of what they *need*, and show them heights they never thought possible. This is what separates good designers from exceptional designers. Be an *exceptional* designer.
6
+
7
+ Be creative and inspired, and spend time thinking about your references. Discuss them aloud during your thinking. What can you draw upon from <visual_design_references> and <ui_case_studies> (e.g., "I think the XYZ pattern from ABC could be really compelling for..."), even if it might be from an unrelated domain or vertical (the best designs often come from surprising places!)? What fonts and colors should form the base of the brand's identity? They're going to appear in other things beyond just this app - marketing materials, swag, etc - so make them compelling.
4
8
 
5
9
  Then, think about the layout and UI patterns - these are the core of the user's interaction with the app and provide the frame and context for every interfaction. Think about individual components, animation, icons, and images.
6
10
 
@@ -5,3 +5,8 @@ UI patterns are the core of any good app. Anyone can make a simple form or list
5
5
  Study the patterns provided in <ui_case_studies> and actually spend time breaking them down, and think about what can be applied to the current project to elevate it into something truly world-class.
6
6
 
7
7
  When describing UI patterns to the developer, be verbose and explicit. Describe every aspect - don't leave room for interpretation by the developer because it ain't gonna be pretty.
8
+
9
+
10
+ ### Dated Patterns to Avoid
11
+
12
+ The design should look like it could be an Apple iOS/macOS app of the year winner for 2026. Avoid long pages, things that feel like blogs, things that borrow from "dated" app store apps, and the like. It should feel like an award winner from the past two years, not an award winner from a decade ago.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@mindstudio-ai/remy",
3
- "version": "0.1.43",
3
+ "version": "0.1.45",
4
4
  "description": "MindStudio coding agent",
5
5
  "repository": {
6
6
  "type": "git",