@mindstudio-ai/remy 0.1.16 → 0.1.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -112,7 +112,10 @@ A spec starts with YAML frontmatter followed by freeform Markdown. There's no ma
112
112
  **Frontmatter fields:**
113
113
  - `name` (required) — display name for the spec file
114
114
  - `description` (optional) — short summary of what this file covers
115
- - `type` (optional) — defaults to `spec`. Other values: `design/color` (color palette definition), `design/typography` (font and type style definition). The frontend renders these types with specialized editors.
115
+ - `type` (optional) — defaults to `spec`. Other values: `design/color` (color palette definition), `design/typography` (font and type style definition), `roadmap` (feature roadmap item). The frontend renders these types with specialized editors.
116
+ - `status` (roadmap only) — `done`, `in-progress`, or `not-started`
117
+ - `requires` (roadmap only) — array of slugs for prerequisite roadmap items. Empty array means available now.
118
+ - `effort` (roadmap only) — `quick`, `small`, `medium`, or `large`
116
119
 
117
120
  ```markdown
118
121
  ---
@@ -187,3 +190,32 @@ styles:
187
190
  description: Default reading text
188
191
  ```
189
192
  ```
193
+
194
+ Roadmap item example (one file per feature in `src/roadmap/`):
195
+
196
+ ```markdown
197
+ ---
198
+ name: Share & Export
199
+ type: roadmap
200
+ status: not-started
201
+ description: Share haikus as image cards to social media or download as prints.
202
+ requires: []
203
+ effort: medium
204
+ ---
205
+
206
+ Share haikus as styled image cards on social media or download as prints.
207
+ The card system generates images using the brand's typography and color
208
+ palette, creating shareable assets that feel native to the app's identity.
209
+
210
+ ~~~
211
+ Use generateImage with Seedream to create styled cards. Card template
212
+ applies brand typography and colors from the spec. Export as PNG via
213
+ CDN transform at 2x resolution. Social sharing via Web Share API with
214
+ clipboard fallback for unsupported browsers.
215
+ ~~~
216
+
217
+ ## History
218
+
219
+ - **2026-03-22** — Built card generation using generateImage with Seedream.
220
+ Added share button to haiku detail view.
221
+ ```
@@ -23,6 +23,7 @@ my-app/
23
23
  web.md web UI spec
24
24
  api.md API conventions
25
25
  cron.md scheduled job descriptions
26
+ roadmap/ feature roadmap (one file per item, type: roadmap)
26
27
 
27
28
  dist/ ← compiled output (code + config)
28
29
  methods/ backend contract
@@ -60,6 +61,7 @@ my-app/
60
61
  | Interface configs | `dist/interfaces/*/interface.json` | One per non-web interface type |
61
62
  | Specs | `src/*.md` | Natural language, MSFM format |
62
63
  | Brand identity | `src/interfaces/@brand/` | visual.md (aesthetic), colors.md (palette), typography.md (fonts), voice.md (tone), assets/ |
64
+ | Roadmap | `src/roadmap/*.md` | Feature roadmap items (type: roadmap). One file per feature with status, dependencies, and history. |
63
65
  | Reference material | `src/references/` | Context for the agent, not consumed by platform |
64
66
 
65
67
  ## The Two SDKs
package/dist/headless.js CHANGED
@@ -1,7 +1,7 @@
1
1
  // src/headless.ts
2
2
  import { createInterface } from "readline";
3
- import fs16 from "fs";
4
- import path9 from "path";
3
+ import fs17 from "fs";
4
+ import path10 from "path";
5
5
 
6
6
  // src/config.ts
7
7
  import fs2 from "fs";
@@ -2435,20 +2435,6 @@ var DESIGN_RESEARCH_TOOLS = [
2435
2435
  required: ["url"]
2436
2436
  }
2437
2437
  },
2438
- {
2439
- name: "searchStockPhotos",
2440
- description: 'Search Pexels for stock photos. Returns image URLs with descriptions. Use concrete, descriptive queries ("person working at laptop in modern office" not "business").',
2441
- inputSchema: {
2442
- type: "object",
2443
- properties: {
2444
- query: {
2445
- type: "string",
2446
- description: "What kind of photo to search for."
2447
- }
2448
- },
2449
- required: ["query"]
2450
- }
2451
- },
2452
2438
  {
2453
2439
  name: "searchProductScreenshots",
2454
2440
  description: 'Search for screenshots of real products and apps. Use to find what existing products look like ("stripe dashboard", "linear app", "notion workspace"). Returns image results of actual product UI. Use this for layout and design research on real products, NOT for abstract design inspiration.',
@@ -2487,32 +2473,6 @@ var DESIGN_RESEARCH_TOOLS = [
2487
2473
  },
2488
2474
  required: ["prompts"]
2489
2475
  }
2490
- },
2491
- {
2492
- name: "editImage",
2493
- description: "Edit an existing image using a text instruction. Takes a source image URL and a prompt describing the edits (color grading, style transfer, modifications, adding/removing elements). Returns a new CDN URL.",
2494
- inputSchema: {
2495
- type: "object",
2496
- properties: {
2497
- imageUrl: {
2498
- type: "string",
2499
- description: "URL of the source image to edit."
2500
- },
2501
- prompt: {
2502
- type: "string",
2503
- description: 'What to change. Describe the edit as an instruction: "apply warm golden hour color grading", "make the background darker", "add a subtle film grain texture".'
2504
- },
2505
- width: {
2506
- type: "number",
2507
- description: "Output width in pixels. Default 2048. Range: 2048-4096."
2508
- },
2509
- height: {
2510
- type: "number",
2511
- description: "Output height in pixels. Default 2048. Range: 2048-4096."
2512
- }
2513
- },
2514
- required: ["imageUrl", "prompt"]
2515
- }
2516
2476
  }
2517
2477
  ];
2518
2478
  function runCli(cmd) {
@@ -2572,12 +2532,6 @@ async function executeDesignTool(name, input) {
2572
2532
 
2573
2533
  ${analysis}`;
2574
2534
  }
2575
- case "searchStockPhotos": {
2576
- const encodedQuery = encodeURIComponent(input.query);
2577
- return runCli(
2578
- `mindstudio scrape-url --url "https://www.pexels.com/search/${encodedQuery}/" --page-options '{"onlyMainContent": true}' --no-meta`
2579
- );
2580
- }
2581
2535
  case "searchProductScreenshots": {
2582
2536
  const query = `${input.product} product screenshot UI 2026`;
2583
2537
  return runCli(
@@ -2612,24 +2566,6 @@ ${analysis}`;
2612
2566
  }));
2613
2567
  return runCli(`mindstudio batch '${JSON.stringify(steps)}' --no-meta`);
2614
2568
  }
2615
- case "editImage": {
2616
- const width = input.width || 2048;
2617
- const height = input.height || 2048;
2618
- const step = JSON.stringify({
2619
- prompt: input.prompt,
2620
- imageModelOverride: {
2621
- model: "seedream-4.5",
2622
- config: {
2623
- images: [input.imageUrl],
2624
- width,
2625
- height
2626
- }
2627
- }
2628
- });
2629
- return runCli(
2630
- `mindstudio generate-image '${step}' --output-key imageUrl --no-meta`
2631
- );
2632
- }
2633
2569
  default:
2634
2570
  return `Error: unknown tool "${name}"`;
2635
2571
  }
@@ -2640,8 +2576,8 @@ import fs14 from "fs";
2640
2576
  import path8 from "path";
2641
2577
  var base2 = import.meta.dirname ?? path8.dirname(new URL(import.meta.url).pathname);
2642
2578
  function resolvePath(filename) {
2643
- const local2 = path8.join(base2, filename);
2644
- return fs14.existsSync(local2) ? local2 : path8.join(base2, "subagents", "designExpert", filename);
2579
+ const local3 = path8.join(base2, filename);
2580
+ return fs14.existsSync(local3) ? local3 : path8.join(base2, "subagents", "designExpert", filename);
2645
2581
  }
2646
2582
  function readFile(filename) {
2647
2583
  return fs14.readFileSync(resolvePath(filename), "utf-8").trim();
@@ -2780,6 +2716,152 @@ var designExpertTool = {
2780
2716
  }
2781
2717
  };
2782
2718
 
2719
+ // src/subagents/productVision/index.ts
2720
+ import fs15 from "fs";
2721
+ import path9 from "path";
2722
+ var base3 = import.meta.dirname ?? path9.dirname(new URL(import.meta.url).pathname);
2723
+ var local2 = path9.join(base3, "prompt.md");
2724
+ var PROMPT_PATH2 = fs15.existsSync(local2) ? local2 : path9.join(base3, "subagents", "productVision", "prompt.md");
2725
+ var BASE_PROMPT = fs15.readFileSync(PROMPT_PATH2, "utf-8").trim();
2726
+ function loadSpecContext() {
2727
+ const specDir = "src";
2728
+ const files = [];
2729
+ function walk(dir) {
2730
+ try {
2731
+ for (const entry of fs15.readdirSync(dir, { withFileTypes: true })) {
2732
+ const full = path9.join(dir, entry.name);
2733
+ if (entry.isDirectory()) {
2734
+ if (entry.name !== "roadmap") {
2735
+ walk(full);
2736
+ }
2737
+ } else if (entry.name.endsWith(".md")) {
2738
+ files.push(full);
2739
+ }
2740
+ }
2741
+ } catch {
2742
+ }
2743
+ }
2744
+ walk(specDir);
2745
+ if (files.length === 0) {
2746
+ return "";
2747
+ }
2748
+ const sections = files.map((f) => {
2749
+ try {
2750
+ const content = fs15.readFileSync(f, "utf-8").trim();
2751
+ return `<file path="${f}">
2752
+ ${content}
2753
+ </file>`;
2754
+ } catch {
2755
+ return "";
2756
+ }
2757
+ }).filter(Boolean);
2758
+ return `<spec_files>
2759
+ ${sections.join("\n\n")}
2760
+ </spec_files>`;
2761
+ }
2762
+ var VISION_TOOLS = [
2763
+ {
2764
+ name: "writeRoadmapItem",
2765
+ description: "Write a roadmap item to src/roadmap/. Call this once for each idea.",
2766
+ inputSchema: {
2767
+ type: "object",
2768
+ properties: {
2769
+ slug: {
2770
+ type: "string",
2771
+ description: 'Kebab-case filename (without .md). e.g. "ai-weekly-digest"'
2772
+ },
2773
+ name: {
2774
+ type: "string",
2775
+ description: "User-facing feature name."
2776
+ },
2777
+ description: {
2778
+ type: "string",
2779
+ description: "Short user-facing summary (1-2 sentences)."
2780
+ },
2781
+ effort: {
2782
+ type: "string",
2783
+ enum: ["quick", "small", "medium", "large"]
2784
+ },
2785
+ requires: {
2786
+ type: "array",
2787
+ items: { type: "string" },
2788
+ description: "Slugs of prerequisite roadmap items. Empty array if independent."
2789
+ },
2790
+ body: {
2791
+ type: "string",
2792
+ description: "Full MSFM body: prose description for the user, followed by ~~~annotation~~~ with technical implementation notes for the building agent."
2793
+ }
2794
+ },
2795
+ required: ["slug", "name", "description", "effort", "requires", "body"]
2796
+ }
2797
+ }
2798
+ ];
2799
+ async function executeVisionTool(name, input) {
2800
+ if (name !== "writeRoadmapItem") {
2801
+ return `Error: unknown tool "${name}"`;
2802
+ }
2803
+ const { slug, name: itemName, description, effort, requires, body } = input;
2804
+ const dir = "src/roadmap";
2805
+ const filePath = path9.join(dir, `${slug}.md`);
2806
+ try {
2807
+ fs15.mkdirSync(dir, { recursive: true });
2808
+ const requiresYaml = requires.length === 0 ? "[]" : `[${requires.map((r) => `"${r}"`).join(", ")}]`;
2809
+ const content = `---
2810
+ name: ${itemName}
2811
+ type: roadmap
2812
+ status: ${slug === "mvp" ? "in-progress" : "not-started"}
2813
+ description: ${description}
2814
+ effort: ${effort}
2815
+ requires: ${requiresYaml}
2816
+ ---
2817
+
2818
+ ${body}
2819
+ `;
2820
+ fs15.writeFileSync(filePath, content, "utf-8");
2821
+ return `Wrote ${filePath}`;
2822
+ } catch (err) {
2823
+ return `Error writing ${filePath}: ${err.message}`;
2824
+ }
2825
+ }
2826
+ var productVisionTool = {
2827
+ definition: {
2828
+ name: "productVision",
2829
+ description: `A product visionary that imagines where the project could go next. It automatically reads all spec files from src/ for context. Pass a brief description of the app and who it's for. It generates 10-15 ambitious, creative roadmap ideas and writes them directly to src/roadmap/. Use this at the end of spec authoring to populate the roadmap.`,
2830
+ inputSchema: {
2831
+ type: "object",
2832
+ properties: {
2833
+ task: {
2834
+ type: "string",
2835
+ description: "Brief description of the app and who it's for. The tool reads the full spec files automatically \u2014 no need to repeat their contents."
2836
+ }
2837
+ },
2838
+ required: ["task"]
2839
+ }
2840
+ },
2841
+ async execute(input, context) {
2842
+ if (!context) {
2843
+ return "Error: product vision requires execution context";
2844
+ }
2845
+ const specContext = loadSpecContext();
2846
+ const system = specContext ? `${BASE_PROMPT}
2847
+
2848
+ ${specContext}` : BASE_PROMPT;
2849
+ return runSubAgent({
2850
+ system,
2851
+ task: input.task,
2852
+ tools: VISION_TOOLS,
2853
+ externalTools: /* @__PURE__ */ new Set(),
2854
+ executeTool: executeVisionTool,
2855
+ apiConfig: context.apiConfig,
2856
+ model: context.model,
2857
+ signal: context.signal,
2858
+ parentToolId: context.toolCallId,
2859
+ onEvent: context.onEvent,
2860
+ resolveExternalTool: context.resolveExternalTool
2861
+ });
2862
+ }
2863
+ };
2864
+
2783
2865
  // src/tools/index.ts
2784
2866
  function getSpecTools() {
2785
2867
  return [readSpecTool, writeSpecTool, editSpecTool, listSpecFilesTool];
@@ -2813,7 +2895,8 @@ function getCommonTools() {
2813
2895
  fetchUrlTool,
2814
2896
  searchGoogleTool,
2815
2897
  setProjectNameTool,
2816
- designExpertTool
2898
+ designExpertTool,
2899
+ productVisionTool
2817
2900
  ];
2818
2901
  }
2819
2902
  function getPostOnboardingTools() {
@@ -2860,11 +2943,11 @@ function executeTool(name, input, context) {
2860
2943
  }
2861
2944
 
2862
2945
  // src/session.ts
2863
- import fs15 from "fs";
2946
+ import fs16 from "fs";
2864
2947
  var SESSION_FILE = ".remy-session.json";
2865
2948
  function loadSession(state) {
2866
2949
  try {
2867
- const raw = fs15.readFileSync(SESSION_FILE, "utf-8");
2950
+ const raw = fs16.readFileSync(SESSION_FILE, "utf-8");
2868
2951
  const data = JSON.parse(raw);
2869
2952
  if (Array.isArray(data.messages) && data.messages.length > 0) {
2870
2953
  state.messages = sanitizeMessages(data.messages);
@@ -2906,7 +2989,7 @@ function sanitizeMessages(messages) {
2906
2989
  }
2907
2990
  function saveSession(state) {
2908
2991
  try {
2909
- fs15.writeFileSync(
2992
+ fs16.writeFileSync(
2910
2993
  SESSION_FILE,
2911
2994
  JSON.stringify({ messages: state.messages }, null, 2),
2912
2995
  "utf-8"
@@ -2917,7 +3000,7 @@ function saveSession(state) {
2917
3000
  function clearSession(state) {
2918
3001
  state.messages = [];
2919
3002
  try {
2920
- fs15.unlinkSync(SESSION_FILE);
3003
+ fs16.unlinkSync(SESSION_FILE);
2921
3004
  } catch {
2922
3005
  }
2923
3006
  }
@@ -3538,10 +3621,10 @@ async function runTurn(params) {
3538
3621
  }
3539
3622
 
3540
3623
  // src/headless.ts
3541
- var BASE_DIR = import.meta.dirname ?? path9.dirname(new URL(import.meta.url).pathname);
3542
- var ACTIONS_DIR = path9.join(BASE_DIR, "actions");
3624
+ var BASE_DIR = import.meta.dirname ?? path10.dirname(new URL(import.meta.url).pathname);
3625
+ var ACTIONS_DIR = path10.join(BASE_DIR, "actions");
3543
3626
  function loadActionPrompt(name) {
3544
- return fs16.readFileSync(path9.join(ACTIONS_DIR, `${name}.md`), "utf-8").trim();
3627
+ return fs17.readFileSync(path10.join(ACTIONS_DIR, `${name}.md`), "utf-8").trim();
3545
3628
  }
3546
3629
  function emit(event, data) {
3547
3630
  process.stdout.write(JSON.stringify({ event, ...data }) + "\n");
package/dist/index.js CHANGED
@@ -2370,12 +2370,6 @@ async function executeDesignTool(name, input) {
2370
2370
 
2371
2371
  ${analysis}`;
2372
2372
  }
2373
- case "searchStockPhotos": {
2374
- const encodedQuery = encodeURIComponent(input.query);
2375
- return runCli(
2376
- `mindstudio scrape-url --url "https://www.pexels.com/search/${encodedQuery}/" --page-options '{"onlyMainContent": true}' --no-meta`
2377
- );
2378
- }
2379
2373
  case "searchProductScreenshots": {
2380
2374
  const query = `${input.product} product screenshot UI 2026`;
2381
2375
  return runCli(
@@ -2410,24 +2404,6 @@ ${analysis}`;
2410
2404
  }));
2411
2405
  return runCli(`mindstudio batch '${JSON.stringify(steps)}' --no-meta`);
2412
2406
  }
2413
- case "editImage": {
2414
- const width = input.width || 2048;
2415
- const height = input.height || 2048;
2416
- const step = JSON.stringify({
2417
- prompt: input.prompt,
2418
- imageModelOverride: {
2419
- model: "seedream-4.5",
2420
- config: {
2421
- images: [input.imageUrl],
2422
- width,
2423
- height
2424
- }
2425
- }
2426
- });
2427
- return runCli(
2428
- `mindstudio generate-image '${step}' --output-key imageUrl --no-meta`
2429
- );
2430
- }
2431
2407
  default:
2432
2408
  return `Error: unknown tool "${name}"`;
2433
2409
  }
@@ -2526,20 +2502,6 @@ Be specific and concise.`;
2526
2502
  required: ["url"]
2527
2503
  }
2528
2504
  },
2529
- {
2530
- name: "searchStockPhotos",
2531
- description: 'Search Pexels for stock photos. Returns image URLs with descriptions. Use concrete, descriptive queries ("person working at laptop in modern office" not "business").',
2532
- inputSchema: {
2533
- type: "object",
2534
- properties: {
2535
- query: {
2536
- type: "string",
2537
- description: "What kind of photo to search for."
2538
- }
2539
- },
2540
- required: ["query"]
2541
- }
2542
- },
2543
2505
  {
2544
2506
  name: "searchProductScreenshots",
2545
2507
  description: 'Search for screenshots of real products and apps. Use to find what existing products look like ("stripe dashboard", "linear app", "notion workspace"). Returns image results of actual product UI. Use this for layout and design research on real products, NOT for abstract design inspiration.',
@@ -2578,32 +2540,6 @@ Be specific and concise.`;
2578
2540
  },
2579
2541
  required: ["prompts"]
2580
2542
  }
2581
- },
2582
- {
2583
- name: "editImage",
2584
- description: "Edit an existing image using a text instruction. Takes a source image URL and a prompt describing the edits (color grading, style transfer, modifications, adding/removing elements). Returns a new CDN URL.",
2585
- inputSchema: {
2586
- type: "object",
2587
- properties: {
2588
- imageUrl: {
2589
- type: "string",
2590
- description: "URL of the source image to edit."
2591
- },
2592
- prompt: {
2593
- type: "string",
2594
- description: 'What to change. Describe the edit as an instruction: "apply warm golden hour color grading", "make the background darker", "add a subtle film grain texture".'
2595
- },
2596
- width: {
2597
- type: "number",
2598
- description: "Output width in pixels. Default 2048. Range: 2048-4096."
2599
- },
2600
- height: {
2601
- type: "number",
2602
- description: "Output height in pixels. Default 2048. Range: 2048-4096."
2603
- }
2604
- },
2605
- required: ["imageUrl", "prompt"]
2606
- }
2607
2543
  }
2608
2544
  ];
2609
2545
  }
@@ -2613,8 +2549,8 @@ Be specific and concise.`;
2613
2549
  import fs11 from "fs";
2614
2550
  import path5 from "path";
2615
2551
  function resolvePath(filename) {
2616
- const local2 = path5.join(base2, filename);
2617
- return fs11.existsSync(local2) ? local2 : path5.join(base2, "subagents", "designExpert", filename);
2552
+ const local3 = path5.join(base2, filename);
2553
+ return fs11.existsSync(local3) ? local3 : path5.join(base2, "subagents", "designExpert", filename);
2618
2554
  }
2619
2555
  function readFile(filename) {
2620
2556
  return fs11.readFileSync(resolvePath(filename), "utf-8").trim();
@@ -2769,6 +2705,159 @@ Concrete resources: hex values, font names with CSS URLs, image URLs, layout des
2769
2705
  }
2770
2706
  });
2771
2707
 
2708
+ // src/subagents/productVision/index.ts
2709
+ import fs12 from "fs";
2710
+ import path6 from "path";
2711
+ function loadSpecContext() {
2712
+ const specDir = "src";
2713
+ const files = [];
2714
+ function walk(dir) {
2715
+ try {
2716
+ for (const entry of fs12.readdirSync(dir, { withFileTypes: true })) {
2717
+ const full = path6.join(dir, entry.name);
2718
+ if (entry.isDirectory()) {
2719
+ if (entry.name !== "roadmap") {
2720
+ walk(full);
2721
+ }
2722
+ } else if (entry.name.endsWith(".md")) {
2723
+ files.push(full);
2724
+ }
2725
+ }
2726
+ } catch {
2727
+ }
2728
+ }
2729
+ walk(specDir);
2730
+ if (files.length === 0) {
2731
+ return "";
2732
+ }
2733
+ const sections = files.map((f) => {
2734
+ try {
2735
+ const content = fs12.readFileSync(f, "utf-8").trim();
2736
+ return `<file path="${f}">
2737
+ ${content}
2738
+ </file>`;
2739
+ } catch {
2740
+ return "";
2741
+ }
2742
+ }).filter(Boolean);
2743
+ return `<spec_files>
2744
+ ${sections.join("\n\n")}
2745
+ </spec_files>`;
2746
+ }
2747
+ async function executeVisionTool(name, input) {
2748
+ if (name !== "writeRoadmapItem") {
2749
+ return `Error: unknown tool "${name}"`;
2750
+ }
2751
+ const { slug, name: itemName, description, effort, requires, body } = input;
2752
+ const dir = "src/roadmap";
2753
+ const filePath = path6.join(dir, `${slug}.md`);
2754
+ try {
2755
+ fs12.mkdirSync(dir, { recursive: true });
2756
+ const requiresYaml = requires.length === 0 ? "[]" : `[${requires.map((r) => `"${r}"`).join(", ")}]`;
2757
+ const content = `---
2758
+ name: ${itemName}
2759
+ type: roadmap
2760
+ status: ${slug === "mvp" ? "in-progress" : "not-started"}
2761
+ description: ${description}
2762
+ effort: ${effort}
2763
+ requires: ${requiresYaml}
2764
+ ---
2765
+
2766
+ ${body}
2767
+ `;
2768
+ fs12.writeFileSync(filePath, content, "utf-8");
2769
+ return `Wrote ${filePath}`;
2770
+ } catch (err) {
2771
+ return `Error writing ${filePath}: ${err.message}`;
2772
+ }
2773
+ }
2774
+ var base3, local2, PROMPT_PATH2, BASE_PROMPT, VISION_TOOLS, productVisionTool;
2775
+ var init_productVision = __esm({
2776
+ "src/subagents/productVision/index.ts"() {
2777
+ "use strict";
2778
+ init_runner();
2779
+ base3 = import.meta.dirname ?? path6.dirname(new URL(import.meta.url).pathname);
2780
+ local2 = path6.join(base3, "prompt.md");
2781
+ PROMPT_PATH2 = fs12.existsSync(local2) ? local2 : path6.join(base3, "subagents", "productVision", "prompt.md");
2782
+ BASE_PROMPT = fs12.readFileSync(PROMPT_PATH2, "utf-8").trim();
2783
+ VISION_TOOLS = [
2784
+ {
2785
+ name: "writeRoadmapItem",
2786
+ description: "Write a roadmap item to src/roadmap/. Call this once for each idea.",
2787
+ inputSchema: {
2788
+ type: "object",
2789
+ properties: {
2790
+ slug: {
2791
+ type: "string",
2792
+ description: 'Kebab-case filename (without .md). e.g. "ai-weekly-digest"'
2793
+ },
2794
+ name: {
2795
+ type: "string",
2796
+ description: "User-facing feature name."
2797
+ },
2798
+ description: {
2799
+ type: "string",
2800
+ description: "Short user-facing summary (1-2 sentences)."
2801
+ },
2802
+ effort: {
2803
+ type: "string",
2804
+ enum: ["quick", "small", "medium", "large"]
2805
+ },
2806
+ requires: {
2807
+ type: "array",
2808
+ items: { type: "string" },
2809
+ description: "Slugs of prerequisite roadmap items. Empty array if independent."
2810
+ },
2811
+ body: {
2812
+ type: "string",
2813
+ description: "Full MSFM body: prose description for the user, followed by ~~~annotation~~~ with technical implementation notes for the building agent."
2814
+ }
2815
+ },
2816
+ required: ["slug", "name", "description", "effort", "requires", "body"]
2817
+ }
2818
+ }
2819
+ ];
2820
+ productVisionTool = {
2821
+ definition: {
2822
+ name: "productVision",
2823
+ description: `A product visionary that imagines where the project could go next. It automatically reads all spec files from src/ for context. Pass a brief description of the app and who it's for. It generates 10-15 ambitious, creative roadmap ideas and writes them directly to src/roadmap/. Use this at the end of spec authoring to populate the roadmap.`,
2824
+ inputSchema: {
2825
+ type: "object",
2826
+ properties: {
2827
+ task: {
2828
+ type: "string",
2829
+ description: "Brief description of the app and who it's for. The tool reads the full spec files automatically \u2014 no need to repeat their contents."
2830
+ }
2831
+ },
2832
+ required: ["task"]
2833
+ }
2834
+ },
2835
+ async execute(input, context) {
2836
+ if (!context) {
2837
+ return "Error: product vision requires execution context";
2838
+ }
2839
+ const specContext = loadSpecContext();
2840
+ const system = specContext ? `${BASE_PROMPT}
2841
+
2842
+ ${specContext}` : BASE_PROMPT;
2843
+ return runSubAgent({
2844
+ system,
2845
+ task: input.task,
2846
+ tools: VISION_TOOLS,
2847
+ externalTools: /* @__PURE__ */ new Set(),
2848
+ executeTool: executeVisionTool,
2849
+ apiConfig: context.apiConfig,
2850
+ model: context.model,
2851
+ signal: context.signal,
2852
+ parentToolId: context.toolCallId,
2853
+ onEvent: context.onEvent,
2854
+ resolveExternalTool: context.resolveExternalTool
2855
+ });
2856
+ }
2857
+ };
2858
+ }
2859
+ });
2860
+
2772
2861
  // src/tools/index.ts
2773
2862
  function getSpecTools() {
2774
2863
  return [readSpecTool, writeSpecTool, editSpecTool, listSpecFilesTool];
@@ -2802,7 +2891,8 @@ function getCommonTools() {
2802
2891
  fetchUrlTool,
2803
2892
  searchGoogleTool,
2804
2893
  setProjectNameTool,
2805
- designExpertTool
2894
+ designExpertTool,
2895
+ productVisionTool
2806
2896
  ];
2807
2897
  }
2808
2898
  function getPostOnboardingTools() {
@@ -2881,14 +2971,15 @@ var init_tools3 = __esm({
2881
2971
  init_screenshot();
2882
2972
  init_browserAutomation();
2883
2973
  init_designExpert();
2974
+ init_productVision();
2884
2975
  }
2885
2976
  });
2886
2977
 
2887
2978
  // src/session.ts
2888
- import fs12 from "fs";
2979
+ import fs13 from "fs";
2889
2980
  function loadSession(state) {
2890
2981
  try {
2891
- const raw = fs12.readFileSync(SESSION_FILE, "utf-8");
2982
+ const raw = fs13.readFileSync(SESSION_FILE, "utf-8");
2892
2983
  const data = JSON.parse(raw);
2893
2984
  if (Array.isArray(data.messages) && data.messages.length > 0) {
2894
2985
  state.messages = sanitizeMessages(data.messages);
@@ -2930,7 +3021,7 @@ function sanitizeMessages(messages) {
2930
3021
  }
2931
3022
  function saveSession(state) {
2932
3023
  try {
2933
- fs12.writeFileSync(
3024
+ fs13.writeFileSync(
2934
3025
  SESSION_FILE,
2935
3026
  JSON.stringify({ messages: state.messages }, null, 2),
2936
3027
  "utf-8"
@@ -2941,7 +3032,7 @@ function saveSession(state) {
2941
3032
  function clearSession(state) {
2942
3033
  state.messages = [];
2943
3034
  try {
2944
- fs12.unlinkSync(SESSION_FILE);
3035
+ fs13.unlinkSync(SESSION_FILE);
2945
3036
  } catch {
2946
3037
  }
2947
3038
  }
@@ -3600,12 +3691,12 @@ var init_agent = __esm({
3600
3691
  });
3601
3692
 
3602
3693
  // src/prompt/static/projectContext.ts
3603
- import fs13 from "fs";
3604
- import path6 from "path";
3694
+ import fs14 from "fs";
3695
+ import path7 from "path";
3605
3696
  function loadProjectInstructions() {
3606
3697
  for (const file of AGENT_INSTRUCTION_FILES) {
3607
3698
  try {
3608
- const content = fs13.readFileSync(file, "utf-8").trim();
3699
+ const content = fs14.readFileSync(file, "utf-8").trim();
3609
3700
  if (content) {
3610
3701
  return `
3611
3702
  ## Project Instructions (${file})
@@ -3618,7 +3709,7 @@ ${content}`;
3618
3709
  }
3619
3710
  function loadProjectManifest() {
3620
3711
  try {
3621
- const manifest = fs13.readFileSync("mindstudio.json", "utf-8");
3712
+ const manifest = fs14.readFileSync("mindstudio.json", "utf-8");
3622
3713
  return `
3623
3714
  ## Project Manifest (mindstudio.json)
3624
3715
  \`\`\`json
@@ -3659,9 +3750,9 @@ ${entries.join("\n")}`;
3659
3750
  function walkMdFiles(dir) {
3660
3751
  const results = [];
3661
3752
  try {
3662
- const entries = fs13.readdirSync(dir, { withFileTypes: true });
3753
+ const entries = fs14.readdirSync(dir, { withFileTypes: true });
3663
3754
  for (const entry of entries) {
3664
- const full = path6.join(dir, entry.name);
3755
+ const full = path7.join(dir, entry.name);
3665
3756
  if (entry.isDirectory()) {
3666
3757
  results.push(...walkMdFiles(full));
3667
3758
  } else if (entry.name.endsWith(".md")) {
@@ -3674,7 +3765,7 @@ function walkMdFiles(dir) {
3674
3765
  }
3675
3766
  function parseFrontmatter(filePath) {
3676
3767
  try {
3677
- const content = fs13.readFileSync(filePath, "utf-8");
3768
+ const content = fs14.readFileSync(filePath, "utf-8");
3678
3769
  const match = content.match(/^---\n([\s\S]*?)\n---/);
3679
3770
  if (!match) {
3680
3771
  return { name: "", description: "", type: "" };
@@ -3690,7 +3781,7 @@ function parseFrontmatter(filePath) {
3690
3781
  }
3691
3782
  function loadProjectFileListing() {
3692
3783
  try {
3693
- const entries = fs13.readdirSync(".", { withFileTypes: true });
3784
+ const entries = fs14.readdirSync(".", { withFileTypes: true });
3694
3785
  const listing = entries.filter((e) => e.name !== ".git" && e.name !== "node_modules").sort((a, b) => {
3695
3786
  if (a.isDirectory() && !b.isDirectory()) {
3696
3787
  return -1;
@@ -3733,12 +3824,12 @@ var init_projectContext = __esm({
3733
3824
  });
3734
3825
 
3735
3826
  // src/prompt/index.ts
3736
- import fs14 from "fs";
3737
- import path7 from "path";
3827
+ import fs15 from "fs";
3828
+ import path8 from "path";
3738
3829
  function requireFile(filePath) {
3739
- const full = path7.join(PROMPT_DIR, filePath);
3830
+ const full = path8.join(PROMPT_DIR, filePath);
3740
3831
  try {
3741
- return fs14.readFileSync(full, "utf-8").trim();
3832
+ return fs15.readFileSync(full, "utf-8").trim();
3742
3833
  } catch {
3743
3834
  throw new Error(`Required prompt file missing: ${full}`);
3744
3835
  }
@@ -3863,17 +3954,17 @@ var init_prompt3 = __esm({
3863
3954
  "use strict";
3864
3955
  init_lsp();
3865
3956
  init_projectContext();
3866
- PROMPT_DIR = import.meta.dirname ?? path7.dirname(new URL(import.meta.url).pathname);
3957
+ PROMPT_DIR = import.meta.dirname ?? path8.dirname(new URL(import.meta.url).pathname);
3867
3958
  }
3868
3959
  });
3869
3960
 
3870
3961
  // src/config.ts
3871
- import fs15 from "fs";
3872
- import path8 from "path";
3962
+ import fs16 from "fs";
3963
+ import path9 from "path";
3873
3964
  import os from "os";
3874
3965
  function loadConfigFile() {
3875
3966
  try {
3876
- const raw = fs15.readFileSync(CONFIG_PATH, "utf-8");
3967
+ const raw = fs16.readFileSync(CONFIG_PATH, "utf-8");
3877
3968
  log.debug("Loaded config file", { path: CONFIG_PATH });
3878
3969
  return JSON.parse(raw);
3879
3970
  } catch (err) {
@@ -3909,7 +4000,7 @@ var init_config = __esm({
3909
4000
  "src/config.ts"() {
3910
4001
  "use strict";
3911
4002
  init_logger();
3912
- CONFIG_PATH = path8.join(
4003
+ CONFIG_PATH = path9.join(
3913
4004
  os.homedir(),
3914
4005
  ".mindstudio-local-tunnel",
3915
4006
  "config.json"
@@ -3924,10 +4015,10 @@ __export(headless_exports, {
3924
4015
  startHeadless: () => startHeadless
3925
4016
  });
3926
4017
  import { createInterface } from "readline";
3927
- import fs16 from "fs";
3928
- import path9 from "path";
4018
+ import fs17 from "fs";
4019
+ import path10 from "path";
3929
4020
  function loadActionPrompt(name) {
3930
- return fs16.readFileSync(path9.join(ACTIONS_DIR, `${name}.md`), "utf-8").trim();
4021
+ return fs17.readFileSync(path10.join(ACTIONS_DIR, `${name}.md`), "utf-8").trim();
3931
4022
  }
3932
4023
  function emit(event, data) {
3933
4024
  process.stdout.write(JSON.stringify({ event, ...data }) + "\n");
@@ -4146,16 +4237,16 @@ var init_headless = __esm({
4146
4237
  init_lsp();
4147
4238
  init_agent();
4148
4239
  init_session();
4149
- BASE_DIR = import.meta.dirname ?? path9.dirname(new URL(import.meta.url).pathname);
4150
- ACTIONS_DIR = path9.join(BASE_DIR, "actions");
4240
+ BASE_DIR = import.meta.dirname ?? path10.dirname(new URL(import.meta.url).pathname);
4241
+ ACTIONS_DIR = path10.join(BASE_DIR, "actions");
4151
4242
  }
4152
4243
  });
4153
4244
 
4154
4245
  // src/index.tsx
4155
4246
  import { render } from "ink";
4156
4247
  import os2 from "os";
4157
- import fs17 from "fs";
4158
- import path10 from "path";
4248
+ import fs18 from "fs";
4249
+ import path11 from "path";
4159
4250
 
4160
4251
  // src/tui/App.tsx
4161
4252
  import { useState as useState2, useCallback, useRef } from "react";
@@ -4472,8 +4563,8 @@ for (let i = 0; i < args.length; i++) {
4472
4563
  }
4473
4564
  function printDebugInfo(config) {
4474
4565
  const pkg = JSON.parse(
4475
- fs17.readFileSync(
4476
- path10.join(import.meta.dirname, "..", "package.json"),
4566
+ fs18.readFileSync(
4567
+ path11.join(import.meta.dirname, "..", "package.json"),
4477
4568
  "utf-8"
4478
4569
  )
4479
4570
  );
@@ -112,7 +112,10 @@ A spec starts with YAML frontmatter followed by freeform Markdown. There's no ma
112
112
  **Frontmatter fields:**
113
113
  - `name` (required) — display name for the spec file
114
114
  - `description` (optional) — short summary of what this file covers
115
- - `type` (optional) — defaults to `spec`. Other values: `design/color` (color palette definition), `design/typography` (font and type style definition). The frontend renders these types with specialized editors.
115
+ - `type` (optional) — defaults to `spec`. Other values: `design/color` (color palette definition), `design/typography` (font and type style definition), `roadmap` (feature roadmap item). The frontend renders these types with specialized editors.
116
+ - `status` (roadmap only) — `done`, `in-progress`, or `not-started`
117
+ - `requires` (roadmap only) — array of slugs for prerequisite roadmap items. Empty array means available now.
118
+ - `effort` (roadmap only) — `quick`, `small`, `medium`, or `large`
116
119
 
117
120
  ```markdown
118
121
  ---
@@ -187,3 +190,32 @@ styles:
187
190
  description: Default reading text
188
191
  ```
189
192
  ```
193
+
194
+ Roadmap item example (one file per feature in `src/roadmap/`):
195
+
196
+ ```markdown
197
+ ---
198
+ name: Share & Export
199
+ type: roadmap
200
+ status: not-started
201
+ description: Share haikus as image cards to social media or download as prints.
202
+ requires: []
203
+ effort: medium
204
+ ---
205
+
206
+ Share haikus as styled image cards on social media or download as prints.
207
+ The card system generates images using the brand's typography and color
208
+ palette, creating shareable assets that feel native to the app's identity.
209
+
210
+ ~~~
211
+ Use generateImages with Seedream to create styled cards. Card template
212
+ applies brand typography and colors from the spec. Export as PNG via
213
+ CDN transform at 2x resolution. Social sharing via Web Share API with
214
+ clipboard fallback for unsupported browsers.
215
+ ~~~
216
+
217
+ ## History
218
+
219
+ - **2026-03-22** — Built card generation using generateImages with Seedream.
220
+ Added share button to haiku detail view.
221
+ ```
@@ -23,6 +23,7 @@ my-app/
23
23
  web.md web UI spec
24
24
  api.md API conventions
25
25
  cron.md scheduled job descriptions
26
+ roadmap/ feature roadmap (one file per item, type: roadmap)
26
27
 
27
28
  dist/ ← compiled output (code + config)
28
29
  methods/ backend contract
@@ -60,6 +61,7 @@ my-app/
60
61
  | Interface configs | `dist/interfaces/*/interface.json` | One per non-web interface type |
61
62
  | Specs | `src/*.md` | Natural language, MSFM format |
62
63
  | Brand identity | `src/interfaces/@brand/` | visual.md (aesthetic), colors.md (palette), typography.md (fonts), voice.md (tone), assets/ |
64
+ | Roadmap | `src/roadmap/*.md` | Feature roadmap items (type: roadmap). One file per feature with status, dependencies, and history. |
63
65
  | Reference material | `src/references/` | Context for the agent, not consumed by platform |
64
66
 
65
67
  ## The Two SDKs
@@ -17,8 +17,9 @@ The scaffold starts with these spec files that cover the full picture of the app
17
17
  - **`src/interfaces/@brand/colors.md`** (`type: design/color`) — brand color palette: 3-5 named colors with evocative names and brand-level descriptions. The design system is derived from these.
18
18
  - **`src/interfaces/@brand/typography.md`** (`type: design/typography`) — font choices with source URLs and 1-2 anchor styles (Display, Body). Additional styles are derived from these anchors.
19
19
  - **`src/interfaces/@brand/voice.md`** — voice and terminology: tone, error messages, word choices
20
+ - **`src/roadmap/`** — feature roadmap. One file per feature (`type: roadmap`). See "Roadmap" below.
20
21
 
21
- Start from these four and extend as needed. Add interface specs for other interface types (`api.md`, `cron.md`, etc.) if the app uses them. Split `app.md` into multiple files if the domain is complex. The agent uses the entire `src/` folder as compilation context, so organize however serves clarity.
22
+ Start from these and extend as needed. Add interface specs for other interface types (`api.md`, `cron.md`, etc.) if the app uses them. Split `app.md` into multiple files if the domain is complex. The agent uses the entire `src/` folder as compilation context, so organize however serves clarity.
22
23
 
23
24
  Users often care about look and feel as much as (or more than) underlying data structures. Don't treat the brand and interface specs as an afterthought — for many users, the visual identity and voice are the first things they want to get right.
24
25
 
@@ -56,6 +57,29 @@ When the user clicks "Build," you will receive a build command. Build everything
56
57
 
57
58
  Scenarios are cheap to write (same `db.push()` calls as methods) but critical for testing. An app without scenarios is not done.
58
59
 
60
+ ## Roadmap
61
+
62
+ The initial build should deliver everything the user asked for. The roadmap is not a place to defer work the user requested. It's for future additions: natural extensions of the app, features the user didn't think to ask for, and ideas that would make the app even better. Think of it as "here's what you have, and here's where you could take it next."
63
+
64
+ Roadmap items live in `src/roadmap/`, one MSFM file per feature with structured frontmatter:
65
+
66
+ - `name` — the feature name
67
+ - `type: roadmap`
68
+ - `status` — `done`, `in-progress`, or `not-started`
69
+ - `description` — short summary (used for index rendering)
70
+ - `requires` — array of slugs for prerequisite items. Empty array means available now.
71
+ - `effort` — `quick`, `small`, `medium`, or `large`
72
+
73
+ Each roadmap item should be a meaningful chunk of work that results in a noticeably different version of the product. Not individual tasks. Bundle polish and small improvements into single items. The big items should be product pillars — think beyond the current deliverable toward the actual product the user is building. If the user asked for a landing page, the roadmap should include building the actual product the landing page is selling.
74
+
75
+ Write names and descriptions for the user, not for developers. Focus on what the user gets, not how it's built. No technical jargon, no library names, no implementation details.
76
+
77
+ The body is freeform MSFM: prose describing the feature for the user, annotations with technical approach and architecture notes for the agent. Append a History section as items are built.
78
+
79
+ The MVP itself gets a roadmap file (`src/roadmap/mvp.md`) with `status: in-progress` that documents what the initial build covers. Update it to `done` after the build completes. Other items start as `not-started`. Some items depend on others (`requires: [share-export]`), some are independent (`requires: []`). The user picks what to build next.
80
+
81
+ Write the roadmap as the final step of spec authoring, after all other spec files are written. Use the `productVision` tool to generate roadmap ideas — pass it the full context of what was built (the app domain, what it does, who it's for, the design direction) and it returns ambitious, creative ideas. Write each returned idea into its own roadmap file in `src/roadmap/`.
82
+
59
83
  ## Spec + Code Sync
60
84
 
61
85
  When generated code exists in `dist/`, you have both spec tools and code tools.
@@ -17,8 +17,9 @@ The scaffold starts with these spec files that cover the full picture of the app
17
17
  - **`src/interfaces/@brand/colors.md`** (`type: design/color`) — brand color palette: 3-5 named colors with evocative names and brand-level descriptions. The design system is derived from these.
18
18
  - **`src/interfaces/@brand/typography.md`** (`type: design/typography`) — font choices with source URLs and 1-2 anchor styles (Display, Body). Additional styles are derived from these anchors.
19
19
  - **`src/interfaces/@brand/voice.md`** — voice and terminology: tone, error messages, word choices
20
+ - **`src/roadmap/`** — feature roadmap. One file per feature (`type: roadmap`). See "Roadmap" below.
20
21
 
21
- Start from these four and extend as needed. Add interface specs for other interface types (`api.md`, `cron.md`, etc.) if the app uses them. Split `app.md` into multiple files if the domain is complex. The agent uses the entire `src/` folder as compilation context, so organize however serves clarity.
22
+ Start from these and extend as needed. Add interface specs for other interface types (`api.md`, `cron.md`, etc.) if the app uses them. Split `app.md` into multiple files if the domain is complex. The agent uses the entire `src/` folder as compilation context, so organize however serves clarity.
22
23
 
23
24
  Users often care about look and feel as much as (or more than) underlying data structures. Don't treat the brand and interface specs as an afterthought — for many users, the visual identity and voice are the first things they want to get right.
24
25
 
@@ -56,6 +57,29 @@ When the user clicks "Build," you will receive a build command. Build everything
56
57
 
57
58
  Scenarios are cheap to write (same `db.push()` calls as methods) but critical for testing. An app without scenarios is not done.
58
59
 
60
+ ## Roadmap
61
+
62
+ The initial build should deliver everything the user asked for. The roadmap is not a place to defer work the user requested. It's for future additions: natural extensions of the app, features the user didn't think to ask for, and ideas that would make the app even better. Think of it as "here's what you have, and here's where you could take it next."
63
+
64
+ Roadmap items live in `src/roadmap/`, one MSFM file per feature with structured frontmatter:
65
+
66
+ - `name` — the feature name
67
+ - `type: roadmap`
68
+ - `status` — `done`, `in-progress`, or `not-started`
69
+ - `description` — short summary (used for index rendering)
70
+ - `requires` — array of slugs for prerequisite items. Empty array means available now.
71
+ - `effort` — `quick`, `small`, `medium`, or `large`
72
+
73
+ Each roadmap item should be a meaningful chunk of work that results in a noticeably different version of the product. Not individual tasks. Bundle polish and small improvements into single items. The big items should be product pillars — think beyond the current deliverable toward the actual product the user is building. If the user asked for a landing page, the roadmap should include building the actual product the landing page is selling.
74
+
75
+ Write names and descriptions for the user, not for developers. Focus on what the user gets, not how it's built. No technical jargon, no library names, no implementation details.
76
+
77
+ The body is freeform MSFM: prose describing the feature for the user, annotations with technical approach and architecture notes for the agent. Append a History section as items are built.
78
+
79
+ The MVP itself gets a roadmap file (`src/roadmap/mvp.md`) with `status: in-progress` that documents what the initial build covers. Update it to `done` after the build completes. Other items start as `not-started`. Some items depend on others (`requires: [share-export]`), some are independent (`requires: []`). The user picks what to build next.
80
+
81
+ Write the roadmap as the final step of spec authoring, after all other spec files are written. Use the `productVision` tool to generate roadmap ideas — pass it the full context of what was built (the app domain, what it does, who it's for, the design direction) and it returns ambitious, creative ideas. Write each returned idea into its own roadmap file in `src/roadmap/`.
82
+
59
83
  ## Spec + Code Sync
60
84
 
61
85
  When generated code exists in `dist/`, you have both spec tools and code tools.
@@ -5,6 +5,7 @@
5
5
  - Spring physics for natural-feeling motion
6
6
  - Purposeful micro-interactions — scaling, color shifts, depth changes on hover/click
7
7
  - Staggered entrance reveals — content appearing sequentially as it enters view
8
+ - Pay attention to timing, duration, speed, and layout shift — make sure animations are beautiful, especially if they involve text or elements the user is reading or interacting with.
8
9
 
9
10
  ### Libraries
10
11
  - Prefer raw CSS animations when possible.
@@ -6,13 +6,9 @@ Not every interface needs images. A productivity dashboard, a finance tool, or a
6
6
 
7
7
  Do not provide images as "references" - images must be ready-to-use assets that can be included directly in the design.
8
8
 
9
- ### Three tools
9
+ ### Image generation
10
10
 
11
- **AI-generated photos and images** (`generateImages`) Seedream produces high-quality results for both photorealistic images and abstract/creative visuals. You have full control over the output: style, composition, colors, mood. When generating multiple images, batch them in a single `generateImages` call — they run in parallel. Generated images are production assets, not mockups or concepts — they are hosted on MindStudio CDN at full resolution and will be used directly in the final interface.
12
-
13
- **Image editing** (`editImage`) — takes an existing image URL and a text instruction describing what to change. Use this to adjust stock photos to match the brand: color grading, style transfer, cropping mood, adding atmosphere. Find a great stock photo, then edit it to align with the design direction.
14
-
15
- **Stock photography** (`searchStockPhotos`) — Pexels has modern, editorial-style photos. Good starting points that can be used directly or refined with `editImage`. Write specific queries: "person writing in notebook at minimalist desk, natural light" not "office."
11
+ Use `generateImages` to create images. Seedream produces high-quality results for both photorealistic images and abstract/creative visuals. You have full control over the output: style, composition, colors, mood. When generating multiple images, batch them in a single `generateImages` call — they run in parallel. Generated images are production assets, not mockups or concepts — they are hosted on MindStudio CDN at full resolution and will be used directly in the final interface.
16
12
 
17
13
  ### Writing good generation prompts
18
14
 
@@ -2,8 +2,7 @@
2
2
 
3
3
  - Use `screenshotAndAnalyze` only when you need to see the visual design of a site (layout, colors, typography in context). Do not screenshot font specimen pages, documentation, search results, or other text-heavy pages — use `fetchUrl` for those instead. Screenshots are expensive and slow; only use them when visual appearance matters.
4
4
  - Use `analyzeDesignReference` for consistent design analysis of images or screenshots. Use `analyzeImage` when you have a specific question about an image.
5
- - Use `searchStockPhotos` for stock imagery. Describe what you need concretely ("person working at laptop in modern office" not "business").
6
- - Use `searchProductScreenshots` to find screenshots of real products ("stripe dashboard", "linear app"). Use this for layout research on what real products look like. Do not use this for abstract design inspiration.
5
+ - Use `searchProductScreenshots` to find screenshots of real products ("stripe dashboard", "linear app"). Use this for layout research on what real products look like.
7
6
  - Use `searchGoogle` for research: font pairing recommendations, "best [domain] apps 2026", design trend articles. Prioritize authoritative sources like Figma and other design leaders, avoid random blog spam.
8
7
  - Use `fetchUrl` when you need to get the text content of a site.
9
8
  - When proposing multiple options, make them genuinely different directions (dark + bold vs. light + editorial) rather than minor variations.
@@ -0,0 +1,73 @@
1
+ The role of the assistant is to act as a product visionary — the kind of person who sees a simple prototype and immediately envisions the billion-dollar company it could become. The assistant thinks like a founder pitching the next 12 months to investors who are already excited about what they see.
2
+
3
+ The assistant is not a developer. It does not think in terms of implementations, libraries, or technical architecture. It thinks about what users would love, what would make them tell their friends, what would make the product indispensable. It thinks about what would make someone say "I can't believe this exists."
4
+
5
+ The assistant's job is to stretch the user's imagination far beyond what they asked for. The user's stated scope is a starting point, not a ceiling. If they described a simple tool, the assistant imagines it as a platform. If they asked for one feature, the assistant sees the whole product it could be part of. The user came here because they want to be inspired — that is the actual request, even if they didn't say it. Even a wild idea that gets rejected is valuable if it sparks new thinking. The assistant makes the user's ambitions bigger, not smaller.
6
+
7
+ ## How to think
8
+
9
+ The assistant has just been shown what version 1 looks like. It now imagines version 5. What does this product look like when it's fully realized? When it has a loyal user base? When it's the best in its category?
10
+
11
+ The assistant thinks in lanes, not lists. A great product roadmap has 3-5 distinct directions the product could grow, each with depth. Like a skill tree in a game: each lane starts with a foundational feature that unlocks progressively more powerful capabilities.
12
+
13
+ One lane might deepen the core experience. Another might add a social layer. Another might introduce AI capabilities that feel like magic. Another might expand beyond the web into new surfaces. Each lane has a natural progression — you can't have the advanced version without the foundation, and each step along the way results in a product that feels complete.
14
+
15
+ The assistant uses the `requires` field to express these progressions. Items within a lane depend on earlier items in that lane. Items across lanes are independent. The user can choose which lane to invest in next.
16
+
17
+ The assistant thinks across dimensions like:
18
+ - The core experience: how could it be deeper, smarter, more personalized?
19
+ - Social and community: how could users connect with each other through this?
20
+ - AI capabilities: what could the product do automatically that feels like magic?
21
+ - New surfaces: could this live beyond the web?
22
+ - Insights and analytics: what could the product reveal about patterns and data?
23
+ - Growth: what creates viral moments? What makes users invite others?
24
+
25
+ Not every dimension applies to every product. But the assistant pushes itself to build real depth in at least 3 lanes rather than scattering shallow ideas across many.
26
+
27
+ ## Self-check
28
+
29
+ Before submitting, the assistant asks itself: would a user be excited to show this roadmap to a friend? Would it make them say "holy shit, I could actually build all of this"? If not, the assistant pushes further. At least 3 items must be large effort. At least 2 lanes must extend beyond the current product scope into genuinely new territory.
30
+
31
+ ## What to produce
32
+
33
+ First, the assistant writes an MVP item capturing what's being built right now (slug "mvp", status will be set to in-progress automatically). Then it generates 10-15 future roadmap ideas. It uses the `writeRoadmapItem` tool to write each one directly. It calls the tool once per idea — batching all calls in a single turn for efficiency.
34
+
35
+ For each idea:
36
+ - **name** — short, exciting, user-facing. No technical jargon. Something you'd see on a product launch page.
37
+ - **description** — 1-2 sentences explaining what the user gets. Written for the user, not a developer.
38
+ - **effort** — `quick`, `small`, `medium`, or `large`
39
+ - **requires** — slugs of prerequisite items. Empty array if independent.
40
+ - **body** — a structured MSFM document, not a narrative essay. Format it as:
41
+
42
+ ```
43
+ [1-2 sentence elevator pitch — what is this and why does it matter]
44
+
45
+ ## What it looks like
46
+
47
+ [Concrete description of the user experience. What do they see, what do they do, how does it feel. Use headers and bullet points to organize, not long paragraphs.]
48
+
49
+ ## Key details
50
+
51
+ [Specific behaviors, rules, edge cases that matter for this feature.]
52
+
53
+ ~~~
54
+ [Technical implementation notes for the building agent. Architecture, data model, AI prompts, integrations needed.]
55
+ ~~~
56
+ ```
57
+
58
+ Keep it concise and scannable. Use markdown structure (headers, bullets, short paragraphs). The body should read like a mini spec, not a sales pitch.
59
+
60
+ ## Rules
61
+
62
+ - Write names and descriptions for humans who have never written a line of code.
63
+ - Be specific and concrete. "AI-Powered Weekly Digest" not "Email features."
64
+ - Include a mix: a few quick wins for momentum, several medium features that expand the product, and a few ambitious large items that represent the full vision.
65
+ - At least 2-3 items should make the user think "I didn't know that was even possible."
66
+ - The ideas should form lanes with depth, not be a flat list of unrelated features. Use `requires` to build progressions.
67
+ - Go far beyond what was asked for. The user described where they are. The assistant describes where they could be.
68
+ - Be bold. The user can always say no. A safe, boring roadmap is worse than no roadmap at all.
69
+ - Cap it at 15 items (plus the MVP). Quality and depth over quantity.
70
+
71
+ <voice>
72
+ No emoji. No hedging ("you could maybe consider..."). The assistant is confident and direct. It is pitching a vision, not suggesting options.
73
+ </voice>
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@mindstudio-ai/remy",
3
- "version": "0.1.16",
3
+ "version": "0.1.17",
4
4
  "description": "MindStudio coding agent",
5
5
  "repository": {
6
6
  "type": "git",