@sarjallab09/figma-intelligence 1.0.1 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1220,6 +1220,83 @@ function requestFromPlugin(method, params, timeoutMs = 15000) {
1220
1220
  });
1221
1221
  }
1222
1222
 
1223
+ // ── UX Researcher: capture current Figma selection as compact context ────────
1224
+
1225
+ const SELECTION_CONTEXT_MAX_CHARS = 3000;
1226
+ const SELECTION_MAX_NODES = 5;
1227
+
1228
+ async function captureSelectionContext() {
1229
+ const selection = await requestFromPlugin("getSelection", {});
1230
+ if (!selection || !Array.isArray(selection) || selection.length === 0) return null;
1231
+
1232
+ const nodesToFetch = selection.slice(0, SELECTION_MAX_NODES);
1233
+ const nodeDetails = await Promise.all(
1234
+ nodesToFetch.map((n) => requestFromPlugin("getNode", { nodeId: n.id }).catch(() => null))
1235
+ );
1236
+
1237
+ const lines = [];
1238
+ for (const node of nodeDetails) {
1239
+ if (!node) continue;
1240
+ lines.push(formatNodeForContext(node, 0));
1241
+ }
1242
+ if (selection.length > SELECTION_MAX_NODES) {
1243
+ lines.push(`[... ${selection.length - SELECTION_MAX_NODES} more selected nodes omitted]`);
1244
+ }
1245
+
1246
+ let body = lines.join("\n");
1247
+ if (body.length > SELECTION_CONTEXT_MAX_CHARS) {
1248
+ body = body.slice(0, SELECTION_CONTEXT_MAX_CHARS) + "\n[... truncated for brevity]";
1249
+ }
1250
+
1251
+ return `=== CURRENT FIGMA SELECTION ===\n${body}\n=== END SELECTION ===`;
1252
+ }
1253
+
1254
+ function formatNodeForContext(node, depth) {
1255
+ const indent = " ".repeat(depth);
1256
+ const parts = [`${indent}[${node.type}] "${node.name}"`];
1257
+
1258
+ // Dimensions
1259
+ if (node.width != null && node.height != null) {
1260
+ parts[0] += ` (${Math.round(node.width)}×${Math.round(node.height)})`;
1261
+ }
1262
+
1263
+ // Auto-layout
1264
+ if (node.layoutMode && node.layoutMode !== "NONE") {
1265
+ parts.push(`${indent} layout: ${node.layoutMode}, spacing: ${node.itemSpacing}px, padding: ${node.paddingTop}/${node.paddingRight}/${node.paddingBottom}/${node.paddingLeft}`);
1266
+ }
1267
+
1268
+ // Text content
1269
+ if (node.characters) {
1270
+ const text = node.characters.length > 120 ? node.characters.slice(0, 120) + "…" : node.characters;
1271
+ parts.push(`${indent} text: "${text}"`);
1272
+ if (node.fontSize) parts.push(`${indent} fontSize: ${node.fontSize}`);
1273
+ }
1274
+
1275
+ // Component instance
1276
+ if (node.mainComponentName) {
1277
+ parts.push(`${indent} component: ${node.mainComponentName}`);
1278
+ }
1279
+
1280
+ // Corner radius
1281
+ if (node.cornerRadius != null && node.cornerRadius > 0) {
1282
+ parts.push(`${indent} radius: ${node.cornerRadius}`);
1283
+ }
1284
+
1285
+ // Children (1 level deep)
1286
+ if (node.children && node.children.length > 0) {
1287
+ const maxChildren = 10;
1288
+ const shown = node.children.slice(0, maxChildren);
1289
+ for (const child of shown) {
1290
+ parts.push(`${indent} - [${child.type}] "${child.name}"`);
1291
+ }
1292
+ if (node.children.length > maxChildren) {
1293
+ parts.push(`${indent} ... +${node.children.length - maxChildren} more children`);
1294
+ }
1295
+ }
1296
+
1297
+ return parts.join("\n");
1298
+ }
1299
+
1223
1300
  function sendToVscode(payload, targetWs) {
1224
1301
  if (targetWs && targetWs.readyState === 1) {
1225
1302
  targetWs.send(JSON.stringify(payload));
@@ -2200,6 +2277,38 @@ wss.on("connection", (ws, req) => {
2200
2277
  const anthropicKey = getAnthropicApiKey();
2201
2278
  if (chatMode === "chat" && anthropicKey) {
2202
2279
  // Tier 3: Direct Anthropic API — fast streaming (~200ms first token)
2280
+ const isResearcher = msg.researcherMode === true;
2281
+
2282
+ if (isResearcher) {
2283
+ // UX Researcher mode: async — capture selection, force Haiku, use researcher prompt
2284
+ (async () => {
2285
+ try {
2286
+ const selectionCtx = await captureSelectionContext();
2287
+ if (selectionCtx) {
2288
+ chatMessage = selectionCtx + "\n\n" + chatMessage;
2289
+ console.log(" 🔬 Researcher: attached Figma selection context");
2290
+ }
2291
+ } catch (e) {
2292
+ console.error(" ⚠ Researcher selection capture failed:", e.message);
2293
+ }
2294
+
2295
+ const { buildUxResearcherPrompt } = require("./shared-prompt-config");
2296
+ proc = runAnthropicChat({
2297
+ message: chatMessage,
2298
+ attachments: msg.attachments,
2299
+ conversation: msg.conversation,
2300
+ requestId,
2301
+ apiKey: anthropicKey,
2302
+ model: "claude-haiku-4-5-20251001",
2303
+ systemPrompt: buildUxResearcherPrompt(),
2304
+ onEvent,
2305
+ });
2306
+ activeChatProcesses.set(requestId, proc);
2307
+ proc.on("close", () => activeChatProcesses.delete(requestId));
2308
+ })();
2309
+ return;
2310
+ }
2311
+
2203
2312
  const { buildChatPrompt } = require("./shared-prompt-config");
2204
2313
  proc = runAnthropicChat({
2205
2314
  message: chatMessage,
@@ -2213,6 +2322,39 @@ wss.on("connection", (ws, req) => {
2213
2322
  });
2214
2323
  } else {
2215
2324
  // Tier 4: Claude CLI subprocess (code/dual mode, or no API key)
2325
+ const cliResearcher = msg.researcherMode === true && chatMode === "chat";
2326
+
2327
+ if (cliResearcher) {
2328
+ // Researcher mode via CLI: async — capture selection, then spawn CLI
2329
+ (async () => {
2330
+ try {
2331
+ const selectionCtx = await captureSelectionContext();
2332
+ if (selectionCtx) {
2333
+ chatMessage = selectionCtx + "\n\n" + chatMessage;
2334
+ console.log(" 🔬 Researcher (CLI): attached Figma selection context");
2335
+ }
2336
+ } catch (e) {
2337
+ console.error(" ⚠ Researcher selection capture failed:", e.message);
2338
+ }
2339
+
2340
+ proc = runClaude({
2341
+ message: chatMessage,
2342
+ attachments: msg.attachments,
2343
+ conversation: msg.conversation,
2344
+ requestId,
2345
+ model: msg.model,
2346
+ designSystemId: activeDesignSystemId,
2347
+ mode: chatMode,
2348
+ frameworkConfig: msg.frameworkConfig || {},
2349
+ researcherMode: true,
2350
+ onEvent,
2351
+ });
2352
+ activeChatProcesses.set(requestId, proc);
2353
+ proc.on("close", () => activeChatProcesses.delete(requestId));
2354
+ })();
2355
+ return;
2356
+ }
2357
+
2216
2358
  proc = runClaude({
2217
2359
  message: chatMessage,
2218
2360
  attachments: msg.attachments,
@@ -19,6 +19,7 @@ const {
19
19
  SYSTEM_PROMPT,
20
20
  buildSystemPrompt,
21
21
  buildChatPrompt,
22
+ buildUxResearcherPrompt,
22
23
  buildDualOutputPrompt,
23
24
  buildSkillAddendum,
24
25
  detectActiveSkills,
@@ -26,6 +27,7 @@ const {
26
27
  } = require("./shared-prompt-config");
27
28
 
28
29
  const MCP_CONFIG_PATH = join(tmpdir(), "figma-intelligence-chat-mcp.json");
30
+ const EMPTY_MCP_CONFIG_PATH = join(tmpdir(), "figma-intelligence-no-mcp.json");
29
31
  const CLAUDE_SETTINGS_PATH = join(homedir(), ".claude", "settings.json");
30
32
 
31
33
  // Use the absolute claude binary path stored by setup.sh in launchd env.
@@ -118,6 +120,8 @@ function writeMcpConfig(bridgePort) {
118
120
 
119
121
  // Write initial config (will be rewritten with actual port once relay starts)
120
122
  writeMcpConfig();
123
+ // Write empty MCP config for researcher mode (no tools)
124
+ try { writeFileSync(EMPTY_MCP_CONFIG_PATH, JSON.stringify({ mcpServers: {} })); } catch {}
121
125
 
122
126
  function getCleanEnv() {
123
127
  const env = { ...process.env };
@@ -175,17 +179,27 @@ function processAttachments(attachments) {
175
179
  const CLAUDE_DEFAULT_MODEL = "claude-opus-4-6";
176
180
  const CLAUDE_VALID_MODELS = new Set(["claude-opus-4-6", "claude-sonnet-4-6", "claude-haiku-4-5-20251001"]);
177
181
 
178
- function runClaude({ message, attachments, conversation, requestId, model, designSystemId, mode, frameworkConfig, onEvent }) {
182
+ function runClaude({ message, attachments, conversation, requestId, model, designSystemId, mode, frameworkConfig, researcherMode, onEvent }) {
179
183
  const { imageArgs, extraText, tempFiles } = processAttachments(attachments);
180
184
 
181
- const resolvedModel = CLAUDE_VALID_MODELS.has(model) ? model : CLAUDE_DEFAULT_MODEL;
185
+ const isResearcher = researcherMode === true;
186
+ const resolvedModel = isResearcher ? "claude-haiku-4-5-20251001" : (CLAUDE_VALID_MODELS.has(model) ? model : CLAUDE_DEFAULT_MODEL);
182
187
  const sessionMode = mode || "code";
183
188
 
184
189
  const rawText = (message || "").trim() || (extraText ? "Please analyse the attached image(s) and help me create a Figma design based on them." : "");
185
190
  const userText = rawText; // No more expandShortPrompt — Claude handles short prompts natively
186
191
 
187
192
  // Detect active skills early — needed for session reset decision
188
- const skills = (sessionMode === "code" || sessionMode === "dual") ? detectActiveSkills(userText) : [];
193
+ // Skip skill detection in researcher mode — it's a pure chat persona
194
+ const skills = (!isResearcher && (sessionMode === "code" || sessionMode === "dual")) ? detectActiveSkills(userText) : [];
195
+
196
+ // Force new session when researcher mode is toggled.
197
+ // The researcher session uses a different system prompt and no MCP tools,
198
+ // so it cannot share a session with normal chat mode.
199
+ if (isResearcher && activeSessionIds[sessionMode]) {
200
+ console.log(`[chat-runner] Researcher mode — resetting ${sessionMode} session for researcher prompt`);
201
+ activeSessionIds[sessionMode] = null;
202
+ }
189
203
 
190
204
  // Force new session when Component Doc Generator skill is detected.
191
205
  // This ensures the system prompt includes the spec-type reference and
@@ -217,6 +231,8 @@ function runClaude({ message, attachments, conversation, requestId, model, desig
217
231
  }
218
232
  const modeLabel = sessionMode === "dual" ? "Dual (Design + Code)" : "Code";
219
233
  onEvent({ type: "phase_start", id: requestId, phase: `${modeLabel} · ${resolvedModel} · MCP: figma-intelligence-layer` });
234
+ } else if (isResearcher) {
235
+ onEvent({ type: "phase_start", id: requestId, phase: `UX Researcher · ${resolvedModel}` });
220
236
  } else {
221
237
  onEvent({ type: "phase_start", id: requestId, phase: `Chat · ${resolvedModel}` });
222
238
  }
@@ -225,7 +241,9 @@ function runClaude({ message, attachments, conversation, requestId, model, desig
225
241
  if (isFirstMessage) {
226
242
  // First message: create session with full config
227
243
  let baseSystemPrompt;
228
- if (sessionMode === "chat") {
244
+ if (isResearcher) {
245
+ baseSystemPrompt = buildUxResearcherPrompt();
246
+ } else if (sessionMode === "chat") {
229
247
  baseSystemPrompt = buildChatPrompt();
230
248
  } else if (sessionMode === "dual") {
231
249
  baseSystemPrompt = buildDualOutputPrompt(designSystemId, frameworkConfig);
@@ -249,6 +267,10 @@ function runClaude({ message, attachments, conversation, requestId, model, desig
249
267
  if (sessionMode === "code" || sessionMode === "dual") {
250
268
  args.push("--mcp-config", MCP_CONFIG_PATH, "--strict-mcp-config");
251
269
  }
270
+ // Researcher mode: block ALL MCP tools — pure chat persona
271
+ if (isResearcher) {
272
+ args.push("--mcp-config", EMPTY_MCP_CONFIG_PATH, "--strict-mcp-config");
273
+ }
252
274
  } else {
253
275
  // Subsequent messages: resume session — context already loaded.
254
276
  // MUST re-pass --mcp-config and --strict-mcp-config on resume too,
@@ -263,6 +285,10 @@ function runClaude({ message, attachments, conversation, requestId, model, desig
263
285
  if (sessionMode === "code" || sessionMode === "dual") {
264
286
  args.push("--mcp-config", MCP_CONFIG_PATH, "--strict-mcp-config");
265
287
  }
288
+ // Researcher mode: block ALL MCP tools on resume too
289
+ if (isResearcher) {
290
+ args.push("--mcp-config", EMPTY_MCP_CONFIG_PATH, "--strict-mcp-config");
291
+ }
266
292
  console.log(`[chat-runner] Resuming ${sessionMode} session: ${currentSessionId}`);
267
293
  }
268
294
 
@@ -866,7 +866,7 @@ figma.ui.onmessage = async (msg) => {
866
866
  case "getNode": {
867
867
  const node = await figma.getNodeByIdAsync(params.nodeId);
868
868
  if (!node) throw new Error("Node not found: " + params.nodeId);
869
- result = serializeNode(node);
869
+ result = await serializeNode(node);
870
870
  break;
871
871
  }
872
872
 
@@ -1459,7 +1459,7 @@ figma.ui.onmessage = async (msg) => {
1459
1459
 
1460
1460
  // ─── Helpers ─────────────────────────────────────────────────────────────────
1461
1461
 
1462
- function serializeNode(node) {
1462
+ async function serializeNode(node) {
1463
1463
  const base = {
1464
1464
  id: node.id,
1465
1465
  name: node.name,
@@ -1511,9 +1511,15 @@ function serializeNode(node) {
1511
1511
  type: c.type,
1512
1512
  }));
1513
1513
  }
1514
- if ("mainComponent" in node) {
1515
- base.mainComponentId = node.mainComponent ? node.mainComponent.id : undefined;
1516
- base.mainComponentName = node.mainComponent ? node.mainComponent.name : undefined;
1514
+ if (node.type === "INSTANCE") {
1515
+ try {
1516
+ const main = await node.getMainComponentAsync();
1517
+ base.mainComponentId = main ? main.id : undefined;
1518
+ base.mainComponentName = main ? main.name : undefined;
1519
+ } catch (e) {
1520
+ base.mainComponentId = undefined;
1521
+ base.mainComponentName = undefined;
1522
+ }
1517
1523
  }
1518
1524
 
1519
1525
  return base;
@@ -11,7 +11,7 @@ description: >
11
11
  specification in Figma. Also trigger when users mention "component docs", "spec
12
12
  pages", "design handoff docs", "anatomy markers", "API table", "property overview",
13
13
  "color annotation", "structure spec", "screen reader spec", or "a11y spec".
14
- Works with Figma Console MCP and native Figma MCP.
14
+ Works with Figma Intelligence MCP via the bridge-relay.
15
15
  ---
16
16
 
17
17
  # Component Documentation Generator
@@ -20,7 +20,7 @@ Generate production-quality design system component documentation directly in Fi
20
20
 
21
21
  ## Overview
22
22
 
23
- This skill connects to your Figma file through Figma MCP (Console MCP or native Figma MCP), extracts component data programmatically, applies AI reasoning for classification and semantics, and renders structured documentation frames directly in your Figma file.
23
+ This skill connects to your Figma file through Figma Intelligence MCP (via the bridge-relay local server), extracts component data programmatically, applies AI reasoning for classification and semantics, and renders structured documentation frames directly in your Figma file.
24
24
 
25
25
  **The pipeline for every spec type follows the same pattern:**
26
26
  1. **Extract** — Read component layers, properties, variables, and styles from Figma via MCP
@@ -31,7 +31,7 @@ This skill connects to your Figma file through Figma MCP (Console MCP or native
31
31
 
32
32
  ## Prerequisites
33
33
 
34
- - **Figma MCP connected** — Either Figma Console MCP (with Desktop Bridge plugin) or native Figma MCP
34
+ - **Figma Intelligence MCP connected** — Via the bridge-relay local server
35
35
  - **A Figma component link** — The URL to a component set or standalone component
36
36
  - **Template library** (optional but recommended) — A published Figma library with documentation templates
37
37
 
@@ -254,6 +254,46 @@ function buildChatPrompt() {
254
254
  return CHAT_SYSTEM_PROMPT;
255
255
  }
256
256
 
257
+ // ── UX Researcher Mode Prompt ────────────────────────────────────────────────
258
+
259
+ const UX_RESEARCHER_PROMPT = `You are a senior UX researcher with 15 years of practice embedded in a Figma design tool. You speak designers' language — affordance, hierarchy, contrast, density, gestalt, scanability, cognitive load, jobs-to-be-done — and you are direct and concise. No fluff, no preamble, no "great question."
260
+
261
+ You must NEVER use any tools, execute any code, or call any MCP functions. You analyze and critique designs based on the context provided to you — you do not modify anything. When the user asks you to actually build, create, or modify something in Figma, tell them to switch to Code mode for that.
262
+
263
+ When the user shares a Figma selection (indicated by a === CURRENT FIGMA SELECTION === block in the message), critique it against UX principles concretely: name the heuristic, point at the specific element by its layer or component name, and propose a fix. Push back when the design contradicts a principle. If the user asks "what's wrong" or "check this" and a selection is attached, always analyze it — this is your primary job.
264
+
265
+ Structure every answer as: **diagnosis → principle → evidence → fix.** Default to the top 3 highest-impact issues unless the user asks for an exhaustive review.
266
+
267
+ Prioritize these qualities (in order): clarity, hierarchy, cognitive load, error prevention, accessibility, trust, conversion.
268
+
269
+ You ground every claim in the established UX canon. When you cite, name the source by title and author the way a designer would in conversation: "Krug calls this 'self-evident' design in Don't Make Me Think," "Nielsen's heuristic #5 (error prevention)," "NN/g on form usability," "Refactoring UI talks about hierarchy through size and color," "Material Design's elevation system," "Apple HIG on touch targets," "WCAG 2.1 AA contrast," "the WAI-ARIA Authoring Practices Guide."
270
+
271
+ Trusted sources you should reach for first:
272
+ - Nielsen Norman Group (nngroup.com)
273
+ - UX Matters
274
+ - Smashing Magazine
275
+ - Don't Make Me Think (Krug)
276
+ - The Design of Everyday Things (Norman)
277
+ - Refactoring UI (Wathan & Schoger)
278
+ - About Face (Cooper)
279
+ - 100 Things Every Designer Needs to Know About People (Weinschenk)
280
+ - Material Design
281
+ - Apple Human Interface Guidelines
282
+ - WCAG
283
+ - ARIA Authoring Practices Guide (APG)
284
+
285
+ **Citation rule:** Cite source families and named principles naturally. Never fabricate specific article URLs or titles you are not certain about. If your memory of a specific source is fuzzy, cite the author or principle family instead.
286
+
287
+ When the user's intent is ambiguous, ask one clarifying question instead of guessing.
288
+
289
+ When knowledge sources or web references are provided in the context, incorporate them into your analysis. Cite the source name when referencing specific material.
290
+
291
+ Format: short paragraphs. Use bullets when listing more than two issues. **Bold** the principle name. Never invent page numbers or section references you are unsure of.`;
292
+
293
+ function buildUxResearcherPrompt() {
294
+ return UX_RESEARCHER_PROMPT;
295
+ }
296
+
257
297
  // ── Active Skill Detection ───────────────────────────────────────────────────
258
298
 
259
299
  function detectActiveSkills(text) {
@@ -594,6 +634,7 @@ module.exports = {
594
634
  DESIGN_SYSTEMS,
595
635
  buildSystemPrompt,
596
636
  buildChatPrompt,
637
+ buildUxResearcherPrompt,
597
638
  buildDualOutputPrompt,
598
639
  buildSkillAddendum,
599
640
  buildDesignSystemAddendum,
@@ -581,6 +581,34 @@
581
581
  }
582
582
  .model-badge:hover { background: rgba(255,255,255,0.07); color: var(--text-secondary); }
583
583
 
584
+ /* UX Researcher toggle */
585
+ .ux-researcher-toggle {
586
+ display: flex; align-items: center; gap: 4px;
587
+ font-size: 10px;
588
+ color: var(--text-muted);
589
+ padding: 2px 6px;
590
+ border-radius: 5px;
591
+ cursor: pointer;
592
+ user-select: none;
593
+ transition: background 0.13s, color 0.13s;
594
+ }
595
+ .ux-researcher-toggle:hover { background: rgba(255,255,255,0.07); color: var(--text-secondary); }
596
+ .ux-researcher-toggle.active {
597
+ background: rgba(168, 130, 255, 0.15);
598
+ color: #c4a1ff;
599
+ }
600
+ .ux-researcher-toggle.active:hover {
601
+ background: rgba(168, 130, 255, 0.22);
602
+ }
603
+ .ux-researcher-dot {
604
+ width: 6px; height: 6px; border-radius: 50%;
605
+ background: var(--text-muted);
606
+ transition: background 0.13s;
607
+ }
608
+ .ux-researcher-toggle.active .ux-researcher-dot {
609
+ background: #c4a1ff;
610
+ }
611
+
584
612
  /* Model dropdown */
585
613
  .model-dropdown {
586
614
  position: absolute;
@@ -1878,6 +1906,10 @@
1878
1906
  <input type="file" id="file-input" multiple style="display:none" accept="image/*,.pdf,.txt,.json,.csv,.svg,.md,.html,.css,.js,.ts,.jsx,.tsx,.py,.go,.rs,.java,.rb,.yml,.yaml,.xml,.sql,.sh">
1879
1907
  </div>
1880
1908
  <div class="toolbar-right">
1909
+ <div class="ux-researcher-toggle" id="ux-researcher-toggle" onclick="toggleUxResearcher()" title="UX Researcher — fast Haiku-powered UX critique. Auto-attaches your current Figma selection." style="display:none">
1910
+ <span class="ux-researcher-dot"></span>
1911
+ <span>UXR</span>
1912
+ </div>
1881
1913
  <div class="model-badge" id="model-badge" onclick="toggleModelDropdown(event)">
1882
1914
  <span id="model-label">Sonnet</span>
1883
1915
  <svg width="10" height="10" viewBox="0 0 10 10" fill="none">
@@ -3030,6 +3062,7 @@
3030
3062
  }
3031
3063
  renderThreadForProvider(selectedLoginProvider);
3032
3064
  updateStatus();
3065
+ syncResearcherToggle();
3033
3066
 
3034
3067
  if (ws && ws.readyState === WebSocket.OPEN) sendProviderToRelay();
3035
3068
  }
@@ -3174,8 +3207,49 @@
3174
3207
 
3175
3208
  // Show VS Code connection indicator for dual mode
3176
3209
  updateVscodeStatus();
3210
+
3211
+ // Sync researcher toggle visibility on mode change
3212
+ syncResearcherToggle();
3177
3213
  };
3178
3214
 
3215
+ /* ── UX Researcher Mode ──────────────────────────────────────────────── */
3216
+ var isUxResearcherMode = false;
3217
+ var UX_RESEARCHER_KEY = "figma-intelligence:researcher-mode";
3218
+ try {
3219
+ if (localStorage.getItem(UX_RESEARCHER_KEY) === "1") isUxResearcherMode = true;
3220
+ } catch(e) {}
3221
+
3222
+ window.toggleUxResearcher = function() {
3223
+ isUxResearcherMode = !isUxResearcherMode;
3224
+ try { localStorage.setItem(UX_RESEARCHER_KEY, isUxResearcherMode ? "1" : "0"); } catch(e) {}
3225
+ syncResearcherToggle();
3226
+ };
3227
+
3228
+ function syncResearcherToggle() {
3229
+ var el = document.getElementById("ux-researcher-toggle");
3230
+ if (!el) return;
3231
+
3232
+ // Only show when provider is Claude and mode is Chat
3233
+ var show = (getActiveProvider() === "claude") && (activeMode === "chat");
3234
+ el.style.display = show ? "" : "none";
3235
+
3236
+ if (show) {
3237
+ el.className = "ux-researcher-toggle" + (isUxResearcherMode ? " active" : "");
3238
+ }
3239
+
3240
+ // Update input placeholder
3241
+ if (input) {
3242
+ if (show && isUxResearcherMode) {
3243
+ input.placeholder = "Ask the UX researcher\u2026";
3244
+ } else if (activeMode === "chat") {
3245
+ input.placeholder = "Ask me anything...";
3246
+ }
3247
+ }
3248
+ }
3249
+
3250
+ // Initial sync on load
3251
+ setTimeout(syncResearcherToggle, 0);
3252
+
3179
3253
  function isChatOnlyProvider() {
3180
3254
  return selectedLoginProvider === "bridge" || selectedLoginProvider === "perplexity";
3181
3255
  }
@@ -4027,7 +4101,7 @@
4027
4101
  if (/^\s*\/knowledge\b/i.test(messageText)) {
4028
4102
  var kQuery = messageText.replace(/^\s*\/knowledge\s*/i, "").trim();
4029
4103
  // Send to relay for hub processing (not as a chat message to AI)
4030
- ws.send(JSON.stringify({ type: "chat", id: currentRequestId, message: messageText, mode: activeMode, model: getSelectedModel(activeProvider), conversation: [] }));
4104
+ ws.send(JSON.stringify({ type: "chat", id: currentRequestId, message: messageText, mode: activeMode, model: getSelectedModel(activeProvider), conversation: [], researcherMode: isUxResearcherMode }));
4031
4105
  // Reset input color
4032
4106
  input.style.color = "";
4033
4107
  return;
@@ -4040,6 +4114,7 @@
4040
4114
  model: getSelectedModel(activeProvider),
4041
4115
  conversation: conversation,
4042
4116
  mode: activeMode,
4117
+ researcherMode: isUxResearcherMode,
4043
4118
  };
4044
4119
 
4045
4120
  if (attachedFiles.length > 0) {
@@ -2088,7 +2088,7 @@ async function dispatch(name: string, args: AnyArgs): Promise<unknown> {
2088
2088
  case "figma_get_node_deep": {
2089
2089
  const bridge = await getBridge();
2090
2090
  const a = args as { nodeId: string; maxDepth?: number };
2091
- return bridge.getNodeDeep(a.nodeId, a.maxDepth);
2091
+ return await bridge.getNodeDeep(a.nodeId, a.maxDepth);
2092
2092
  }
2093
2093
  case "figma_batch_get_nodes": {
2094
2094
  const bridge = await getBridge();
@@ -633,9 +633,14 @@ export class FigmaBridge {
633
633
  const page = figma.currentPage;
634
634
  const found = page.findAll(n => n.type === 'INSTANCE');
635
635
  for (const inst of found) {
636
+ let mainComponentId = '';
637
+ try {
638
+ const main = await inst.getMainComponentAsync();
639
+ if (main) mainComponentId = main.id;
640
+ } catch (e) {}
636
641
  instances.push({
637
642
  id: inst.id,
638
- mainComponentId: inst.mainComponent ? inst.mainComponent.id : '',
643
+ mainComponentId,
639
644
  name: inst.name,
640
645
  pageId: page.id,
641
646
  });
@@ -1073,7 +1078,7 @@ export class FigmaBridge {
1073
1078
  (async () => {
1074
1079
  const maxDepth = ${Math.min(maxDepth, 20)};
1075
1080
 
1076
- function serializeNode(node, depth) {
1081
+ async function serializeNode(node, depth) {
1077
1082
  const data = {
1078
1083
  id: node.id,
1079
1084
  name: node.name,
@@ -1125,9 +1130,14 @@ export class FigmaBridge {
1125
1130
  // Component metadata
1126
1131
  if ('componentPropertyReferences' in node) data.componentPropertyReferences = node.componentPropertyReferences;
1127
1132
  if ('variantProperties' in node && node.variantProperties) data.variantProperties = node.variantProperties;
1128
- if (node.type === 'INSTANCE' && node.mainComponent) {
1129
- data.mainComponentId = node.mainComponent.id;
1130
- data.mainComponentName = node.mainComponent.name;
1133
+ if (node.type === 'INSTANCE') {
1134
+ try {
1135
+ const main = await node.getMainComponentAsync();
1136
+ if (main) {
1137
+ data.mainComponentId = main.id;
1138
+ data.mainComponentName = main.name;
1139
+ }
1140
+ } catch (e) { /* mainComponent unavailable */ }
1131
1141
  }
1132
1142
  if (node.description) data.description = node.description;
1133
1143
 
@@ -1150,7 +1160,7 @@ export class FigmaBridge {
1150
1160
  if ('children' in node && node.children && depth < maxDepth) {
1151
1161
  data.children = [];
1152
1162
  for (const child of node.children) {
1153
- data.children.push(serializeNode(child, depth + 1));
1163
+ data.children.push(await serializeNode(child, depth + 1));
1154
1164
  }
1155
1165
  data.childCount = node.children.length;
1156
1166
  } else if ('children' in node && node.children) {
@@ -1164,7 +1174,7 @@ export class FigmaBridge {
1164
1174
 
1165
1175
  const root = await figma.getNodeByIdAsync(${JSON.stringify(nodeId)});
1166
1176
  if (!root) throw new Error('Node not found: ' + ${JSON.stringify(nodeId)});
1167
- return serializeNode(root, 0);
1177
+ return await serializeNode(root, 0);
1168
1178
  })();
1169
1179
  `, 60000); // 60s timeout for deep trees
1170
1180
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@sarjallab09/figma-intelligence",
3
- "version": "1.0.1",
3
+ "version": "1.1.0",
4
4
  "description": "88 AI-powered design tools for Figma. Works with your Claude, OpenAI Codex, or Gemini CLI subscription. One command to set up.",
5
5
  "author": "Figma Intelligence Contributors",
6
6
  "license": "CC-BY-NC-ND-4.0",