@rigstate/mcp 0.5.6 → 0.5.8

This diff shows the changes between publicly available package versions as published to their respective supported registries. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -1,15 +1,1058 @@
1
1
  #!/usr/bin/env node
2
- var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
3
- get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
4
- }) : x)(function(x) {
5
- if (typeof require !== "undefined") return require.apply(this, arguments);
6
- throw Error('Dynamic require of "' + x + '" is not supported');
2
+ var __create = Object.create;
3
+ var __defProp = Object.defineProperty;
4
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
5
+ var __getOwnPropNames = Object.getOwnPropertyNames;
6
+ var __getProtoOf = Object.getPrototypeOf;
7
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
8
+ var __esm = (fn, res) => function __init() {
9
+ return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
10
+ };
11
+ var __commonJS = (cb, mod) => function __require() {
12
+ return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
13
+ };
14
+ var __copyProps = (to, from, except, desc) => {
15
+ if (from && typeof from === "object" || typeof from === "function") {
16
+ for (let key of __getOwnPropNames(from))
17
+ if (!__hasOwnProp.call(to, key) && key !== except)
18
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
19
+ }
20
+ return to;
21
+ };
22
+ var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
23
+ // If the importer is in node compatibility mode or this is not an ESM
24
+ // file that has been converted to a CommonJS file using a Babel-
25
+ // compatible transform (i.e. "__esModule" has not been set), then set
26
+ // "default" to the CommonJS "module.exports" for node compatibility.
27
+ isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
28
+ mod
29
+ ));
30
+
31
+ // node_modules/tsup/assets/esm_shims.js
32
+ import path from "path";
33
+ import { fileURLToPath } from "url";
34
+ var init_esm_shims = __esm({
35
+ "node_modules/tsup/assets/esm_shims.js"() {
36
+ "use strict";
37
+ }
38
+ });
39
+
40
+ // ../rules-engine/dist/types.js
41
+ var require_types = __commonJS({
42
+ "../rules-engine/dist/types.js"(exports) {
43
+ "use strict";
44
+ init_esm_shims();
45
+ Object.defineProperty(exports, "__esModule", { value: true });
46
+ exports.IDE_FILE_NAMES = void 0;
47
+ exports.IDE_FILE_NAMES = {
48
+ cursor: ".cursorrules",
49
+ antigravity: ".cursorrules",
50
+ windsurf: ".windsurfrules",
51
+ vscode: ".cursorrules",
52
+ copilot: ".github/copilot-instructions.md",
53
+ generic: "CONVENTIONS.md"
54
+ };
55
+ }
56
+ });
57
+
58
+ // ../rules-engine/dist/sections/identity.js
59
+ var require_identity = __commonJS({
60
+ "../rules-engine/dist/sections/identity.js"(exports) {
61
+ "use strict";
62
+ init_esm_shims();
63
+ Object.defineProperty(exports, "__esModule", { value: true });
64
+ exports.generateIdentitySection = generateIdentitySection;
65
+ function generateIdentitySection(project, ide, activeAgents) {
66
+ const mission = project.functional_spec?.projectDescription || project.description || `Build a ${project.ambition_level || "professional"} application.`;
67
+ const audienceInfo = project.functional_spec?.targetAudience ? `
68
+ - **Target Users:** ${project.functional_spec.targetAudience}` : "";
69
+ const problemInfo = project.functional_spec?.coreProblem ? `
70
+ - **Problem Being Solved:** ${project.functional_spec.coreProblem}` : "";
71
+ const specialistList = activeAgents?.map((a) => `- **${a.name}** (\`${a.key}\`, Lvl ${a.authority_level}): ${a.primary_mission || extractFirstSentence(a.content)}`).join("\n") || "- No specialists configured.";
72
+ return `## \u{1F9E0} PROJECT CONTEXT
73
+
74
+ **Project:** ${project.name}
75
+ **ID:** \`${project.id}\`
76
+ **Mission:** ${mission}${audienceInfo}${problemInfo}
77
+
78
+ ---
79
+
80
+ ## \u{1F916} SPECIALIST PERSONAS
81
+
82
+ The following personas represent areas of expertise. Reference their guidelines when working in their domain.
83
+
84
+ ${specialistList}
85
+
86
+ ### How to Use Specialists
87
+ 1. **Architecture & Governance** \u2192 Follow Frank's guidelines for code structure and security.
88
+ 2. **Documentation & Reports** \u2192 Use The Scribe's patterns for markdown and PDFs.
89
+ 3. **Historical Context** \u2192 Consult The Librarian for legacy feature discovery.
90
+
91
+ > **Note:** These are informational contexts, not active agents. You (the IDE agent) execute all code.
92
+
93
+ ---
94
+
95
+ ## \u{1F3AF} CODING PRINCIPLES
96
+ - **CONCISE:** No filler words. Get to the point.
97
+ - **PRECISE:** Give specific answers with file paths and code.
98
+ - **PRACTICAL:** Focus on what ships, not theory.
99
+ - **GUARDIAN-AWARE:** Respect architectural constraints in the Guardian rules.`;
100
+ }
101
+ function extractFirstSentence(text) {
102
+ const match = text.match(/^[^.!?]*[.!?]/);
103
+ return match ? match[0].trim() : text.slice(0, 100) + "...";
104
+ }
105
+ }
106
+ });
107
+
108
+ // ../rules-engine/dist/sections/stack-dna.js
109
+ var require_stack_dna = __commonJS({
110
+ "../rules-engine/dist/sections/stack-dna.js"(exports) {
111
+ "use strict";
112
+ init_esm_shims();
113
+ Object.defineProperty(exports, "__esModule", { value: true });
114
+ exports.generateStackDnaSection = generateStackDnaSection;
115
+ function generateStackDnaSection(project, stack, legacyStats) {
116
+ const stackText = stack.length > 0 ? stack.map((s) => `- ${s}`).join("\n") : "- Next.js 14+ (App Router)\n- Supabase\n- TypeScript\n- Tailwind CSS";
117
+ const tenancy = project.tenancy || "SINGLE";
118
+ const monetization = project.monetization || "FREE";
119
+ const compliance = project.compliance || "NONE";
120
+ const vibe = project.vibe || "CREATIVE";
121
+ const lmax = project.settings?.lmax || 400;
122
+ const lmaxUi = project.settings?.lmax_ui || 250;
123
+ const securityLevel = project.settings?.security_level || "STANDARD";
124
+ const guardianRules = [
125
+ `\u{1F6E1}\uFE0F **THE ${lmax}-LINE RULE (STRICT):** No file may exceed ${lmax} lines. If approaching this limit, you MUST propose a refactor into smaller modules.`,
126
+ `\u{1F6E1}\uFE0F **TSX LIMIT:** React components (.tsx) should not exceed ${lmaxUi} lines. Extract sub-components proactively.`,
127
+ '\u{1F6E1}\uFE0F **TYPE SAFETY:** Avoid "any". Use strict TypeScript types. Infer from Zod schemas when possible.'
128
+ ];
129
+ if (tenancy === "MULTI_ORG") {
130
+ guardianRules.push("\u{1F510} All database queries MUST be org-scoped (filter by org_id)");
131
+ guardianRules.push("\u{1F510} Implement proper organization switching in UI");
132
+ } else {
133
+ guardianRules.push("\u{1F464} All queries are user-scoped (filter by user_id or RLS)");
134
+ }
135
+ if (stackText.toLowerCase().includes("supabase")) {
136
+ guardianRules.push("\u26A1 Enable RLS on ALL new tables immediately");
137
+ guardianRules.push("\u26A1 Use @supabase/ssr patterns for server-side auth");
138
+ }
139
+ if (compliance === "GDPR") {
140
+ guardianRules.push("\u{1F6E1}\uFE0F GDPR: Implement data export and deletion endpoints");
141
+ } else if (compliance === "HIPAA") {
142
+ guardianRules.push("\u{1F6E1}\uFE0F HIPAA: Encrypt all PHI at rest and in transit");
143
+ }
144
+ if (securityLevel === "STRICT") {
145
+ guardianRules.push("\u{1F512} STRICT MODE: All inputs MUST be validated with Zod schemas");
146
+ guardianRules.push("\u{1F512} STRICT MODE: Consult Security Specialist for ANY auth-related changes");
147
+ } else if (securityLevel === "MINIMAL") {
148
+ guardianRules.push("\u26A1 MINIMAL MODE: Focus on speed over security hardening for MVP");
149
+ }
150
+ guardianRules.push("\u{1F6A8} **IMPACT_GUARD:** Before deleting ANY file, you MUST:");
151
+ guardianRules.push(" 1. Search codebase for all imports/references (use grep_search or equivalent)");
152
+ guardianRules.push(" 2. Update or remove ALL references first");
153
+ guardianRules.push(" 3. Only delete after confirming ZERO references remain");
154
+ guardianRules.push(" 4. Commit changes atomically (references + deletion in same commit)");
155
+ guardianRules.push("\u2705 **BUILD_INTEGRITY:** After ANY structural change (new files, moved modules, refactors):");
156
+ guardianRules.push(" 1. Run type-check (e.g., `tsc --noEmit` or framework equivalent)");
157
+ guardianRules.push(" 2. Verify build success before declaring task complete");
158
+ guardianRules.push(" 3. If errors detected, fix immediately\u2014do NOT leave broken builds");
159
+ let legacySection = "";
160
+ if (legacyStats && legacyStats.legacyCount > 0) {
161
+ legacySection = `
162
+ ### \u{1F4DA} Legacy Context
163
+ This project contains **${legacyStats.legacyCount} legacy features** (imported via Brynjar) and **${legacyStats.activeCount} active features**.
164
+
165
+ **LEGACY AWARENESS RULE:**
166
+ When encountering code or features marked with \`is_legacy: true\`:
167
+ - Treat them as **Established Foundations** \u2014 they are proven, working code.
168
+ - Any modifications to legacy code MUST bring it up to current Guardian standards:
169
+ - Type-safety (no "any")
170
+ - RLS enforcement (if database-related)
171
+ - 400-line limit compliance
172
+ - Proper error handling
173
+ - Do NOT count legacy features in velocity metrics \u2014 they predate Rigstate.`;
174
+ }
175
+ return `## \u{1F9EC} STACK DNA
176
+
177
+ ### Tech Stack
178
+ ${stackText}
179
+
180
+ ### Project Configuration
181
+ | Attribute | Value |
182
+ |-----------|-------|
183
+ | Tenancy | ${tenancy} |
184
+ | Monetization | ${monetization} |
185
+ | Compliance | ${compliance} |
186
+ | Design Vibe | ${vibe} |
187
+ ${legacySection}
188
+
189
+ ### \u{1F6E1}\uFE0F GUARDIAN RULES (Mandatory)
190
+ ${guardianRules.map((r) => `${r}`).join("\n")}`;
191
+ }
192
+ }
193
+ });
194
+
195
+ // ../rules-engine/dist/sections/current-step.js
196
+ var require_current_step = __commonJS({
197
+ "../rules-engine/dist/sections/current-step.js"(exports) {
198
+ "use strict";
199
+ init_esm_shims();
200
+ Object.defineProperty(exports, "__esModule", { value: true });
201
+ exports.generateCurrentStepSection = generateCurrentStepSection;
202
+ function generateCurrentStepSection(roadmap) {
203
+ const activeRoadmap = roadmap.filter((r) => r.is_legacy !== true);
204
+ const activeSteps = activeRoadmap.filter((r) => r.status === "ACTIVE").sort((a, b) => a.step_number - b.step_number);
205
+ if (activeSteps.length === 0) {
206
+ const nextSteps = activeRoadmap.filter((r) => r.status === "LOCKED").sort((a, b) => a.step_number - b.step_number).slice(0, 1);
207
+ if (nextSteps.length === 0)
208
+ return null;
209
+ return `## \u{1F3AF} CURRENT FOCUS
210
+
211
+ > **No active task.** The next step in the backlog is:
212
+ >
213
+ > **Step ${nextSteps[0].step_number}: ${nextSteps[0].title}**`;
214
+ }
215
+ const currentStep = activeSteps[0];
216
+ let objectiveText = currentStep.title;
217
+ let constraintsText = "";
218
+ let dodText = "";
219
+ if (currentStep.prompt_content) {
220
+ const content = currentStep.prompt_content;
221
+ const objectiveMatch = content.match(/###\s*🎯\s*Objective\s*\n([\s\S]*?)(?=###|$)/i);
222
+ if (objectiveMatch)
223
+ objectiveText = objectiveMatch[1].trim();
224
+ const constraintsMatch = content.match(/###\s*⚠️\s*Constraints\s*\n([\s\S]*?)(?=###|$)/i);
225
+ if (constraintsMatch)
226
+ constraintsText = constraintsMatch[1].trim();
227
+ const dodMatch = content.match(/###\s*✅\s*Definition of Done\s*\n([\s\S]*?)(?=###|$)/i);
228
+ if (dodMatch)
229
+ dodText = dodMatch[1].trim();
230
+ }
231
+ let section = `## \u{1F3AF} CURRENT FOCUS
232
+
233
+ **Active Step ${currentStep.step_number}: ${currentStep.title}**
234
+ ${currentStep.sprint_focus ? `*Sprint: ${currentStep.sprint_focus}*` : ""}
235
+
236
+ ### Objective
237
+ ${objectiveText}`;
238
+ if (constraintsText) {
239
+ section += `
240
+
241
+ ### Task-Specific Constraints
242
+ ${constraintsText}`;
243
+ }
244
+ if (dodText) {
245
+ section += `
246
+
247
+ ### Definition of Done
248
+ ${dodText}`;
249
+ }
250
+ return section;
251
+ }
252
+ }
253
+ });
254
+
255
+ // ../rules-engine/dist/sections/workflow.js
256
+ var require_workflow = __commonJS({
257
+ "../rules-engine/dist/sections/workflow.js"(exports) {
258
+ "use strict";
259
+ init_esm_shims();
260
+ Object.defineProperty(exports, "__esModule", { value: true });
261
+ exports.generateWorkflowSection = generateWorkflowSection;
262
+ function generateWorkflowSection(ide) {
263
+ const ideInstructions = ide === "windsurf" ? `When working in **Windsurf**, coordinate with Cascade (Windsurf's AI) for code execution.` : `When working in **Cursor**, delegate implementation to Cursor Composer/Agent for code execution.`;
264
+ return `## \u{1F4CB} SUPERVISOR MODE: FRANK'S WORKFLOW
265
+
266
+ **\u{1F3AD} PARADIGM SHIFT: Frank is a Supervisor/Orchestrator, NOT a Code Executor**
267
+
268
+ Frank's role is to **PLAN, DELEGATE, and VALIDATE** \u2014 NOT to write every line of code manually.
269
+ The IDE's native AI (Cursor Composer, Windsurf Cascade, etc.) handles code execution.
270
+ Frank maintains **Architectural Control** and ensures **Guardian Compliance**.
271
+
272
+ ---
273
+
274
+ ### \u{1F9ED} SUPERVISOR WORKFLOW (3-Phase Protocol)
275
+
276
+ **INITIATION PROTOCOL:**
277
+ At the start of every session, you MUST execute:
278
+ \`\`\`bash
279
+ export RIGSTATE_MODE=SUPERVISOR
280
+ \`\`\`
281
+ This activates the System Guardian.
282
+
283
+ #### PHASE 1: \u{1F4CB} PLAN & STRUCTURE
284
+ **Frank's Responsibility:**
285
+ 1. **Analyze the Task:** Read the roadmap step's EXECUTIVE SUMMARY, TECHNICAL PROMPT, and METADATA
286
+ 2. **Query Project Brain:** Use \`query_brain\` to fetch relevant decisions, patterns, and constraints
287
+ 3. **Generate Structured Plan:**
288
+ - Break down into logical sub-tasks (if $L_{max}$ compliance requires modularization)
289
+ - Identify affected files and modules
290
+ - Map dependencies and execution order
291
+ - Define validation checkpoints
292
+ 4. **Terminal Feedback (MANDATORY):**
293
+ \`\`\`
294
+ \u{1F3AF} FRANK: Planning Task [Title]
295
+ \u{1F4CA} Scope: [X files, Y modules]
296
+ \u{1F50D} Brain Context: [N relevant memories loaded]
297
+ \u{1F4DD} Execution Plan:
298
+ 1. [Sub-task A] - [File/Module]
299
+ 2. [Sub-task B] - [File/Module]
300
+ 3. [Validation] - [Criteria]
301
+
302
+ \u23F1\uFE0F Estimated Token Load: [High/Medium/Low]
303
+ \u26A0\uFE0F If this exceeds your context, type "FORTSETT" after each phase.
304
+ \`\`\`
305
+
306
+ **OUTPUT:** A clear, copy-pasteable Technical Prompt for the IDE's AI
307
+
308
+ ---
309
+
310
+ #### PHASE 2: \u{1F916} DELEGATE TO NATIVE EXECUTION
311
+ **Frank's Responsibility:**
312
+ 1. **Present Delegation Prompt:**
313
+ \`\`\`
314
+ \u{1F3AC} FRANK \u2192 ${ide === "cursor" ? "CURSOR COMPOSER" : "WINDSURF CASCADE"}:
315
+
316
+ ${ide === "cursor" ? "Please execute the following implementation using Cursor Composer (Cmd+I or Chat Panel):" : "Please execute the following implementation using Windsurf Cascade:"}
317
+
318
+ ### \u{1F6E0} TECHNICAL PROMPT
319
+ [Paste the structured prompt from PHASE 1]
320
+
321
+ ### \u26A0\uFE0F GUARDIAN CONSTRAINTS (STRICT)
322
+ - UI files MUST be < 250 lines ($L_{max}^{UI}$)
323
+ - Logic files MUST be < 400 lines ($L_{max}^{Logic}$)
324
+ - [Additional project-specific constraints from Brain]
325
+
326
+ ### \u2705 DEFINITION OF DONE
327
+ - [ ] [Criteria 1]
328
+ - [ ] [Criteria 2]
329
+ - [ ] Passes \`npx tsc --noEmit\`
330
+ - [ ] Passes \`npm run build\`
331
+ \`\`\`
332
+
333
+ 2. **Wait for IDE Execution:**
334
+ - Frank does NOT write the code himself
335
+ - Frank monitors for completion signals (user confirmation or file system changes)
336
+ - If task is too large, Frank MUST split into chunks and ask user to type **"FORTSETT"** to continue
337
+
338
+ 3. **Terminal Feedback (MINIMALIST):**
339
+ \`\`\`
340
+ \u23F3 DELEGATED: ${ide === "cursor" ? "Cursor Composer" : "Windsurf Cascade"}
341
+ \u{1F4C2} FILES: [list 1, list 2...]
342
+ \`\`\`
343
+
344
+ **SILENT PROTOCOL:** Frank acts as a silent supervisor. He NEVER explains his role. He only logs status updates. The IDE Agent executes the code.
345
+
346
+ ---
347
+
348
+ #### PHASE 3: \u2705 VALIDATE & SYNC (MANDATORY)
349
+
350
+ 1. **Mission Report Generation:**
351
+ Create \`.rigstate/logs/last_execution.json\`:
352
+ \`\`\`json
353
+ {
354
+ "task_summary": "Short description of the task completed",
355
+ "files_touched": ["relative/path/to/file1", "relative/path/to/file2"],
356
+ "security_status": "SECURE"
357
+ }
358
+ \`\`\`
359
+
360
+ 2. **Architecture Audit:** \`run_architecture_audit\`
361
+
362
+ 3. **Team Check (Silent):**
363
+ \`\`\`
364
+ [CHECKING: SVEN] ... [OK]
365
+ \`\`\`
366
+
367
+ 4. **Linter Guard:** \`npx tsc --noEmit && npm run build\`
368
+
369
+ 5. **SYSTEM SYNC (MANDATORY):**
370
+ \`\`\`bash
371
+ npx rigstate sync
372
+ \`\`\`
373
+
374
+ 6. **Mission Briefing:**
375
+ Present a concise, human-readable summary of the work done.
376
+
377
+ 7. **Complete & Log (MANDATORY):**
378
+ When the task is done and validated:
379
+ **YOU MUST** offer to complete the task programmatically.
380
+ ASK the user: "Shall I mark this task as completed?"
381
+ IF YES -> Call tool: \`complete_roadmap_task(projectId, summary)\`
382
+
383
+ 8. **Final Signal:**
384
+ \`\`\`
385
+ [VALIDATED]
386
+ \`\`\`
387
+
388
+ 9. **Self-Correction Protocol:**
389
+ - Quietly identify errors.
390
+ - Generate specific fix prompts for IDE.
391
+
392
+ 10. **Terminal Feedback (MINIMALIST):**
393
+ When all checks pass, output ONLY:
394
+ \`\`\`
395
+ [VALIDATED]
396
+ Task tracked in roadmap.
397
+ \`\`\`
398
+
399
+ **RULE:** The IDE Agent acts as the worker. It MUST wait for Frank's **[VALIDATED]** signal before marking any task as done.
400
+
401
+ ---
402
+
403
+ ### \u{1F504} ATOMIC REVERT PROTOCOL (Safety Net)
404
+
405
+ If validation fails after **3 correction attempts**:
406
+
407
+ 1. **STOP** all further modifications
408
+ 2. **TERMINAL FEEDBACK (MANDATORY):**
409
+ \`\`\`
410
+ \u274C ATOMIC REVERT TRIGGERED
411
+ \u{1F4CB} Task: [task-id]
412
+ \u{1F534} Reason: [error description]
413
+ \u{1F504} Attempts: 3/3 exhausted
414
+ \u{1F6E1}\uFE0F Action: Reverting to checkpoint...
415
+ \`\`\`
416
+ 3. **REVERT:**
417
+ \`\`\`bash
418
+ git checkout . && git stash pop # OR: git reset --hard HEAD
419
+ \`\`\`
420
+ 4. **UPDATE:** Mark task as \`FAILED\` with detailed explanation
421
+ 5. **ESCALATE:** Notify user of blocker for manual intervention
422
+
423
+ **CORE PRINCIPLE:** NEVER leave codebase in broken state.
424
+
425
+ ---
426
+
427
+ ### \u{1F4E2} PERSISTENCE & TRANSPARENCY RULES (MANDATORY)
428
+
429
+ Frank MUST provide **live terminal feedback** before EVERY operation:
430
+
431
+ 1. **Before Planning:**
432
+ \`\`\`
433
+ \u{1F3AF} FRANK: Starting analysis for [Task Title]...
434
+ \`\`\`
435
+
436
+ 2. **Before Delegation:**
437
+ \`\`\`
438
+ \u{1F916} FRANK: Preparing prompt for ${ide === "cursor" ? "Cursor Composer" : "Windsurf Cascade"}...
439
+ \`\`\`
440
+
441
+ 3. **Before Validation:**
442
+ \`\`\`
443
+ \u{1F50D} FRANK: Running architecture audit on [N files]...
444
+ \`\`\`
445
+
446
+ 4. **Token Buffer Management:**
447
+ - If a task requires > 50% of context window, Frank MUST split into phases
448
+ - User types **"FORTSETT"** (Norwegian for "CONTINUE") to load next buffer
449
+ - Example:
450
+ \`\`\`
451
+ \u26A0\uFE0F FRANK: Phase 1 complete. Token usage: 75%
452
+ \u{1F4AC} Type "FORTSETT" to continue with Phase 2 (Database Migrations)
453
+ \`\`\`
454
+
455
+ **PURPOSE:** Eliminate "Black Box" feeling. User always knows what Frank is doing.
456
+
457
+ ---
458
+
459
+ ### \u{1F3AF} HOW TO READ ROADMAP STEPS
460
+
461
+ Each Rigstate roadmap task follows this structure:
462
+
463
+ \`\`\`markdown
464
+ ### \u{1F4DD} EXECUTIVE SUMMARY
465
+ [Business value and user impact]
466
+
467
+ ### \u{1F6E0} TECHNICAL PROMPT
468
+ CONTEXT: [Files/Modules affected]
469
+ OBJECTIVE: [One-sentence goal]
470
+ GUARDIAN CONSTRAINTS: [File limits, compliance rules]
471
+ DEFINITION OF DONE: [Success checklist]
472
+
473
+ ### \u{1F4A1} IMPLEMENTATION HINTS
474
+ [Code snippets and patterns]
475
+
476
+ ### \u{1F4CA} METADATA
477
+ - Author: [Agent/User]
478
+ - Source: [Origin of task]
479
+ - Strategy Alignment: [DNA focus area]
480
+ \`\`\`
481
+
482
+ ${ideInstructions}
483
+
484
+ ---
485
+
486
+ ## \u{1F6E1}\uFE0F SAFETY PROTOCOLS (Mandatory)
487
+
488
+ ### 1. \u{1F4F8} Pre-Flight Checkpoint
489
+ **BEFORE delegating to IDE**, Frank MUST create recovery point:
490
+ \`\`\`bash
491
+ git stash push -m "checkpoint-before-[task-id]"
492
+ # OR: git checkout -b checkpoint/[task-id] && git checkout -
493
+ \`\`\`
494
+
495
+ ### 2. \u{1F6A8} Linter Guard (STRICT)
496
+ **FORBIDDEN** to mark \`COMPLETED\` if:
497
+ - Syntax errors exist
498
+ - TypeScript/ESLint errors present
499
+ - \`npm run build\` fails
500
+
501
+ **Verification:**
502
+ \`\`\`bash
503
+ npx tsc --noEmit && npm run build
504
+ \`\`\`
505
+
506
+ ### 3. \u{1F504} Self-Correction Loop
507
+ Max 3 attempts with escalating strategies:
508
+ 1. Targeted fix
509
+ 2. Broader refactor
510
+ 3. Minimal surgical change OR user escalation
511
+
512
+ ---
513
+
514
+ ## \u{1F504} WATCHER MODE (Proactive Task Execution)
515
+
516
+ Frank monitors for approved tasks and orchestrates execution:
517
+
518
+ 1. **Session Start:**
519
+ - Call \`get_pending_tasks\` to check for approved work
520
+ - Summarize tasks and ask user which to tackle
521
+
522
+ 2. **Execution Flow:**
523
+ - **CHECKPOINT:** Create pre-flight snapshot
524
+ - **PLAN:** Generate structured execution plan (Phase 1)
525
+ - **DELEGATE:** Send prompt to IDE's native AI (Phase 2)
526
+ - **VALIDATE:** Run architecture audit + linter guard (Phase 3)
527
+ - **COMPLETE:** Update \`update_task_status(COMPLETED)\` with summary
528
+
529
+ 3. **Error Handling:**
530
+ - Enter Self-Correction Loop (max 3 attempts)
531
+ - If still failing, trigger Atomic Revert
532
+ - Update task status to \`FAILED\` with explanation
533
+
534
+ **CRITICAL:** Frank orchestrates, ${ide === "cursor" ? "Cursor" : "Windsurf"} executes, Frank validates.`;
535
+ }
536
+ }
537
+ });
538
+
539
+ // ../rules-engine/dist/sections/tooling.js
540
+ var require_tooling = __commonJS({
541
+ "../rules-engine/dist/sections/tooling.js"(exports) {
542
+ "use strict";
543
+ init_esm_shims();
544
+ Object.defineProperty(exports, "__esModule", { value: true });
545
+ exports.generateToolingSection = generateToolingSection;
546
+ function generateToolingSection(activeAgents) {
547
+ const getAgent = (keyPart) => activeAgents?.find((a) => a.key.includes(keyPart));
548
+ const frank = getAgent("frank") || getAgent("orchestrator");
549
+ const brynjar = getAgent("brynjar");
550
+ const gunhild = getAgent("scribe");
551
+ const sindre = getAgent("sindre");
552
+ const sven = getAgent("sven");
553
+ const allTools = [
554
+ { name: "query_brain", owner: frank, desc: "Search project memories and decisions" },
555
+ { name: "save_decision", owner: frank, desc: "Record architectural decisions (ADRs)" },
556
+ { name: "update_roadmap", owner: frank, desc: "Mark steps as ACTIVE or COMPLETED" },
557
+ { name: "run_architecture_audit", owner: frank, desc: "Audit code against Guardian rules" },
558
+ { name: "get_pending_tasks", owner: frank, desc: "Fetch APPROVED tasks from dashboard ready for execution" },
559
+ { name: "update_task_status", owner: frank, desc: "Mark tasks as EXECUTING, COMPLETED, or FAILED" },
560
+ { name: "audit_integrity_gate", owner: frank, desc: "Runs combined Security and Performance audit. SOFT LOCK if failed." },
561
+ { name: "archaeological_scan", owner: brynjar, desc: "Scan Git history for legacy features" },
562
+ { name: "import_ghost_features", owner: brynjar, desc: "Import discovered features to roadmap" },
563
+ { name: "generate_professional_pdf", owner: gunhild, desc: "Generate System Manifest or Investor Report" },
564
+ { name: "analyze_database_performance", owner: sindre, desc: "Deep scan for N+1 queries and missing indexes" },
565
+ { name: "audit_rls_status", owner: sven, desc: "Verify Row Level Security is enabled on all tables" }
566
+ ];
567
+ const activeTools = allTools.filter((t) => t.owner !== void 0);
568
+ let triggers = "";
569
+ if (activeAgents && activeAgents.length > 0) {
570
+ triggers = `
571
+ ### \u26A1\uFE0F ACTIVE AGENT TRIGGERS
572
+ When your prompt mentions specific keywords, summon the appropriate specialist (respecting Authority Levels):
573
+
574
+ ${activeAgents.map((a) => {
575
+ const triggerInfo = a.trigger_keywords ? `"${a.trigger_keywords}"` : `"${a.primary_mission || "General assistance"}"`;
576
+ return `- Intent: ${triggerInfo} \u2192 Activate **${a.name}** [ID: ${a.id}] (Authority: ${a.authority_level})`;
577
+ }).join("\n")}`;
578
+ }
579
+ const toolTable = activeTools.length > 0 ? `| Tool | Agent Owner | Description |
580
+ |------|-------------|-------------|
581
+ ${activeTools.map((t) => `| \`${t.name}\` | ${t.owner?.name} [ID: ${t.owner?.id}] | (Owner: [ID: ${t.owner?.id}]) ${t.desc} |`).join("\n")}` : "> No specialized tools active for the current team roster.";
582
+ return `## \u{1F527} TOOLING
583
+
584
+ ### Rigstate CLI Commands
585
+ \`\`\`bash
586
+ rigstate scan # Scan current directory for issues
587
+ rigstate scan --project <id> # Scan with project context
588
+ rigstate fix --project <id> # Interactive AI fix mode
589
+ rigstate complete # Mark current step as complete
590
+ \`\`\`
591
+
592
+ ### MCP Tools (Model Context Protocol)
593
+ These tools are available when using the Rigstate MCP server:
594
+
595
+ ${toolTable}
596
+
597
+ **Strict Tool Ownership:**
598
+ When a tool is invoked, the AI must adopt the persona and Authority Level of the Agent ID listed as the 'Owner' in the tool description. Do not execute tools as a generic assistant.
599
+
600
+ ### Environment Variables
601
+ Ensure these are set in your \`.env.local\`:
602
+ \`\`\`
603
+ RIGSTATE_API_KEY=<your-key>
604
+ RIGSTATE_PROJECT_ID=<auto-detected-or-set>
605
+ \`\`\`${triggers}`;
606
+ }
607
+ }
608
+ });
609
+
610
+ // ../rules-engine/dist/sections/skills.js
611
+ var require_skills = __commonJS({
612
+ "../rules-engine/dist/sections/skills.js"(exports) {
613
+ "use strict";
614
+ init_esm_shims();
615
+ Object.defineProperty(exports, "__esModule", { value: true });
616
+ exports.generateAvailableSkillsSection = generateAvailableSkillsSection;
617
+ exports.generateSkillFileContent = generateSkillFileContent;
618
+ exports.getRigstateStandardSkills = getRigstateStandardSkills;
619
+ function generateAvailableSkillsSection(skills) {
620
+ if (skills.length === 0)
621
+ return "";
622
+ const skillBlocks = skills.map((skill) => ` <skill>
623
+ <name>${skill.name}</name>
624
+ <description>${skill.description}</description>
625
+ <location>.agent/skills/${skill.name}/SKILL.md</location>
626
+ </skill>`).join("\n");
627
+ return `## \u{1F9E0} AGENT SKILLS
628
+ > **OPTIMIZED CAPABILITIES:** The following skills are available for on-demand activation.
629
+
630
+ <available_skills>
631
+ ${skillBlocks}
632
+ </available_skills>`;
633
+ }
634
+ function generateSkillFileContent(skill) {
635
+ return `---
636
+ name: ${skill.name}
637
+ description: ${skill.description}
638
+ version: "${skill.version}"
639
+ specialist: ${skill.specialist}
640
+ governance: ${skill.governance}
641
+ ---
642
+
643
+ ${skill.content}
644
+
645
+ ---
646
+ *Generated by Rigstate Rules Engine. Do not modify manually.*`;
647
+ }
648
+ function getRigstateStandardSkills() {
649
+ return [
650
+ {
651
+ name: "rigstate-integrity-gate",
652
+ description: "Handles the Pre-Deployment Compliance Gate, automated quality audits (Security/Performance), and generation of the Strategic Release Manifest. Use this whenever you are finishing a task or moving code towards completion.",
653
+ version: "1.0.0",
654
+ specialist: "Frank (The Orchestrator)",
655
+ governance: "SOFT_LOCK",
656
+ content: `# \u{1F396}\uFE0F Rigstate Integrity Gate Skill
657
+
658
+ This skill defines the high-level protocol for ensuring code quality and security before a task is marked as "COMPLETED". It orchestrates specialized agents (Sven, Sindre) and generates the audit trail known as the **Strategic Release Manifest**.
659
+
660
+ ## \u{1F504} The Protocol Workflow
661
+
662
+ Whenever you are ready to complete a task, follow this mandatory 3-step sequence:
663
+
664
+ ### 1. Audit (The Scan)
665
+ Run the \`audit_integrity_gate\` tool. This will trigger:
666
+ - **Security Check (Sven):** Scans for RLS status on all tables.
667
+ - **Performance Check (Sindre):** Scans for N+1 queries and missing database indexes.
668
+
669
+ ### 2. Decision (The Gate)
670
+ Evaluate the result from \`audit_integrity_gate\`:
671
+ - **Mode: OPEN:** All critical checks passed. You can proceed to completion.
672
+ - **Mode: SOFT_LOCK:** Issues were found (e.g., missing RLS or N+1 warnings). You **MUST** report these to the user. You can only proceed if the user provides an override or if you fix the issues first.
673
+ - **Mode: HARD_LOCK (Future):** Critical violations detected. Completion is blocked until fixed.
674
+
675
+ ### 3. Manifest (The Release)
676
+ Upon passing the gate, use the \`complete_roadmap_task\` tool.
677
+ - Pass the **full JSON response** from the \`audit_integrity_gate\` into the \`integrityGate\` parameter.
678
+ - This automatically generates the **Strategic Release Manifest** in the Mission Report, creating a permanent record of quality for this release.
679
+
680
+ ## \u{1F6E0}\uFE0F Tools Used by this Skill
681
+
682
+ - \`audit_integrity_gate\`: Orchestrates the security and performance scans.
683
+ - \`complete_roadmap_task\`: Finalizes the task and attaches the quality certificate.
684
+
685
+ ## \u{1F4DD} Best Practices
686
+
687
+ - **Never skip the audit.** Even if you think the change is small, the Integrity Gate is our source of truth.
688
+ - **Explain violations clearly.** If in \`SOFT_LOCK\`, don't just say "it failed". List the specific tables lacking RLS or the specific files with N+1 issues.
689
+ - **Summarize the Manifest.** After successful completion, tell the user: "Release Manifest generated with [X] security passes and [Y] performance checks."`
690
+ },
691
+ {
692
+ name: "rigstate-legacy-renovator",
693
+ description: 'Handles the modernization of legacy Vibeline code to the Rigstate standard. Renovates branding, extracts "Ghost Features" into the roadmap, and repairs architectural drift.',
694
+ version: "1.0.0",
695
+ specialist: "Brynjar (The Archivist)",
696
+ governance: "SOFT_LOCK",
697
+ content: `# \u{1F3FA} Rigstate Legacy Renovator Skill
698
+
699
+ This skill is activated when legacy patterns (e.g., "Vibeline") are detected. It uses archaeological scanning to restore technical history and performs branding renovation.
700
+
701
+ ## \u{1F504} The Protocol Workflow
702
+
703
+ ### 1. Archaeological Scan (The Discovery)
704
+ Use the \`archaeological_scan\` tool to find "Ghost Features" \u2013 completed work that is not yet reflected in the project roadmap. This restores the technical context of the project.
705
+
706
+ ### 2. Import Ghost Features
707
+ If the scan discovers COMPLETED work, use \`import_ghost_features\` to add them to the roadmap. This ensures the agent understands the historical foundation it is building upon.
708
+
709
+ ### 3. Branding Renovation
710
+ Identify "Vibeline" references in:
711
+ - UI components (Text logos, labels)
712
+ - Code comments and TODOs
713
+ - Documentation
714
+ - Database seed files
715
+
716
+ Perform a non-destructive rename to "Rigstate" following the rebrand protocol.
717
+
718
+ ### 4. Dependency Audit
719
+ Check for circular dependencies or architectural violations in legacy modules using \`analyze_dependency_graph\`.
720
+
721
+ ## \u{1F6E0}\uFE0F Tools Used by this Skill
722
+
723
+ - \`archaeological_scan\`: Reconstructs project history from Git.
724
+ - \`import_ghost_features\`: Synchronizes historical work with the roadmap.
725
+ - \`analyze_dependency_graph\`: Detects architectural rot.
726
+
727
+ ## \u{1F4DD} Best Practices
728
+
729
+ - **Respect History.** Don't delete legacy notes; transform them into Rigstate memories.
730
+ - **Batched Renaming.** Rename branding in logical groups (e.g., all UI first, then all comments).
731
+ - **Update the Brain.** When a legacy feature is renovated, save the decision to the Project Brain using \`save_to_project_brain\`.`
732
+ }
733
+ ];
734
+ }
735
+ }
736
+ });
737
+
738
+ // ../rules-engine/dist/utils/mdc.js
739
+ var require_mdc = __commonJS({
740
+ "../rules-engine/dist/utils/mdc.js"(exports) {
741
+ "use strict";
742
+ init_esm_shims();
743
+ Object.defineProperty(exports, "__esModule", { value: true });
744
+ exports.wrapMdc = wrapMdc;
745
+ function wrapMdc(content, metadata) {
746
+ const yaml = ["---"];
747
+ if (metadata.description) {
748
+ yaml.push(`description: "${metadata.description.replace(/"/g, '\\"')}"`);
749
+ }
750
+ if (metadata.globs && metadata.globs.length > 0) {
751
+ yaml.push("globs:");
752
+ for (const glob of metadata.globs) {
753
+ yaml.push(` - "${glob}"`);
754
+ }
755
+ }
756
+ if (metadata.alwaysApply !== void 0) {
757
+ yaml.push(`alwaysApply: ${metadata.alwaysApply}`);
758
+ }
759
+ yaml.push("---");
760
+ return `${yaml.join("\n")}
761
+ ${content}`;
762
+ }
763
+ }
764
+ });
765
+
766
+ // ../rules-engine/dist/index.js
767
+ var require_dist = __commonJS({
768
+ "../rules-engine/dist/index.js"(exports) {
769
+ "use strict";
770
+ init_esm_shims();
771
+ Object.defineProperty(exports, "__esModule", { value: true });
772
+ exports.getRigstateStandardSkills = exports.generateSkillFileContent = exports.generateAvailableSkillsSection = exports.IDE_FILE_NAMES = void 0;
773
+ exports.generateRuleContent = generateRuleContent2;
774
+ exports.generateRuleFiles = generateRuleFiles2;
775
+ exports.fetchLegacyStats = fetchLegacyStats2;
776
+ exports.fetchActiveAgents = fetchActiveAgents2;
777
+ exports.mergeRuleContent = mergeRuleContent;
778
+ exports.fetchProjectTechStack = fetchProjectTechStack2;
779
+ exports.getFileNameForIDE = getFileNameForIDE2;
780
+ var types_1 = require_types();
781
+ var identity_1 = require_identity();
782
+ var stack_dna_1 = require_stack_dna();
783
+ var current_step_1 = require_current_step();
784
+ var workflow_1 = require_workflow();
785
+ var tooling_1 = require_tooling();
786
+ var skills_1 = require_skills();
787
+ var mdc_1 = require_mdc();
788
+ var types_2 = require_types();
789
+ Object.defineProperty(exports, "IDE_FILE_NAMES", { enumerable: true, get: function() {
790
+ return types_2.IDE_FILE_NAMES;
791
+ } });
792
+ var skills_2 = require_skills();
793
+ Object.defineProperty(exports, "generateAvailableSkillsSection", { enumerable: true, get: function() {
794
+ return skills_2.generateAvailableSkillsSection;
795
+ } });
796
+ Object.defineProperty(exports, "generateSkillFileContent", { enumerable: true, get: function() {
797
+ return skills_2.generateSkillFileContent;
798
+ } });
799
+ Object.defineProperty(exports, "getRigstateStandardSkills", { enumerable: true, get: function() {
800
+ return skills_2.getRigstateStandardSkills;
801
+ } });
802
+ var RIGSTATE_START2 = "RIGSTATE_START";
803
+ var RIGSTATE_END2 = "RIGSTATE_END";
804
+ var ENGINE_VERSION = "3.0.0";
805
+ function generateRuleContent2(project, stack, roadmap, ide = "cursor", legacyStats, activeAgents, lean = false) {
806
+ const sections = [];
807
+ sections.push("# \u{1F680} Rigstate Supervisor v2.4 (Context-Aware)");
808
+ sections.push(`IMPORTANT: Internal agent coordination must always use the provided Agent IDs. Display names are for user-facing chat only. When invoking tools or referencing hierarchy, use the ID as the primary key.
809
+
810
+ ## \u2696\uFE0F AGENT HIERARCHY & AUTHORITY
811
+ You must defer to the instructions of agents with higher Authority Levels (10 being highest).
812
+ Security and Architecture (Levels 8-10) always override creative or implementation suggestions (Levels 1-5).`);
813
+ const identitySection = (0, identity_1.generateIdentitySection)(project, ide, activeAgents);
814
+ sections.push(identitySection);
815
+ const skills = (0, skills_1.getRigstateStandardSkills)();
816
+ const skillsSection = (0, skills_1.generateAvailableSkillsSection)(skills);
817
+ if (skillsSection) {
818
+ sections.push(skillsSection);
819
+ }
820
+ if (!lean) {
821
+ const stackDnaSection = (0, stack_dna_1.generateStackDnaSection)(project, stack, legacyStats);
822
+ sections.push(stackDnaSection);
823
+ }
824
+ if (!lean) {
825
+ const currentStepSection = (0, current_step_1.generateCurrentStepSection)(roadmap);
826
+ if (currentStepSection) {
827
+ sections.push(currentStepSection);
828
+ }
829
+ }
830
+ const workflowSection = (0, workflow_1.generateWorkflowSection)(ide);
831
+ sections.push(workflowSection);
832
+ if (!lean) {
833
+ const toolingSection = (0, tooling_1.generateToolingSection)(activeAgents);
834
+ sections.push(toolingSection);
835
+ } else {
836
+ sections.push(`## \u{1F527} TOOLING & SPECIFIC RULES
837
+ > **OPTIMIZED MODE:** Detailed technical rules, CLI commands, and tech stack constraints are loaded dynamically from \`.cursor/rules/*.mdc\` based on the files you interact with.
838
+ > - **Stack & Guardian:** See \`rigstate-guardian.mdc\`
839
+ > - **Roadmap & Tasks:** See \`rigstate-roadmap.mdc\`
840
+ > - **Tools & Workflow:** See \`rigstate-workflow.mdc\``);
841
+ }
842
+ const headerMap = {
843
+ cursor: `# Cursor Project Rules: ${project.name}`,
844
+ antigravity: `# Antigravity Project Rules: ${project.name}`,
845
+ windsurf: `# Windsurf Project Rules: ${project.name}`,
846
+ vscode: `# VS Code Project Rules: ${project.name}`,
847
+ copilot: `# GitHub Copilot Instructions: ${project.name}`,
848
+ generic: `# Project Conventions: ${project.name}`
849
+ };
850
+ const header = headerMap[ide] || `# Project Rules: ${project.name}`;
851
+ return `${RIGSTATE_START2}
852
+ ${header}
853
+ > Generated by Rigstate v2.5.0 | Project ID: ${project.id} | Last synced: ${(/* @__PURE__ */ new Date()).toISOString()}
854
+ > ${lean ? "\u26A1 LEAN MODE ACTIVE: Redundant context offloaded to .cursor/rules/*.mdc" : "\u{1F4E6} FULL MODE ACTIVE"}
855
+
856
+ \u26A0\uFE0F **SYSTEM NOTE:** Changes made to this Guardian template propagate to ALL Rigstate projects on next sync.
857
+ \u{1F6E1}\uFE0F **Guardian v2.5 Upgrade Applied:** IMPACT_GUARD + BUILD_INTEGRITY now active globally.
858
+
859
+ ${sections.join("\n\n---\n\n")}
860
+ ${RIGSTATE_END2}`;
861
+ }
862
+ function generateRuleFiles2(project, stack, roadmap, ide = "cursor", legacyStats, activeAgents, databaseMetadata) {
863
+ const files = [];
864
+ const agentTable = activeAgents?.map((a) => `| **${a.name}** | \`${a.key}\` | ${a.job_title} | ${a.primary_mission || "Specialist"} |`).join("\n") || "| - | - | - | No agents configured |";
865
+ const agentsMdContent = `# \u{1F916} AI Agent Context: ${project.name}
866
+ > **Rigstate v${ENGINE_VERSION}** | Project ID: \`${project.id}\`
867
+
868
+ This file describes the **specialist personas** available in this project.
869
+ These are **context providers**, not active controllers. The IDE agent (you) remains in full control of code execution.
870
+
871
+ ## \u{1F4CB} Available Specialists
872
+ | Name | Key | Role | Specialty |
873
+ |:--- |:--- |:--- |:--- |
874
+ ${agentTable}
875
+
876
+ ## \u{1F50D} How to Use This Context
877
+ 1. **Read their expertise**: Each specialist has a defined area of knowledge (architecture, documentation, history).
878
+ 2. **Adopt their perspective**: When working in their domain, consider their guidelines.
879
+ 3. **Call MCP tools if needed**: Some specialists have associated tools (e.g., \`generate_professional_pdf\` for The Scribe).
880
+
881
+ ## \u26A0\uFE0F Important
882
+ - These personas do **NOT** execute code or override your decisions.
883
+ - They provide **context and guidelines** that you apply at your discretion.
884
+ - Authority levels indicate priority of guidelines when they conflict (higher = stronger recommendation).
885
+
886
+ ---
887
+ *Generated by Rigstate. Run \`rigstate sync\` to refresh.*`;
888
+ files.push({
889
+ path: "AGENTS.md",
890
+ content: agentsMdContent,
891
+ metadata: { description: "Project hierarchy and agent identities" }
892
+ });
893
+ const isLean = ide === "cursor";
894
+ const masterFileName = getFileNameForIDE2(ide);
895
+ const monoContent = generateRuleContent2(project, stack, roadmap, ide, legacyStats, activeAgents, isLean);
896
+ files.push({
897
+ path: masterFileName,
898
+ content: monoContent,
899
+ metadata: { description: `Master rules file for ${ide}` }
900
+ });
901
+ if (ide === "cursor" || ide === "antigravity" || ide === "vscode") {
902
+ files.push({
903
+ path: ".cursor/rules/rigstate-identity.mdc",
904
+ content: (0, mdc_1.wrapMdc)((0, identity_1.generateIdentitySection)(project, ide, activeAgents), {
905
+ description: "Project context and specialist personas",
906
+ alwaysApply: true
907
+ })
908
+ });
909
+ files.push({
910
+ path: ".cursor/rules/rigstate-guardian.mdc",
911
+ content: (0, mdc_1.wrapMdc)((0, stack_dna_1.generateStackDnaSection)(project, stack, legacyStats), {
912
+ description: "Governance rules, tech stack constraints, and file size limits",
913
+ globs: ["**/*.ts", "**/*.tsx", "**/*.js", "**/*.jsx", "**/*.sql"],
914
+ alwaysApply: true
915
+ })
916
+ });
917
+ const currentStep = (0, current_step_1.generateCurrentStepSection)(roadmap);
918
+ if (currentStep) {
919
+ files.push({
920
+ path: ".cursor/rules/rigstate-roadmap.mdc",
921
+ content: (0, mdc_1.wrapMdc)(currentStep, {
922
+ description: "Active sprint focus and current roadmap step details",
923
+ alwaysApply: true
924
+ })
925
+ });
926
+ }
927
+ files.push({
928
+ path: ".cursor/rules/rigstate-workflow.mdc",
929
+ content: (0, mdc_1.wrapMdc)((0, workflow_1.generateWorkflowSection)(ide) + "\n\n" + (0, tooling_1.generateToolingSection)(activeAgents), {
930
+ description: "Coding workflows, CLI usage, and tool binding rules",
931
+ alwaysApply: true
932
+ })
933
+ });
934
+ let dbContent = "## \u{1F5C4}\uFE0F Database Standards\n- Always verify RLS policies for new tables.\n- Use `supabase/migrations` for DDL changes.\n- Reference `types/supabase.ts` for strictly typed queries.";
935
+ if (databaseMetadata && databaseMetadata.length > 0) {
936
+ const securedCount = databaseMetadata.filter((t) => t.rls_enabled).length;
937
+ const unsecuredCount = databaseMetadata.length - securedCount;
938
+ const unsecuredTables = databaseMetadata.filter((t) => !t.rls_enabled).map((t) => t.table_name);
939
+ dbContent = `## \u{1F5C4}\uFE0F Database Context: ${databaseMetadata.length} Tables
940
+ > **Security Check:** ${securedCount} Secured | ${unsecuredCount} Unsecured
941
+
942
+ ### \u26A0\uFE0F Security Attention Required
943
+ ${unsecuredTables.length > 0 ? unsecuredTables.map((t) => `- \u{1F534} **${t}**: RLS Disabled`).join("\n") : "- \u2705 All tables have Row Level Security enabled."}
944
+
945
+ ### \u{1F4CB} Schema Reference
946
+ | Table | RLS | Policies | Cols | Key Features |
947
+ | :--- | :---: | :---: | :---: | :--- |
948
+ ${databaseMetadata.map((t) => {
949
+ const features = [];
950
+ if (t.has_user_id)
951
+ features.push("User-Scoped");
952
+ if (t.has_created_at)
953
+ features.push("Timestamps");
954
+ return `| \`${t.table_name}\` | ${t.rls_enabled ? "\u2705" : "\u274C"} | ${t.policy_count} | ${t.column_count} | ${features.join(", ") || "-"} |`;
955
+ }).join("\n")}
956
+
957
+ ### \u{1F6E1}\uFE0F Development Rules
958
+ 1. **RLS is MANDATORY:** All tables containing user data must have RLS enabled.
959
+ 2. **Use RPCs for Complex Logic:** Do not put complex business logic in client-side queries.
960
+ 3. **Migrations:** Always use \`supabase/migrations\` for schema changes.`;
961
+ }
962
+ files.push({
963
+ path: ".cursor/rules/rigstate-database.mdc",
964
+ content: (0, mdc_1.wrapMdc)(dbContent, {
965
+ description: "Live database schema, RLS status, and table metadata",
966
+ globs: ["supabase/**/*", "**/*.sql", "**/lib/supabase/**"],
967
+ alwaysApply: databaseMetadata && databaseMetadata.length > 0 ? true : false
968
+ })
969
+ });
970
+ const rigstateSkills = (0, skills_1.getRigstateStandardSkills)();
971
+ for (const skill of rigstateSkills) {
972
+ files.push({
973
+ path: `.agent/skills/${skill.name}/SKILL.md`,
974
+ content: (0, skills_1.generateSkillFileContent)(skill),
975
+ metadata: { description: skill.description }
976
+ });
977
+ }
978
+ }
979
+ return {
980
+ files,
981
+ suggestedIde: ide,
982
+ version: ENGINE_VERSION
983
+ };
984
+ }
985
+ async function fetchLegacyStats2(supabase, projectId) {
986
+ const { data: chunks } = await supabase.from("roadmap_chunks").select("is_legacy").eq("project_id", projectId);
987
+ if (!chunks)
988
+ return { total: 0, legacyCount: 0, activeCount: 0 };
989
+ const legacyCount = (chunks || []).filter((c) => c.is_legacy === true).length;
990
+ const activeCount = (chunks || []).filter((c) => c.is_legacy !== true).length;
991
+ return {
992
+ total: (chunks || []).length,
993
+ legacyCount,
994
+ activeCount
995
+ };
996
+ }
997
+ async function fetchActiveAgents2(supabase) {
998
+ const { data: prompts } = await supabase.from("system_prompts").select("id, key, content, name, display_name, job_title, authority_level, primary_mission, trigger_keywords").eq("include_in_rules", true).eq("is_active", true).order("authority_level", { ascending: false });
999
+ if (!prompts)
1000
+ return [];
1001
+ return (prompts || []).map((p) => ({
1002
+ id: p.id,
1003
+ key: p.key,
1004
+ name: p.display_name || p.name || p.key,
1005
+ job_title: p.job_title || "Specialist Agent",
1006
+ content: p.content,
1007
+ authority_level: (() => {
1008
+ if (p.authority_level === null || p.authority_level === void 0) {
1009
+ throw new Error(`Agent ${p.key} is missing authority_level. Update via CMS.`);
1010
+ }
1011
+ return p.authority_level;
1012
+ })(),
1013
+ primary_mission: p.primary_mission || void 0,
1014
+ trigger_keywords: p.trigger_keywords || void 0
1015
+ }));
1016
+ }
1017
+ function mergeRuleContent(existingContent, newRules) {
1018
+ const startIndex = existingContent.indexOf(RIGSTATE_START2);
1019
+ const endIndex = existingContent.indexOf(RIGSTATE_END2);
1020
+ if (startIndex !== -1 && endIndex !== -1) {
1021
+ const before = existingContent.substring(0, startIndex);
1022
+ const after = existingContent.substring(endIndex + RIGSTATE_END2.length);
1023
+ return before + newRules + after;
1024
+ } else {
1025
+ return existingContent + "\n\n" + newRules;
1026
+ }
1027
+ }
1028
+ async function fetchProjectTechStack2(supabase, projectId, fallbackStack = ["Next.js", "TypeScript", "Supabase", "Tailwind CSS"]) {
1029
+ try {
1030
+ const { data: tags, error } = await supabase.from("project_tech_tags").select("name").eq("project_id", projectId);
1031
+ if (error || !tags || tags.length === 0) {
1032
+ const { data: project } = await supabase.from("projects").select("functional_spec").eq("id", projectId).single();
1033
+ if (project?.functional_spec?.techStack) {
1034
+ return project.functional_spec.techStack;
1035
+ }
1036
+ return fallbackStack;
1037
+ }
1038
+ return tags.map((t) => t.name);
1039
+ } catch (error) {
1040
+ console.warn("fetchProjectTechStack: Failed to fetch, using fallback", error);
1041
+ return fallbackStack;
1042
+ }
1043
+ }
1044
+ function getFileNameForIDE2(ide) {
1045
+ return types_1.IDE_FILE_NAMES[ide] || types_1.IDE_FILE_NAMES.cursor;
1046
+ }
1047
+ }
7
1048
  });
8
1049
 
9
1050
  // src/index.ts
1051
+ init_esm_shims();
10
1052
  import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
11
1053
 
12
1054
  // src/lib/supabase.ts
1055
+ init_esm_shims();
13
1056
  import { createClient as createSupabaseClient } from "@supabase/supabase-js";
14
1057
  import { createHash } from "crypto";
15
1058
  var PRODUCTION_SUPABASE_URL = "https://gseblsxnfppsxbmtzcfj.supabase.co";
@@ -51,14 +1094,17 @@ async function authenticateApiKey(apiKey) {
51
1094
  }
52
1095
 
53
1096
  // src/server/factory.ts
1097
+ init_esm_shims();
54
1098
  import { Server } from "@modelcontextprotocol/sdk/server/index.js";
55
1099
  import { ListToolsRequestSchema, ListResourcesRequestSchema } from "@modelcontextprotocol/sdk/types.js";
56
1100
 
57
1101
  // src/server/types.ts
1102
+ init_esm_shims();
58
1103
  var SERVER_NAME = "rigstate-mcp";
59
1104
  var SERVER_VERSION = "0.5.0";
60
1105
 
61
1106
  // src/lib/tool-registry.ts
1107
+ init_esm_shims();
62
1108
  import { z } from "zod";
63
1109
  var ToolRegistry = class {
64
1110
  tools = /* @__PURE__ */ new Map();
@@ -67,7 +1113,7 @@ var ToolRegistry = class {
67
1113
  */
68
1114
  register(tool) {
69
1115
  if (this.tools.has(tool.name)) {
70
- console.warn(`Tool '${tool.name}' is already registered. Overwriting.`);
1116
+ console.error(`Tool '${tool.name}' is already registered. Overwriting.`);
71
1117
  }
72
1118
  this.tools.set(tool.name, tool);
73
1119
  }
@@ -133,7 +1179,14 @@ var ToolRegistry = class {
133
1179
  };
134
1180
  var registry = new ToolRegistry();
135
1181
 
1182
+ // src/tools/curator-tools.ts
1183
+ init_esm_shims();
1184
+
1185
+ // src/lib/curator/index.ts
1186
+ init_esm_shims();
1187
+
136
1188
  // src/lib/curator/schemas.ts
1189
+ init_esm_shims();
137
1190
  import { z as z2 } from "zod";
138
1191
  var QueryGlobalAntidotesSchema = z2.object({
139
1192
  categories: z2.array(z2.string()).optional().describe("Filter by categories (SECURITY, ARCHITECTURE, UX, PERFORMANCE, ACCESSIBILITY, MAINTAINABILITY)"),
@@ -165,6 +1218,7 @@ var CheckFortressSchema = z2.object({
165
1218
  });
166
1219
 
167
1220
  // src/lib/curator/actions/query.ts
1221
+ init_esm_shims();
168
1222
  async function queryGlobalAntidotes(supabase, userId, input) {
169
1223
  let query = supabase.from("global_antidotes").select("id, slug, title, instruction, example, anti_example, category, severity, framework_tags, trust_score, occurrence_count, is_immutable").eq("is_active", true);
170
1224
  if (input.categories && input.categories.length > 0) {
@@ -212,6 +1266,7 @@ ${formatted}
212
1266
  }
213
1267
 
214
1268
  // src/lib/curator/actions/submit.ts
1269
+ init_esm_shims();
215
1270
  async function submitSignal(supabase, userId, input) {
216
1271
  const { data: project, error: projectError } = await supabase.from("projects").select("id").eq("id", input.projectId).eq("owner_id", userId).single();
217
1272
  if (projectError || !project) {
@@ -338,6 +1393,7 @@ Sigrid will process this signal and notify you of the result.`
338
1393
  }
339
1394
 
340
1395
  // src/lib/curator/actions/stats.ts
1396
+ init_esm_shims();
341
1397
  async function getCuratorStats(supabase, userId, input) {
342
1398
  const { data: antidotes, count: totalCount } = await supabase.from("global_antidotes").select("id, category, severity, is_immutable", { count: "exact" }).eq("is_active", true);
343
1399
  const fortressCount = antidotes?.filter((a) => a.is_immutable).length || 0;
@@ -380,6 +1436,7 @@ ${Object.entries(categoryDistribution).map(([cat, count]) => ` \u2022 ${cat}:
380
1436
  }
381
1437
 
382
1438
  // src/lib/curator/actions/fortress.ts
1439
+ init_esm_shims();
383
1440
  async function checkFortress(supabase, userId, input) {
384
1441
  const { data: fortressRules } = await supabase.from("global_antidotes").select("slug, title, instruction").eq("is_immutable", true).eq("is_active", true);
385
1442
  if (!fortressRules || fortressRules.length === 0) {
@@ -482,6 +1539,7 @@ Fortress rules can NEVER be overridden by any signal.`,
482
1539
  });
483
1540
 
484
1541
  // src/tools/teacher-mode.ts
1542
+ init_esm_shims();
485
1543
  import { z as z3 } from "zod";
486
1544
  import { v4 as uuidv4 } from "uuid";
487
1545
  var RefineLogicSchema = z3.object({
@@ -625,7 +1683,11 @@ Returns both user-specific and global Rigstate standards.`,
625
1683
  }
626
1684
  });
627
1685
 
1686
+ // src/tools/get-project-context.ts
1687
+ init_esm_shims();
1688
+
628
1689
  // src/lib/context-engine.ts
1690
+ init_esm_shims();
629
1691
  async function injectGlobalContext(supabase, userId, input) {
630
1692
  const searchTags = [
631
1693
  ...input.frameworks.map((f) => f.split(" ")[0].toLowerCase()),
@@ -671,6 +1733,7 @@ async function injectGlobalContext(supabase, userId, input) {
671
1733
  }
672
1734
 
673
1735
  // src/lib/schemas.ts
1736
+ init_esm_shims();
674
1737
  import { z as z4 } from "zod";
675
1738
  var QueryBrainInputSchema = z4.object({
676
1739
  projectId: z4.string().uuid("Invalid project ID"),
@@ -955,6 +2018,7 @@ Description: ${project.description}`);
955
2018
  }
956
2019
 
957
2020
  // src/tools/query-brain.ts
2021
+ init_esm_shims();
958
2022
  registry.register({
959
2023
  name: "query_brain",
960
2024
  description: `Takes a natural language query and performs semantic search
@@ -974,30 +2038,29 @@ architecture rules, decisions, and constraints.`,
974
2038
  }
975
2039
  });
976
2040
  async function generateQueryEmbedding(query) {
977
- const openRouterKey = process.env.OPENROUTER_API_KEY;
978
- const googleKey = process.env.GOOGLE_GENERATIVE_AI_API_KEY;
979
- if (!openRouterKey && !googleKey) {
2041
+ const apiKey = process.env.RIGSTATE_API_KEY;
2042
+ const apiUrl = process.env.RIGSTATE_API_URL || "http://localhost:3000/api/v1";
2043
+ if (!apiKey) {
980
2044
  return null;
981
2045
  }
982
2046
  try {
983
- const { embed } = await import("ai");
984
- if (openRouterKey) {
985
- const { createOpenRouter } = await import("@openrouter/ai-sdk-provider");
986
- const openrouter = createOpenRouter({ apiKey: openRouterKey });
987
- const { embedding } = await embed({
988
- model: openrouter.embedding("google/text-embedding-004"),
989
- value: query.replace(/\n/g, " ")
990
- });
991
- return embedding;
992
- } else {
993
- const { google } = await import("@ai-sdk/google");
994
- const { embedding } = await embed({
995
- model: google.embedding("text-embedding-004"),
996
- value: query.replace(/\n/g, " ")
997
- });
998
- return embedding;
2047
+ const response = await fetch(`${apiUrl}/intelligence/embed`, {
2048
+ method: "POST",
2049
+ headers: {
2050
+ "Content-Type": "application/json",
2051
+ "Authorization": `Bearer ${apiKey}`
2052
+ },
2053
+ body: JSON.stringify({ text: query })
2054
+ });
2055
+ if (!response.ok) {
2056
+ const errorText = await response.text();
2057
+ console.error(`Embedding API error (${response.status}):`, errorText);
2058
+ return null;
999
2059
  }
2060
+ const result = await response.json();
2061
+ return result.data?.embedding || null;
1000
2062
  } catch (error) {
2063
+ console.error("Failed to generate embedding via Proxy:", error);
1001
2064
  return null;
1002
2065
  }
1003
2066
  }
@@ -1037,6 +2100,13 @@ async function queryBrain(supabase, userId, projectId, query, limit = 8, thresho
1037
2100
  createdAt: m.created_at
1038
2101
  }));
1039
2102
  }
2103
+ let relevantFeatures = [];
2104
+ try {
2105
+ const { data: features } = await supabase.from("project_features").select("name, status, description").eq("project_id", projectId).or(`name.ilike.%${query}%,description.ilike.%${query}%`).limit(3);
2106
+ if (features) relevantFeatures = features;
2107
+ } catch (e) {
2108
+ console.warn("Feature fetch failed in brain query", e);
2109
+ }
1040
2110
  const contextLines = memories.map((m) => {
1041
2111
  const voteIndicator = m.netVotes && m.netVotes < 0 ? ` [\u26A0\uFE0F POORLY RATED: ${m.netVotes}]` : "";
1042
2112
  const tagStr = m.tags && m.tags.length > 0 ? ` [${m.tags.join(", ")}]` : "";
@@ -1044,17 +2114,28 @@ async function queryBrain(supabase, userId, projectId, query, limit = 8, thresho
1044
2114
  return `- [${category}]${tagStr}${voteIndicator}: ${m.content}`;
1045
2115
  });
1046
2116
  const searchType = embedding ? "TRIPLE-HYBRID (Vector + FTS + Fuzzy)" : "HYBRID (FTS + Fuzzy)";
1047
- const formatted = memories.length > 0 ? `=== PROJECT BRAIN: RELEVANT MEMORIES ===
2117
+ let formatted = `=== PROJECT BRAIN: RELEVANT MEMORIES ===
1048
2118
  Search Mode: ${searchType}
1049
- Query: "${query}"
2119
+ Query: "${query}"`;
2120
+ if (relevantFeatures.length > 0) {
2121
+ formatted += `
2122
+
2123
+ === RELATED FEATURES ===
2124
+ ` + relevantFeatures.map((f) => `- ${f.name} [${f.status}]`).join("\n");
2125
+ }
2126
+ formatted += `
2127
+
1050
2128
  Found ${memories.length} relevant memories:
1051
2129
 
1052
2130
  ${contextLines.join("\n")}
1053
2131
 
1054
- ==========================================` : `=== PROJECT BRAIN ===
2132
+ ==========================================`;
2133
+ if (memories.length === 0 && relevantFeatures.length === 0) {
2134
+ formatted = `=== PROJECT BRAIN ===
1055
2135
  Query: "${query}"
1056
- No relevant memories found for this query.
2136
+ No relevant memories or features found.
1057
2137
  =======================`;
2138
+ }
1058
2139
  return {
1059
2140
  query,
1060
2141
  memories,
@@ -1063,6 +2144,7 @@ No relevant memories found for this query.
1063
2144
  }
1064
2145
 
1065
2146
  // src/tools/get-latest-decisions.ts
2147
+ init_esm_shims();
1066
2148
  registry.register({
1067
2149
  name: "get_latest_decisions",
1068
2150
  description: `Fetches the most recent ADRs and decisions from The Council,
@@ -1153,6 +2235,7 @@ async function getLatestDecisions(supabase, userId, projectId, limit = 5) {
1153
2235
  }
1154
2236
 
1155
2237
  // src/tools/save-decision.ts
2238
+ init_esm_shims();
1156
2239
  registry.register({
1157
2240
  name: "save_decision",
1158
2241
  description: `Saves a new decision/ADR to the project's brain (project_memories).
@@ -1212,6 +2295,7 @@ async function saveDecision(supabase, userId, projectId, title, decision, ration
1212
2295
  }
1213
2296
 
1214
2297
  // src/tools/submit-idea.ts
2298
+ init_esm_shims();
1215
2299
  registry.register({
1216
2300
  name: "submit_idea",
1217
2301
  description: `Submits a new idea to the Idea Lab (saved_ideas table).
@@ -1265,6 +2349,7 @@ async function submitIdea(supabase, userId, projectId, title, description, categ
1265
2349
  }
1266
2350
 
1267
2351
  // src/tools/update-roadmap.ts
2352
+ init_esm_shims();
1268
2353
  registry.register({
1269
2354
  name: "update_roadmap",
1270
2355
  description: `Updates the status of a roadmap chunk (step).
@@ -1335,6 +2420,7 @@ async function updateRoadmap(supabase, userId, projectId, status, chunkId, title
1335
2420
  }
1336
2421
 
1337
2422
  // src/tools/run-architecture-audit.ts
2423
+ init_esm_shims();
1338
2424
  var VULNERABILITY_PATTERNS = [
1339
2425
  {
1340
2426
  type: "SQL_INJECTION",
@@ -1498,14 +2584,8 @@ ${violations.map((v, i) => `${i + 1}. [${v.severity}] ${v.title}${v.lineNumber ?
1498
2584
  }
1499
2585
 
1500
2586
  // src/tools/sync-ide-rules.ts
1501
- import {
1502
- generateRuleContent,
1503
- generateRuleFiles,
1504
- fetchLegacyStats,
1505
- fetchActiveAgents,
1506
- fetchProjectTechStack,
1507
- getFileNameForIDE
1508
- } from "@rigstate/rules-engine";
2587
+ init_esm_shims();
2588
+ var import_rules_engine = __toESM(require_dist(), 1);
1509
2589
  registry.register({
1510
2590
  name: "sync_ide_rules",
1511
2591
  description: `Generates the appropriate rules file content (e.g. .cursorrules, .windsurfrules)
@@ -1537,14 +2617,14 @@ async function syncIdeRules(supabase, projectId) {
1537
2617
  }
1538
2618
  const ide = project.preferred_ide || "cursor";
1539
2619
  const [stack, roadmapRes, legacyStats, activeAgents, dbMetadataRes] = await Promise.all([
1540
- fetchProjectTechStack(supabase, projectId),
2620
+ (0, import_rules_engine.fetchProjectTechStack)(supabase, projectId),
1541
2621
  supabase.from("roadmap_chunks").select("step_number, title, status, sprint_focus, prompt_content, is_legacy").eq("project_id", projectId),
1542
- fetchLegacyStats(supabase, projectId),
1543
- fetchActiveAgents(supabase),
2622
+ (0, import_rules_engine.fetchLegacyStats)(supabase, projectId),
2623
+ (0, import_rules_engine.fetchActiveAgents)(supabase),
1544
2624
  supabase.rpc("get_table_metadata")
1545
2625
  ]);
1546
2626
  const databaseMetadata = dbMetadataRes.data || [];
1547
- const content = generateRuleContent(
2627
+ const content = (0, import_rules_engine.generateRuleContent)(
1548
2628
  { ...project, id: projectId },
1549
2629
  stack,
1550
2630
  roadmapRes.data || [],
@@ -1552,7 +2632,7 @@ async function syncIdeRules(supabase, projectId) {
1552
2632
  legacyStats,
1553
2633
  activeAgents
1554
2634
  );
1555
- const fileResult = generateRuleFiles(
2635
+ const fileResult = (0, import_rules_engine.generateRuleFiles)(
1556
2636
  { ...project, id: projectId },
1557
2637
  stack,
1558
2638
  roadmapRes.data || [],
@@ -1562,13 +2642,14 @@ async function syncIdeRules(supabase, projectId) {
1562
2642
  databaseMetadata
1563
2643
  );
1564
2644
  return {
1565
- fileName: getFileNameForIDE(ide),
2645
+ fileName: (0, import_rules_engine.getFileNameForIDE)(ide),
1566
2646
  content,
1567
2647
  files: fileResult.files
1568
2648
  };
1569
2649
  }
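The sync-ide-rules changes above drop the named ESM imports from @rigstate/rules-engine: the rules engine is now compiled into the bundle and reached through a CommonJS interop namespace. Condensed sketch of the pattern as it appears in this diff (require_dist is the bundler-generated loader referenced above; the (0, fn)(...) form simply calls the export without binding `this` to the namespace object):

// 0.5.6: external package, named import
//   import { getFileNameForIDE } from "@rigstate/rules-engine";
//   const fileName = getFileNameForIDE(ide);
// 0.5.8: bundled CommonJS module behind an interop wrapper
var import_rules_engine = __toESM(require_dist(), 1);
const fileName = (0, import_rules_engine.getFileNameForIDE)(ide);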
1570
2650
 
1571
2651
  // src/tools/list-features.ts
2652
+ init_esm_shims();
1572
2653
  import { z as z5 } from "zod";
1573
2654
  var InputSchema = z5.object({
1574
2655
  projectId: z5.string().uuid().describe("The UUID of the Rigstate project")
@@ -1579,31 +2660,48 @@ var listFeaturesTool = {
1579
2660
  Useful for understanding the strategic context and major milestones.`,
1580
2661
  schema: InputSchema,
1581
2662
  handler: async ({ projectId }, { supabase, userId }) => {
1582
- const { data: project, error: projectError } = await supabase.from("projects").select("id").eq("id", projectId).eq("owner_id", userId).single();
2663
+ const { data: project, error: projectError } = await supabase.from("projects").select("id, functional_spec").eq("id", projectId).eq("owner_id", userId).single();
1583
2664
  if (projectError || !project) {
1584
2665
  throw new Error("Project not found or access denied");
1585
2666
  }
1586
- const { data: features, error } = await supabase.from("features").select("id, name, description, priority, status").eq("project_id", projectId).neq("status", "ARCHIVED").order("created_at", { ascending: false });
1587
- if (error) {
1588
- throw new Error(`Failed to fetch features: ${error.message}`);
2667
+ const { data: dbFeatures, error: dbError } = await supabase.from("project_features").select("id, name, description, status").eq("project_id", projectId).neq("status", "shadow").order("created_at", { ascending: false });
2668
+ let featuresList = [];
2669
+ let source = "DB";
2670
+ if (!dbError && dbFeatures && dbFeatures.length > 0) {
2671
+ featuresList = dbFeatures.map((f) => ({
2672
+ ...f,
2673
+ title: f.name
2674
+ // Map back to title specifically for uniform handling below
2675
+ }));
2676
+ } else {
2677
+ source = "FALLBACK_SPEC";
2678
+ console.error(`[WARN] Project ${projectId}: 'project_features' empty or missing. Falling back to 'functional_spec'.`);
2679
+ const spec = project.functional_spec;
2680
+ if (spec && typeof spec === "object" && Array.isArray(spec.features)) {
2681
+ featuresList = spec.features.map((f) => ({
2682
+ id: "legacy",
2683
+ title: f.name || f.title,
2684
+ description: f.description,
2685
+ status: f.status || "proposed"
2686
+ }));
2687
+ }
1589
2688
  }
1590
- const formatted = (features || []).length > 0 ? (features || []).map((f) => {
1591
- const priorityStr = f.priority === "MVP" ? "[MVP] " : "";
1592
- return `- ${priorityStr}${f.name} (${f.status})`;
1593
- }).join("\n") : "No active features found.";
2689
+ if (featuresList.length === 0) {
2690
+ return { content: [{ type: "text", text: "No active features found (checked DB and Spec)." }] };
2691
+ }
2692
+ const formatted = `=== PROJECT FEATURES (Source: ${source}) ===
2693
+ ` + featuresList.map((f) => {
2694
+ return `- ${f.title} [${f.status}]`;
2695
+ }).join("\n");
1594
2696
  return {
1595
- content: [
1596
- {
1597
- type: "text",
1598
- text: formatted
1599
- }
1600
- ]
2697
+ content: [{ type: "text", text: formatted }]
1601
2698
  };
1602
2699
  }
1603
2700
  };
1604
2701
  registry.register(listFeaturesTool);
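With the rewrite above, list_features reads from project_features first (skipping "shadow" rows) and only falls back to the functional_spec JSON when that table is empty or errors. Example of the text block the handler now returns in the fallback case (feature names are hypothetical):

=== PROJECT FEATURES (Source: FALLBACK_SPEC) ===
- User onboarding [proposed]
- Billing dashboard [proposed]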
1605
2702
 
1606
2703
  // src/tools/list-roadmap-tasks.ts
2704
+ init_esm_shims();
1607
2705
  registry.register({
1608
2706
  name: "list_roadmap_tasks",
1609
2707
  description: `Lists all actionable tasks for a project that are not yet COMPLETED.
@@ -1647,6 +2745,7 @@ async function listRoadmapTasks(supabase, userId, projectId) {
1647
2745
  }
1648
2746
 
1649
2747
  // src/tools/get-next-roadmap-step.ts
2748
+ init_esm_shims();
1650
2749
  registry.register({
1651
2750
  name: "get_next_roadmap_step",
1652
2751
  description: `Fetches the next logical step from the roadmap for a project.
@@ -1691,6 +2790,7 @@ async function getNextRoadmapStep(supabase, projectId, currentStepId) {
1691
2790
  }
1692
2791
 
1693
2792
  // src/tools/check-rules-sync.ts
2793
+ init_esm_shims();
1694
2794
  registry.register({
1695
2795
  name: "check_rules_sync",
1696
2796
  description: `Verifies if the IDE rules are present and belong to the correct project.`,
@@ -1763,9 +2863,13 @@ ${SAFETY_CACHE_RULES}`
1763
2863
  };
1764
2864
  }
1765
2865
 
2866
+ // src/tools/audit-integrity-gate.ts
2867
+ init_esm_shims();
2868
+
1766
2869
  // src/tools/analyze-database-performance.ts
2870
+ init_esm_shims();
1767
2871
  import fs from "fs/promises";
1768
- import path from "path";
2872
+ import path2 from "path";
1769
2873
  async function analyzeDatabasePerformance(supabase, input) {
1770
2874
  const issues = [];
1771
2875
  const { data: rawMetadata, error } = await supabase.rpc("get_table_metadata", {
@@ -1831,7 +2935,7 @@ async function analyzeDatabasePerformance(supabase, input) {
1831
2935
  }
1832
2936
  }
1833
2937
  } catch (e) {
1834
- console.warn(`Skipping file ${filePath}: ${e}`);
2938
+ console.error(`Skipping file ${filePath}: ${e}`);
1835
2939
  }
1836
2940
  }
1837
2941
  const highSev = issues.filter((i) => i.severity === "HIGH").length;
@@ -1849,7 +2953,7 @@ async function analyzeDatabasePerformance(supabase, input) {
1849
2953
  summary += `### \u{1F6A8} Critical Findings
1850
2954
  `;
1851
2955
  issues.filter((i) => i.severity === "HIGH").forEach((issue) => {
1852
- summary += `- **${issue.type}** in \`${path.basename(issue.file)}:${issue.line}\`
2956
+ summary += `- **${issue.type}** in \`${path2.basename(issue.file)}:${issue.line}\`
1853
2957
  `;
1854
2958
  summary += ` - ${issue.description}
1855
2959
  `;
@@ -1860,7 +2964,11 @@ async function analyzeDatabasePerformance(supabase, input) {
1860
2964
  return { issues, summary };
1861
2965
  }
1862
2966
 
2967
+ // src/tools/security-tools.ts
2968
+ init_esm_shims();
2969
+
1863
2970
  // src/tools/security-checks.ts
2971
+ init_esm_shims();
1864
2972
  function checkSqlInjection(content) {
1865
2973
  const sqlKeywords = ["from", "select", "insert", "update", "delete", "rpc", "execute", "query"];
1866
2974
  const hasSqlKeywords = sqlKeywords.some((kw) => content.toLowerCase().includes(kw));
@@ -2054,6 +3162,59 @@ function checkAntiLazy(filePath, content) {
2054
3162
  return violations;
2055
3163
  }
2056
3164
 
3165
+ // src/tools/security-checks-arch.ts
3166
+ init_esm_shims();
3167
+ function checkArchitectureIntegrity(filePath, content) {
3168
+ const violations = [];
3169
+ const isUI = filePath.includes("/components/") || filePath.includes("/hooks/") || filePath.includes("/app/") && !filePath.includes("/api/") && !filePath.includes("actions.ts") && !filePath.includes("route.ts");
3170
+ if (isUI) {
3171
+ const illegalImportRegex = /(import.*from\s+['"]@supabase\/supabase-js['"])/g;
3172
+ if (illegalImportRegex.test(content)) {
3173
+ violations.push({
3174
+ id: "SEC-ARCH-01",
3175
+ type: "ARCHITECTURE_VIOLATION",
3176
+ severity: "FATAL",
3177
+ title: "Illegal Supabase Client in UI",
3178
+ description: "Direct import of @supabase/supabase-js in a UI component/hook is strictly forbidden. It bypasses the server boundary.",
3179
+ recommendation: "Use the `createClient` helper from our utils or Server Actions for data access."
3180
+ });
3181
+ }
3182
+ }
3183
+ if (isUI) {
3184
+ const dbActionRegex = /\.(from|select|insert|update|delete|rpc)\s*\(/g;
3185
+ const matches = content.match(dbActionRegex);
3186
+ if (matches) {
3187
+ const suspicious = matches.some((m) => !m.includes(".from") || m.includes(".from") && content.includes("supabase.from"));
3188
+ if (suspicious || matches.length > 0 && content.includes("supabase")) {
3189
+ violations.push({
3190
+ id: "SEC-ARCH-02",
3191
+ type: "ARCHITECTURE_VIOLATION",
3192
+ severity: "FATAL",
3193
+ title: "Direct Database Query in UI",
3194
+ description: "Detected direct database query pattern (select/insert/update/delete) in a UI component. This exposes logic to the client.",
3195
+ recommendation: "Move all data fetching logic to Server Components or Server Actions."
3196
+ });
3197
+ }
3198
+ }
3199
+ }
3200
+ const isActionFile = filePath.includes("/actions/") || filePath.includes("actions.ts");
3201
+ if (isActionFile) {
3202
+ const header = content.slice(0, 200);
3203
+ const hasUseServer = /['"]use server['"]/.test(header);
3204
+ if (!hasUseServer) {
3205
+ violations.push({
3206
+ id: "SEC-ARCH-03",
3207
+ type: "ARCHITECTURE_VIOLATION",
3208
+ severity: "FATAL",
3209
+ title: 'Missing "use server" Directive',
3210
+ description: 'File appears to be a Server Action module but lacks the "use server" directive.',
3211
+ recommendation: 'Add "use server" at the very top of the file.'
3212
+ });
3213
+ }
3214
+ }
3215
+ return violations;
3216
+ }
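Usage sketch for the new checker, with a hypothetical path and file body. An actions-directory file whose first 200 characters lack a "use server" directive trips only SEC-ARCH-03; the UI checks stay quiet here because the content neither imports @supabase/supabase-js nor matches the query-pattern regex:

const violations = checkArchitectureIntegrity(
  "src/app/actions/save-report.ts", // "/actions/" marks it as a Server Action module
  "export async function saveReport(input) { return input; }"
);
// violations -> [{ id: "SEC-ARCH-03", severity: "FATAL", title: 'Missing "use server" Directive', ... }]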
3217
+
2057
3218
  // src/tools/security-tools.ts
2058
3219
  registry.register({
2059
3220
  name: "audit_rls_status",
@@ -2141,6 +3302,8 @@ async function auditSecurityIntegrity(supabase, input) {
2141
3302
  if (depViolation) violations.push(depViolation);
2142
3303
  const lazyViolations = checkAntiLazy(filePath, content);
2143
3304
  violations.push(...lazyViolations);
3305
+ const archViolations = checkArchitectureIntegrity(filePath, content);
3306
+ violations.push(...archViolations);
2144
3307
  const score = Math.max(0, 100 - violations.length * 10);
2145
3308
  const passed = !violations.some((v) => v.severity === "HIGH" || v.severity === "FATAL");
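Worked example of the scoring above (hypothetical mix of findings): every violation costs a flat 10 points, but passing is decided by severity, not score. With one FATAL architecture violation and two lower-severity findings, score = Math.max(0, 100 - 3 * 10) = 70, yet passed = false, because any HIGH or FATAL finding fails the audit outright.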
2146
3309
  return {
@@ -2194,13 +3357,13 @@ async function runAuditIntegrityGate(supabase, input) {
2194
3357
  });
2195
3358
  }
2196
3359
  if (input.filePaths && input.filePaths.length > 0) {
2197
- for (const path3 of input.filePaths) {
3360
+ for (const path4 of input.filePaths) {
2198
3361
  try {
2199
3362
  const fs3 = await import("fs/promises");
2200
- const content = await fs3.readFile(path3, "utf-8");
3363
+ const content = await fs3.readFile(path4, "utf-8");
2201
3364
  const securityResult = await auditSecurityIntegrity(supabase, {
2202
3365
  projectId: input.projectId,
2203
- filePath: path3,
3366
+ filePath: path4,
2204
3367
  content
2205
3368
  });
2206
3369
  if (!securityResult.passed) {
@@ -2208,21 +3371,21 @@ async function runAuditIntegrityGate(supabase, input) {
2208
3371
  checks.push({
2209
3372
  check: "FORTRESS_MATRIX",
2210
3373
  status: "FAIL",
2211
- message: `Fortress Violations in ${path3.split("/").pop()}`,
3374
+ message: `Fortress Violations in ${path4.split("/").pop()}`,
2212
3375
  details: securityResult.violations
2213
3376
  });
2214
3377
  } else {
2215
3378
  checks.push({
2216
3379
  check: "FORTRESS_MATRIX",
2217
3380
  status: "PASS",
2218
- message: `Fortress Secured: ${path3.split("/").pop()}`
3381
+ message: `Fortress Secured: ${path4.split("/").pop()}`
2219
3382
  });
2220
3383
  }
2221
3384
  } catch (e) {
2222
3385
  checks.push({
2223
3386
  check: "FORTRESS_MATRIX",
2224
3387
  status: "WARN",
2225
- message: `Failed to audit ${path3}: ${e.message}`
3388
+ message: `Failed to audit ${path4}: ${e.message}`
2226
3389
  });
2227
3390
  }
2228
3391
  }
@@ -2280,6 +3443,7 @@ async function runAuditIntegrityGate(supabase, input) {
2280
3443
  }
2281
3444
 
2282
3445
  // src/tools/complete-roadmap-task.ts
3446
+ init_esm_shims();
2283
3447
  registry.register({
2284
3448
  name: "complete_roadmap_task",
2285
3449
  description: `Finalizes a roadmap task. Generates a Release Manifest and triggers the Sovereign Harvesting protocol.`,
@@ -2341,7 +3505,7 @@ async function completeRoadmapTask(supabase, projectId, summary, taskId, gitDiff
2341
3505
  metadata
2342
3506
  });
2343
3507
  if (reportError) {
2344
- console.warn("Failed to save mission report:", reportError.message);
3508
+ console.error("Failed to save mission report:", reportError.message);
2345
3509
  }
2346
3510
  try {
2347
3511
  const apiKey = process.env.RIGSTATE_API_KEY;
@@ -2373,6 +3537,7 @@ async function completeRoadmapTask(supabase, projectId, summary, taskId, gitDiff
2373
3537
  }
2374
3538
 
2375
3539
  // src/tools/planning-tools.ts
3540
+ init_esm_shims();
2376
3541
  registry.register({
2377
3542
  name: "save_to_project_brain",
2378
3543
  description: `Maja's Tool: Persists knowledge, decisions, and lessons learned to the Project Brain.`,
@@ -2460,8 +3625,9 @@ async function addRoadmapChunk(supabase, userId, input) {
2460
3625
  }
2461
3626
 
2462
3627
  // src/tools/arch-tools.ts
2463
- import { promises as fs2 } from "fs";
2464
- import * as path2 from "path";
3628
+ init_esm_shims();
3629
+ import { promises as fs2, existsSync, statSync } from "fs";
3630
+ import * as path3 from "path";
2465
3631
  registry.register({
2466
3632
  name: "analyze_dependency_graph",
2467
3633
  description: `Einar's Tool: Architecture Integrity Scanner. Scans the codebase for circular dependencies and structural violations.`,
@@ -2472,7 +3638,7 @@ registry.register({
2472
3638
  }
2473
3639
  });
2474
3640
  async function analyzeDependencyGraph(input) {
2475
- const searchPath = path2.isAbsolute(input.path) ? input.path : path2.resolve(process.cwd(), input.path);
3641
+ const searchPath = path3.isAbsolute(input.path) ? input.path : path3.resolve(process.cwd(), input.path);
2476
3642
  try {
2477
3643
  await fs2.access(searchPath);
2478
3644
  } catch {
@@ -2480,22 +3646,38 @@ async function analyzeDependencyGraph(input) {
2480
3646
  error: `Directory not found: ${searchPath}. Ensure you are running the MCP server in the project root or provide an absolute path.`
2481
3647
  };
2482
3648
  }
3649
+ let externalDeps = {};
3650
+ const pkgPath = path3.join(process.cwd(), "package.json");
3651
+ if (existsSync(pkgPath)) {
3652
+ try {
3653
+ const pkgContent = await fs2.readFile(pkgPath, "utf-8");
3654
+ const pkg = JSON.parse(pkgContent);
3655
+ externalDeps = { ...pkg.dependencies, ...pkg.devDependencies };
3656
+ } catch (e) {
3657
+ console.error("Failed to parse package.json", e);
3658
+ }
3659
+ }
2483
3660
  const allFiles = await getAllFiles(searchPath);
2484
- const tsFiles = allFiles.filter((f) => /\.(ts|tsx|js|jsx)$/.test(f) && !f.includes("node_modules") && !f.includes(".next") && !f.includes("dist"));
3661
+ const tsFiles = allFiles.filter((f) => /\.(ts|tsx|js|jsx)$/.test(f) && !f.includes("node_modules") && !f.includes("dist") && !f.includes(".next"));
2485
3662
  const graph = {};
2486
- const fileSet = new Set(tsFiles);
2487
3663
  for (const file of tsFiles) {
2488
3664
  const content = await fs2.readFile(file, "utf-8");
2489
3665
  const imports = extractImports(content);
2490
3666
  const validDeps = [];
3667
+ const fileDir = path3.dirname(file);
2491
3668
  for (const imp of imports) {
2492
- const resolved = resolveImport(file, imp, searchPath);
2493
- if (resolved && fileSet.has(resolved)) {
2494
- validDeps.push(resolved);
3669
+ if (Object.keys(externalDeps).some((d) => imp === d || imp.startsWith(d + "/"))) {
3670
+ continue;
3671
+ }
3672
+ if (imp.startsWith(".") || imp.startsWith("@/")) {
3673
+ const resolved = resolveImportString(file, imp, searchPath);
3674
+ if (resolved && tsFiles.includes(resolved)) {
3675
+ validDeps.push(path3.relative(searchPath, resolved));
3676
+ }
2495
3677
  }
2496
3678
  }
2497
- const relFile = path2.relative(searchPath, file);
2498
- graph[relFile] = validDeps.map((d) => path2.relative(searchPath, d));
3679
+ const relFile = path3.relative(searchPath, file);
3680
+ graph[relFile] = validDeps;
2499
3681
  }
2500
3682
  const cycles = detectCycles(graph);
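Two behavioural changes land in the loop above: import specifiers that name a package.json dependency (exact match or subpath) are skipped entirely, and graph edges are stored as root-relative paths. Sketch with hypothetical dependencies, plus a tiny input/output example for detectCycles as called here:

const externalDeps = { react: "^18.0.0", "@supabase/supabase-js": "^2.0.0" };
const isExternal = (imp) =>
  Object.keys(externalDeps).some((d) => imp === d || imp.startsWith(d + "/"));
isExternal("react/jsx-runtime"); // true  -> ignored, never becomes an edge
isExternal("./utils");           // false -> resolved via resolveImportString
isExternal("@/lib/db");          // false -> resolved from the search root

// Two files importing each other produce one reported cycle:
detectCycles({ "lib/a.ts": ["lib/b.ts"], "lib/b.ts": ["lib/a.ts"] });
// -> [["lib/a.ts", "lib/b.ts", "lib/a.ts"]], so the tool reports status "VIOLATION".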
2501
3683
  return {
@@ -2503,17 +3685,18 @@ async function analyzeDependencyGraph(input) {
2503
3685
  analyzedPath: searchPath,
2504
3686
  metrics: {
2505
3687
  totalFiles: tsFiles.length,
2506
- circularDependencies: cycles.length
3688
+ circularDependencies: cycles.length,
3689
+ externalDependencies: Object.keys(externalDeps).length
2507
3690
  },
2508
3691
  cycles,
2509
3692
  status: cycles.length > 0 ? "VIOLATION" : "PASS",
2510
- summary: cycles.length > 0 ? `FAILED. Detected ${cycles.length} circular dependencies. These must be resolved to maintain architectural integrity.` : `PASSED. No circular dependencies detected in ${tsFiles.length} files.`
3693
+ summary: cycles.length > 0 ? `FAILED. Detected ${cycles.length} circular dependencies. Einar demands resolution!` : `PASSED. Architecture is sound. No circular dependencies in ${tsFiles.length} files.`
2511
3694
  };
2512
3695
  }
2513
3696
  async function getAllFiles(dir) {
2514
3697
  const entries = await fs2.readdir(dir, { withFileTypes: true });
2515
3698
  const files = await Promise.all(entries.map(async (entry) => {
2516
- const res = path2.resolve(dir, entry.name);
3699
+ const res = path3.resolve(dir, entry.name);
2517
3700
  return entry.isDirectory() ? getAllFiles(res) : res;
2518
3701
  }));
2519
3702
  return files.flat();
@@ -2527,47 +3710,47 @@ function extractImports(content) {
2527
3710
  }
2528
3711
  return imports;
2529
3712
  }
2530
- function resolveImport(importer, importPath, root) {
2531
- if (!importPath.startsWith(".") && !importPath.startsWith("@/")) {
2532
- return null;
2533
- }
2534
- let searchDir = path2.dirname(importer);
3713
+ function resolveImportString(importer, importPath, root) {
3714
+ let targetDir = path3.dirname(importer);
2535
3715
  let target = importPath;
2536
3716
  if (importPath.startsWith("@/")) {
2537
3717
  target = importPath.replace("@/", "");
2538
- searchDir = root;
3718
+ targetDir = root;
2539
3719
  }
2540
- const startPath = path2.resolve(searchDir, target);
2541
- const extensions = [".ts", ".tsx", ".js", ".jsx", "/index.ts", "/index.tsx", "/index.js", ""];
3720
+ const naivePath = path3.resolve(targetDir, target);
3721
+ const extensions = [".ts", ".tsx", ".js", ".jsx", "/index.ts", "/index.tsx"];
2542
3722
  for (const ext of extensions) {
2543
- const candidate = startPath + ext;
2544
- if (__require("fs").existsSync(candidate) && !__require("fs").statSync(candidate).isDirectory()) {
3723
+ const candidate = naivePath + ext;
3724
+ if (existsSync(candidate) && !statSync(candidate).isDirectory()) {
2545
3725
  return candidate;
2546
3726
  }
2547
3727
  }
3728
+ if (existsSync(naivePath) && !statSync(naivePath).isDirectory()) {
3729
+ return naivePath;
3730
+ }
2548
3731
  return null;
2549
3732
  }
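resolveImportString above handles only relative and "@/" specifiers, trying a fixed extension list before falling back to the bare path; unlike the removed resolveImport, "/index.js" is no longer among the candidates. Worked resolution order for "@/lib/db" against the search root (illustrative paths):

// naivePath = <root>/lib/db, then candidates in order:
//   <root>/lib/db.ts, <root>/lib/db.tsx, <root>/lib/db.js, <root>/lib/db.jsx,
//   <root>/lib/db/index.ts, <root>/lib/db/index.tsx,
//   and finally <root>/lib/db itself if it exists as a plain file.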
2550
3733
  function detectCycles(graph) {
2551
3734
  const visited = /* @__PURE__ */ new Set();
2552
3735
  const recursionStack = /* @__PURE__ */ new Set();
2553
3736
  const cycles = [];
2554
- function dfs(node, path3) {
3737
+ function dfs(node, path4) {
2555
3738
  visited.add(node);
2556
3739
  recursionStack.add(node);
2557
- path3.push(node);
3740
+ path4.push(node);
2558
3741
  const deps = graph[node] || [];
2559
3742
  for (const dep of deps) {
2560
3743
  if (!visited.has(dep)) {
2561
- dfs(dep, path3);
3744
+ dfs(dep, path4);
2562
3745
  } else if (recursionStack.has(dep)) {
2563
- const cycleStart = path3.indexOf(dep);
3746
+ const cycleStart = path4.indexOf(dep);
2564
3747
  if (cycleStart !== -1) {
2565
- cycles.push([...path3.slice(cycleStart), dep]);
3748
+ cycles.push([...path4.slice(cycleStart), dep]);
2566
3749
  }
2567
3750
  }
2568
3751
  }
2569
3752
  recursionStack.delete(node);
2570
- path3.pop();
3753
+ path4.pop();
2571
3754
  }
2572
3755
  for (const node of Object.keys(graph)) {
2573
3756
  if (!visited.has(node)) {
@@ -2623,9 +3806,17 @@ function createMcpServer() {
2623
3806
  }
2624
3807
 
2625
3808
  // src/server/core.ts
3809
+ init_esm_shims();
2626
3810
  import { CallToolRequestSchema, McpError, ErrorCode } from "@modelcontextprotocol/sdk/types.js";
2627
3811
 
3812
+ // src/server/telemetry.ts
3813
+ init_esm_shims();
3814
+
3815
+ // src/tools/generate-professional-pdf.ts
3816
+ init_esm_shims();
3817
+
2628
3818
  // src/agents/the-scribe.ts
3819
+ init_esm_shims();
2629
3820
  async function getScribePersona(supabase) {
2630
3821
  const { data: persona, error } = await supabase.from("agent_personas").select("*").eq("slug", "the-scribe").single();
2631
3822
  if (error || !persona) {