@vibekiln/cutline-mcp-cli 0.5.0 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -135,6 +135,19 @@ cutline-mcp serve output # Export and rendering
135
135
  cutline-mcp serve integrations # External integrations
136
136
  ```
137
137
 
138
+ HTTP bridge mode (for registries/hosts that require an HTTPS MCP URL):
139
+
140
+ ```bash
141
+ cutline-mcp serve constraints --http --host 0.0.0.0 --port 8080 --path /mcp
142
+ # Health: GET /health
143
+ # MCP endpoint: POST /mcp
144
+ ```
145
+
146
+ Bridge notes:
147
+ - Default mode remains stdio (no behavior change for Cursor/Claude Desktop local configs).
148
+ - The bridge forwards JSON-RPC requests to the bundled stdio server process.
149
+ - Batch JSON-RPC payloads are not supported by the bridge.
150
+
138
151
  ### `upgrade`
139
152
 
140
153
  Open the upgrade page and refresh your session.
@@ -206,6 +219,10 @@ Config: [`server.json`](./server.json)
206
219
 
207
220
  Config: [`smithery.yaml`](./smithery.yaml) with [`Dockerfile`](./Dockerfile)
208
221
 
222
+ If the publish UI requires an MCP Server URL, deploy the bridge and provide your public HTTPS endpoint, for example:
223
+
224
+ `https://mcp.thecutline.ai/mcp`
225
+
209
226
  ### Claude Desktop Extension
210
227
 
211
228
  ```bash
@@ -1 +1,8 @@
1
- export declare function serveCommand(serverName: string): void;
1
+ interface ServeOptions {
2
+ http?: boolean;
3
+ host?: string;
4
+ port?: string;
5
+ path?: string;
6
+ }
7
+ export declare function serveCommand(serverName: string, options?: ServeOptions): void;
8
+ export {};
@@ -1,4 +1,5 @@
1
- import { execFileSync } from 'node:child_process';
1
+ import { execFileSync, spawn } from 'node:child_process';
2
+ import { createServer } from 'node:http';
2
3
  import { resolve, dirname } from 'node:path';
3
4
  import { fileURLToPath } from 'node:url';
4
5
  import { existsSync } from 'node:fs';
@@ -11,7 +12,174 @@ const SERVER_MAP = {
11
12
  output: 'output-server.js',
12
13
  integrations: 'integrations-server.js',
13
14
  };
14
- export function serveCommand(serverName) {
15
+ function readJsonBody(req) {
16
+ return new Promise((resolveBody, rejectBody) => {
17
+ const chunks = [];
18
+ let total = 0;
19
+ const MAX_BODY_BYTES = 1024 * 1024; // 1MB safety cap
20
+ req.on('data', (chunk) => {
21
+ total += chunk.length;
22
+ if (total > MAX_BODY_BYTES) {
23
+ rejectBody(new Error('Request body too large'));
24
+ req.destroy();
25
+ return;
26
+ }
27
+ chunks.push(chunk);
28
+ });
29
+ req.on('end', () => {
30
+ try {
31
+ const text = Buffer.concat(chunks).toString('utf8');
32
+ resolveBody(text ? JSON.parse(text) : {});
33
+ }
34
+ catch {
35
+ rejectBody(new Error('Invalid JSON body'));
36
+ }
37
+ });
38
+ req.on('error', rejectBody);
39
+ });
40
+ }
41
+ function writeJson(res, statusCode, body) {
42
+ const payload = JSON.stringify(body);
43
+ res.writeHead(statusCode, {
44
+ 'Content-Type': 'application/json',
45
+ 'Content-Length': Buffer.byteLength(payload),
46
+ });
47
+ res.end(payload);
48
+ }
49
+ function normalizeId(id) {
50
+ if (typeof id === 'string' || typeof id === 'number')
51
+ return String(id);
52
+ return '';
53
+ }
54
+ function serveHttpBridge(serverName, serverPath, opts) {
55
+ const host = opts.host || process.env.CUTLINE_MCP_HTTP_HOST || '0.0.0.0';
56
+ const port = Number(opts.port || process.env.CUTLINE_MCP_HTTP_PORT || '8080');
57
+ const mcpPath = opts.path || process.env.CUTLINE_MCP_HTTP_PATH || '/mcp';
58
+ const requestTimeoutMs = Number(process.env.CUTLINE_MCP_HTTP_TIMEOUT_MS || '30000');
59
+ if (!Number.isFinite(port) || port <= 0) {
60
+ console.error(`Invalid --port value: ${opts.port}`);
61
+ process.exit(1);
62
+ }
63
+ const child = spawn(process.execPath, [serverPath], {
64
+ stdio: ['pipe', 'pipe', 'inherit'],
65
+ env: process.env,
66
+ });
67
+ const pending = new Map();
68
+ let stdoutBuffer = Buffer.alloc(0);
69
+ const failAllPending = (reason) => {
70
+ for (const [key, entry] of pending.entries()) {
71
+ clearTimeout(entry.timer);
72
+ entry.reject(reason);
73
+ pending.delete(key);
74
+ }
75
+ };
76
+ const sendToStdioServer = (message) => {
77
+ if (!child.stdin || child.killed) {
78
+ throw new Error('MCP stdio server is not available');
79
+ }
80
+ const body = JSON.stringify(message);
81
+ const packet = `Content-Length: ${Buffer.byteLength(body, 'utf8')}\r\n\r\n${body}`;
82
+ child.stdin.write(packet, 'utf8');
83
+ };
84
+ child.stdout?.on('data', (chunk) => {
85
+ stdoutBuffer = Buffer.concat([stdoutBuffer, chunk]);
86
+ while (true) {
87
+ const separatorIndex = stdoutBuffer.indexOf('\r\n\r\n');
88
+ if (separatorIndex < 0)
89
+ break;
90
+ const headers = stdoutBuffer.slice(0, separatorIndex).toString('utf8');
91
+ const lengthMatch = headers.match(/content-length:\s*(\d+)/i);
92
+ if (!lengthMatch) {
93
+ stdoutBuffer = stdoutBuffer.slice(separatorIndex + 4);
94
+ continue;
95
+ }
96
+ const contentLength = Number(lengthMatch[1]);
97
+ const packetLength = separatorIndex + 4 + contentLength;
98
+ if (stdoutBuffer.length < packetLength)
99
+ break;
100
+ const jsonBytes = stdoutBuffer.slice(separatorIndex + 4, packetLength);
101
+ stdoutBuffer = stdoutBuffer.slice(packetLength);
102
+ try {
103
+ const message = JSON.parse(jsonBytes.toString('utf8'));
104
+ const id = normalizeId(message?.id);
105
+ if (!id)
106
+ continue;
107
+ const entry = pending.get(id);
108
+ if (!entry)
109
+ continue;
110
+ clearTimeout(entry.timer);
111
+ pending.delete(id);
112
+ entry.resolve(message);
113
+ }
114
+ catch {
115
+ // Ignore malformed child output and keep processing subsequent frames.
116
+ }
117
+ }
118
+ });
119
+ child.on('exit', (code, signal) => {
120
+ failAllPending(new Error(`MCP stdio server exited unexpectedly (${signal ? `signal ${signal}` : `code ${code ?? 'unknown'}`})`));
121
+ });
122
+ const httpServer = createServer(async (req, res) => {
123
+ const { method } = req;
124
+ const reqPath = (req.url || '/').split('?')[0];
125
+ if (method === 'GET' && reqPath === '/health') {
126
+ writeJson(res, 200, {
127
+ ok: true,
128
+ server: serverName,
129
+ transport: 'http-bridge-stdio',
130
+ mcp_path: mcpPath,
131
+ });
132
+ return;
133
+ }
134
+ if (method !== 'POST' || reqPath !== mcpPath) {
135
+ writeJson(res, 404, { error: `Not found. Use POST ${mcpPath}` });
136
+ return;
137
+ }
138
+ try {
139
+ const body = await readJsonBody(req);
140
+ if (Array.isArray(body)) {
141
+ writeJson(res, 400, { error: 'Batch JSON-RPC is not supported by this bridge' });
142
+ return;
143
+ }
144
+ const message = body;
145
+ const id = normalizeId(message?.id);
146
+ // Notifications do not have an id and do not expect a response.
147
+ if (!id) {
148
+ sendToStdioServer(message);
149
+ writeJson(res, 202, { ok: true });
150
+ return;
151
+ }
152
+ const response = await new Promise((resolveResponse, rejectResponse) => {
153
+ const timer = setTimeout(() => {
154
+ pending.delete(id);
155
+ rejectResponse(new Error(`Timed out waiting for response id=${id}`));
156
+ }, requestTimeoutMs);
157
+ pending.set(id, {
158
+ resolve: resolveResponse,
159
+ reject: rejectResponse,
160
+ timer,
161
+ });
162
+ try {
163
+ sendToStdioServer(message);
164
+ }
165
+ catch (error) {
166
+ clearTimeout(timer);
167
+ pending.delete(id);
168
+ rejectResponse(error);
169
+ }
170
+ });
171
+ writeJson(res, 200, response);
172
+ }
173
+ catch (error) {
174
+ writeJson(res, 500, { error: error?.message || 'Bridge request failed' });
175
+ }
176
+ });
177
+ httpServer.listen(port, host, () => {
178
+ console.error(`Cutline MCP HTTP bridge listening on http://${host}:${port}${mcpPath}`);
179
+ console.error(`Health check: http://${host}:${port}/health`);
180
+ });
181
+ }
182
+ export function serveCommand(serverName, options = {}) {
15
183
  const fileName = SERVER_MAP[serverName];
16
184
  if (!fileName) {
17
185
  const valid = Object.keys(SERVER_MAP).join(', ');
@@ -24,8 +192,11 @@ export function serveCommand(serverName) {
24
192
  console.error('The package may not have been built correctly.');
25
193
  process.exit(1);
26
194
  }
27
- // Replace this process with the MCP server.
28
- // MCP servers use stdio transport, so we need to keep stdin/stdout connected.
195
+ if (options.http) {
196
+ serveHttpBridge(serverName, serverPath, options);
197
+ return;
198
+ }
199
+ // Replace this process with the MCP server (default stdio mode).
29
200
  try {
30
201
  execFileSync(process.execPath, [serverPath], {
31
202
  stdio: 'inherit',
package/dist/index.js CHANGED
@@ -47,7 +47,16 @@ program
47
47
  program
48
48
  .command('serve <server>')
49
49
  .description('Start an MCP server (constraints, premortem, exploration, tools, output, integrations)')
50
- .action(serveCommand);
50
+ .option('--http', 'Expose the selected stdio server over an HTTP bridge')
51
+ .option('--host <host>', 'HTTP bind host for bridge mode (default: 0.0.0.0)')
52
+ .option('--port <port>', 'HTTP port for bridge mode (default: 8080)')
53
+ .option('--path <path>', 'HTTP MCP path for bridge mode (default: /mcp)')
54
+ .action((server, opts) => serveCommand(server, {
55
+ http: opts.http,
56
+ host: opts.host,
57
+ port: opts.port,
58
+ path: opts.path,
59
+ }));
51
60
  program
52
61
  .command('setup')
53
62
  .description('One-command onboarding: authenticate, write IDE MCP config, generate rules')
@@ -7395,6 +7395,18 @@ function mcpAudit(entry) {
7395
7395
  }));
7396
7396
  }
7397
7397
  var DEFAULT_MODEL = process.env.MODEL_ID || "gemini-2.5-pro";
7398
+ var GOVERNANCE_ENFORCEMENT = (process.env.CUTLINE_GOVERNANCE_ENFORCEMENT || "advisory").toLowerCase() === "enforced";
7399
+ function buildGovernanceEnvelope(input) {
7400
+ return {
7401
+ decision: input.decision,
7402
+ decisionReasons: input.reasons ?? [],
7403
+ requiredActions: input.requiredActions ?? [],
7404
+ executionPermission: input.executionPermission ?? "conditional",
7405
+ ...input.closePermission ? { closePermission: input.closePermission } : {},
7406
+ ...input.requiredBranches?.length ? { requiredBranches: input.requiredBranches } : {},
7407
+ ...input.mode ? { mode: input.mode } : {}
7408
+ };
7409
+ }
7398
7410
  var generateStructuredContent = (options) => withLlmMonitor(options.modelId || DEFAULT_MODEL, () => cfGenerateStructuredContent(options));
7399
7411
  var explorationSessions = /* @__PURE__ */ new Map();
7400
7412
  function generateId(prefix = "id") {
@@ -8003,6 +8015,11 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
8003
8015
  max_constraints: { type: "number", description: "Max constraints to return (default: 5)" },
8004
8016
  use_semantic: { type: "boolean", description: "Use semantic search if embeddings available (default: true)" },
8005
8017
  phase: { type: "string", enum: ["test_spec", "functional", "security", "performance", "economics", "full", "auto"], description: "RGR phase filter. 'auto' uses complexity heuristic. Default: 'full'" },
8018
+ requestedOutcome: {
8019
+ type: "string",
8020
+ enum: ["approve", "revise", "execute", "close"],
8021
+ description: "Governance intent for this transition (default: approve)"
8022
+ },
8006
8023
  auto_scope_expand: { type: "boolean", description: "If true, auto-seed a new graph entity when scope expansion intent is confidently detected and entity fields are provided." },
8007
8024
  scope_entity_name: { type: "string", description: "Optional explicit entity name to seed during scope expansion (for example 'Vibe Check Extension')." },
8008
8025
  scope_entity_type: { type: "string", enum: ["feature", "component", "data_type"], description: "Entity type for auto scope expansion." },
@@ -8223,7 +8240,12 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
8223
8240
  properties: {
8224
8241
  product_id: { type: "string", description: "Product ID" },
8225
8242
  file_path: { type: "string", description: "File path to assess (e.g., 'src/api/upload.ts')" },
8226
- depth: { type: "number", description: "BFS traversal depth for subgraph extraction (default: 2)" }
8243
+ depth: { type: "number", description: "BFS traversal depth for subgraph extraction (default: 2)" },
8244
+ requestedOutcome: {
8245
+ type: "string",
8246
+ enum: ["approve", "revise", "execute", "close"],
8247
+ description: "Governance intent for this transition (default: approve)"
8248
+ }
8227
8249
  },
8228
8250
  required: ["product_id", "file_path"]
8229
8251
  }
@@ -8240,6 +8262,39 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
8240
8262
  required: ["product_id", "phase"]
8241
8263
  }
8242
8264
  },
8265
+ {
8266
+ name: "request_execution_gate",
8267
+ description: "\u{1F512} PREMIUM - Governance checkpoint before downstream actions (code generation, file edits, migrations, deploy). Uses RGR completion, audit signals, and constraint acknowledgments to return approved/conditional/blocked.",
8268
+ inputSchema: {
8269
+ type: "object",
8270
+ properties: {
8271
+ product_id: { type: "string", description: "Product ID" },
8272
+ target_action: { type: "string", enum: ["generate_code", "modify_files", "run_migration", "deploy"], description: "Planned downstream action" },
8273
+ target_artifacts: { type: "array", items: { type: "string" }, description: "Files/modules the action will touch" },
8274
+ based_on_step_ids: { type: "array", items: { type: "string" }, description: "Optional planning step IDs this execution is based on" },
8275
+ open_critical_findings: { type: "number", description: "Count of unresolved critical/high findings from latest audit context" },
8276
+ touched_sensitive_paths: { type: "array", items: { type: "string" }, description: "Sensitive files touched in this action (auth, billing, security, AI, integrations, user flows)" },
8277
+ constraints_acknowledged: { type: "boolean", description: "Whether relevant constraints have been reviewed and acknowledged for touched sensitive paths" }
8278
+ },
8279
+ required: ["product_id", "target_action", "target_artifacts"]
8280
+ }
8281
+ },
8282
+ {
8283
+ name: "submit_done_claim",
8284
+ description: "\u{1F512} PREMIUM - Governance close gate. Evaluates completion claims against RGR state, unresolved critical findings, and provided evidence references.",
8285
+ inputSchema: {
8286
+ type: "object",
8287
+ properties: {
8288
+ product_id: { type: "string", description: "Product ID" },
8289
+ completion_summary: { type: "string", description: "What was completed" },
8290
+ claimed_criteria_satisfied: { type: "array", items: { type: "string" }, description: "Success criteria the agent claims are satisfied" },
8291
+ known_limitations: { type: "array", items: { type: "string" }, description: "Known limitations or intentional exclusions" },
8292
+ evidence_refs: { type: "array", items: { type: "string" }, description: "Evidence links/refs (tests, reports, docs, PRs)" },
8293
+ open_critical_findings: { type: "number", description: "Count of unresolved critical/high findings from latest audit context" }
8294
+ },
8295
+ required: ["product_id", "completion_summary", "claimed_criteria_satisfied"]
8296
+ }
8297
+ },
8243
8298
  {
8244
8299
  name: "export_readiness_badge",
8245
8300
  description: "\u{1F512} PREMIUM - Export a public readiness badge and verification page for this product. Computes an architecture readiness grade from the constraint graph, stores a public snapshot, and returns embeddable badge snippets (Markdown for GitHub README, HTML for website footer) plus a full CUTLINE-REPORT.md. The verification page at thecutline.ai/verify/{id} provides SEO backlinks.",
@@ -9182,7 +9237,7 @@ Meta: ${JSON.stringify(output.meta)}` }
9182
9237
  return void 0;
9183
9238
  };
9184
9239
  var detectFramework = detectFramework2;
9185
- const { product_id, file_paths, code_snippet, task_description, mode = "auto", max_constraints = 5, use_semantic = true, phase: autoPhase = "full", auto_scope_expand = false, scope_entity_name, scope_entity_type, scope_entity_description, scope_entity_tags, scope_parent_id, scope_similarity_threshold } = args;
9240
+ const { product_id, file_paths, code_snippet, task_description, mode = "auto", max_constraints = 5, use_semantic = true, phase: autoPhase = "full", requestedOutcome = "approve", auto_scope_expand = false, scope_entity_name, scope_entity_type, scope_entity_description, scope_entity_tags, scope_parent_id, scope_similarity_threshold } = args;
9186
9241
  if (!product_id) {
9187
9242
  throw new McpError(ErrorCode.InvalidParams, "product_id is required");
9188
9243
  }
@@ -9192,6 +9247,19 @@ Meta: ${JSON.stringify(output.meta)}` }
9192
9247
  task_description
9193
9248
  };
9194
9249
  if (!hasEnoughSignal(fileContext)) {
9250
+ const governance2 = buildGovernanceEnvelope({
9251
+ decision: "revise",
9252
+ reasons: [{
9253
+ constraintId: "context.signal.missing",
9254
+ severity: "warning",
9255
+ message: "Insufficient context to match constraints deterministically."
9256
+ }],
9257
+ requiredActions: [
9258
+ "Provide file_paths, code_snippet, or task_description.",
9259
+ "Re-run constraints_auto with richer context."
9260
+ ],
9261
+ executionPermission: "denied"
9262
+ });
9195
9263
  return {
9196
9264
  content: [{
9197
9265
  type: "text",
@@ -9202,7 +9270,8 @@ Meta: ${JSON.stringify(output.meta)}` }
9202
9270
  meta: {
9203
9271
  mode: "silent",
9204
9272
  signal_strength: 0
9205
- }
9273
+ },
9274
+ governance: governance2
9206
9275
  })
9207
9276
  }]
9208
9277
  };
@@ -9238,6 +9307,16 @@ Meta: ${JSON.stringify(output.meta)}` }
9238
9307
  }
9239
9308
  const actualMode = mode === "auto" ? analysis.suggested_mode : mode;
9240
9309
  if (actualMode === "silent") {
9310
+ const governance2 = buildGovernanceEnvelope({
9311
+ decision: "approved",
9312
+ reasons: [{
9313
+ constraintId: "constraints.mode.silent",
9314
+ severity: "info",
9315
+ message: "Silent mode selected; constraints were analyzed but not injected into output."
9316
+ }],
9317
+ requiredActions: requestedOutcome === "execute" ? ["If execution is next, run request_execution_gate before downstream write/deploy actions."] : [],
9318
+ executionPermission: requestedOutcome === "execute" ? "conditional" : "denied"
9319
+ });
9241
9320
  return {
9242
9321
  content: [{
9243
9322
  type: "text",
@@ -9252,7 +9331,8 @@ Meta: ${JSON.stringify(output.meta)}` }
9252
9331
  keywords_detected: analysis.signal.keywords?.slice(0, 5),
9253
9332
  scope_expansion: scopeExpansion,
9254
9333
  scope_seeded: scopeSeedResult || void 0
9255
- }
9334
+ },
9335
+ governance: governance2
9256
9336
  })
9257
9337
  }]
9258
9338
  };
@@ -9264,6 +9344,19 @@ Meta: ${JSON.stringify(output.meta)}` }
9264
9344
  lightNodes = await getAllNodesLight(product_id);
9265
9345
  }
9266
9346
  if (lightNodes.length === 0) {
9347
+ const governance2 = buildGovernanceEnvelope({
9348
+ decision: "blocked",
9349
+ reasons: [{
9350
+ constraintId: "constraints.graph.empty",
9351
+ severity: "high",
9352
+ message: "No ingested constraints are available for this product."
9353
+ }],
9354
+ requiredActions: [
9355
+ "Run constraints_ingest_* or graph_ingest_requirements first.",
9356
+ "Re-run constraints_auto after ingestion."
9357
+ ],
9358
+ executionPermission: "denied"
9359
+ });
9267
9360
  return {
9268
9361
  content: [{
9269
9362
  type: "text",
@@ -9275,7 +9368,8 @@ Meta: ${JSON.stringify(output.meta)}` }
9275
9368
  mode: actualMode,
9276
9369
  detected_domains: analysis.detected_domains,
9277
9370
  signal_strength: Math.round(analysis.confidence * 100) / 100
9278
- }
9371
+ },
9372
+ governance: governance2
9279
9373
  })
9280
9374
  }]
9281
9375
  };
@@ -9413,6 +9507,36 @@ Meta: ${JSON.stringify(output.meta)}` }
9413
9507
  ${recommendation}`;
9414
9508
  }
9415
9509
  }
9510
+ const criticalCount = topConstraints.filter((c) => c.severity === "critical").length;
9511
+ const warningCount = topConstraints.filter((c) => c.severity === "warning").length;
9512
+ const branchRequired = detectedConflicts.length > 0;
9513
+ const shouldBlock = actualMode === "blocking" && criticalCount > 0;
9514
+ const governanceDecision = shouldBlock ? "blocked" : branchRequired ? "branch" : criticalCount > 0 || warningCount > 0 ? "revise" : "approved";
9515
+ const governance = buildGovernanceEnvelope({
9516
+ decision: governanceDecision,
9517
+ reasons: [
9518
+ ...criticalCount > 0 ? [{
9519
+ constraintId: "constraints.severity.critical",
9520
+ severity: "critical",
9521
+ message: `${criticalCount} critical constraint(s) detected for the proposed transition.`
9522
+ }] : [],
9523
+ ...branchRequired ? [{
9524
+ constraintId: "constraints.conflict.detected",
9525
+ severity: "high",
9526
+ message: `${detectedConflicts.length} constraint conflict(s) detected; compare alternatives before execution.`
9527
+ }] : []
9528
+ ],
9529
+ requiredActions: [
9530
+ ...criticalCount > 0 ? ["Address critical constraints before implementation handoff."] : [],
9531
+ ...branchRequired ? ["Compare at least two implementation alternatives and resolve conflicts explicitly."] : [],
9532
+ ...requestedOutcome === "execute" ? ["Run request_execution_gate before invoking code-write or deploy actions."] : []
9533
+ ],
9534
+ executionPermission: shouldBlock ? "denied" : requestedOutcome === "execute" ? "conditional" : "denied",
9535
+ requiredBranches: branchRequired ? [
9536
+ { branchId: "A", instruction: "Propose the lowest-risk compliant implementation path." },
9537
+ { branchId: "B", instruction: "Propose an alternative path and document trade-offs." }
9538
+ ] : void 0
9539
+ });
9416
9540
  return {
9417
9541
  content: [{
9418
9542
  type: "text",
@@ -9433,9 +9557,12 @@ ${recommendation}`;
9433
9557
  used_category_prefilter: usePreFilter,
9434
9558
  phase: autoPhase,
9435
9559
  rgr_plan: autoRgrPlan || void 0,
9560
+ requested_outcome: requestedOutcome,
9436
9561
  scope_expansion: scopeExpansion,
9437
- scope_seeded: scopeSeedResult || void 0
9438
- }
9562
+ scope_seeded: scopeSeedResult || void 0
9563
+ },
9565
+ governance
9439
9566
  })
9440
9567
  }]
9441
9568
  };
@@ -10253,7 +10380,7 @@ ${JSON.stringify(metrics, null, 2)}` }
10253
10380
  // RGR_PLAN
10254
10381
  // ─────────────────────────────────────────────────────────────────
10255
10382
  case "rgr_plan": {
10256
- const { product_id, file_path, depth: rgrDepth = 2 } = args;
10383
+ const { product_id, file_path, depth: rgrDepth = 2, requestedOutcome = "approve" } = args;
10257
10384
  if (!product_id || !file_path) {
10258
10385
  throw new McpError(ErrorCode.InvalidParams, "product_id and file_path are required");
10259
10386
  }
@@ -10264,10 +10391,26 @@ ${JSON.stringify(metrics, null, 2)}` }
10264
10391
  getAllBindings(product_id)
10265
10392
  ]);
10266
10393
  if (rgrEntities.length === 0) {
10394
+ const governance2 = buildGovernanceEnvelope({
10395
+ decision: "blocked",
10396
+ reasons: [{
10397
+ constraintId: "graph.entities.missing",
10398
+ severity: "high",
10399
+ message: `No graph entities found for product "${product_id}".`
10400
+ }],
10401
+ requiredActions: [
10402
+ "Run graph_ingest_requirements or constraints_ingest_* to seed the graph.",
10403
+ "Re-run rgr_plan after ingestion."
10404
+ ],
10405
+ executionPermission: "denied"
10406
+ });
10267
10407
  return {
10268
10408
  content: [{
10269
10409
  type: "text",
10270
- text: `No graph entities found for product "${product_id}". Run graph_ingest_requirements first.`
10410
+ text: JSON.stringify({
10411
+ message: `No graph entities found for product "${product_id}". Run graph_ingest_requirements first.`,
10412
+ governance: governance2
10413
+ }, null, 2)
10271
10414
  }]
10272
10415
  };
10273
10416
  }
@@ -10286,6 +10429,19 @@ ${JSON.stringify(metrics, null, 2)}` }
10286
10429
  }
10287
10430
  }
10288
10431
  if (rgrMatched.length === 0) {
10432
+ const governance2 = buildGovernanceEnvelope({
10433
+ decision: "revise",
10434
+ reasons: [{
10435
+ constraintId: "graph.binding.missing",
10436
+ severity: "warning",
10437
+ message: `No graph entity is bound to "${file_path}".`
10438
+ }],
10439
+ requiredActions: [
10440
+ "Bind codebase entities using graph_bind_codebase/graph_bind_confirm.",
10441
+ "Proceed with full delivery only if no bound entity is expected."
10442
+ ],
10443
+ executionPermission: requestedOutcome === "execute" ? "conditional" : "denied"
10444
+ });
10289
10445
  return {
10290
10446
  content: [{
10291
10447
  type: "text",
@@ -10293,7 +10449,8 @@ ${JSON.stringify(metrics, null, 2)}` }
10293
10449
  strategy: "full",
10294
10450
  phases: [{ name: "full", label: "Full Delivery", description: "No entity bound to this file. Deliver all constraints.", categories: [], constraint_count: 0 }],
10295
10451
  total_constraints: 0,
10296
- rationale: `No entity bound to "${file_path}". Cannot assess constraint complexity.`
10452
+ rationale: `No entity bound to "${file_path}". Cannot assess constraint complexity.`,
10453
+ governance: governance2
10297
10454
  }, null, 2)
10298
10455
  }]
10299
10456
  };
@@ -10302,13 +10459,36 @@ ${JSON.stringify(metrics, null, 2)}` }
10302
10459
  const fileConstraints = rgrSubgraph?.constraints ?? [];
10303
10460
  const plan = planRgrPhases(fileConstraints);
10304
10461
  const complexity = assessComplexity(fileConstraints);
10462
+ const branchNeeded = plan.strategy === "phased" && (complexity.critical_count > 0 || requestedOutcome === "execute");
10463
+ const governance = buildGovernanceEnvelope({
10464
+ decision: branchNeeded ? "branch" : "approved",
10465
+ reasons: branchNeeded ? [{
10466
+ constraintId: "rgr.phased.required",
10467
+ severity: "high",
10468
+ message: `${complexity.critical_count} critical constraint(s) across ${complexity.category_count} categories require phased comparison.`
10469
+ }] : [{
10470
+ constraintId: "rgr.plan.ready",
10471
+ severity: "info",
10472
+ message: "Plan assessed and ready for implementation sequencing."
10473
+ }],
10474
+ requiredActions: [
10475
+ ...plan.strategy === "phased" ? ["Follow phased RGR execution order from this plan."] : ["Deliver full constraint set in a single pass."],
10476
+ ...requestedOutcome === "execute" ? ["Request execution permission via request_execution_gate before downstream write/deploy actions."] : []
10477
+ ],
10478
+ executionPermission: requestedOutcome === "execute" ? "conditional" : "denied",
10479
+ requiredBranches: branchNeeded ? [
10480
+ { branchId: "A", instruction: "Prioritize functional progress (test_spec/functional) then apply NFR phases." },
10481
+ { branchId: "B", instruction: "Prioritize security/performance constraints before broad implementation." }
10482
+ ] : void 0
10483
+ });
10305
10484
  return {
10306
10485
  content: [{
10307
10486
  type: "text",
10308
10487
  text: JSON.stringify({
10309
10488
  ...plan,
10310
10489
  entity: rgrMatched[0].name,
10311
- complexity
10490
+ complexity,
10491
+ governance
10312
10492
  }, null, 2)
10313
10493
  }]
10314
10494
  };
@@ -10379,6 +10559,157 @@ ${JSON.stringify(metrics, null, 2)}` }
10379
10559
  };
10380
10560
  }
10381
10561
  // ─────────────────────────────────────────────────────────────────
10562
+ // REQUEST_EXECUTION_GATE
10563
+ // ─────────────────────────────────────────────────────────────────
10564
+ case "request_execution_gate": {
10565
+ const { product_id, target_action, target_artifacts, based_on_step_ids = [], open_critical_findings = 0, touched_sensitive_paths = [], constraints_acknowledged = false } = args;
10566
+ if (!product_id || !target_action || !Array.isArray(target_artifacts) || target_artifacts.length === 0) {
10567
+ throw new McpError(ErrorCode.InvalidParams, "product_id, target_action, and non-empty target_artifacts are required");
10568
+ }
10569
+ const meta = await getGraphMetadata(product_id);
10570
+ const completedPhases = meta?.rgr_completed_phases ?? [];
10571
+ const metrics = meta?.metrics;
10572
+ const unmetCheckpoints = [];
10573
+ const blockingFindings = [];
10574
+ const requiredNextSteps = [];
10575
+ if (!completedPhases.includes("security")) {
10576
+ unmetCheckpoints.push("security_commitment");
10577
+ blockingFindings.push({
10578
+ constraintId: "rgr.phase.security",
10579
+ severity: "high",
10580
+ message: "Security phase is not marked complete."
10581
+ });
10582
+ requiredNextSteps.push('Complete security remediation and call rgr_complete_phase(product_id, phase: "security").');
10583
+ }
10584
+ if (target_action === "deploy" && !completedPhases.includes("performance")) {
10585
+ unmetCheckpoints.push("implementation_readiness");
10586
+ blockingFindings.push({
10587
+ constraintId: "rgr.phase.performance",
10588
+ severity: "warning",
10589
+ message: "Deploy requested before performance phase completion."
10590
+ });
10591
+ requiredNextSteps.push('Complete performance checks and call rgr_complete_phase(product_id, phase: "performance").');
10592
+ }
10593
+ if (open_critical_findings > 0) {
10594
+ unmetCheckpoints.push("verification");
10595
+ blockingFindings.push({
10596
+ constraintId: "audit.findings.open",
10597
+ severity: "critical",
10598
+ message: `${open_critical_findings} unresolved critical/high findings are still open.`
10599
+ });
10600
+ requiredNextSteps.push("Resolve or explicitly accept outstanding critical/high findings before execution.");
10601
+ }
10602
+ if (touched_sensitive_paths.length > 0 && !constraints_acknowledged) {
10603
+ unmetCheckpoints.push("implementation_readiness");
10604
+ blockingFindings.push({
10605
+ constraintId: "constraints.ack.required",
10606
+ severity: "high",
10607
+ message: "Sensitive paths were provided without explicit constraint acknowledgment."
10608
+ });
10609
+ requiredNextSteps.push("Run constraints_auto for touched sensitive paths and acknowledge applicable constraints.");
10610
+ }
10611
+ const hasBlocks = blockingFindings.some((f) => f.severity === "critical" || f.severity === "high");
10612
+ const denied = hasBlocks && GOVERNANCE_ENFORCEMENT;
10613
+ const decision = denied ? "blocked" : blockingFindings.length > 0 ? "conditional" : "approved";
10614
+ const governance = buildGovernanceEnvelope({
10615
+ decision: denied ? "blocked" : blockingFindings.length > 0 ? "revise" : "approved",
10616
+ reasons: blockingFindings,
10617
+ requiredActions: requiredNextSteps,
10618
+ executionPermission: denied ? "denied" : blockingFindings.length > 0 ? "conditional" : "granted",
10619
+ mode: GOVERNANCE_ENFORCEMENT ? "enforced" : "advisory"
10620
+ });
10621
+ return {
10622
+ content: [{
10623
+ type: "text",
10624
+ text: JSON.stringify({
10625
+ decision,
10626
+ unmetCheckpoints: [...new Set(unmetCheckpoints)],
10627
+ blockingFindings,
10628
+ requiredNextSteps,
10629
+ based_on_step_ids,
10630
+ product_id,
10631
+ target_action,
10632
+ target_artifacts,
10633
+ governanceMode: GOVERNANCE_ENFORCEMENT ? "enforced" : "advisory",
10634
+ readiness: {
10635
+ rgr_completed_phases: completedPhases,
10636
+ security_readiness_pct: metrics?.security_readiness_pct ?? null,
10637
+ engineering_readiness_pct: metrics?.engineering_readiness_pct ?? null
10638
+ },
10639
+ governance
10640
+ }, null, 2)
10641
+ }]
10642
+ };
10643
+ }
10644
+ // ─────────────────────────────────────────────────────────────────
10645
+ // SUBMIT_DONE_CLAIM
10646
+ // ─────────────────────────────────────────────────────────────────
10647
+ case "submit_done_claim": {
10648
+ const { product_id, completion_summary, claimed_criteria_satisfied, known_limitations = [], evidence_refs = [], open_critical_findings = 0 } = args;
10649
+ if (!product_id || !completion_summary || !Array.isArray(claimed_criteria_satisfied)) {
10650
+ throw new McpError(ErrorCode.InvalidParams, "product_id, completion_summary, and claimed_criteria_satisfied are required");
10651
+ }
10652
+ const meta = await getGraphMetadata(product_id);
10653
+ const completedPhases = meta?.rgr_completed_phases ?? [];
10654
+ const unsatisfiedCriteria = [];
10655
+ const blockingFindings = [];
10656
+ const requiredNextSteps = [];
10657
+ if (!completedPhases.includes("security")) {
10658
+ unsatisfiedCriteria.push("Security phase completion not recorded.");
10659
+ blockingFindings.push({
10660
+ constraintId: "done.rgr.security_missing",
10661
+ severity: "high",
10662
+ message: "Cannot close work item before security phase completion."
10663
+ });
10664
+ requiredNextSteps.push('Complete and record security phase via rgr_complete_phase(product_id, phase: "security").');
10665
+ }
10666
+ if (evidence_refs.length === 0) {
10667
+ unsatisfiedCriteria.push("No evidence references provided for completion claim.");
10668
+ blockingFindings.push({
10669
+ constraintId: "done.evidence.required",
10670
+ severity: "warning",
10671
+ message: "Done claims require evidence refs (tests, reports, or docs)."
10672
+ });
10673
+ requiredNextSteps.push("Provide evidence_refs for tests, audit reruns, or implementation verification.");
10674
+ }
10675
+ if (open_critical_findings > 0) {
10676
+ unsatisfiedCriteria.push(`${open_critical_findings} unresolved critical/high findings remain.`);
10677
+ blockingFindings.push({
10678
+ constraintId: "done.critical_findings.open",
10679
+ severity: "critical",
10680
+ message: "Outstanding critical/high findings prevent closure."
10681
+ });
10682
+ requiredNextSteps.push("Resolve or explicitly disposition outstanding critical/high findings before closure.");
10683
+ }
10684
+ const hasHardBlock = blockingFindings.some((f) => f.severity === "critical" || f.severity === "high");
10685
+ const decision = hasHardBlock ? "blocked" : unsatisfiedCriteria.length > 0 ? "incomplete" : "complete";
10686
+ const governance = buildGovernanceEnvelope({
10687
+ decision: hasHardBlock ? "blocked" : unsatisfiedCriteria.length > 0 ? "incomplete" : "approved",
10688
+ reasons: blockingFindings,
10689
+ requiredActions: requiredNextSteps,
10690
+ executionPermission: "denied",
10691
+ closePermission: decision === "complete" ? "granted" : "denied",
10692
+ mode: GOVERNANCE_ENFORCEMENT ? "enforced" : "advisory"
10693
+ });
10694
+ return {
10695
+ content: [{
10696
+ type: "text",
10697
+ text: JSON.stringify({
10698
+ decision,
10699
+ closePermission: decision === "complete" ? "granted" : "denied",
10700
+ completionSummary: completion_summary,
10701
+ claimedCriteriaSatisfied: claimed_criteria_satisfied,
10702
+ knownLimitations: known_limitations,
10703
+ evidenceRefs: evidence_refs,
10704
+ unsatisfiedCriteria,
10705
+ blockingFindings,
10706
+ requiredNextSteps,
10707
+ governance
10708
+ }, null, 2)
10709
+ }]
10710
+ };
10711
+ }
10712
+ // ─────────────────────────────────────────────────────────────────
10382
10713
  // GENERATE_CUTLINE_MD
10383
10714
  // ─────────────────────────────────────────────────────────────────
10384
10715
  case "generate_cutline_md": {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@vibekiln/cutline-mcp-cli",
3
- "version": "0.5.0",
3
+ "version": "0.7.0",
4
4
  "description": "CLI and MCP servers for Cutline — authenticate, then run constraint-aware MCP servers in Cursor or any MCP client.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",