prism-mcp-server 7.2.0 → 7.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,131 @@
1
// ─── session_start_pipeline ─────────────────────────────────
/**
 * MCP tool definition for `session_start_pipeline`.
 * Declares the tool name, the markdown description shown to clients,
 * and the JSON schema used to validate incoming arguments.
 */
export const SESSION_START_PIPELINE_TOOL = {
    name: "session_start_pipeline",
    // The fragments are joined once at module load; the runtime value is a
    // single markdown string identical to the concatenated original.
    description: [
        "Start an autonomous Dark Factory pipeline. The pipeline runs in the background ",
        "and executes a PLAN → EXECUTE → VERIFY cycle up to `max_iterations` times.\n\n",
        "**Requires:** `PRISM_DARK_FACTORY_ENABLED=true` in the environment.\n\n",
        "**How it works:**\n",
        "1. Call this tool with an objective (what to accomplish)\n",
        "2. The pipeline is queued and executes autonomously in the background\n",
        "3. Use `session_check_pipeline_status` to poll for results\n\n",
        "**Safety:**\n",
        "- Pipelines are scoped to a `working_directory` — no filesystem escape\n",
        "- Strict iteration cap (default: 3) prevents infinite loops\n",
        "- Wall-clock timeout (default: 15min) prevents runaway execution\n",
        "- All operations are logged to the session ledger for audit",
    ].join(""),
    inputSchema: {
        type: "object",
        properties: {
            project: {
                type: "string",
                description: "Project identifier. Required for scoping and audit.",
            },
            objective: {
                type: "string",
                description: "What the pipeline should accomplish. Be specific — this becomes the LLM's system prompt objective.",
            },
            working_directory: {
                type: "string",
                description: "Absolute path to the working directory. The pipeline can only modify files within this directory. Defaults to the project's repo_path if configured.",
            },
            max_iterations: {
                type: "number",
                description: "Maximum PLAN→EXECUTE→VERIFY loop iterations (default: 3, max: 10). Each iteration is one complete cycle. Most tasks complete in 1-2 iterations.",
            },
            context_files: {
                type: "array",
                items: { type: "string" },
                description: "Optional list of specific files to focus on. Paths are relative to the working directory.",
            },
            model_override: {
                type: "string",
                description: "Optional model name to use instead of the default LLM. Useful for routing to a local model (e.g., 'qwen3') via Claw.",
            },
        },
        required: ["project", "objective"],
    },
};
52
// ─── session_check_pipeline_status ──────────────────────────
/**
 * MCP tool definition for `session_check_pipeline_status`.
 * Polling endpoint: reports the lifecycle state of a previously started
 * Dark Factory pipeline.
 */
export const SESSION_CHECK_PIPELINE_STATUS_TOOL = {
    name: "session_check_pipeline_status",
    // Joined at module load; runtime value matches the concatenated original.
    description: [
        "Check the status of a Dark Factory pipeline. Returns the current step, ",
        "iteration count, and any error messages.\n\n",
        "**Statuses:**\n",
        "- `PENDING` — Queued, waiting for runner pickup\n",
        "- `RUNNING` — Currently executing a step\n",
        "- `COMPLETED` — Successfully finished all steps\n",
        "- `FAILED` — Encountered an error or exceeded limits\n",
        "- `ABORTED` — Manually cancelled",
    ].join(""),
    inputSchema: {
        type: "object",
        properties: {
            pipeline_id: {
                type: "string",
                description: "The pipeline ID returned by `session_start_pipeline`.",
            },
            project: {
                type: "string",
                description: "Optional project filter. If omitted, searches across all projects.",
            },
        },
        required: ["pipeline_id"],
    },
};
78
// ─── session_abort_pipeline ─────────────────────────────────
/**
 * MCP tool definition for `session_abort_pipeline`.
 * Kill switch: flips a running pipeline to ABORTED so the background runner
 * stops it on its next tick.
 */
export const SESSION_ABORT_PIPELINE_TOOL = {
    name: "session_abort_pipeline",
    // Joined at module load; runtime value matches the concatenated original.
    description: [
        "Abort a running Dark Factory pipeline. The pipeline will be marked as ABORTED ",
        "and the background runner will stop processing it on the next tick.\n\n",
        "**Note:** This is a 'kill switch' — the runner detects the status change via ",
        "the storage status guard and gracefully stops execution.",
    ].join(""),
    inputSchema: {
        type: "object",
        properties: {
            pipeline_id: {
                type: "string",
                description: "The pipeline ID to abort.",
            },
        },
        required: ["pipeline_id"],
    },
};
96
/**
 * Runtime type guard for `session_start_pipeline` arguments.
 *
 * Requires non-blank `project` and `objective` strings; optionally validates
 * `working_directory` (string), `max_iterations` (integer 1-10),
 * `context_files` (string[]), and `model_override` (string).
 *
 * Fix: the old `typeof a.max_iterations !== "number"` check let `NaN` and
 * non-integer values through (NaN compares false to both bounds), which later
 * poisoned `Math.min(max_iterations ?? 3, 10)` in the handler. Use
 * `Number.isInteger` so only whole numbers in [1, 10] pass.
 *
 * @param {unknown} args - Raw tool arguments.
 * @returns {boolean} True when `args` is a valid start-pipeline payload.
 */
export function isStartPipelineArgs(args) {
    if (typeof args !== "object" || args === null)
        return false;
    const a = args;
    if (typeof a.project !== "string" || !a.project.trim())
        return false;
    if (typeof a.objective !== "string" || !a.objective.trim())
        return false;
    if (a.working_directory !== undefined && typeof a.working_directory !== "string")
        return false;
    // Number.isInteger rejects NaN, Infinity, and fractional iteration counts.
    if (a.max_iterations !== undefined && (!Number.isInteger(a.max_iterations) || a.max_iterations < 1 || a.max_iterations > 10))
        return false;
    if (a.context_files !== undefined && (!Array.isArray(a.context_files) || !a.context_files.every((f) => typeof f === "string")))
        return false;
    if (a.model_override !== undefined && typeof a.model_override !== "string")
        return false;
    return true;
}
114
/**
 * Runtime type guard for `session_check_pipeline_status` arguments.
 * Requires a non-blank `pipeline_id`; `project`, when present, must be a string.
 *
 * @param {unknown} args - Raw tool arguments.
 * @returns {boolean} True when `args` is a valid status-check payload.
 */
export function isCheckPipelineStatusArgs(args) {
    if (typeof args !== "object" || args === null)
        return false;
    const candidate = args;
    const hasValidId = typeof candidate.pipeline_id === "string" && candidate.pipeline_id.trim().length > 0;
    if (!hasValidId)
        return false;
    return candidate.project === undefined || typeof candidate.project === "string";
}
124
/**
 * Runtime type guard for `session_abort_pipeline` arguments.
 * The only requirement is a non-blank `pipeline_id` string.
 *
 * @param {unknown} args - Raw tool arguments.
 * @returns {boolean} True when `args` is a valid abort payload.
 */
export function isAbortPipelineArgs(args) {
    if (typeof args !== "object" || args === null)
        return false;
    const candidate = args;
    return typeof candidate.pipeline_id === "string" && candidate.pipeline_id.trim().length > 0;
}
@@ -0,0 +1,214 @@
1
+ /**
2
+ * Pipeline Handlers (v7.3 — Dark Factory)
3
+ *
4
+ * MCP tool handlers for managing autonomous pipeline lifecycle:
5
+ * - session_start_pipeline: Create and enqueue a new pipeline
6
+ * - session_check_pipeline_status: Poll pipeline progress
7
+ * - session_abort_pipeline: Kill a running pipeline
8
+ *
9
+ * These handlers follow the exact same CallToolResult pattern as
10
+ * all other tools in /tools/*.ts.
11
+ */
12
+ import { randomUUID } from 'crypto';
13
+ import { getStorage } from '../storage/index.js';
14
+ import { PRISM_USER_ID } from '../config.js';
15
+ import { getSettingSync } from '../storage/configStorage.js';
16
+ import { isStartPipelineArgs, isCheckPipelineStatusArgs, isAbortPipelineArgs, } from './pipelineDefinitions.js';
17
+ import { debugLog } from '../utils/logger.js';
18
// ─── Start Pipeline Handler ─────────────────────────────────
/**
 * MCP handler for `session_start_pipeline`.
 *
 * Validates arguments, resolves the working directory (explicit argument,
 * then project-scoped repo_path, then global repo_path), persists a PENDING
 * pipeline record, and returns a summary for the caller. The background
 * runner picks the record up asynchronously.
 *
 * @param {unknown} args - Raw tool arguments; must satisfy isStartPipelineArgs.
 * @returns {Promise<object>} CallToolResult-shaped response.
 */
export async function sessionStartPipelineHandler(args) {
    if (!isStartPipelineArgs(args)) {
        return {
            content: [{ type: "text", text: "❌ Invalid arguments. Required: project (string), objective (string). Optional: working_directory, max_iterations (1-10), context_files, model_override." }],
            isError: true,
        };
    }
    const { project, objective, working_directory, max_iterations, context_files, model_override } = args;
    // Working-directory resolution order: explicit argument, then the
    // dashboard's project-scoped "repo_path:<project>" setting, then the
    // global "repo_path" setting. Short-circuiting skips the setting
    // lookups entirely when an explicit directory was supplied.
    const workDir = working_directory
        || getSettingSync(`repo_path:${project}`, "")
        || getSettingSync("repo_path", "");
    if (!workDir) {
        return {
            content: [{ type: "text", text: "❌ No working_directory provided and no repo_path configured for this project. Either pass working_directory or configure repo_path in the dashboard." }],
            isError: true,
        };
    }
    const id = randomUUID();
    const timestamp = new Date().toISOString();
    // Spec is serialized into the pipeline record; the runner deserializes it.
    const spec = {
        objective,
        maxIterations: Math.min(max_iterations ?? 3, 10),
        workingDirectory: workDir,
        contextFiles: context_files,
        modelOverride: model_override,
    };
    const record = {
        id,
        project,
        user_id: PRISM_USER_ID,
        status: 'PENDING',
        current_step: 'INIT',
        iteration: 0,
        spec: JSON.stringify(spec),
        error: null,
        started_at: timestamp,
        updated_at: timestamp,
        last_heartbeat: timestamp,
    };
    try {
        const storage = await getStorage();
        await storage.savePipeline(record);
        debugLog(`[PipelineHandler] Pipeline ${id} created for project=${project} objective="${objective.slice(0, 80)}"`);
        const summary = [
            `✅ Dark Factory pipeline started.`,
            ``,
            `**Pipeline ID:** \`${id}\``,
            `**Project:** ${project}`,
            `**Objective:** ${objective.slice(0, 200)}`,
            `**Working Directory:** ${workDir}`,
            `**Max Iterations:** ${spec.maxIterations}`,
            `**Status:** PENDING (queued for runner pickup)`,
            ``,
            `The pipeline is now executing autonomously in the background.`,
            `Use \`session_check_pipeline_status\` with the pipeline ID to poll for results.`,
            `Use \`session_abort_pipeline\` to cancel the pipeline.`,
        ].join('\n');
        return { content: [{ type: "text", text: summary }] };
    }
    catch (err) {
        const reason = err instanceof Error ? err.message : String(err);
        debugLog(`[PipelineHandler] Failed to create pipeline: ${reason}`);
        return {
            content: [{ type: "text", text: `❌ Failed to create pipeline: ${reason}` }],
            isError: true,
        };
    }
}
95
// ─── Check Pipeline Status Handler ──────────────────────────
/**
 * MCP handler for `session_check_pipeline_status`.
 *
 * Loads the pipeline record, optionally enforces a project filter, and
 * renders a markdown status table. Spec parsing is best-effort: a corrupted
 * `spec` column degrades the display but never fails the call.
 *
 * @param {unknown} args - Raw tool arguments; must satisfy isCheckPipelineStatusArgs.
 * @returns {Promise<object>} CallToolResult-shaped response.
 */
export async function sessionCheckPipelineStatusHandler(args) {
    if (!isCheckPipelineStatusArgs(args)) {
        return {
            content: [{ type: "text", text: "❌ Invalid arguments. Required: pipeline_id (string). Optional: project." }],
            isError: true,
        };
    }
    const { pipeline_id, project } = args;
    try {
        const storage = await getStorage();
        const pipeline = await storage.getPipeline(pipeline_id, PRISM_USER_ID);
        if (!pipeline) {
            return {
                content: [{ type: "text", text: `❌ Pipeline \`${pipeline_id}\` not found.` }],
                isError: true,
            };
        }
        // Project filter — if specified, ensure pipeline belongs to the project.
        if (project && pipeline.project !== project) {
            return {
                content: [{ type: "text", text: `❌ Pipeline \`${pipeline_id}\` does not belong to project "${project}".` }],
                isError: true,
            };
        }
        // Parse spec for display. Any failure (bad JSON, missing objective)
        // falls back to the corrupted-spec placeholder; maxIter may already
        // be set if only the objective read threw.
        let objective = 'Unknown';
        let maxIter = '?';
        try {
            const spec = JSON.parse(pipeline.spec);
            objective = spec.objective.slice(0, 200);
            maxIter = String(spec.maxIterations);
        }
        catch {
            objective = '(spec corrupted)';
        }
        const terminalStates = ['COMPLETED', 'FAILED', 'ABORTED'];
        const isTerminal = terminalStates.includes(pipeline.status);
        // Map lookup replaces the ternary chain; unknown statuses get 📋.
        const statusEmoji = new Map([
            ['COMPLETED', '✅'],
            ['FAILED', '❌'],
            ['ABORTED', '🛑'],
            ['RUNNING', '⏳'],
            ['PENDING', '⏸'],
        ]);
        const emoji = statusEmoji.get(pipeline.status) ?? '📋';
        const lines = [
            `${emoji} **Pipeline Status: ${pipeline.status}**`,
            ``,
            `| Field | Value |`,
            `|-------|-------|`,
            `| **ID** | \`${pipeline.id}\` |`,
            `| **Project** | ${pipeline.project} |`,
            `| **Objective** | ${objective} |`,
            `| **Current Step** | ${pipeline.current_step} |`,
            `| **Iteration** | ${pipeline.iteration} / ${maxIter} |`,
            `| **Started** | ${pipeline.started_at} |`,
            `| **Last Updated** | ${pipeline.updated_at} |`,
            `| **Last Heartbeat** | ${pipeline.last_heartbeat || 'N/A'} |`,
        ];
        if (pipeline.error) {
            lines.push(`| **Error** | ${pipeline.error.slice(0, 500)} |`);
        }
        if (!isTerminal) {
            lines.push(``, `*Pipeline is still running. Poll again in 30-60 seconds.*`);
        }
        return {
            content: [{ type: "text", text: lines.join('\n') }],
        };
    }
    catch (err) {
        const reason = err instanceof Error ? err.message : String(err);
        return {
            content: [{ type: "text", text: `❌ Failed to check pipeline status: ${reason}` }],
            isError: true,
        };
    }
}
169
// ─── Abort Pipeline Handler ─────────────────────────────────
/**
 * MCP handler for `session_abort_pipeline`.
 *
 * Marks a pipeline ABORTED so the background runner's status guard stops
 * processing it on its next tick. Pipelines already in a terminal state
 * (COMPLETED/FAILED/ABORTED) are left untouched.
 *
 * @param {unknown} args - Raw tool arguments; must satisfy isAbortPipelineArgs.
 * @returns {Promise<object>} CallToolResult-shaped response.
 */
export async function sessionAbortPipelineHandler(args) {
    if (!isAbortPipelineArgs(args)) {
        return {
            content: [{ type: "text", text: "❌ Invalid arguments. Required: pipeline_id (string)." }],
            isError: true,
        };
    }
    const { pipeline_id } = args;
    try {
        const storage = await getStorage();
        const pipeline = await storage.getPipeline(pipeline_id, PRISM_USER_ID);
        if (!pipeline) {
            return {
                content: [{ type: "text", text: `❌ Pipeline \`${pipeline_id}\` not found.` }],
                isError: true,
            };
        }
        // Already terminal? Aborting again would be a no-op — report and return.
        if (['COMPLETED', 'FAILED', 'ABORTED'].includes(pipeline.status)) {
            return {
                content: [{ type: "text", text: `ℹ️ Pipeline \`${pipeline_id}\` is already in terminal state: **${pipeline.status}**. No action needed.` }],
            };
        }
        // Abort — the status guard + kill switch in runner.ts will handle the rest.
        // Fix: refresh updated_at so the audit trail records when the abort
        // happened (previously the stale pre-abort timestamp was re-persisted).
        await storage.savePipeline({
            ...pipeline,
            status: 'ABORTED',
            error: 'Manually aborted by user via session_abort_pipeline.',
            updated_at: new Date().toISOString(),
        });
        debugLog(`[PipelineHandler] Pipeline ${pipeline_id} aborted by user.`);
        return {
            content: [{
                type: "text",
                text: `🛑 Pipeline \`${pipeline_id}\` has been **ABORTED**.\n\nThe background runner will stop processing this pipeline on the next tick.`,
            }],
        };
    }
    catch (err) {
        const msg = err instanceof Error ? err.message : String(err);
        return {
            content: [{ type: "text", text: `❌ Failed to abort pipeline: ${msg}` }],
            isError: true,
        };
    }
}
@@ -899,7 +899,8 @@ export const SESSION_SAVE_EXPERIENCE_TOOL = {
899
899
  "- **correction**: Agent was corrected by user\n" +
900
900
  "- **success**: Task completed successfully\n" +
901
901
  "- **failure**: Task failed\n" +
902
- "- **learning**: New knowledge acquired",
902
+ "- **learning**: New knowledge acquired\n" +
903
+ "- **validation_result**: Verification sandbox passed or failed",
903
904
  inputSchema: {
904
905
  type: "object",
905
906
  properties: {
@@ -909,7 +910,7 @@ export const SESSION_SAVE_EXPERIENCE_TOOL = {
909
910
  },
910
911
  event_type: {
911
912
  type: "string",
912
- enum: ["correction", "success", "failure", "learning"],
913
+ enum: ["correction", "success", "failure", "learning", "validation_result"],
913
914
  description: "Type of behavioral event.",
914
915
  },
915
916
  context: {
@@ -952,7 +953,8 @@ export function isSessionSaveExperienceArgs(args) {
952
953
  (a.event_type !== "correction" &&
953
954
  a.event_type !== "success" &&
954
955
  a.event_type !== "failure" &&
955
- a.event_type !== "learning"))
956
+ a.event_type !== "learning" &&
957
+ a.event_type !== "validation_result"))
956
958
  return false;
957
959
  if (typeof a.context !== "string")
958
960
  return false;
@@ -0,0 +1,228 @@
1
+ /**
2
+ * Claw-as-Validator (v7.2.0)
3
+ *
4
+ * Adversarial validation layer that delegates assertion generation
5
+ * to the local Claw agent. When the host model produces a plan with
6
+ * test_assertions.json, the clawValidator can:
7
+ *
8
+ * 1. Review the assertions for completeness (are all layers covered?)
9
+ * 2. Generate adversarial counter-assertions to stress-test the plan
10
+ * 3. Validate the test suite against the codebase before execution
11
+ *
12
+ * This creates a host ↔ Claw dialectic: the host plans and writes code,
13
+ * Claw validates and challenges, creating a feedback loop that catches
14
+ * issues before the automated verification runner executes.
15
+ *
16
+ * Prerequisites:
17
+ * - claw-code-agent MCP server must be available
18
+ * - PRISM_VERIFICATION_HARNESS_ENABLED=true
19
+ */
20
+ import { TestSuiteSchema } from "./schema.js";
21
+ /**
22
+ * Build the prompt for Claw validation.
23
+ * The prompt instructs the Claw agent to analyze the test suite
24
+ * and report any missing coverage, logical errors, or improvements.
25
+ */
26
+ function buildValidationPrompt(request) {
27
+ const assertionSummary = request.suite.tests
28
+ .map(t => ` - [${t.layer}/${t.severity}] ${t.id}: ${t.description}`)
29
+ .join("\n");
30
+ const filesContext = request.files_changed.length > 0
31
+ ? `\nFiles changed:\n${request.files_changed.map(f => ` - ${f}`).join("\n")}`
32
+ : "";
33
+ return `You are a code review validator. Analyze the following test assertion suite for completeness, correctness, and coverage.
34
+
35
+ Project: ${request.project}
36
+ ${request.change_summary ? `Change: ${request.change_summary}` : ""}
37
+ ${filesContext}
38
+
39
+ Test Assertions:
40
+ ${assertionSummary}
41
+
42
+ Full JSON:
43
+ ${JSON.stringify(request.suite, null, 2)}
44
+
45
+ Tasks:
46
+ 1. Check if all layers (data, agent, pipeline) have appropriate coverage
47
+ 2. Identify any assertions that could produce false positives/negatives
48
+ 3. Suggest any missing assertions that should be added
49
+ 4. Flag any assertions with incorrect severity levels
50
+
51
+ Respond in JSON format:
52
+ {
53
+ "accepted": true/false,
54
+ "issues": [{"severity": "info|warning|error", "message": "...", "assertion_id": "optional"}],
55
+ "suggested_assertions": []
56
+ }`;
57
+ }
58
/**
 * Parse `text` as JSON and return it only if it is a plain (non-array,
 * non-null) object; anything else — including parse failures — yields null.
 *
 * @param {string} text - Candidate JSON text.
 * @returns {object|null} The parsed object, or null.
 */
function tryParseObject(text) {
    let parsed;
    try {
        parsed = JSON.parse(text);
    }
    catch {
        // Not JSON at all — caller will continue searching.
        return null;
    }
    const isPlainObject = parsed !== null && typeof parsed === "object" && !Array.isArray(parsed);
    return isPlainObject ? parsed : null;
}
70
/**
 * Extract the bodies of all ``` code fences from `output`.
 *
 * Multi-line fences have their info string (e.g. "json" after the opening
 * fence) stripped; single-line fences are taken whole and trimmed.
 *
 * Fix: a completely empty single-line fence previously pushed "" into the
 * result while the multi-line path skipped empty bodies — empty bodies are
 * now skipped consistently in both paths.
 *
 * @param {string} output - Raw model output possibly containing fences.
 * @returns {string[]} Trimmed, non-empty fence bodies in order of appearance.
 */
function extractCodeFenceBlocks(output) {
    const blocks = [];
    let cursor = 0;
    while (cursor < output.length) {
        const start = output.indexOf("```", cursor);
        if (start === -1)
            break;
        const end = output.indexOf("```", start + 3);
        if (end === -1)
            break;
        const block = output.slice(start + 3, end);
        const firstNewline = block.indexOf("\n");
        // No newline → single-line fence, use it whole; otherwise drop the
        // first line (the info string) and keep the body.
        const body = firstNewline === -1
            ? block.trim()
            : block.slice(firstNewline + 1).trim();
        if (body.length > 0)
            blocks.push(body);
        cursor = end + 3;
    }
    return blocks;
}
94
/**
 * Scan `output` for the first balanced `{...}` span that parses as a JSON
 * object carrying an `accepted` property, and return the parsed object.
 *
 * The scanner is quote- and escape-aware: braces inside JSON string
 * literals do not affect the depth counter. Returns null when no matching
 * object exists anywhere in the (capped) input.
 */
function extractBalancedAcceptedObject(output) {
    // Cap the scan window so the nested loops stay bounded on huge outputs.
    const source = output.length > 200_000 ? output.slice(0, 200_000) : output;
    for (let i = 0; i < source.length; i++) {
        // Each "{" starts a new candidate object.
        if (source[i] !== "{")
            continue;
        let depth = 0;
        let inString = false;
        let escaped = false;
        for (let j = i; j < source.length; j++) {
            const ch = source[j];
            if (inString) {
                // Inside a string literal: only escapes and the closing
                // quote matter; braces here are literal text.
                if (escaped)
                    escaped = false;
                else if (ch === "\\")
                    escaped = true;
                else if (ch === '"')
                    inString = false;
                continue;
            }
            if (ch === '"') {
                inString = true;
                continue;
            }
            if (ch === "{") {
                depth++;
                continue;
            }
            if (ch === "}") {
                depth--;
                if (depth === 0) {
                    // Balanced span complete — accept only if it parses and
                    // has the `accepted` marker key (own property check
                    // guards against prototype hits).
                    const candidate = source.slice(i, j + 1);
                    const parsed = tryParseObject(candidate);
                    if (parsed && Object.prototype.hasOwnProperty.call(parsed, "accepted")) {
                        return parsed;
                    }
                    // Wrong object — abandon this candidate and resume the
                    // outer scan at the next "{".
                    break;
                }
                if (depth < 0)
                    break;
            }
        }
    }
    return null;
}
138
/**
 * Locate Claw's JSON verdict inside raw model output.
 *
 * Tries, in order: the whole (trimmed) output as JSON, each code-fence
 * body, and finally a balanced-brace scan of the text. A candidate only
 * counts if it is a plain object with an `accepted` property.
 *
 * @param {string} output - Raw Claw output.
 * @returns {object|null} The parsed verdict object, or null.
 */
function parseClawJsonOutput(output) {
    const hasAcceptedKey = (obj) => obj !== null && Object.prototype.hasOwnProperty.call(obj, "accepted");
    const wholeOutput = tryParseObject(output.trim());
    if (hasAcceptedKey(wholeOutput))
        return wholeOutput;
    for (const fenceBody of extractCodeFenceBlocks(output)) {
        const candidate = tryParseObject(fenceBody);
        if (hasAcceptedKey(candidate))
            return candidate;
    }
    return extractBalancedAcceptedObject(output);
}
151
/**
 * Coerce a raw Claw `issues` payload into a well-typed issue list.
 *
 * Non-arrays yield []; non-object entries are dropped; unknown severities
 * fall back to "info"; non-string messages get a fixed placeholder; and
 * `assertion_id` is kept only when it is a string.
 *
 * @param {unknown} rawIssues - The `issues` field from a parsed Claw reply.
 * @returns {Array<{severity: string, message: string, assertion_id: string|undefined}>}
 */
function normalizeIssues(rawIssues) {
    if (!Array.isArray(rawIssues))
        return [];
    const knownSeverities = new Set(["error", "warning", "info"]);
    const normalized = [];
    for (const raw of rawIssues) {
        if (!raw || typeof raw !== "object")
            continue;
        normalized.push({
            severity: knownSeverities.has(raw.severity) ? raw.severity : "info",
            message: typeof raw.message === "string"
                ? raw.message
                : "Claw issue payload was not a valid object message.",
            assertion_id: typeof raw.assertion_id === "string" ? raw.assertion_id : undefined,
        });
    }
    return normalized;
}
166
/**
 * Validate a test suite using the Claw agent.
 *
 * Sends the assertion suite to Claw via the injected `clawRunTask` callable
 * and parses the (ideally JSON) response into a normalized verdict.
 *
 * This is a non-blocking call — if the Claw agent is unavailable,
 * validation is skipped with a warning (fail-open for v7.2.0).
 *
 * @param {object} request - { project, suite, files_changed, change_summary? }
 * @param {(prompt: string, cwd: string) => Promise<{output: string}>} clawRunTask
 * @returns {Promise<{accepted: boolean, issues: object[], suggested_assertions: object[], raw_output: string}>}
 */
export async function validateWithClaw(request, clawRunTask) {
    const prompt = buildValidationPrompt(request);
    try {
        const result = await clawRunTask(prompt, process.cwd());
        const parsed = parseClawJsonOutput(result.output);
        if (parsed) {
            return {
                // Missing/non-boolean `accepted` defaults to true (fail-open).
                accepted: typeof parsed.accepted === "boolean" ? parsed.accepted : true,
                issues: normalizeIssues(parsed.issues),
                suggested_assertions: Array.isArray(parsed.suggested_assertions) ? parsed.suggested_assertions : [],
                raw_output: result.output,
            };
        }
        // Claw replied, but without a parseable JSON verdict — note it and accept.
        return {
            accepted: true,
            issues: [{ severity: "info", message: "Claw response was unstructured; treating as accepted." }],
            suggested_assertions: [],
            raw_output: result.output,
        };
    }
    catch (e) {
        // Fail-open: Claw unavailable should not block verification.
        // Fix: non-Error throws (strings, plain objects) previously rendered
        // as "undefined" via e.message — normalize like the other handlers.
        const reason = e instanceof Error ? e.message : String(e);
        console.error(`[ClawValidator] Claw agent unavailable: ${reason}`);
        return {
            accepted: true,
            issues: [{ severity: "warning", message: `Claw agent unavailable: ${reason}` }],
            suggested_assertions: [],
            raw_output: "",
        };
    }
}
203
/**
 * Merge Claw-suggested assertions into an existing test suite.
 * Re-validates the merged suite through the Zod schema.
 *
 * @param {object} suite - The existing, already-validated test suite.
 * @param {Array<object>|null|undefined} suggestions - Raw suggestion payloads from Claw.
 * @returns {object|null} The merged and schema-validated suite; the original
 *   suite when there is nothing to merge; or null when validation fails.
 */
export function mergeSuggestedAssertions(suite, suggestions) {
    // Nothing to merge — hand back the original suite untouched.
    if (!suggestions || suggestions.length === 0)
        return suite;
    try {
        const merged = {
            tests: [
                ...suite.tests,
                ...suggestions.map((s) => ({
                    ...s,
                    // Backfill identity/severity so suggestions missing them
                    // can still satisfy the schema.
                    id: s.id || `claw-suggestion-${Date.now()}-${Math.random().toString(36).slice(2, 6)}`,
                    severity: s.severity || "warn",
                })),
            ],
        };
        // Validate through schema — rejects malformed suggestions.
        return TestSuiteSchema.parse(merged);
    }
    catch (e) {
        // Fix: normalize non-Error throws so the log never prints "undefined"
        // (the spreads above can throw TypeErrors on odd payloads).
        const reason = e instanceof Error ? e.message : String(e);
        console.error(`[ClawValidator] Failed to merge suggestions: ${reason}`);
        return null;
    }
}