@harness-engineering/cli 1.9.0 → 1.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/skills/claude-code/enforce-architecture/SKILL.md +4 -0
- package/dist/agents/skills/claude-code/harness-autopilot/SKILL.md +7 -2
- package/dist/agents/skills/claude-code/harness-brainstorming/SKILL.md +10 -1
- package/dist/agents/skills/claude-code/harness-execution/SKILL.md +2 -2
- package/dist/agents/skills/claude-code/harness-parallel-agents/SKILL.md +105 -20
- package/dist/agents/skills/claude-code/harness-pre-commit-review/SKILL.md +37 -0
- package/dist/agents/skills/gemini-cli/enforce-architecture/SKILL.md +4 -0
- package/dist/agents/skills/gemini-cli/harness-autopilot/SKILL.md +7 -2
- package/dist/agents/skills/gemini-cli/harness-brainstorming/SKILL.md +10 -1
- package/dist/agents/skills/gemini-cli/harness-execution/SKILL.md +2 -2
- package/dist/agents/skills/gemini-cli/harness-parallel-agents/SKILL.md +105 -20
- package/dist/agents/skills/gemini-cli/harness-pre-commit-review/SKILL.md +37 -0
- package/dist/agents-md-ZFV6RR5J.js +8 -0
- package/dist/architecture-EXNUMH5R.js +13 -0
- package/dist/bin/harness-mcp.d.ts +1 -0
- package/dist/bin/harness-mcp.js +28 -0
- package/dist/bin/harness.js +42 -8
- package/dist/check-phase-gate-VZFOY2PO.js +12 -0
- package/dist/chunk-2NCIKJES.js +470 -0
- package/dist/chunk-2YPZKGAG.js +62 -0
- package/dist/{chunk-CGSHUJES.js → chunk-2YSQOUHO.js} +4484 -2688
- package/dist/chunk-3WGJMBKH.js +45 -0
- package/dist/{chunk-ULSRSP53.js → chunk-6N4R6FVX.js} +11 -112
- package/dist/{chunk-6JIT7CEM.js → chunk-72GHBOL2.js} +1 -1
- package/dist/chunk-BM3PWGXQ.js +14 -0
- package/dist/chunk-C2ERUR3L.js +255 -0
- package/dist/chunk-EBJQ6N4M.js +39 -0
- package/dist/chunk-GNGELAXY.js +293 -0
- package/dist/chunk-GSIVNYVJ.js +187 -0
- package/dist/chunk-HD4IBGLA.js +80 -0
- package/dist/chunk-I6JZYEGT.js +4361 -0
- package/dist/chunk-IDZNPTYD.js +16 -0
- package/dist/chunk-JSTQ3AWB.js +31 -0
- package/dist/chunk-K6XAPGML.js +27 -0
- package/dist/chunk-KET4QQZB.js +8 -0
- package/dist/chunk-L2KLU56K.js +125 -0
- package/dist/chunk-MHBMTPW7.js +29 -0
- package/dist/chunk-NC6PXVWT.js +116 -0
- package/dist/chunk-NKDM3FMH.js +52 -0
- package/dist/chunk-PA2XHK75.js +248 -0
- package/dist/chunk-Q6AB7W5Z.js +135 -0
- package/dist/chunk-QPEH2QPG.js +347 -0
- package/dist/chunk-TEFCFC4H.js +15 -0
- package/dist/chunk-TI4TGEX6.js +85 -0
- package/dist/chunk-TRAPF4IX.js +185 -0
- package/dist/chunk-VRFZWGMS.js +68 -0
- package/dist/chunk-VUCPTQ6G.js +67 -0
- package/dist/chunk-W6Y7ZW3Y.js +13 -0
- package/dist/chunk-WJZDO6OY.js +103 -0
- package/dist/chunk-WUJTCNOU.js +122 -0
- package/dist/chunk-X3MN5UQJ.js +89 -0
- package/dist/chunk-Z75JC6I2.js +189 -0
- package/dist/chunk-ZOAWBDWU.js +72 -0
- package/dist/{chunk-RTPHUDZS.js → chunk-ZWC3MN5E.js} +1944 -2779
- package/dist/ci-workflow-K5RCRNYR.js +8 -0
- package/dist/constants-5JGUXPEK.js +6 -0
- package/dist/create-skill-WPXHSLX2.js +11 -0
- package/dist/dist-D4RYGUZE.js +14 -0
- package/dist/{dist-C5PYIQPF.js → dist-JVZ2MKBC.js} +108 -6
- package/dist/dist-L7LAAQAS.js +18 -0
- package/dist/{dist-I7DB5VKB.js → dist-M6BQODWC.js} +1145 -0
- package/dist/docs-PWCUVYWU.js +12 -0
- package/dist/engine-6XUP6GAK.js +8 -0
- package/dist/entropy-4I6JEYAC.js +12 -0
- package/dist/feedback-TNIW534S.js +18 -0
- package/dist/generate-agent-definitions-MWKEA5NU.js +15 -0
- package/dist/glob-helper-5OHBUQAI.js +52 -0
- package/dist/graph-loader-KO4GJ5N2.js +8 -0
- package/dist/index.d.ts +328 -12
- package/dist/index.js +93 -34
- package/dist/loader-4FIPIFII.js +10 -0
- package/dist/mcp-MOKLYNZL.js +34 -0
- package/dist/performance-BTOJCPXU.js +24 -0
- package/dist/review-pipeline-3YTW3463.js +9 -0
- package/dist/runner-VMYLHWOC.js +6 -0
- package/dist/runtime-GO7K2PJE.js +9 -0
- package/dist/security-4P2GGFF6.js +9 -0
- package/dist/skill-executor-RG45LUO5.js +8 -0
- package/dist/templates/orchestrator/WORKFLOW.md +48 -0
- package/dist/templates/orchestrator/template.json +6 -0
- package/dist/validate-JN44D2Q7.js +12 -0
- package/dist/validate-cross-check-DB7RIFFF.js +8 -0
- package/dist/version-KFFPOQAX.js +6 -0
- package/package.json +13 -7
- package/dist/create-skill-UZOHMXRU.js +0 -8
- package/dist/validate-cross-check-VG573VZO.js +0 -7
|
@@ -0,0 +1,185 @@
|
|
|
1
|
+
// src/persona/trigger-detector.ts
|
|
2
|
+
import * as fs from "fs";
|
|
3
|
+
import * as path from "path";
|
|
4
|
+
/**
 * Inspects `.harness/handoff.json` under the project root to decide how the
 * persona run was triggered.
 *
 * Returns `{ trigger: "on_plan_approved", handoff }` when the handoff file
 * was written by "harness-planning" and carries a non-empty `pending` array;
 * anything else — missing file, unreadable file, malformed JSON, or a
 * non-matching payload — degrades to `{ trigger: "manual" }`.
 */
function detectTrigger(projectPath) {
  const handoffFile = path.join(projectPath, ".harness", "handoff.json");
  if (!fs.existsSync(handoffFile)) {
    return { trigger: "manual" };
  }
  let parsed;
  try {
    parsed = JSON.parse(fs.readFileSync(handoffFile, "utf-8"));
  } catch {
    // Read or parse failures are treated as "no usable handoff".
    return { trigger: "manual" };
  }
  // Optional chaining keeps a `null` JSON payload on the manual path instead
  // of throwing (the original reached the same result via its try/catch).
  const planApproved =
    parsed?.fromSkill === "harness-planning" &&
    Array.isArray(parsed?.pending) &&
    parsed.pending.length > 0;
  if (!planApproved) {
    return { trigger: "manual" };
  }
  return {
    trigger: "on_plan_approved",
    handoff: {
      fromSkill: parsed.fromSkill,
      summary: parsed.summary ?? "",
      pending: parsed.pending,
      planPath: parsed.planPath
    }
  };
}
|
|
28
|
+
|
|
29
|
+
// src/persona/runner.ts
// Sentinel error message used to distinguish a runner-imposed timeout from a
// genuine command failure when a step is raced against its time budget.
var TIMEOUT_ERROR_MESSAGE = "__PERSONA_RUNNER_TIMEOUT__";
|
|
31
|
+
/** Display name for a persona step: its command string or its skill id. */
function stepName(step) {
  if ("command" in step) {
    return step.command;
  }
  return step.skill;
}
|
|
34
|
+
/** Discriminates a persona step as a "command" step or a "skill" step. */
function stepType(step) {
  if ("command" in step) {
    return "command";
  }
  return "skill";
}
|
|
37
|
+
/**
 * A step is active when its `when` clause (defaulting to "always") is either
 * "always" or exactly the resolved trigger name.
 */
function matchesTrigger(step, trigger) {
  const condition = step.when ?? "always";
  if (condition === "always") {
    return true;
  }
  return condition === trigger;
}
|
|
41
|
+
/**
 * Appends a "skipped" entry (zero duration) to the report for every step
 * from `fromIndex` to the end of `activeSteps`. Used when a timeout or
 * failure aborts the remainder of a persona run.
 */
function skipRemaining(activeSteps, fromIndex, report) {
  for (const remaining of activeSteps.slice(fromIndex)) {
    report.steps.push({
      name: stepName(remaining),
      type: stepType(remaining),
      status: "skipped",
      durationMs: 0
    });
  }
}
|
|
52
|
+
/**
 * Executes a persona's steps sequentially under a total wall-clock budget
 * (`persona.config.timeout`, milliseconds) and builds a run report.
 *
 * Trigger resolution: when `context.trigger` is "auto", the trigger (and an
 * optional handoff payload) is detected from `.harness/handoff.json` via
 * detectTrigger; otherwise the caller-supplied trigger is used as-is. Only
 * steps whose `when` clause matches the resolved trigger run.
 *
 * Each step is raced against the remaining budget:
 *   - command steps call `context.commandExecutor(command)` and expect an
 *     `{ ok, value | error }` result; a runner timeout is signalled by the
 *     TIMEOUT_ERROR_MESSAGE sentinel string on the error.
 *   - skill steps call `context.skillExecutor(skill, skillContext)`; a
 *     runner timeout is signalled by sentinel-object identity.
 *
 * Outcomes: a timed-out step is recorded "skipped" and the run becomes
 * "partial"; a failed step is recorded "fail" and the run becomes "fail";
 * in both cases remaining steps are marked skipped and the loop stops.
 *
 * NOTE(review): the losing setTimeout in each Promise.race is never cleared,
 * so a pending timer of up to `remainingTime` ms can keep the Node event
 * loop alive after a step settles — confirm whether this is acceptable.
 *
 * @param persona  Persona definition: `name`, `config.timeout`, `steps`.
 * @param context  `{ trigger, projectPath, handoff?, commandExecutor, skillExecutor }`.
 * @returns `{ persona, status, steps, totalDurationMs }`.
 */
async function runPersona(persona, context) {
  const startTime = Date.now();
  const timeout = persona.config.timeout;
  const report = {
    // Slugified persona name, e.g. "Code Reviewer" -> "code-reviewer".
    persona: persona.name.toLowerCase().replace(/\s+/g, "-"),
    status: "pass",
    steps: [],
    totalDurationMs: 0
  };
  let resolvedTrigger;
  let handoff = context.handoff;
  if (context.trigger === "auto") {
    // Auto mode: derive trigger + handoff from the on-disk handoff file.
    const detection = detectTrigger(context.projectPath);
    resolvedTrigger = detection.trigger;
    handoff = detection.handoff ?? handoff;
  } else {
    resolvedTrigger = context.trigger;
  }
  const activeSteps = persona.steps.filter((s) => matchesTrigger(s, resolvedTrigger));
  for (let i = 0; i < activeSteps.length; i++) {
    const step = activeSteps[i];
    // Budget exhausted before this step even starts: skip the rest.
    if (Date.now() - startTime >= timeout) {
      skipRemaining(activeSteps, i, report);
      report.status = "partial";
      break;
    }
    const stepStart = Date.now();
    const remainingTime = timeout - (Date.now() - startTime);
    if ("command" in step) {
      // Race the command against the remaining budget; on timeout, resolve
      // with a synthetic failure carrying the sentinel message.
      const result = await Promise.race([
        context.commandExecutor(step.command),
        new Promise(
          (resolve) => setTimeout(
            () => resolve({
              ok: false,
              error: new Error(TIMEOUT_ERROR_MESSAGE)
            }),
            remainingTime
          )
        )
      ]);
      const durationMs = Date.now() - stepStart;
      if (result.ok) {
        report.steps.push({
          name: step.command,
          type: "command",
          status: "pass",
          result: result.value,
          durationMs
        });
      } else if (result.error.message === TIMEOUT_ERROR_MESSAGE) {
        // Runner-imposed timeout: recorded as skipped, run becomes partial.
        report.steps.push({
          name: step.command,
          type: "command",
          status: "skipped",
          error: "timed out",
          durationMs
        });
        report.status = "partial";
        skipRemaining(activeSteps, i + 1, report);
        break;
      } else {
        // Genuine command failure: run becomes fail and stops.
        report.steps.push({
          name: step.command,
          type: "command",
          status: "fail",
          error: result.error.message,
          durationMs
        });
        report.status = "fail";
        skipRemaining(activeSteps, i + 1, report);
        break;
      }
    } else {
      const skillContext = {
        trigger: resolvedTrigger,
        projectPath: context.projectPath,
        outputMode: step.output ?? "auto",
        // Only attach the handoff key when a handoff actually exists.
        ...handoff ? { handoff } : {}
      };
      // Fresh sentinel object per step; timeout is detected by identity
      // (`===`) below, so a real executor result can never collide with it.
      const SKILL_TIMEOUT_RESULT = {
        status: "fail",
        output: "timed out",
        durationMs: 0
      };
      const result = await Promise.race([
        context.skillExecutor(step.skill, skillContext),
        new Promise(
          (resolve) => setTimeout(() => resolve(SKILL_TIMEOUT_RESULT), remainingTime)
        )
      ]);
      const durationMs = Date.now() - stepStart;
      if (result === SKILL_TIMEOUT_RESULT) {
        report.steps.push({
          name: step.skill,
          type: "skill",
          status: "skipped",
          error: "timed out",
          durationMs
        });
        report.status = "partial";
        skipRemaining(activeSteps, i + 1, report);
        break;
      } else if (result.status === "pass") {
        report.steps.push({
          name: step.skill,
          type: "skill",
          status: "pass",
          result: result.output,
          // artifactPath is optional on skill results; omit the key when absent.
          ...result.artifactPath ? { artifactPath: result.artifactPath } : {},
          durationMs
        });
      } else {
        report.steps.push({
          name: step.skill,
          type: "skill",
          status: "fail",
          error: result.output,
          durationMs
        });
        report.status = "fail";
        skipRemaining(activeSteps, i + 1, report);
        break;
      }
    }
  }
  report.totalDurationMs = Date.now() - startTime;
  return report;
}

export {
  detectTrigger,
  runPersona
};
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
// src/skill/schema.ts
|
|
2
|
+
import { z } from "zod";
|
|
3
|
+
// Zod schema for a single named skill phase; phases are required by default.
var SkillPhaseSchema = z.object({
  name: z.string(),
  description: z.string(),
  required: z.boolean().default(true)
});
// CLI surface of a skill: the command plus its (optionally required) named args.
var SkillCliSchema = z.object({
  command: z.string(),
  args: z.array(
    z.object({
      name: z.string(),
      description: z.string(),
      required: z.boolean().default(false)
    })
  ).default([])
});
// MCP binding of a skill: tool name and a string-valued input record.
var SkillMcpSchema = z.object({
  tool: z.string(),
  input: z.record(z.string())
});
// Skill state declaration: whether state persists across runs and which files hold it.
var SkillStateSchema = z.object({
  persistent: z.boolean().default(false),
  files: z.array(z.string()).default([])
});
// Event names a skill may declare as triggers (validated as an enum below).
var ALLOWED_TRIGGERS = [
  "manual",
  "on_pr",
  "on_commit",
  "on_new_feature",
  "on_bug_fix",
  "on_refactor",
  "on_project_init",
  "on_review",
  "on_milestone",
  "on_task_complete",
  "on_doc_check"
];
// Agent platforms a skill can target.
var ALLOWED_PLATFORMS = ["claude-code", "gemini-cli"];
// Known cognitive modes.
// NOTE(review): SkillMetadataSchema validates `cognitive_mode` only against a
// kebab-case regex, not against this list — confirm whether membership is
// enforced elsewhere or an enum check was intended here.
var ALLOWED_COGNITIVE_MODES = [
  "adversarial-reviewer",
  "constructive-architect",
  "meticulous-implementer",
  "diagnostic-investigator",
  "advisory-guide",
  "meticulous-verifier"
];
// Top-level skill metadata schema (skill manifest / SKILL.md front matter).
var SkillMetadataSchema = z.object({
  name: z.string().regex(/^[a-z][a-z0-9-]*$/, "Name must be lowercase with hyphens"),
  version: z.string().regex(/^\d+\.\d+\.\d+$/, "Version must be semver format"),
  description: z.string(),
  cognitive_mode: z.string().regex(/^[a-z][a-z0-9]*(-[a-z0-9]+)*$/, "Cognitive mode must be kebab-case").optional(),
  triggers: z.array(z.enum(ALLOWED_TRIGGERS)),
  platforms: z.array(z.enum(ALLOWED_PLATFORMS)),
  tools: z.array(z.string()),
  cli: SkillCliSchema.optional(),
  mcp: SkillMcpSchema.optional(),
  // "rigid" skills follow declared phases; "flexible" skills do not.
  type: z.enum(["rigid", "flexible"]),
  phases: z.array(SkillPhaseSchema).optional(),
  // Defaults apply: {} expands to { persistent: false, files: [] }.
  state: SkillStateSchema.default({}),
  depends_on: z.array(z.string()).default([]),
  repository: z.string().url().optional()
});

export {
  ALLOWED_COGNITIVE_MODES,
  SkillMetadataSchema
};
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
import {
|
|
2
|
+
Err,
|
|
3
|
+
Ok
|
|
4
|
+
} from "./chunk-MHBMTPW7.js";
|
|
5
|
+
|
|
6
|
+
// src/persona/generators/ci-workflow.ts
|
|
7
|
+
import YAML from "yaml";
|
|
8
|
+
/**
 * Translates persona trigger descriptors into a GitHub Actions `on:` object.
 *
 *   - "on_pr"     -> `pull_request` (optionally restricted by `conditions.paths`)
 *   - "on_commit" -> `push` (optionally restricted by `conditions.branches`)
 *   - "scheduled" -> entries in the `schedule` cron array
 *
 * Unknown events are ignored. Fix: multiple "scheduled" triggers now
 * accumulate into one `schedule` array — GitHub Actions supports several
 * cron expressions — where previously each one overwrote the last.
 *
 * @param triggers  Array of `{ event, conditions?, cron? }` descriptors.
 * @returns Plain object suitable for the workflow's `on:` key.
 */
function buildGitHubTriggers(triggers) {
  const on = {};
  for (const trigger of triggers) {
    switch (trigger.event) {
      case "on_pr": {
        const prConfig = {};
        if (trigger.conditions?.paths) prConfig.paths = trigger.conditions.paths;
        on.pull_request = prConfig;
        break;
      }
      case "on_commit": {
        const pushConfig = {};
        if (trigger.conditions?.branches) pushConfig.branches = trigger.conditions.branches;
        on.push = pushConfig;
        break;
      }
      case "scheduled": {
        // Append rather than assign so earlier cron entries survive.
        on.schedule = on.schedule ?? [];
        on.schedule.push({ cron: trigger.cron });
        break;
      }
    }
  }
  return on;
}
|
|
31
|
+
/**
 * Generates a GitHub Actions workflow (as a YAML string) that runs a
 * persona's command steps in CI.
 *
 * The job is fixed-shape: checkout, Node 20 setup, pnpm setup, then one
 * `npx harness <command>` step per command step of the persona (skill steps
 * have no CI representation and are omitted). A configured severity is
 * appended as `--severity <level>` to every harness command.
 *
 * @param persona   Persona with `name`, `triggers`, `steps`, `config.severity`.
 * @param platform  Only GitHub is supported; "gitlab" returns an Err.
 * @returns Ok(yamlString) on success, Err(Error) on failure.
 */
function generateCIWorkflow(persona, platform) {
  try {
    if (platform === "gitlab") return Err(new Error("GitLab CI generation is not yet supported"));
    const severity = persona.config.severity;
    // Fixed setup steps preceding the persona's own commands.
    const steps = [
      { uses: "actions/checkout@v4" },
      { uses: "actions/setup-node@v4", with: { "node-version": "20" } },
      { uses: "pnpm/action-setup@v4", with: { run_install: "frozen" } }
    ];
    // Only command steps are runnable in CI; skill steps are dropped here.
    const commandSteps = persona.steps.filter((s) => "command" in s);
    for (const step of commandSteps) {
      const severityFlag = severity ? ` --severity ${severity}` : "";
      steps.push({ run: `npx harness ${step.command}${severityFlag}` });
    }
    const workflow = {
      name: persona.name,
      on: buildGitHubTriggers(persona.triggers),
      jobs: {
        enforce: {
          "runs-on": "ubuntu-latest",
          steps
        }
      }
    };
    // lineWidth: 0 disables YAML line wrapping so long run commands stay intact.
    return Ok(YAML.stringify(workflow, { lineWidth: 0 }));
  } catch (error) {
    return Err(
      new Error(
        `Failed to generate CI workflow: ${error instanceof Error ? error.message : String(error)}`
      )
    );
  }
}

export {
  generateCIWorkflow
};
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
// src/mcp/utils/sanitize-path.ts
|
|
2
|
+
import * as path from "path";
|
|
3
|
+
/**
 * Resolves a user-supplied path to an absolute path and refuses the
 * filesystem root.
 *
 * @param {string} inputPath - Path supplied by the caller (may be relative).
 * @returns {string} Absolute, normalized path.
 * @throws {Error} If the resolved path is "/" or the platform root (e.g. "C:\").
 */
function sanitizePath(inputPath) {
  const absolute = path.resolve(inputPath);
  const isRoot = absolute === "/" || absolute === path.parse(absolute).root;
  if (isRoot) {
    throw new Error("Invalid project path: cannot use filesystem root");
  }
  return absolute;
}

export {
  sanitizePath
};
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
import {
|
|
2
|
+
resolveProjectConfig
|
|
3
|
+
} from "./chunk-K6XAPGML.js";
|
|
4
|
+
import {
|
|
5
|
+
sanitizePath
|
|
6
|
+
} from "./chunk-W6Y7ZW3Y.js";
|
|
7
|
+
|
|
8
|
+
// src/mcp/tools/validate.ts
|
|
9
|
+
import * as path from "path";
|
|
10
|
+
// MCP tool descriptor for "validate_project" — exposes the project validation
// pipeline over the Model Context Protocol.
var validateToolDefinition = {
  name: "validate_project",
  description: "Run all validation checks on a harness engineering project",
  inputSchema: {
    type: "object",
    properties: {
      path: { type: "string", description: "Path to project root directory" }
    },
    required: ["path"]
  }
};
/**
 * Handler for "validate_project". Runs three checks — config resolution,
 * file-structure validation, and AGENTS.md validation — and returns an MCP
 * text result containing `{ valid, checks, errors }` as JSON.
 *
 * Checks start as config: "fail", structure/agentsMap: "skipped"; each is
 * upgraded as it runs. A failing config short-circuits the other checks.
 * The structure and AGENTS.md checks are best-effort: if the core module
 * cannot be dynamically imported (or lacks the expected function), the
 * check silently stays "skipped" rather than failing the whole call.
 *
 * @param input `{ path }` — project root, sanitized before use.
 */
async function handleValidateProject(input) {
  let projectPath;
  try {
    projectPath = sanitizePath(input.path);
  } catch (error) {
    // Bad path (e.g. filesystem root) is an MCP-level error response.
    return {
      content: [
        {
          type: "text",
          text: `Error: ${error instanceof Error ? error.message : String(error)}`
        }
      ],
      isError: true
    };
  }
  const errors = [];
  const checks = {
    config: "fail",
    structure: "skipped",
    agentsMap: "skipped"
  };
  const configResult = resolveProjectConfig(projectPath);
  if (!configResult.ok) {
    // Without a resolvable config the other checks cannot run meaningfully.
    errors.push(`Config: ${configResult.error.message}`);
    return {
      content: [{ type: "text", text: JSON.stringify({ valid: false, checks, errors }) }]
    };
  }
  checks.config = "pass";
  const config = configResult.value;
  try {
    // Lazy-load the core bundle so the MCP server starts without it.
    const core = await import("./dist-JVZ2MKBC.js");
    if (typeof core.validateFileStructure === "function" && Array.isArray(config.conventions)) {
      const conventions = config.conventions;
      const structureResult = await core.validateFileStructure(projectPath, conventions);
      if (structureResult.ok) {
        checks.structure = structureResult.value.valid ? "pass" : "fail";
        if (!structureResult.value.valid) {
          for (const missing of structureResult.value.missing) {
            errors.push(`Missing required file: ${missing}`);
          }
        }
      } else {
        checks.structure = "fail";
        errors.push(`Structure validation error: ${structureResult.error.message}`);
      }
    }
  } catch {
    // Best-effort: core module unavailable — structure stays "skipped".
  }
  try {
    // Same lazy import; module resolution is cached so this is cheap.
    const core = await import("./dist-JVZ2MKBC.js");
    if (typeof core.validateAgentsMap === "function") {
      const agentsMapPath = path.join(projectPath, "AGENTS.md");
      const agentsResult = await core.validateAgentsMap(agentsMapPath);
      if (agentsResult.ok) {
        checks.agentsMap = agentsResult.value.valid ? "pass" : "fail";
        if (!agentsResult.value.valid) {
          if (agentsResult.value.missingSections.length > 0) {
            errors.push(
              `AGENTS.md missing sections: ${agentsResult.value.missingSections.join(", ")}`
            );
          }
          if (agentsResult.value.brokenLinks.length > 0) {
            errors.push(`AGENTS.md has ${agentsResult.value.brokenLinks.length} broken link(s)`);
          }
        }
      } else {
        checks.agentsMap = "fail";
        errors.push(`AGENTS.md validation error: ${agentsResult.error.message}`);
      }
    }
  } catch {
    // Best-effort: core module unavailable — agentsMap stays "skipped".
  }
  const valid = errors.length === 0;
  return {
    content: [{ type: "text", text: JSON.stringify({ valid, checks, errors }) }]
  };
}

export {
  validateToolDefinition,
  handleValidateProject
};
|
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
import {
|
|
2
|
+
sanitizePath
|
|
3
|
+
} from "./chunk-W6Y7ZW3Y.js";
|
|
4
|
+
|
|
5
|
+
// src/mcp/tools/review-pipeline.ts
|
|
6
|
+
// MCP tool descriptor for "run_code_review" — front-end for the 7-phase
// review pipeline implemented in the core bundle.
var runCodeReviewDefinition = {
  name: "run_code_review",
  description: "Run the unified 7-phase code review pipeline: gate, mechanical checks, context scoping, parallel agents, validation, deduplication, and output.",
  inputSchema: {
    type: "object",
    properties: {
      path: { type: "string", description: "Path to project root" },
      diff: { type: "string", description: "Git diff string to review" },
      commitMessage: {
        type: "string",
        description: "Most recent commit message (for change-type detection)"
      },
      comment: {
        type: "boolean",
        description: "Post inline comments to GitHub PR (requires prNumber and repo)"
      },
      ci: {
        type: "boolean",
        description: "Enable eligibility gate and non-interactive output"
      },
      deep: {
        type: "boolean",
        description: "Add threat modeling pass to security agent"
      },
      noMechanical: {
        type: "boolean",
        description: "Skip mechanical checks (useful if already run)"
      },
      prNumber: {
        type: "number",
        description: "PR number (required for --comment and CI gate)"
      },
      repo: {
        type: "string",
        description: "Repository in owner/repo format (required for --comment)"
      }
    },
    required: ["path", "diff"]
  }
};
/**
 * Handler for "run_code_review". Parses the supplied git diff, builds the
 * pipeline's diff-info structure, runs the review pipeline from the core
 * bundle, and returns a JSON summary of the result.
 *
 * All failures — diff parse errors, bad paths, pipeline exceptions — come
 * back as MCP error responses (`isError: true`) rather than throwing.
 *
 * NOTE(review): `input.prNumber` is declared in the schema but not forwarded
 * to runReviewPipeline here — confirm whether the pipeline reads it from
 * elsewhere or the flag is unused in this path.
 *
 * @param input  See runCodeReviewDefinition.inputSchema.
 */
async function handleRunCodeReview(input) {
  try {
    // Lazy-load the heavy core bundle only when a review is requested.
    const { parseDiff, runReviewPipeline } = await import("./dist-JVZ2MKBC.js");
    const parseResult = parseDiff(input.diff);
    if (!parseResult.ok) {
      return {
        content: [
          {
            type: "text",
            text: `Error parsing diff: ${parseResult.error.message}`
          }
        ],
        isError: true
      };
    }
    const codeChanges = parseResult.value;
    const projectRoot = sanitizePath(input.path);
    // Per-file view of the diff consumed by the pipeline's scoping phase.
    const diffInfo = {
      changedFiles: codeChanges.files.map((f) => f.path),
      newFiles: codeChanges.files.filter((f) => f.status === "added").map((f) => f.path),
      deletedFiles: codeChanges.files.filter((f) => f.status === "deleted").map((f) => f.path),
      totalDiffLines: input.diff.split("\n").length,
      fileDiffs: new Map(
        codeChanges.files.map((f) => [f.path, f.diff ?? ""])
      )
    };
    const result = await runReviewPipeline({
      projectRoot,
      diff: diffInfo,
      commitMessage: input.commitMessage ?? "",
      flags: {
        comment: input.comment ?? false,
        ci: input.ci ?? false,
        deep: input.deep ?? false,
        noMechanical: input.noMechanical ?? false
      },
      // Only attach `repo` when provided (loose equality also excludes undefined).
      ...input.repo != null ? { repo: input.repo } : {}
    });
    // Summarize rather than return full findings; counts keep the payload small.
    return {
      content: [
        {
          type: "text",
          text: JSON.stringify(
            {
              skipped: result.skipped,
              skipReason: result.skipReason,
              stoppedByMechanical: result.stoppedByMechanical,
              assessment: result.assessment,
              findingCount: result.findings.length,
              terminalOutput: result.terminalOutput,
              githubCommentCount: result.githubComments.length,
              exitCode: result.exitCode
            },
            null,
            2
          )
        }
      ],
      isError: false
    };
  } catch (error) {
    return {
      content: [
        {
          type: "text",
          text: `Error: ${error instanceof Error ? error.message : String(error)}`
        }
      ],
      isError: true
    };
  }
}

export {
  runCodeReviewDefinition,
  handleRunCodeReview
};
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
import {
|
|
2
|
+
sanitizePath
|
|
3
|
+
} from "./chunk-W6Y7ZW3Y.js";
|
|
4
|
+
|
|
5
|
+
// src/mcp/tools/security.ts
|
|
6
|
+
import * as path from "path";
|
|
7
|
+
// MCP tool descriptor for "run_security_scan" — exposes the built-in
// security scanner over the Model Context Protocol.
var runSecurityScanDefinition = {
  name: "run_security_scan",
  description: "Run the built-in security scanner on a project or specific files. Detects secrets, injection, XSS, weak crypto, and other vulnerabilities.",
  inputSchema: {
    type: "object",
    properties: {
      path: { type: "string", description: "Path to project root" },
      files: {
        type: "array",
        items: { type: "string" },
        description: "Optional list of specific files to scan. If omitted, scans all source files."
      },
      strict: {
        type: "boolean",
        description: "Override strict mode \u2014 promotes all warnings to errors"
      }
    },
    required: ["path"]
  }
};
/**
 * Handler for "run_security_scan". Loads the project's security config from
 * `harness.config.json` (best-effort — a missing or unreadable config means
 * scanner defaults), applies an optional `strict` override, runs the scanner
 * over either the given files or a project-wide glob, and returns the scan
 * result as JSON with a severity summary appended.
 *
 * Any unexpected failure comes back as an MCP error response rather than
 * throwing to the transport.
 *
 * @param input `{ path, files?, strict? }` — see inputSchema above.
 */
async function handleRunSecurityScan(input) {
  try {
    // Lazy-load the core bundle containing the scanner implementation.
    const core = await import("./dist-JVZ2MKBC.js");
    const projectRoot = sanitizePath(input.path);
    let configData = {};
    try {
      const fs = await import("fs");
      const configPath = path.join(projectRoot, "harness.config.json");
      if (fs.existsSync(configPath)) {
        const raw = fs.readFileSync(configPath, "utf-8");
        const parsed = JSON.parse(raw);
        configData = parsed.security ?? {};
      }
    } catch {
      // Best-effort: unreadable/malformed config falls back to defaults.
    }
    // Explicit `strict` input overrides whatever the config file says.
    if (input.strict !== void 0) {
      configData.strict = input.strict;
    }
    const securityConfig = core.parseSecurityConfig(configData);
    const scanner = new core.SecurityScanner(securityConfig);
    scanner.configureForProject(projectRoot);
    let filesToScan;
    if (input.files && input.files.length > 0) {
      // Caller-specified files are resolved relative to the project root.
      filesToScan = input.files.map((f) => path.resolve(projectRoot, f));
    } else {
      // No explicit files: glob the project, honoring configured excludes.
      const { globFiles } = await import("./glob-helper-5OHBUQAI.js");
      filesToScan = await globFiles(projectRoot, securityConfig.exclude);
    }
    const result = await scanner.scanFiles(filesToScan);
    return {
      content: [
        {
          type: "text",
          text: JSON.stringify({
            ...result,
            // Per-severity counts derived from the findings list.
            summary: {
              errors: result.findings.filter((f) => f.severity === "error").length,
              warnings: result.findings.filter(
                (f) => f.severity === "warning"
              ).length,
              info: result.findings.filter((f) => f.severity === "info").length
            }
          })
        }
      ]
    };
  } catch (error) {
    return {
      content: [
        {
          type: "text",
          text: `Error: ${error instanceof Error ? error.message : String(error)}`
        }
      ],
      isError: true
    };
  }
}

export {
  runSecurityScanDefinition,
  handleRunSecurityScan
};
|