@harness-engineering/cli 1.9.0 → 1.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/skills/claude-code/enforce-architecture/SKILL.md +4 -0
- package/dist/agents/skills/claude-code/harness-autopilot/SKILL.md +7 -2
- package/dist/agents/skills/claude-code/harness-brainstorming/SKILL.md +10 -1
- package/dist/agents/skills/claude-code/harness-execution/SKILL.md +2 -2
- package/dist/agents/skills/claude-code/harness-parallel-agents/SKILL.md +105 -20
- package/dist/agents/skills/claude-code/harness-pre-commit-review/SKILL.md +37 -0
- package/dist/agents/skills/gemini-cli/enforce-architecture/SKILL.md +4 -0
- package/dist/agents/skills/gemini-cli/harness-autopilot/SKILL.md +7 -2
- package/dist/agents/skills/gemini-cli/harness-brainstorming/SKILL.md +10 -1
- package/dist/agents/skills/gemini-cli/harness-execution/SKILL.md +2 -2
- package/dist/agents/skills/gemini-cli/harness-parallel-agents/SKILL.md +105 -20
- package/dist/agents/skills/gemini-cli/harness-pre-commit-review/SKILL.md +37 -0
- package/dist/agents-md-ZFV6RR5J.js +8 -0
- package/dist/architecture-EXNUMH5R.js +13 -0
- package/dist/bin/harness-mcp.d.ts +1 -0
- package/dist/bin/harness-mcp.js +28 -0
- package/dist/bin/harness.js +42 -8
- package/dist/check-phase-gate-VZFOY2PO.js +12 -0
- package/dist/chunk-2NCIKJES.js +470 -0
- package/dist/chunk-2YPZKGAG.js +62 -0
- package/dist/{chunk-CGSHUJES.js → chunk-2YSQOUHO.js} +4484 -2688
- package/dist/chunk-3WGJMBKH.js +45 -0
- package/dist/{chunk-ULSRSP53.js → chunk-6N4R6FVX.js} +11 -112
- package/dist/{chunk-6JIT7CEM.js → chunk-72GHBOL2.js} +1 -1
- package/dist/chunk-BM3PWGXQ.js +14 -0
- package/dist/chunk-C2ERUR3L.js +255 -0
- package/dist/chunk-EBJQ6N4M.js +39 -0
- package/dist/chunk-GNGELAXY.js +293 -0
- package/dist/chunk-GSIVNYVJ.js +187 -0
- package/dist/chunk-HD4IBGLA.js +80 -0
- package/dist/chunk-I6JZYEGT.js +4361 -0
- package/dist/chunk-IDZNPTYD.js +16 -0
- package/dist/chunk-JSTQ3AWB.js +31 -0
- package/dist/chunk-K6XAPGML.js +27 -0
- package/dist/chunk-KET4QQZB.js +8 -0
- package/dist/chunk-L2KLU56K.js +125 -0
- package/dist/chunk-MHBMTPW7.js +29 -0
- package/dist/chunk-NC6PXVWT.js +116 -0
- package/dist/chunk-NKDM3FMH.js +52 -0
- package/dist/chunk-PA2XHK75.js +248 -0
- package/dist/chunk-Q6AB7W5Z.js +135 -0
- package/dist/chunk-QPEH2QPG.js +347 -0
- package/dist/chunk-TEFCFC4H.js +15 -0
- package/dist/chunk-TI4TGEX6.js +85 -0
- package/dist/chunk-TRAPF4IX.js +185 -0
- package/dist/chunk-VRFZWGMS.js +68 -0
- package/dist/chunk-VUCPTQ6G.js +67 -0
- package/dist/chunk-W6Y7ZW3Y.js +13 -0
- package/dist/chunk-WJZDO6OY.js +103 -0
- package/dist/chunk-WUJTCNOU.js +122 -0
- package/dist/chunk-X3MN5UQJ.js +89 -0
- package/dist/chunk-Z75JC6I2.js +189 -0
- package/dist/chunk-ZOAWBDWU.js +72 -0
- package/dist/{chunk-RTPHUDZS.js → chunk-ZWC3MN5E.js} +1944 -2779
- package/dist/ci-workflow-K5RCRNYR.js +8 -0
- package/dist/constants-5JGUXPEK.js +6 -0
- package/dist/create-skill-WPXHSLX2.js +11 -0
- package/dist/dist-D4RYGUZE.js +14 -0
- package/dist/{dist-C5PYIQPF.js → dist-JVZ2MKBC.js} +108 -6
- package/dist/dist-L7LAAQAS.js +18 -0
- package/dist/{dist-I7DB5VKB.js → dist-M6BQODWC.js} +1145 -0
- package/dist/docs-PWCUVYWU.js +12 -0
- package/dist/engine-6XUP6GAK.js +8 -0
- package/dist/entropy-4I6JEYAC.js +12 -0
- package/dist/feedback-TNIW534S.js +18 -0
- package/dist/generate-agent-definitions-MWKEA5NU.js +15 -0
- package/dist/glob-helper-5OHBUQAI.js +52 -0
- package/dist/graph-loader-KO4GJ5N2.js +8 -0
- package/dist/index.d.ts +328 -12
- package/dist/index.js +93 -34
- package/dist/loader-4FIPIFII.js +10 -0
- package/dist/mcp-MOKLYNZL.js +34 -0
- package/dist/performance-BTOJCPXU.js +24 -0
- package/dist/review-pipeline-3YTW3463.js +9 -0
- package/dist/runner-VMYLHWOC.js +6 -0
- package/dist/runtime-GO7K2PJE.js +9 -0
- package/dist/security-4P2GGFF6.js +9 -0
- package/dist/skill-executor-RG45LUO5.js +8 -0
- package/dist/templates/orchestrator/WORKFLOW.md +48 -0
- package/dist/templates/orchestrator/template.json +6 -0
- package/dist/validate-JN44D2Q7.js +12 -0
- package/dist/validate-cross-check-DB7RIFFF.js +8 -0
- package/dist/version-KFFPOQAX.js +6 -0
- package/package.json +13 -7
- package/dist/create-skill-UZOHMXRU.js +0 -8
- package/dist/validate-cross-check-VG573VZO.js +0 -7
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
// src/mcp/utils/result-adapter.ts
/**
 * Adapt a Result ({ ok: true, value } | { ok: false, error }) to an MCP tool
 * response. Failures come first as a guard: the error's message is returned
 * as text with `isError` set; successes serialize the value as JSON text.
 */
function resultToMcpResponse(result) {
  if (!result.ok) {
    return {
      content: [{ type: "text", text: result.error.message }],
      isError: true
    };
  }
  return {
    content: [{ type: "text", text: JSON.stringify(result.value) }]
  };
}

export {
  resultToMcpResponse
};
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
import {
|
|
2
|
+
toKebabCase
|
|
3
|
+
} from "./chunk-KET4QQZB.js";
|
|
4
|
+
import {
|
|
5
|
+
Err,
|
|
6
|
+
Ok
|
|
7
|
+
} from "./chunk-MHBMTPW7.js";
|
|
8
|
+
|
|
9
|
+
// src/persona/generators/runtime.ts
/**
 * Serialize a persona into its runtime configuration JSON string.
 * Returns Ok(jsonString) on success, or Err(Error) when name normalization
 * or serialization throws.
 */
function generateRuntime(persona) {
  try {
    const runtimeConfig = {
      name: toKebabCase(persona.name),
      skills: persona.skills,
      steps: persona.steps,
      timeout: persona.config.timeout,
      severity: persona.config.severity
    };
    return Ok(JSON.stringify(runtimeConfig, null, 2));
  } catch (error) {
    const reason = error instanceof Error ? error.message : String(error);
    return Err(new Error(`Failed to generate runtime config: ${reason}`));
  }
}

export {
  generateRuntime
};
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import {
|
|
2
|
+
Err,
|
|
3
|
+
Ok
|
|
4
|
+
} from "./chunk-MHBMTPW7.js";
|
|
5
|
+
|
|
6
|
+
// src/mcp/utils/config-resolver.ts
import * as fs from "fs";
import * as path from "path";
/**
 * Locate and parse harness.config.json under `projectPath`.
 * Returns Ok(parsedConfig); returns Err when the file is missing or does
 * not contain valid JSON.
 */
function resolveProjectConfig(projectPath) {
  const configFile = path.join(projectPath, "harness.config.json");
  if (!fs.existsSync(configFile)) {
    return Err(new Error(`No harness.config.json found in ${projectPath}`));
  }
  try {
    return Ok(JSON.parse(fs.readFileSync(configFile, "utf-8")));
  } catch (error) {
    const reason = error instanceof Error ? error.message : String(error);
    return Err(new Error(`Failed to parse config: ${reason}`));
  }
}

export {
  resolveProjectConfig
};
|
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
import {
|
|
2
|
+
resolveSkillsDir
|
|
3
|
+
} from "./chunk-HD4IBGLA.js";
|
|
4
|
+
import {
|
|
5
|
+
SkillMetadataSchema
|
|
6
|
+
} from "./chunk-VRFZWGMS.js";
|
|
7
|
+
|
|
8
|
+
// src/persona/skill-executor.ts
|
|
9
|
+
import * as fs from "fs";
|
|
10
|
+
import * as path from "path";
|
|
11
|
+
import { parse } from "yaml";
|
|
12
|
+
// Resolve the effective output mode: explicit modes pass through unchanged,
// while "auto" picks inline output for manual runs and artifact output for
// every other trigger.
function resolveOutputMode(mode, trigger) {
  if (mode === "auto") {
    return trigger === "manual" ? "inline" : "artifact";
  }
  return mode;
}
|
|
16
|
+
// Build the review artifact path:
//   <project>/.harness/reviews/<YYYY-MM-DD>-<short-sha>.md
// Falls back to "unknown" when no head SHA is provided.
function buildArtifactPath(projectPath, headSha) {
  const shortSha = headSha?.slice(0, 7) ?? "unknown";
  const isoDate = new Date().toISOString().slice(0, 10);
  return path.join(projectPath, ".harness", "reviews", `${isoDate}-${shortSha}.md`);
}
|
|
21
|
+
// Produce the scaffold Markdown for a review artifact: YAML front matter
// (skill, trigger, short sha, date, pending assessment) followed by an empty
// review skeleton to be filled in by the skill run.
function buildArtifactContent(skillName, trigger, headSha) {
  const isoDate = new Date().toISOString().slice(0, 10);
  const frontMatter = [
    "---",
    `skill: ${skillName}`,
    `trigger: ${trigger}`,
    `sha: ${headSha?.slice(0, 7) ?? "unknown"}`,
    `date: ${isoDate}`,
    `assessment: pending`,
    "---"
  ];
  const body = [
    "",
    `# Review by ${skillName}`,
    "",
    "## Strengths",
    "",
    "- (review pending)",
    "",
    "## Issues",
    "",
    "### Critical",
    "",
    "- None identified",
    "",
    "### Important",
    "",
    "- None identified",
    "",
    "### Suggestions",
    "",
    "- None identified",
    "",
    "## Assessment",
    "",
    "Pending \u2014 skill execution scaffolded.",
    "",
    "## Harness Checks",
    "",
    "- (run harness validate, check-deps, check-docs to populate)",
    ""
  ];
  return [...frontMatter, ...body].join("\n");
}
|
|
62
|
+
/**
 * Load a skill by name and scaffold its execution.
 *
 * Validates that the skill directory, its skill.yaml (checked against
 * SkillMetadataSchema), and its SKILL.md all exist; any missing piece yields
 * a "fail" result with a descriptive message. On success, returns a "pass"
 * result summarizing the loaded skill, and — when the resolved output mode
 * is "artifact" — writes a scaffold review file under .harness/reviews and
 * reports its path.
 */
async function executeSkill(skillName, context) {
  const startedAt = Date.now();
  // Shared shape for every early-exit failure; duration measured at return time.
  const fail = (message) => ({
    status: "fail",
    output: message,
    durationMs: Date.now() - startedAt
  });
  const skillDir = path.join(resolveSkillsDir(), skillName);
  if (!fs.existsSync(skillDir)) {
    return fail(`Skill not found: ${skillName}`);
  }
  const yamlPath = path.join(skillDir, "skill.yaml");
  if (!fs.existsSync(yamlPath)) {
    return fail(`skill.yaml not found for ${skillName}`);
  }
  const metadataResult = SkillMetadataSchema.safeParse(parse(fs.readFileSync(yamlPath, "utf-8")));
  if (!metadataResult.success) {
    return fail(`Invalid skill metadata: ${metadataResult.error.message}`);
  }
  const skillMdPath = path.join(skillDir, "SKILL.md");
  if (!fs.existsSync(skillMdPath)) {
    return fail(`SKILL.md not found for ${skillName}`);
  }
  const skillContent = fs.readFileSync(skillMdPath, "utf-8");
  const metadata = metadataResult.data;
  const output = `Skill ${metadata.name} (${metadata.type}) loaded.
Cognitive mode: ${metadata.cognitive_mode ?? "default"}
Content length: ${skillContent.length} chars
Trigger: ${context.trigger}
`;
  let artifactPath;
  if (resolveOutputMode(context.outputMode, context.trigger) === "artifact") {
    artifactPath = buildArtifactPath(context.projectPath, context.headSha);
    fs.mkdirSync(path.dirname(artifactPath), { recursive: true });
    fs.writeFileSync(
      artifactPath,
      buildArtifactContent(skillName, context.trigger, context.headSha),
      "utf-8"
    );
  }
  return {
    status: "pass",
    output,
    ...artifactPath ? { artifactPath } : {},
    durationMs: Date.now() - startedAt
  };
}

export {
  executeSkill
};
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
// ../types/dist/index.mjs
/** Build a successful Result wrapping `value`. */
const Ok = (value) => ({ ok: true, value });

/** Build a failed Result wrapping `error`. */
const Err = (error) => ({ ok: false, error });

/** Narrowing guard: true exactly when the Result holds a value. */
const isOk = (result) => result.ok === true;

/** Narrowing guard: true exactly when the Result holds an error. */
const isErr = (result) => result.ok === false;

// The built-in cognitive modes a persona skill may declare.
var STANDARD_COGNITIVE_MODES = [
  "adversarial-reviewer",
  "constructive-architect",
  "meticulous-implementer",
  "diagnostic-investigator",
  "advisory-guide",
  "meticulous-verifier"
];

export {
  Ok,
  Err,
  isOk,
  isErr,
  STANDARD_COGNITIVE_MODES
};
|
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
import {
|
|
2
|
+
resultToMcpResponse
|
|
3
|
+
} from "./chunk-IDZNPTYD.js";
|
|
4
|
+
import {
|
|
5
|
+
sanitizePath
|
|
6
|
+
} from "./chunk-W6Y7ZW3Y.js";
|
|
7
|
+
import {
|
|
8
|
+
Ok
|
|
9
|
+
} from "./chunk-MHBMTPW7.js";
|
|
10
|
+
|
|
11
|
+
// src/mcp/tools/docs.ts
import * as path from "path";
// MCP tool descriptor for "check_docs". Declares the JSON-schema input
// contract consumed by the MCP server's tool registry; the matching handler
// is handleCheckDocs below. Only "path" is required — "scope" defaults to
// "coverage" in the handler when omitted.
var checkDocsDefinition = {
  name: "check_docs",
  description: "Analyze documentation coverage and/or validate knowledge map integrity",
  inputSchema: {
    type: "object",
    properties: {
      path: { type: "string", description: "Path to project root" },
      domain: { type: "string", description: "Domain/module to check" },
      scope: {
        type: "string",
        enum: ["coverage", "integrity", "all"],
        description: "Scope of check: 'coverage' (doc coverage), 'integrity' (knowledge map validation), 'all' (both). Default: 'coverage'"
      }
    },
    required: ["path"]
  }
};
|
|
30
|
+
async function handleCheckDocs(input) {
|
|
31
|
+
try {
|
|
32
|
+
const projectPath = sanitizePath(input.path);
|
|
33
|
+
const scope = input.scope ?? "coverage";
|
|
34
|
+
if (scope === "integrity") {
|
|
35
|
+
const { validateKnowledgeMap } = await import("./dist-JVZ2MKBC.js");
|
|
36
|
+
const result2 = await validateKnowledgeMap(projectPath);
|
|
37
|
+
return resultToMcpResponse(result2);
|
|
38
|
+
}
|
|
39
|
+
if (scope === "all") {
|
|
40
|
+
const { checkDocCoverage: checkDocCoverage2, validateKnowledgeMap } = await import("./dist-JVZ2MKBC.js");
|
|
41
|
+
const domain2 = input.domain ?? "src";
|
|
42
|
+
const { loadGraphStore: loadGraphStore2 } = await import("./graph-loader-KO4GJ5N2.js");
|
|
43
|
+
const store2 = await loadGraphStore2(projectPath);
|
|
44
|
+
let graphCoverage2;
|
|
45
|
+
if (store2) {
|
|
46
|
+
const { Assembler } = await import("./dist-M6BQODWC.js");
|
|
47
|
+
const assembler = new Assembler(store2);
|
|
48
|
+
const report = assembler.checkCoverage();
|
|
49
|
+
graphCoverage2 = {
|
|
50
|
+
documented: [...report.documented],
|
|
51
|
+
undocumented: [...report.undocumented],
|
|
52
|
+
coveragePercentage: report.coveragePercentage
|
|
53
|
+
};
|
|
54
|
+
}
|
|
55
|
+
const [coverageResult, integrityResult] = await Promise.allSettled([
|
|
56
|
+
checkDocCoverage2(domain2, {
|
|
57
|
+
sourceDir: path.resolve(projectPath, "src"),
|
|
58
|
+
docsDir: path.resolve(projectPath, "docs"),
|
|
59
|
+
...graphCoverage2 !== void 0 && { graphCoverage: graphCoverage2 }
|
|
60
|
+
}),
|
|
61
|
+
validateKnowledgeMap(projectPath)
|
|
62
|
+
]);
|
|
63
|
+
let coverage;
|
|
64
|
+
if (coverageResult.status === "fulfilled") {
|
|
65
|
+
const r = coverageResult.value;
|
|
66
|
+
coverage = r.ok ? r.value : { error: r.error };
|
|
67
|
+
} else {
|
|
68
|
+
coverage = { error: String(coverageResult.reason) };
|
|
69
|
+
}
|
|
70
|
+
let integrity;
|
|
71
|
+
if (integrityResult.status === "fulfilled") {
|
|
72
|
+
const r = integrityResult.value;
|
|
73
|
+
integrity = r.ok ? r.value : { error: r.error };
|
|
74
|
+
} else {
|
|
75
|
+
integrity = { error: String(integrityResult.reason) };
|
|
76
|
+
}
|
|
77
|
+
return resultToMcpResponse(Ok({ coverage, integrity }));
|
|
78
|
+
}
|
|
79
|
+
const { checkDocCoverage } = await import("./dist-JVZ2MKBC.js");
|
|
80
|
+
const domain = input.domain ?? "src";
|
|
81
|
+
const { loadGraphStore } = await import("./graph-loader-KO4GJ5N2.js");
|
|
82
|
+
const store = await loadGraphStore(projectPath);
|
|
83
|
+
let graphCoverage;
|
|
84
|
+
if (store) {
|
|
85
|
+
const { Assembler } = await import("./dist-M6BQODWC.js");
|
|
86
|
+
const assembler = new Assembler(store);
|
|
87
|
+
const report = assembler.checkCoverage();
|
|
88
|
+
graphCoverage = {
|
|
89
|
+
documented: [...report.documented],
|
|
90
|
+
undocumented: [...report.undocumented],
|
|
91
|
+
coveragePercentage: report.coveragePercentage
|
|
92
|
+
};
|
|
93
|
+
}
|
|
94
|
+
const result = await checkDocCoverage(domain, {
|
|
95
|
+
sourceDir: path.resolve(projectPath, "src"),
|
|
96
|
+
docsDir: path.resolve(projectPath, "docs"),
|
|
97
|
+
...graphCoverage !== void 0 && { graphCoverage }
|
|
98
|
+
});
|
|
99
|
+
return resultToMcpResponse(result);
|
|
100
|
+
} catch (error) {
|
|
101
|
+
return {
|
|
102
|
+
content: [
|
|
103
|
+
{
|
|
104
|
+
type: "text",
|
|
105
|
+
text: `Error: ${error instanceof Error ? error.message : String(error)}`
|
|
106
|
+
}
|
|
107
|
+
],
|
|
108
|
+
isError: true
|
|
109
|
+
};
|
|
110
|
+
}
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
export {
|
|
114
|
+
checkDocsDefinition,
|
|
115
|
+
handleCheckDocs
|
|
116
|
+
};
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
import {
|
|
2
|
+
Err,
|
|
3
|
+
Ok
|
|
4
|
+
} from "./chunk-MHBMTPW7.js";
|
|
5
|
+
|
|
6
|
+
// src/persona/generators/agents-md.ts
|
|
7
|
+
// Render a persona trigger as a short human-readable label. Unknown events
// fall through and yield undefined, matching the exhaustive switch this
// replaces (no default case).
function formatTrigger(trigger) {
  if (trigger.event === "on_pr") {
    const paths = trigger.conditions?.paths?.join(", ") ?? "all files";
    return `On PR (${paths})`;
  }
  if (trigger.event === "on_commit") {
    const branches = trigger.conditions?.branches?.join(", ") ?? "all branches";
    return `On commit (${branches})`;
  }
  if (trigger.event === "scheduled") {
    return `Scheduled (cron: ${trigger.cron})`;
  }
  if (trigger.event === "manual") {
    return "Manual";
  }
}
|
|
23
|
+
/**
 * Render a persona as an AGENTS.md Markdown fragment: role, formatted
 * triggers, skills, and the local harness commands a developer can run to
 * reproduce the agent's checks. Returns Ok(fragment) or Err(Error) when
 * formatting throws.
 */
function generateAgentsMd(persona) {
  try {
    const triggers = persona.triggers.map(formatTrigger).join(", ");
    const skills = persona.skills.join(", ");
    // Collect command-backed and skill-backed steps in one pass; command
    // references are listed before skill-run references, as before.
    const commandRefs = [];
    const skillRefs = [];
    for (const step of persona.steps) {
      if ("command" in step) {
        commandRefs.push(`\`harness ${step.command}\``);
      }
      if ("skill" in step) {
        skillRefs.push(`\`harness skill run ${step.skill}\``);
      }
    }
    const allCommands = [commandRefs.join(", "), skillRefs.join(", ")].filter(Boolean).join(", ");
    const fragment = `## ${persona.name} Agent

**Role:** ${persona.role}

**Triggers:** ${triggers}

**Skills:** ${skills}

**When this agent flags an issue:** Fix violations before merging. Run ${allCommands} locally to validate.
`;
    return Ok(fragment);
  } catch (error) {
    const reason = error instanceof Error ? error.message : String(error);
    return Err(new Error(`Failed to generate AGENTS.md fragment: ${reason}`));
  }
}

export {
  generateAgentsMd
};
|
|
@@ -0,0 +1,248 @@
|
|
|
1
|
+
import {
|
|
2
|
+
resultToMcpResponse
|
|
3
|
+
} from "./chunk-IDZNPTYD.js";
|
|
4
|
+
import {
|
|
5
|
+
sanitizePath
|
|
6
|
+
} from "./chunk-W6Y7ZW3Y.js";
|
|
7
|
+
|
|
8
|
+
// src/mcp/tools/feedback.ts
// MCP tool descriptor for "create_self_review". Declares the JSON-schema
// input contract; the matching handler is handleCreateSelfReview below.
// "path" and "diff" are required; custom rules and size thresholds are
// optional and only applied when supplied.
var createSelfReviewDefinition = {
  name: "create_self_review",
  description: "Generate a checklist-based code review from a git diff, checking harness constraints, custom rules, and diff patterns",
  inputSchema: {
    type: "object",
    properties: {
      path: { type: "string", description: "Path to project root" },
      diff: { type: "string", description: "Git diff string to review" },
      customRules: {
        type: "array",
        items: { type: "object" },
        description: "Optional custom rules to apply during review"
      },
      maxFileSize: {
        type: "number",
        description: "Maximum number of lines changed per file before flagging"
      },
      maxFileCount: {
        type: "number",
        description: "Maximum number of changed files before flagging"
      }
    },
    required: ["path", "diff"]
  }
};
|
|
34
|
+
/**
 * MCP handler for the "create_self_review" tool.
 *
 * Parses the supplied diff, assembles the review configuration (harness
 * checks always on; custom rules and diff thresholds only when provided),
 * optionally enriches with knowledge-graph impact data, and delegates to
 * createSelfReview. Any thrown error is converted to an MCP error response.
 */
async function handleCreateSelfReview(input) {
  try {
    const { parseDiff, createSelfReview } = await import("./dist-JVZ2MKBC.js");
    const parsed = parseDiff(input.diff);
    if (!parsed.ok) {
      return resultToMcpResponse(parsed);
    }
    const projectPath = sanitizePath(input.path);
    // Thresholds are only attached when the caller actually supplied them.
    const diffAnalysis = { enabled: true };
    if (input.maxFileSize !== void 0) {
      diffAnalysis.maxFileSize = input.maxFileSize;
    }
    if (input.maxFileCount !== void 0) {
      diffAnalysis.maxChangedFiles = input.maxFileCount;
    }
    const config = {
      rootDir: projectPath,
      harness: {
        context: true,
        constraints: true,
        entropy: true
      }
    };
    if (input.customRules) {
      config.customRules = input.customRules;
    }
    config.diffAnalysis = diffAnalysis;
    const { loadGraphStore } = await import("./graph-loader-KO4GJ5N2.js");
    const store = await loadGraphStore(projectPath);
    let graphData;
    if (store) {
      const { GraphFeedbackAdapter } = await import("./dist-M6BQODWC.js");
      const adapter = new GraphFeedbackAdapter(store);
      const impact = adapter.computeImpactData(parsed.value.files.map((f) => f.path));
      const harness = adapter.computeHarnessCheckData();
      graphData = {
        impact: {
          affectedTests: [...impact.affectedTests],
          affectedDocs: [...impact.affectedDocs],
          impactScope: impact.impactScope
        },
        harness: { ...harness }
      };
    }
    return resultToMcpResponse(await createSelfReview(parsed.value, config, graphData));
  } catch (error) {
    return {
      content: [
        {
          type: "text",
          text: `Error: ${error instanceof Error ? error.message : String(error)}`
        }
      ],
      isError: true
    };
  }
}
}
|
|
92
|
+
// MCP tool descriptor for "analyze_diff". Declares the JSON-schema input
// contract; the matching handler is handleAnalyzeDiff below. Only "diff" is
// required — passing "path" additionally enables graph-enhanced analysis.
var analyzeDiffDefinition = {
  name: "analyze_diff",
  description: "Parse a git diff and check for forbidden patterns, oversized files, and missing test coverage",
  inputSchema: {
    type: "object",
    properties: {
      diff: { type: "string", description: "Git diff string to analyze" },
      path: {
        type: "string",
        description: "Path to project root (enables graph-enhanced analysis)"
      },
      forbiddenPatterns: {
        type: "array",
        items: { type: "string" },
        description: "List of regex patterns that are forbidden in the diff"
      },
      maxFileSize: {
        type: "number",
        description: "Maximum number of lines changed per file before flagging"
      },
      maxFileCount: {
        type: "number",
        description: "Maximum number of changed files before flagging"
      }
    },
    required: ["diff"]
  }
};
|
|
120
|
+
/**
 * MCP handler for the "analyze_diff" tool.
 *
 * Parses the diff, builds analysis options (forbidden-pattern rules and size
 * thresholds only when supplied), optionally attaches knowledge-graph impact
 * data when a project path is given, and delegates to analyzeDiff. Thrown
 * errors become MCP error responses.
 */
async function handleAnalyzeDiff(input) {
  try {
    const { parseDiff, analyzeDiff } = await import("./dist-JVZ2MKBC.js");
    const parsed = parseDiff(input.diff);
    if (!parsed.ok) {
      return resultToMcpResponse(parsed);
    }
    const options = { enabled: true };
    if (input.forbiddenPatterns) {
      options.forbiddenPatterns = input.forbiddenPatterns.map((pattern) => ({
        pattern,
        message: `Forbidden pattern matched: ${pattern}`,
        severity: "warning"
      }));
    }
    if (input.maxFileSize !== void 0) {
      options.maxFileSize = input.maxFileSize;
    }
    if (input.maxFileCount !== void 0) {
      options.maxChangedFiles = input.maxFileCount;
    }
    let graphImpactData;
    if (input.path) {
      // Graph enrichment is best-effort: a missing or unreadable graph store
      // must not prevent the plain diff analysis from running.
      try {
        const { loadGraphStore } = await import("./graph-loader-KO4GJ5N2.js");
        const store = await loadGraphStore(sanitizePath(input.path));
        if (store) {
          const { GraphFeedbackAdapter } = await import("./dist-M6BQODWC.js");
          const adapter = new GraphFeedbackAdapter(store);
          const impact = adapter.computeImpactData(parsed.value.files.map((f) => f.path));
          graphImpactData = {
            affectedTests: [...impact.affectedTests],
            affectedDocs: [...impact.affectedDocs],
            impactScope: impact.impactScope
          };
        }
      } catch {
        // Swallowed by design; see best-effort note above.
      }
    }
    return resultToMcpResponse(await analyzeDiff(parsed.value, options, graphImpactData));
  } catch (error) {
    return {
      content: [
        {
          type: "text",
          text: `Error: ${error instanceof Error ? error.message : String(error)}`
        }
      ],
      isError: true
    };
  }
}
|
|
172
|
+
// MCP tool descriptor for "request_peer_review". Declares the JSON-schema
// input contract; the matching handler is handleRequestPeerReview below,
// which enforces the 120-second timeout mentioned in the description.
var requestPeerReviewDefinition = {
  name: "request_peer_review",
  description: "Spawn an agent subprocess to perform code review. Returns structured feedback with approval status. Timeout: 120 seconds.",
  inputSchema: {
    type: "object",
    properties: {
      path: { type: "string", description: "Path to project root" },
      agentType: {
        type: "string",
        enum: [
          "architecture-enforcer",
          "documentation-maintainer",
          "test-reviewer",
          "entropy-cleaner",
          "custom"
        ],
        description: "Type of agent to use for the peer review"
      },
      diff: { type: "string", description: "Git diff string to review" },
      context: { type: "string", description: "Optional additional context for the reviewer" }
    },
    required: ["path", "agentType", "diff"]
  }
};
|
|
196
|
+
async function handleRequestPeerReview(input) {
|
|
197
|
+
try {
|
|
198
|
+
const { parseDiff, requestPeerReview } = await import("./dist-JVZ2MKBC.js");
|
|
199
|
+
const parseResult = parseDiff(input.diff);
|
|
200
|
+
if (!parseResult.ok) {
|
|
201
|
+
return resultToMcpResponse(parseResult);
|
|
202
|
+
}
|
|
203
|
+
const reviewContext = {
|
|
204
|
+
files: parseResult.value.files.map((f) => f.path),
|
|
205
|
+
diff: input.diff,
|
|
206
|
+
...input.context ? { metadata: { context: input.context } } : {}
|
|
207
|
+
};
|
|
208
|
+
try {
|
|
209
|
+
const { loadGraphStore } = await import("./graph-loader-KO4GJ5N2.js");
|
|
210
|
+
const store = await loadGraphStore(sanitizePath(input.path));
|
|
211
|
+
if (store) {
|
|
212
|
+
const { GraphFeedbackAdapter } = await import("./dist-M6BQODWC.js");
|
|
213
|
+
const adapter = new GraphFeedbackAdapter(store);
|
|
214
|
+
const changedFiles = parseResult.value.files.map((f) => f.path);
|
|
215
|
+
const impactData = adapter.computeImpactData(changedFiles);
|
|
216
|
+
reviewContext.metadata = {
|
|
217
|
+
...reviewContext.metadata,
|
|
218
|
+
graphContext: impactData
|
|
219
|
+
};
|
|
220
|
+
}
|
|
221
|
+
} catch {
|
|
222
|
+
}
|
|
223
|
+
const result = await requestPeerReview(input.agentType, reviewContext, {
|
|
224
|
+
timeout: 12e4,
|
|
225
|
+
wait: true
|
|
226
|
+
});
|
|
227
|
+
return resultToMcpResponse(result);
|
|
228
|
+
} catch (error) {
|
|
229
|
+
return {
|
|
230
|
+
content: [
|
|
231
|
+
{
|
|
232
|
+
type: "text",
|
|
233
|
+
text: `Error: ${error instanceof Error ? error.message : String(error)}`
|
|
234
|
+
}
|
|
235
|
+
],
|
|
236
|
+
isError: true
|
|
237
|
+
};
|
|
238
|
+
}
|
|
239
|
+
}
|
|
240
|
+
|
|
241
|
+
export {
|
|
242
|
+
createSelfReviewDefinition,
|
|
243
|
+
handleCreateSelfReview,
|
|
244
|
+
analyzeDiffDefinition,
|
|
245
|
+
handleAnalyzeDiff,
|
|
246
|
+
requestPeerReviewDefinition,
|
|
247
|
+
handleRequestPeerReview
|
|
248
|
+
};
|