agentweaver 0.1.17 → 0.1.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +104 -23
- package/dist/artifacts.js +41 -0
- package/dist/index.js +252 -27
- package/dist/interactive/controller.js +249 -13
- package/dist/interactive/ink/index.js +2 -2
- package/dist/interactive/state.js +1 -0
- package/dist/interactive/web/index.js +179 -0
- package/dist/interactive/web/protocol.js +154 -0
- package/dist/interactive/web/server.js +575 -0
- package/dist/interactive/web/static/app.js +709 -0
- package/dist/interactive/web/static/index.html +77 -0
- package/dist/interactive/web/static/styles.css +2 -0
- package/dist/interactive/web/static/styles.input.css +469 -0
- package/dist/pipeline/flow-catalog.js +4 -0
- package/dist/pipeline/flow-specs/auto-common-guided.json +313 -0
- package/dist/pipeline/flow-specs/auto-common.json +3 -1
- package/dist/pipeline/flow-specs/design-review/design-review-loop.json +2 -0
- package/dist/pipeline/flow-specs/design-review.json +2 -0
- package/dist/pipeline/flow-specs/implement.json +3 -1
- package/dist/pipeline/flow-specs/plan.json +4 -0
- package/dist/pipeline/flow-specs/playbook-init.json +199 -0
- package/dist/pipeline/flow-specs/review/review-fix.json +3 -1
- package/dist/pipeline/flow-specs/review/review-loop.json +4 -0
- package/dist/pipeline/flow-specs/review/review.json +2 -0
- package/dist/pipeline/node-registry.js +45 -0
- package/dist/pipeline/nodes/flow-run-node.js +13 -1
- package/dist/pipeline/nodes/playbook-ensure-node.js +115 -0
- package/dist/pipeline/nodes/playbook-inventory-node.js +51 -0
- package/dist/pipeline/nodes/playbook-questions-form-node.js +166 -0
- package/dist/pipeline/nodes/playbook-write-node.js +243 -0
- package/dist/pipeline/nodes/project-guidance-node.js +69 -0
- package/dist/pipeline/prompt-registry.js +4 -1
- package/dist/pipeline/prompt-runtime.js +6 -2
- package/dist/pipeline/spec-types.js +19 -0
- package/dist/pipeline/value-resolver.js +39 -1
- package/dist/playbook/practice-candidates.js +12 -0
- package/dist/playbook/repo-inventory.js +208 -0
- package/dist/prompts.js +31 -0
- package/dist/runtime/playbook.js +485 -0
- package/dist/runtime/project-guidance.js +339 -0
- package/dist/structured-artifact-schema-registry.js +8 -0
- package/dist/structured-artifact-schemas.json +235 -0
- package/dist/structured-artifacts.js +7 -1
- package/docs/declarative-workflows.md +565 -0
- package/docs/features.md +77 -0
- package/docs/playbook.md +327 -0
- package/package.json +8 -3
|
@@ -0,0 +1,208 @@
|
|
|
1
|
+
import { existsSync, readdirSync, readFileSync, statSync } from "node:fs";
|
|
2
|
+
import path from "node:path";
|
|
3
|
+
// Directory patterns excluded from the repository walk. The first three are
// matched as exact path components anywhere in the tree; the last is a
// glob-style description of the manifest-history pruning implemented
// structurally in shouldIgnoreDirectory (segment indices 0,1,3,4).
// This array is also echoed verbatim in the inventory output ("ignored_directories").
const IGNORED_DIRECTORIES = [".git", "node_modules", "dist", ".agentweaver/scopes/*/.artifacts/manifest-history"];
// Fast-lookup set for the component-wise ignores above (everything except the glob).
const EXACT_IGNORED_DIRS = new Set([".git", "node_modules", "dist"]);
|
|
5
|
+
/**
 * Normalize an OS-specific relative path to forward-slash (POSIX) form.
 * On POSIX hosts path.sep is already "/", so this is a no-op rewrite.
 *
 * @param {string} relativePath - Path using the platform separator.
 * @returns {string} The same path with every separator replaced by "/".
 */
function toPosix(relativePath) {
    return relativePath.replaceAll(path.sep, "/");
}
|
|
8
|
+
/**
 * Decide whether a directory (given relative to the walk root) should be
 * pruned from the repository walk.
 *
 * @param {string} relativePath - Directory path relative to the root; "" means the root itself.
 * @returns {boolean} True when the directory must be skipped.
 */
function shouldIgnoreDirectory(relativePath) {
    // The walk root itself is never ignored.
    if (!relativePath) {
        return false;
    }
    const segments = toPosix(relativePath).split("/");
    // Any exact-ignored component (.git, node_modules, dist) prunes the subtree.
    for (const segment of segments) {
        if (EXACT_IGNORED_DIRS.has(segment)) {
            return true;
        }
    }
    // Structural match for .agentweaver/scopes/<any-scope>/.artifacts/manifest-history:
    // segment index 2 is the wildcard scope name and is deliberately not checked.
    if (segments.length < 5) {
        return false;
    }
    const [first, second, , fourth, fifth] = segments;
    return first === ".agentweaver"
        && second === "scopes"
        && fourth === ".artifacts"
        && fifth === "manifest-history";
}
|
|
22
|
+
/**
 * Recursively collect every file under `root`, skipping ignored directories,
 * as root-relative POSIX paths sorted with localeCompare.
 *
 * @param {string} root - Absolute walk root; paths are reported relative to it.
 * @param {string} [directory=root] - Current directory being visited (recursion cursor).
 * @returns {string[]} Sorted relative POSIX file paths.
 */
function walkFiles(root, directory = root) {
    // Sort entries by name for a deterministic traversal order; the final
    // sort below fixes the order of the returned list regardless.
    const entries = readdirSync(directory, { withFileTypes: true });
    entries.sort((a, b) => a.name.localeCompare(b.name));
    const collected = [];
    for (const entry of entries) {
        const absolutePath = path.join(directory, entry.name);
        const relativePath = toPosix(path.relative(root, absolutePath));
        if (entry.isDirectory()) {
            // Recurse unless the directory is pruned; ignored subtrees
            // contribute nothing to the result.
            if (!shouldIgnoreDirectory(relativePath)) {
                collected.push(...walkFiles(root, absolutePath));
            }
        }
        else if (entry.isFile()) {
            // Symlinks and other special entries are intentionally skipped.
            collected.push(relativePath);
        }
    }
    return collected.sort((a, b) => a.localeCompare(b));
}
|
|
40
|
+
// True when `filePath` (a root-relative POSIX path) is present in the
// precomputed file set produced by walkFiles. Thin Set.has wrapper kept for
// call-site readability.
function hasFile(files, filePath) {
    return files.has(filePath);
}
|
|
43
|
+
/**
 * Append an observation record to `target`, but only when it is backed by at
 * least one evidence path. Evidence is deduplicated and sorted for
 * deterministic output; `details` is attached only when non-empty.
 *
 * @param {object[]} target - Observation list to mutate.
 * @param {string} kind - Machine-readable observation kind.
 * @param {string} title - Human-readable title.
 * @param {string[]} evidence - Supporting file paths (may contain duplicates).
 * @param {object} [details] - Optional extra metadata.
 */
function addObservation(target, kind, title, evidence, details) {
    const evidence_paths = [...new Set(evidence)].sort((a, b) => a.localeCompare(b));
    // An observation without concrete evidence is dropped entirely.
    if (evidence_paths.length === 0) {
        return;
    }
    const observation = { kind, title, evidence_paths };
    if (details && Object.keys(details).length > 0) {
        observation.details = details;
    }
    target.push(observation);
}
|
|
55
|
+
/**
 * Return the sorted subset of `files` whose path matches `pattern`.
 *
 * Fix: RegExp.prototype.test advances `lastIndex` when the regex carries the
 * /g or /y flag, so repeated .test() calls against successive entries would
 * silently skip matches. Reset `lastIndex` before each test so the function
 * is safe for any regex flags (non-global regexes are unaffected).
 *
 * @param {string[]} files - Root-relative POSIX file paths.
 * @param {RegExp} pattern - Pattern tested against each full path.
 * @returns {string[]} Matching paths sorted with localeCompare.
 */
function findByRegex(files, pattern) {
    const matches = files.filter((filePath) => {
        pattern.lastIndex = 0;
        return pattern.test(filePath);
    });
    return matches.sort((left, right) => left.localeCompare(right));
}
|
|
58
|
+
/**
 * Read and parse a JSON file under `root`, returning the parsed value only
 * when it is a plain object. Missing files, malformed JSON, arrays, and
 * primitive top-level values all yield null (best-effort read, never throws).
 *
 * @param {string} root - Absolute base directory.
 * @param {string} relativePath - Path of the JSON file relative to root.
 * @returns {object|null} Parsed plain object, or null.
 */
function readJsonIfPresent(root, relativePath) {
    const absolutePath = path.join(root, relativePath);
    if (!existsSync(absolutePath)) {
        return null;
    }
    let parsed;
    try {
        parsed = JSON.parse(readFileSync(absolutePath, "utf8"));
    }
    catch {
        // Unreadable or malformed content is treated as absent.
        return null;
    }
    const isPlainObject = parsed !== null && typeof parsed === "object" && !Array.isArray(parsed);
    return isPlainObject ? parsed : null;
}
|
|
71
|
+
/**
 * Return the sorted subset of `files` whose UTF-8 content matches `pattern`.
 * Files larger than 256 KiB and files that cannot be stat'ed or read are
 * treated as non-matching rather than fatal.
 *
 * Fix: RegExp.prototype.test is stateful for /g and /y regexes (it advances
 * `lastIndex`), which would let scan state leak from one file to the next.
 * Reset `lastIndex` before each file so any regex flags are safe here.
 *
 * @param {string} root - Absolute base directory.
 * @param {string[]} files - Root-relative POSIX file paths to scan.
 * @param {RegExp} pattern - Pattern tested against each file's full text.
 * @returns {string[]} Matching paths sorted with localeCompare.
 */
function detectTextUsage(root, files, pattern) {
    const MAX_SCANNED_BYTES = 256 * 1024;
    const candidates = files.filter((filePath) => {
        const absolutePath = path.join(root, filePath);
        try {
            // Skip oversized files: keeps the scan fast and avoids loading blobs.
            if (statSync(absolutePath).size > MAX_SCANNED_BYTES) {
                return false;
            }
            pattern.lastIndex = 0;
            return pattern.test(readFileSync(absolutePath, "utf8"));
        }
        catch {
            // Missing/unreadable file (e.g. racy deletion) counts as no match.
            return false;
        }
    });
    return candidates.sort((left, right) => left.localeCompare(right));
}
|
|
86
|
+
/**
 * Walk a repository and build a deterministic, evidence-backed inventory of
 * stack, test, architecture, quality, specification, runtime-config, and
 * generated-code signals. Every observation carries concrete file paths;
 * nothing is inferred without evidence (observations with no evidence are
 * dropped by addObservation).
 *
 * @param {string} repositoryRoot - Repository root (resolved to absolute).
 * @param {string} [generatedAt] - ISO timestamp recorded in the result; defaults to now.
 * @returns {object} Inventory with summary, sorted observation sections, and a
 *   deduplicated union of all evidence paths.
 */
export function collectRepoInventory(repositoryRoot, generatedAt = new Date().toISOString()) {
    const root = path.resolve(repositoryRoot);
    const files = walkFiles(root);
    // Set view of the same paths for O(1) exact-file checks below.
    const fileSet = new Set(files);
    // One accumulator per output section; all filled via addObservation.
    const stack_indicators = [];
    const test_structure = [];
    const architecture_hints = [];
    const quality_tooling = [];
    const specification_files = [];
    const runtime_configs = [];
    const generated_code = [];
    // package.json (root only) drives the Node-specific signals.
    const packageJson = readJsonIfPresent(root, "package.json");
    if (packageJson) {
        // Script names only (sorted); guard against non-object "scripts" values.
        const scripts = packageJson.scripts && typeof packageJson.scripts === "object" && !Array.isArray(packageJson.scripts)
            ? Object.keys(packageJson.scripts).sort()
            : [];
        addObservation(stack_indicators, "node_package", "Node package manifest", ["package.json"], scripts.length > 0 ? { scripts } : {});
        if (scripts.length > 0) {
            addObservation(quality_tooling, "npm_scripts", "NPM scripts", ["package.json"], { scripts });
        }
    }
    // Well-known root-level manifest files, checked by exact path.
    for (const [kind, title, evidence] of [
        ["go_module", "Go module", "go.mod"],
        ["maven_project", "Maven project", "pom.xml"],
        ["gradle_project", "Gradle project", "build.gradle"],
        ["gradle_kotlin_project", "Gradle Kotlin project", "build.gradle.kts"],
        ["typescript_config", "TypeScript configuration", "tsconfig.json"],
        ["dockerfile", "Dockerfile", "Dockerfile"],
        ["makefile", "Makefile", "Makefile"],
    ]) {
        if (hasFile(fileSet, evidence)) {
            addObservation(stack_indicators, kind, title, [evidence]);
        }
    }
    addObservation(stack_indicators, "compose_file", "Compose runtime file", findByRegex(files, /(^|\/)(docker-)?compose\.(ya?ml|json)$/i));
    // Specification-looking files anywhere in the tree; OpenAPI/Swagger subset
    // is additionally promoted to a stack indicator.
    const specFiles = findByRegex(files, /(^|\/)(openapi|swagger|asyncapi|graphql|schema|spec)[^/]*(\.(ya?ml|json|graphql|proto|md))$/i);
    addObservation(specification_files, "api_or_specification", "API or specification files", specFiles);
    if (specFiles.some((filePath) => /openapi|swagger/i.test(filePath))) {
        addObservation(stack_indicators, "openapi_specification", "OpenAPI or Swagger specification", specFiles.filter((filePath) => /openapi|swagger/i.test(filePath)));
    }
    // Test signals: directories (evidence is the containing directory, not the
    // file), per-language test-file conventions, fixtures, and content scans.
    addObservation(test_structure, "test_directories", "Test directories", findByRegex(files, /(^|\/)(tests?|__tests__)\//i).map((filePath) => filePath.split("/").slice(0, -1).join("/")).filter(Boolean));
    addObservation(test_structure, "go_tests", "Go test files", findByRegex(files, /_test\.go$/));
    addObservation(test_structure, "typescript_tests", "TypeScript or JavaScript test files", findByRegex(files, /(\.|\/)(test|spec)\.[cm]?[tj]sx?$/));
    addObservation(test_structure, "java_tests", "Java test files", findByRegex(files, /(^|\/)[^/]*(Test|IT)\.java$/));
    addObservation(test_structure, "fixtures", "Fixture directories or files", findByRegex(files, /(^|\/)(fixtures?|testdata)\//i));
    // Content-based scans (reads file text, capped at 256 KiB per file).
    addObservation(test_structure, "testcontainers_usage", "Testcontainers usage", detectTextUsage(root, files, /testcontainers/i));
    addObservation(test_structure, "mocks_usage", "Mock usage", detectTextUsage(root, files, /\b(mock|vi\.fn|jest\.fn|sinon)\b/i));
    // Architecture hints from conventional directory names.
    addObservation(architecture_hints, "handlers_or_controllers", "Handlers or controllers", findByRegex(files, /(^|\/)(handlers?|controllers?)\//i));
    addObservation(architecture_hints, "services", "Service modules", findByRegex(files, /(^|\/)services?\//i));
    addObservation(architecture_hints, "repositories", "Repository modules", findByRegex(files, /(^|\/)repositories?\//i));
    addObservation(architecture_hints, "migrations", "Migration files", findByRegex(files, /(^|\/)migrations?\//i));
    addObservation(architecture_hints, "dtos", "DTO modules", findByRegex(files, /(^|\/)(dto|dtos)\//i));
    // Generated code: conventional directories plus ".generated."/".gen." infixes.
    addObservation(generated_code, "generated_paths", "Generated code paths", findByRegex(files, /(^|\/)(generated|gen)\//i).concat(findByRegex(files, /\.(generated|gen)\./i)));
    // Runtime configuration: env files and common config locations/names.
    addObservation(runtime_configs, "environment_files", "Environment example files", findByRegex(files, /(^|\/)\.env(\.example|\.sample)?$/i));
    addObservation(runtime_configs, "runtime_config_files", "Runtime configuration files", findByRegex(files, /(^|\/)(config|configs)\//i).concat(findByRegex(files, /(^|\/)(app|application|settings)\.(json|ya?ml|toml|properties)$/i)));
    // Quality tooling: linters, formatters, test runners, CI, custom scripts.
    addObservation(quality_tooling, "eslint_config", "ESLint configuration", findByRegex(files, /(^|\/)(eslint\.config\.[cm]?js|\.eslintrc(\..*)?)$/i));
    addObservation(quality_tooling, "prettier_config", "Prettier configuration", findByRegex(files, /(^|\/)(\.prettierrc(\..*)?|prettier\.config\.[cm]?js)$/i));
    addObservation(quality_tooling, "golangci_lint", "golangci-lint configuration", findByRegex(files, /(^|\/)\.golangci\.ya?ml$/i));
    addObservation(quality_tooling, "jest_or_vitest", "Jest or Vitest configuration", findByRegex(files, /(^|\/)(jest|vitest)\.config\.[cm]?[tj]s$/i));
    addObservation(quality_tooling, "ci_config", "CI configuration", findByRegex(files, /(^|\/)(\.github\/workflows\/|\.gitlab-ci\.yml|Jenkinsfile)/i));
    addObservation(quality_tooling, "custom_scripts", "Custom scripts", findByRegex(files, /(^|\/)(scripts?|bin)\//i));
    // Flattened view of all sections, used only to compute the evidence union.
    const observations = [
        ...stack_indicators,
        ...test_structure,
        ...architecture_hints,
        ...quality_tooling,
        ...specification_files,
        ...runtime_configs,
        ...generated_code,
    ];
    const evidence = Array.from(new Set(observations.flatMap((observation) => observation.evidence_paths))).sort((left, right) => left.localeCompare(right));
    return {
        summary: `Repository inventory captured ${evidence.length} evidence paths across stack, tests, architecture, quality, specifications, runtime config, and generated code.`,
        repository_root: root,
        generated_at: generatedAt,
        // Copy so callers cannot mutate the module-level constant.
        ignored_directories: [...IGNORED_DIRECTORIES],
        // Note: .sort() mutates the local accumulators in place, which is safe
        // here because they are not used again after this return expression.
        stack_indicators: stack_indicators.sort((left, right) => left.kind.localeCompare(right.kind)),
        test_structure: test_structure.sort((left, right) => left.kind.localeCompare(right.kind)),
        architecture_hints: architecture_hints.sort((left, right) => left.kind.localeCompare(right.kind)),
        quality_tooling: quality_tooling.sort((left, right) => left.kind.localeCompare(right.kind)),
        specification_files: specification_files.sort((left, right) => left.kind.localeCompare(right.kind)),
        runtime_configs: runtime_configs.sort((left, right) => left.kind.localeCompare(right.kind)),
        generated_code: generated_code.sort((left, right) => left.kind.localeCompare(right.kind)),
        evidence,
    };
}
|
|
172
|
+
/**
 * Render one inventory section as a markdown bullet list:
 * "- <title>: <path>, <path>" per observation, or a fixed placeholder bullet
 * when the section is empty.
 *
 * @param {Array<{title: string, evidence_paths: string[]}>} observations
 * @returns {string} Newline-joined markdown bullets.
 */
function renderObservationList(observations) {
    if (observations.length === 0) {
        return "- No confirmed signals.";
    }
    const bullets = [];
    for (const { title, evidence_paths } of observations) {
        bullets.push(`- ${title}: ${evidence_paths.join(", ")}`);
    }
    return bullets.join("\n");
}
|
|
180
|
+
/**
 * Render a collected inventory as a human-readable markdown document:
 * title, summary, then one headed section per observation category, each
 * followed by a blank line (document ends with a trailing newline).
 *
 * @param {object} inventory - Result of collectRepoInventory.
 * @returns {string} Markdown text.
 */
export function renderRepoInventoryMarkdown(inventory) {
    // Section order is fixed and mirrors the inventory's category layout.
    const sections = [
        ["## Stack", inventory.stack_indicators],
        ["## Tests", inventory.test_structure],
        ["## Architecture", inventory.architecture_hints],
        ["## Quality", inventory.quality_tooling],
        ["## Specifications", inventory.specification_files],
        ["## Runtime Configuration", inventory.runtime_configs],
        ["## Generated Code", inventory.generated_code],
    ];
    const lines = ["# Repository Inventory", "", inventory.summary];
    for (const [heading, observations] of sections) {
        lines.push("", heading, renderObservationList(observations));
    }
    // Trailing empty entry yields a final newline after join.
    lines.push("");
    return lines.join("\n");
}
|
package/dist/prompts.js
CHANGED
|
@@ -11,6 +11,8 @@ function strictSchemaInstruction(outputFileVar, schemaId) {
|
|
|
11
11
|
`Canonical schema:\n${renderStructuredArtifactSchema(schemaId)}\n`);
|
|
12
12
|
}
|
|
13
13
|
export const PLAN_PROMPT_TEMPLATE = "Review and analyze the normalized task context in {task_context_json_file}. " +
|
|
14
|
+
"Optional compact project guidance is available at {project_guidance_file} with structured metadata at {project_guidance_json_file}; treat it as supplemental and do not let it override task context, planning answers, or required output schemas. " +
|
|
15
|
+
"Open referenced full examples only when directly relevant. " +
|
|
14
16
|
"Use planning answers from {planning_answers_json_file} when they exist and treat them as structured user clarifications. " +
|
|
15
17
|
"First create structured JSON artifacts - they are the source of truth for subsequent flows. " +
|
|
16
18
|
"Create human-readable markdown files as detailed derivative representations of these JSON artifacts for the user, not as brief summaries. " +
|
|
@@ -25,12 +27,37 @@ export const PLAN_PROMPT_TEMPLATE = "Review and analyze the normalized task cont
|
|
|
25
27
|
"Format markdown for design and plan comprehensively, with separate sections for Summary, Current State, Target State, Affected Code, Decisions, Migration/DB Changes, Risks, Implementation Steps, Tests, Rollout. " +
|
|
26
28
|
"JSON files must be valid and contain only JSON without markdown wrapping. ";
|
|
27
29
|
export const PLAN_QUESTIONS_PROMPT_TEMPLATE = "Review and analyze the normalized task context in {task_context_json_file}. " +
|
|
30
|
+
"Optional compact project guidance is available at {project_guidance_file} with structured metadata at {project_guidance_json_file}; treat it as supplemental project-local context and do not let it override task context or the planning-questions/v1 schema. " +
|
|
31
|
+
"Open referenced full examples only when directly relevant. " +
|
|
28
32
|
"Before final planning, determine if any clarifications are needed from the user. " +
|
|
29
33
|
strictSchemaInstruction("{planning_questions_json_file}", "planning-questions/v1") +
|
|
30
34
|
"Ask only questions without which the design/plan could be incorrect or too speculative. " +
|
|
31
35
|
"Do not ask obvious, decorative, or duplicate questions. " +
|
|
32
36
|
"Usually 1-5 questions are sufficient. " +
|
|
33
37
|
"The JSON file must be valid and contain only JSON without markdown wrapping. ";
|
|
38
|
+
export const PLAYBOOK_PRACTICE_CANDIDATES_PROMPT_TEMPLATE = "Use repo-inventory.json from {repo_inventory_json_file} as the deterministic source of repository facts. " +
|
|
39
|
+
"This step intentionally covers convention_scan semantics for v1 by deriving practice candidates from inventory evidence instead of creating a separate convention_scan artifact. " +
|
|
40
|
+
"Do not infer repository facts that are not supported by the inventory. " +
|
|
41
|
+
"Write evidence-backed practice candidates to {practice_candidates_json_file}, then write derivative Russian markdown to {practice_candidates_file}. " +
|
|
42
|
+
strictSchemaInstruction("{practice_candidates_json_file}", "practice-candidates/v1") +
|
|
43
|
+
"Every candidate must include id, title, proposed_rule_text, confidence, evidence_paths, rationale, and questions_needed. " +
|
|
44
|
+
"Every evidence_paths entry must be a concrete path from repo-inventory.json. " +
|
|
45
|
+
"Use only confidence values low, medium, and high. " +
|
|
46
|
+
"Weak or ambiguous evidence must remain a low or medium confidence candidate and must include questions_needed before it can become mandatory. " +
|
|
47
|
+
"Markdown must be Russian and derivative of the JSON artifact. JSON files must contain only JSON without markdown wrapping. ";
|
|
48
|
+
export const PLAYBOOK_QUESTIONS_PROMPT_TEMPLATE = "Use repo-inventory.json from {repo_inventory_json_file} and practice-candidates.json from {practice_candidates_json_file} as source of truth. " +
|
|
49
|
+
"Generate targeted clarification questions to {playbook_questions_json_file}. " +
|
|
50
|
+
strictSchemaInstruction("{playbook_questions_json_file}", "playbook-questions/v1") +
|
|
51
|
+
"Questions must be tied to weak evidence, conflicting evidence, or candidate questions_needed entries. " +
|
|
52
|
+
"Reject generic project-preference questions. Every question must reference candidate_ids or evidence_paths and include rationale. " +
|
|
53
|
+
"If there are no targeted questions, write an empty questions array with a non-empty summary. JSON files must contain only JSON without markdown wrapping. ";
|
|
54
|
+
export const PLAYBOOK_DRAFT_PROMPT_TEMPLATE = "Use only structured JSON artifacts as source of truth: repo-inventory.json {repo_inventory_json_file}, practice-candidates.json {practice_candidates_json_file}, playbook-questions.json {playbook_questions_json_file}, and playbook-answers.json {playbook_answers_json_file}. " +
|
|
55
|
+
"Generate playbook-draft.json at {playbook_draft_json_file}, then derivative Russian markdown at {playbook_draft_file}. " +
|
|
56
|
+
strictSchemaInstruction("{playbook_draft_json_file}", "playbook-draft/v1") +
|
|
57
|
+
"The draft must separate accepted_rules from candidate_rules and unresolved_questions. " +
|
|
58
|
+
"Do not promote weak or unresolved candidates to accepted_rules without supporting user answers. Preserve evidence paths. " +
|
|
59
|
+
"Set proposed_files to the manifest-based layout: .agentweaver/playbook/manifest.yaml, .agentweaver/playbook/project.md, .agentweaver/playbook/practices/generated-rules.md, .agentweaver/playbook/examples/generated-example.md, and .agentweaver/playbook/templates/default.md. " +
|
|
60
|
+
"Markdown must be Russian and derivative of the JSON artifact. JSON files must contain only JSON without markdown wrapping. ";
|
|
34
61
|
export const TASK_CONTEXT_FROM_JIRA_PROMPT_TEMPLATE = "Normalize the Jira task context into a connector-agnostic task context. " +
|
|
35
62
|
"Use Jira issue JSON {jira_task_file} as the primary source of truth. " +
|
|
36
63
|
"Also analyze Jira attachments manifest {jira_attachments_manifest_file} and attachments text context {jira_attachments_context_file}; if attachments contain more specific requirements, constraints, file lists, migration strategy, or invariants, preserve them in the normalized result. " +
|
|
@@ -69,6 +96,7 @@ export const MR_DESCRIPTION_PROMPT_TEMPLATE = "Review the task in {jira_task_fil
|
|
|
69
96
|
"Prepare a very brief intent description for the merge request without implementation details, file lists, or technical details. " +
|
|
70
97
|
`First write the source-of-truth JSON to {mr_description_json_file}. ${strictSchemaInstruction("{mr_description_json_file}", "mr-description/v1")}Then write the derivative markdown version to {mr_description_file}. `;
|
|
71
98
|
export const IMPLEMENT_PROMPT_TEMPLATE = "Use only structured artifacts as source of truth. " +
|
|
99
|
+
"Optional compact project guidance is available at {project_guidance_file} with structured metadata at {project_guidance_json_file}; treat it as supplemental and do not let it override the design, plan, or QA JSON. Open referenced full examples only when directly relevant. " +
|
|
72
100
|
"Analyze the system design {design_json_file}, implementation plan {plan_json_file}, and QA plan {qa_json_file}, then proceed with implementation according to those artifacts. " +
|
|
73
101
|
"Treat the QA plan as the source of truth for the minimum required test scenarios, edge cases, regression checks, and validation behavior that the implementation must satisfy. " +
|
|
74
102
|
"When the repository contains automated tests, add or update tests for the key scenarios from the QA plan whenever it is practical in the current codebase. " +
|
|
@@ -76,6 +104,7 @@ export const IMPLEMENT_PROMPT_TEMPLATE = "Use only structured artifacts as sourc
|
|
|
76
104
|
"Markdown artifacts such as {design_file}, {plan_file}, and {qa_file} are intended only for human reading and should not define the implementation. ";
|
|
77
105
|
export const REVIEW_PROMPT_TEMPLATE = "Conduct a code review of the current changes. " +
|
|
78
106
|
"Use only structured artifacts as source of truth. " +
|
|
107
|
+
"Optional compact project guidance is available at {project_guidance_file} with structured metadata at {project_guidance_json_file}; treat it as supplemental and do not let it replace required review inputs. Open referenced full examples only when directly relevant. " +
|
|
79
108
|
"Required planning inputs: design markdown {design_file}, design JSON {design_json_file}, plan markdown {plan_file}, and plan JSON {plan_json_file}. " +
|
|
80
109
|
"Optional task context is provided through these variables and may contain the literal value 'not provided' when absent: normalized task context JSON {task_context_json_file}, Jira task JSON {jira_task_file}, instant-task input JSON {task_input_json_file}. " +
|
|
81
110
|
"When an optional variable is 'not provided', treat that source as unavailable and do not invent details from it. " +
|
|
@@ -85,6 +114,7 @@ export const REVIEW_PROMPT_TEMPLATE = "Conduct a code review of the current chan
|
|
|
85
114
|
"Then write the derivative markdown version to {review_file}. ";
|
|
86
115
|
export const DESIGN_REVIEW_PROMPT_TEMPLATE = "Conduct a structured planning critique as a specification critic, not as an implementer. " +
|
|
87
116
|
"Use structured JSON artifacts as the source of truth for semantics. " +
|
|
117
|
+
"Optional compact project guidance is available at {project_guidance_file} with structured metadata at {project_guidance_json_file}; treat it as supplemental and do not let it override the structured planning artifacts. Open referenced full examples only when directly relevant. " +
|
|
88
118
|
"Required planning inputs: design markdown {design_file}, design JSON {design_json_file}, implementation plan markdown {plan_file}, implementation plan JSON {plan_json_file}. " +
|
|
89
119
|
"Review the markdown files as derivative human-readable renderings of the same planning run, but do not let markdown override the structured JSON. " +
|
|
90
120
|
"Optional supplemental context is provided through these variables and may contain the literal value 'not provided' when absent: normalized task context JSON {task_context_json_file}, QA markdown {qa_file}, QA JSON {qa_json_file}, Jira task JSON {jira_task_file}, Jira attachments manifest {jira_attachments_manifest_file}, Jira attachments context {jira_attachments_context_file}, planning answers JSON {planning_answers_json_file}, instant-task input JSON {task_input_json_file}. " +
|
|
@@ -124,6 +154,7 @@ export const REVIEW_SUMMARY_PROMPT_TEMPLATE = "Look at {review_file}. " +
|
|
|
124
154
|
"Create a brief list of comments without details, 3-7 items. " +
|
|
125
155
|
"Write the result to {review_summary_file}.";
|
|
126
156
|
export const REVIEW_FIX_PROMPT_TEMPLATE = "Use only structured artifacts as source of truth. " +
|
|
157
|
+
"Optional compact project guidance is available at {project_guidance_file} with structured metadata at {project_guidance_json_file}; treat it as supplemental and do not let it override review findings or selected repair scope. Open referenced full examples only when directly relevant. " +
|
|
127
158
|
"Analyze the findings in {review_json_file}. " +
|
|
128
159
|
"Fix what is contained in the additional instructions, and if there are none - fix all items. " +
|
|
129
160
|
"After completion, be sure to run the linter outside the sandbox, all tests, generate make swagger. " +
|