@wkronmiller/lisa 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +407 -0
- package/bin/lisa-runtime.js +8797 -0
- package/bin/lisa.js +21 -0
- package/completion.ts +58 -0
- package/install.ps1 +51 -0
- package/install.sh +93 -0
- package/lisa.ts +6 -0
- package/package.json +66 -0
- package/skills/README.md +28 -0
- package/skills/claude-code/CLAUDE.md +151 -0
- package/skills/codex/AGENTS.md +151 -0
- package/skills/gemini/GEMINI.md +151 -0
- package/skills/opencode/AGENTS.md +152 -0
- package/src/cli.ts +85 -0
- package/src/harness/base-adapter.ts +47 -0
- package/src/harness/claude-code.ts +106 -0
- package/src/harness/codex.ts +80 -0
- package/src/harness/command.ts +173 -0
- package/src/harness/gemini.ts +74 -0
- package/src/harness/opencode.ts +84 -0
- package/src/harness/registry.ts +29 -0
- package/src/harness/runner.ts +19 -0
- package/src/harness/types.ts +73 -0
- package/src/output-mode.ts +32 -0
- package/src/skill/artifacts.ts +174 -0
- package/src/skill/cli.ts +29 -0
- package/src/skill/install.ts +317 -0
- package/src/spec/agent-guidance.ts +466 -0
- package/src/spec/cli.ts +151 -0
- package/src/spec/commands/check.ts +1 -0
- package/src/spec/commands/config.ts +146 -0
- package/src/spec/commands/diff.ts +1 -0
- package/src/spec/commands/generate.ts +1 -0
- package/src/spec/commands/guide.ts +1 -0
- package/src/spec/commands/harness-list.ts +36 -0
- package/src/spec/commands/implement.ts +1 -0
- package/src/spec/commands/import.ts +1 -0
- package/src/spec/commands/init.ts +1 -0
- package/src/spec/commands/status.ts +87 -0
- package/src/spec/config.ts +63 -0
- package/src/spec/diff.ts +791 -0
- package/src/spec/extensions/benchmark.ts +347 -0
- package/src/spec/extensions/registry.ts +59 -0
- package/src/spec/extensions/types.ts +56 -0
- package/src/spec/grammar/index.ts +14 -0
- package/src/spec/grammar/parser.ts +443 -0
- package/src/spec/grammar/types.ts +70 -0
- package/src/spec/grammar/validator.ts +104 -0
- package/src/spec/loader.ts +174 -0
- package/src/spec/local-config.ts +59 -0
- package/src/spec/parser.ts +226 -0
- package/src/spec/path-utils.ts +73 -0
- package/src/spec/planner.ts +299 -0
- package/src/spec/prompt-renderer.ts +318 -0
- package/src/spec/skill-content.ts +119 -0
- package/src/spec/types.ts +239 -0
- package/src/spec/validator.ts +443 -0
- package/src/spec/workflows/check.ts +1534 -0
- package/src/spec/workflows/diff.ts +209 -0
- package/src/spec/workflows/generate.ts +1270 -0
- package/src/spec/workflows/guide.ts +190 -0
- package/src/spec/workflows/implement.ts +797 -0
- package/src/spec/workflows/import.ts +986 -0
- package/src/spec/workflows/init.ts +548 -0
- package/src/spec/workflows/status.ts +22 -0
- package/src/spec/workspace.ts +541 -0
- package/uninstall.ps1 +21 -0
- package/uninstall.sh +22 -0
|
@@ -0,0 +1,299 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
LoadedSpecWorkspace,
|
|
3
|
+
ParsedSpecDocument,
|
|
4
|
+
PlannedBenchmarkContext,
|
|
5
|
+
PlannedSpecTarget,
|
|
6
|
+
PlannedTask,
|
|
7
|
+
SpecDelta,
|
|
8
|
+
SpecDiffReport,
|
|
9
|
+
SpecImplementationPlan,
|
|
10
|
+
} from "./types";
|
|
11
|
+
|
|
12
|
+
function normalizeHeading(title: string): string {
|
|
13
|
+
return title.toLowerCase().replace(/\s+/g, " ").trim();
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
function extractStringArray(frontmatter: Record<string, unknown>, key: string): string[] {
|
|
17
|
+
const value = frontmatter[key];
|
|
18
|
+
return Array.isArray(value)
|
|
19
|
+
? value.filter((entry): entry is string => typeof entry === "string" && entry.trim().length > 0)
|
|
20
|
+
: [];
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
function extractStringRecord(frontmatter: Record<string, unknown>, key: string): Record<string, string> {
|
|
24
|
+
const value = frontmatter[key];
|
|
25
|
+
if (!value || typeof value !== "object" || Array.isArray(value)) {
|
|
26
|
+
return {};
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
return Object.fromEntries(
|
|
30
|
+
Object.entries(value).filter((entry): entry is [string, string] => typeof entry[1] === "string" && entry[1].trim().length > 0),
|
|
31
|
+
);
|
|
32
|
+
}
|
|
33
|
+
|
|
34
|
+
function extractBulletList(document: ParsedSpecDocument | undefined, title: string): string[] {
|
|
35
|
+
const section = document?.sections.find((entry) => normalizeHeading(entry.title) === normalizeHeading(title));
|
|
36
|
+
if (!section || section.content.trim().length === 0) {
|
|
37
|
+
return [];
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
const bullets = section.content
|
|
41
|
+
.split("\n")
|
|
42
|
+
.map((line) => line.trim())
|
|
43
|
+
.filter((line) => line.startsWith("- "))
|
|
44
|
+
.map((line) => line.slice(2).trim())
|
|
45
|
+
.filter((line) => line.length > 0);
|
|
46
|
+
|
|
47
|
+
return bullets.length > 0 ? bullets : [section.content.trim()];
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
function getDeltaDocument(delta: SpecDelta): ParsedSpecDocument | undefined {
|
|
51
|
+
return delta.nextDocument ?? delta.previousDocument;
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
function getBaseSpecDocument(
|
|
55
|
+
specId: string,
|
|
56
|
+
workspace: LoadedSpecWorkspace,
|
|
57
|
+
deltas: SpecDelta[],
|
|
58
|
+
): ParsedSpecDocument | undefined {
|
|
59
|
+
const changedBase = deltas.find((delta) => delta.kind === "base" && delta.specId === specId);
|
|
60
|
+
if (changedBase) {
|
|
61
|
+
return getDeltaDocument(changedBase);
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
return workspace.documents.find((document) => document.kind === "base" && document.id === specId);
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
function buildBenchmarkContext(delta: SpecDelta): PlannedBenchmarkContext | undefined {
|
|
68
|
+
const document = getDeltaDocument(delta);
|
|
69
|
+
if (!document) {
|
|
70
|
+
return undefined;
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
return {
|
|
74
|
+
id: delta.specId,
|
|
75
|
+
path: document.path,
|
|
76
|
+
fileChange: delta.fileChange,
|
|
77
|
+
environment: typeof document.frontmatter.environment === "string" ? document.frontmatter.environment : undefined,
|
|
78
|
+
required: typeof document.frontmatter.required === "boolean" ? document.frontmatter.required : undefined,
|
|
79
|
+
command: typeof document.frontmatter.command === "string" ? document.frontmatter.command : undefined,
|
|
80
|
+
metrics: extractStringRecord(document.frontmatter, "metrics"),
|
|
81
|
+
};
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
/**
 * Collect every benchmark sidecar that extends the given base spec.
 *
 * Two passes feed one map, and insertion order gives precedence:
 *  1. benchmark documents already loaded in the workspace are added first
 *     (with fileChange hard-coded to "modified" — presumably the default
 *     when only the workspace copy is known; confirm against callers);
 *  2. benchmark deltas from the current diff are added second, overwriting
 *     any workspace entry that shares the same id/path key.
 *
 * The result is sorted by id so downstream rendering is deterministic.
 */
function getBenchmarkContexts(
  specId: string,
  workspace: LoadedSpecWorkspace,
  deltas: SpecDelta[],
): PlannedBenchmarkContext[] {
  const contexts = new Map<string, PlannedBenchmarkContext>();

  for (const document of workspace.documents) {
    if (document.kind !== "benchmark" || document.frontmatter.extends !== specId) {
      continue;
    }

    // Key by declared id when present, otherwise by path, so a later delta
    // entry with the same identity replaces this baseline entry.
    contexts.set(document.id ?? document.path, {
      id: document.id ?? document.path,
      path: document.path,
      fileChange: "modified",
      environment: typeof document.frontmatter.environment === "string" ? document.frontmatter.environment : undefined,
      required: typeof document.frontmatter.required === "boolean" ? document.frontmatter.required : undefined,
      command: typeof document.frontmatter.command === "string" ? document.frontmatter.command : undefined,
      metrics: extractStringRecord(document.frontmatter, "metrics"),
    });
  }

  for (const delta of deltas) {
    if (delta.kind !== "benchmark" || delta.extendsSpecId !== specId) {
      continue;
    }

    const context = buildBenchmarkContext(delta);
    if (context) {
      contexts.set(context.id, context);
    }
  }

  return Array.from(contexts.values()).sort((left, right) => left.id.localeCompare(right.id));
}
|
|
120
|
+
|
|
121
|
+
function buildSpecTarget(
|
|
122
|
+
baseDelta: SpecDelta | undefined,
|
|
123
|
+
baseDocument: ParsedSpecDocument,
|
|
124
|
+
workspace: LoadedSpecWorkspace,
|
|
125
|
+
deltas: SpecDelta[],
|
|
126
|
+
): PlannedSpecTarget {
|
|
127
|
+
return {
|
|
128
|
+
specId: baseDocument.id ?? baseDocument.path,
|
|
129
|
+
path: baseDocument.path,
|
|
130
|
+
fileChange: baseDelta?.fileChange ?? "modified",
|
|
131
|
+
codePaths: extractStringArray(baseDocument.frontmatter, "code_paths"),
|
|
132
|
+
testPaths: extractStringArray(baseDocument.frontmatter, "test_paths"),
|
|
133
|
+
testCommands: extractStringArray(baseDocument.frontmatter, "test_commands"),
|
|
134
|
+
invariants: extractBulletList(baseDocument, "Invariants"),
|
|
135
|
+
acceptanceCriteria: extractBulletList(baseDocument, "Acceptance Criteria"),
|
|
136
|
+
benchmarkSidecars: getBenchmarkContexts(baseDocument.id ?? baseDocument.path, workspace, deltas),
|
|
137
|
+
};
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
function hasChangeType(delta: SpecDelta, changeType: string): boolean {
|
|
141
|
+
return delta.sectionChanges.some((change) => change.changeType === changeType);
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
function hasAnyChangeType(delta: SpecDelta, changeTypes: string[]): boolean {
|
|
145
|
+
return changeTypes.some((changeType) => hasChangeType(delta, changeType));
|
|
146
|
+
}
|
|
147
|
+
|
|
148
|
+
/**
 * Translate one spec delta into planned implementation tasks.
 *
 * Base deltas produce, depending on fileChange:
 *  - "added":    a code task plus a test task;
 *  - "modified": a code task when any substantive section/frontmatter change
 *                occurred, a test task for acceptance-criteria changes, a
 *                review task for invariant changes, and a catch-all review
 *                task when sections changed but nothing above fired;
 *  - otherwise (deletion): a single conservative review task.
 * Benchmark deltas produce a review task on deletion, or a benchmark task
 * when section changes were parsed.
 *
 * Task ids are `<specId>:<type>`, so at most one task of each type per spec
 * survives later dedup by id.
 */
function buildTasksForDelta(delta: SpecDelta, target?: PlannedSpecTarget): PlannedTask[] {
  const tasks: PlannedTask[] = [];
  // Combined code+test paths, used as targets for review-style tasks.
  const baseTargets = target ? [...target.codePaths, ...target.testPaths] : undefined;

  if (delta.kind === "base") {
    if (delta.fileChange === "added") {
      tasks.push({
        id: `${delta.specId}:code`,
        specId: delta.specId,
        type: "code",
        description: `Implement the new behavior or repository artifacts described by ${delta.specId} within the mapped code paths, creating missing mapped files when required and starting with the narrowest affected files.`,
        targets: target?.codePaths,
      });
      tasks.push({
        id: `${delta.specId}:test`,
        specId: delta.specId,
        type: "test",
        description: `Add or update the smallest mapped tests or artifact assertions needed to cover every acceptance criterion introduced by ${delta.specId}.`,
        targets: target?.testPaths,
      });
    } else if (delta.fileChange === "modified") {
      // Any of these change types implies the implementation may need edits.
      if (
        hasAnyChangeType(delta, [
          "frontmatter_changed",
          "summary_changed",
          "use_case_added_or_removed",
          "invariant_added_or_removed",
          "failure_modes_changed",
          "acceptance_added_or_removed",
        ])
      ) {
        tasks.push({
          id: `${delta.specId}:code`,
          specId: delta.specId,
          type: "code",
          description: `Update the mapped implementation or repository artifacts directly impacted by the changed requirements in ${delta.specId}.`,
          targets: target?.codePaths,
        });
      }

      if (hasChangeType(delta, "acceptance_added_or_removed")) {
        tasks.push({
          id: `${delta.specId}:test`,
          specId: delta.specId,
          type: "test",
          description: `Refresh the smallest mapped tests or artifact assertions so the updated acceptance criteria for ${delta.specId} are enforced.`,
          targets: target?.testPaths,
        });
      }

      if (hasChangeType(delta, "invariant_added_or_removed")) {
        tasks.push({
          id: `${delta.specId}:review`,
          specId: delta.specId,
          type: "review",
          description: `Preserve and strengthen regression coverage for the invariants changed in ${delta.specId}, using the closest existing test layer when possible.`,
          targets: baseTargets,
        });
      }

      // Sections changed but none of the rules above matched: fall back to a
      // review task so the change is not silently dropped from the plan.
      if (delta.sectionChanges.length > 0 && tasks.length === 0) {
        tasks.push({
          id: `${delta.specId}:review`,
          specId: delta.specId,
          type: "review",
          description: `Review the changed spec metadata and ensure the implementation scope for ${delta.specId} still matches the spec.`,
          targets: baseTargets,
        });
      }
    } else {
      // Deleted base spec: never auto-remove code, only flag for review.
      tasks.push({
        id: `${delta.specId}:review`,
        specId: delta.specId,
        type: "review",
        description: `Review the deletion of ${delta.specId}; do not remove code automatically without explicit cleanup intent.`,
        targets: baseTargets,
      });
    }
  }

  if (delta.kind === "benchmark") {
    // Prefer the resolved sidecar paths; fall back to this delta's own path.
    const benchmarkTargets = target?.benchmarkSidecars.map((entry) => entry.path) ?? [delta.path];
    if (delta.fileChange === "deleted") {
      tasks.push({
        id: `${delta.specId}:review`,
        specId: delta.specId,
        type: "review",
        description: `Review benchmark sidecar deletion for ${delta.specId} and keep functional code changes conservative.`,
        targets: benchmarkTargets,
      });
    } else if (delta.sectionChanges.length > 0) {
      tasks.push({
        id: `${delta.specId}:benchmark`,
        specId: delta.specId,
        type: "benchmark",
        description: `Keep the implementation compatible with benchmark constraints described by ${delta.specId}.`,
        targets: benchmarkTargets,
      });
    }
  }

  return tasks;
}
|
|
251
|
+
|
|
252
|
+
/**
 * Derive an implementation plan from a spec diff.
 *
 * Walks every delta, resolves the base spec document each delta belongs to,
 * and accumulates:
 *  - impacted spec targets (first resolution per specId wins);
 *  - planned tasks keyed by task id, so a later delta producing the same
 *    task id overwrites the earlier one;
 *  - the ids of deleted base specs.
 * All returned collections are sorted for deterministic output.
 */
export function planSpecImplementation(
  workspace: LoadedSpecWorkspace,
  diff: SpecDiffReport,
): SpecImplementationPlan {
  const { deltas } = diff;
  const impactedSpecs = new Map<string, PlannedSpecTarget>();
  const tasks = new Map<string, PlannedTask>();
  const deletedSpecIds: string[] = [];

  for (const delta of deltas) {
    if (delta.kind === "base" && delta.fileChange === "deleted") {
      deletedSpecIds.push(delta.specId);
    }

    // Base deltas carry their own document; sidecar deltas resolve the base
    // spec they extend (a changed copy in the diff takes precedence over the
    // workspace copy via getBaseSpecDocument).
    const baseDocument = delta.kind === "base"
      ? getDeltaDocument(delta)
      : delta.extendsSpecId
        ? getBaseSpecDocument(delta.extendsSpecId, workspace, deltas)
        : undefined;
    // The matching base delta (if any) supplies the target's fileChange.
    const baseDelta = baseDocument?.id ? deltas.find((entry) => entry.kind === "base" && entry.specId === baseDocument.id) : undefined;
    const target = baseDocument ? buildSpecTarget(baseDelta, baseDocument, workspace, deltas) : undefined;

    if (target && !impactedSpecs.has(target.specId)) {
      impactedSpecs.set(target.specId, target);
    }

    for (const task of buildTasksForDelta(delta, target)) {
      tasks.set(task.id, task);
    }
  }

  const sortedImpactedSpecs = Array.from(impactedSpecs.values()).sort((left, right) => left.specId.localeCompare(right.specId));
  // Deduplicated test commands from every impacted spec that still exists.
  const testCommands = Array.from(
    new Set(
      sortedImpactedSpecs
        .filter((target) => target.fileChange !== "deleted")
        .flatMap((target) => target.testCommands),
    ),
  );

  return {
    diff,
    tasks: Array.from(tasks.values()).sort((left, right) => left.id.localeCompare(right.id)),
    impactedSpecs: sortedImpactedSpecs,
    testCommands,
    deletedSpecIds: deletedSpecIds.sort(),
  };
}
|
|
@@ -0,0 +1,318 @@
|
|
|
1
|
+
import { relative } from "path";
|
|
2
|
+
|
|
3
|
+
import { getSpecDiffRelativePath } from "./diff";
|
|
4
|
+
import type {
|
|
5
|
+
ParsedSpecDocument,
|
|
6
|
+
PlannedBenchmarkContext,
|
|
7
|
+
PlannedSpecTarget,
|
|
8
|
+
SpecCheckEvidence,
|
|
9
|
+
SpecDelta,
|
|
10
|
+
SpecImportUncertaintyLevel,
|
|
11
|
+
SpecImplementationPlan,
|
|
12
|
+
} from "./types";
|
|
13
|
+
|
|
14
|
+
function indentBlock(text: string): string {
|
|
15
|
+
return text
|
|
16
|
+
.split("\n")
|
|
17
|
+
.map((line) => ` ${line}`)
|
|
18
|
+
.join("\n");
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
function relativePath(workspacePath: string, path: string): string {
|
|
22
|
+
return relative(workspacePath, path).split("\\").join("/") || path;
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
function formatSectionChange(delta: SpecDelta): string {
|
|
26
|
+
if (delta.sectionChanges.length === 0) {
|
|
27
|
+
return " - no parsed section changes detected\n";
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
return delta.sectionChanges.map((change) => {
|
|
31
|
+
const parts = [` - ${change.section} [${change.changeType}]`];
|
|
32
|
+
if (change.oldText) {
|
|
33
|
+
parts.push(` old:\n${indentBlock(change.oldText)}`);
|
|
34
|
+
}
|
|
35
|
+
if (change.newText) {
|
|
36
|
+
parts.push(` new:\n${indentBlock(change.newText)}`);
|
|
37
|
+
}
|
|
38
|
+
return parts.join("\n");
|
|
39
|
+
}).join("\n");
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
function formatBenchmark(benchmark: PlannedBenchmarkContext, workspacePath: string): string {
|
|
43
|
+
const metricEntries = Object.entries(benchmark.metrics)
|
|
44
|
+
.map(([metric, threshold]) => `${metric} ${threshold}`)
|
|
45
|
+
.join(", ");
|
|
46
|
+
|
|
47
|
+
return [
|
|
48
|
+
` - ${benchmark.id} (${benchmark.fileChange}) at ${relativePath(workspacePath, benchmark.path)}`,
|
|
49
|
+
` environment: ${benchmark.environment ?? "(unspecified)"}`,
|
|
50
|
+
` required: ${benchmark.required === undefined ? "(unspecified)" : benchmark.required ? "true" : "false"}`,
|
|
51
|
+
` command: ${benchmark.command ?? "(unspecified)"}`,
|
|
52
|
+
` metrics: ${metricEntries || "(none)"}`,
|
|
53
|
+
].join("\n");
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
function formatImpactedSpec(target: PlannedSpecTarget, workspacePath: string): string {
|
|
57
|
+
const lines = [
|
|
58
|
+
`- ${target.specId} (${target.fileChange}) at ${relativePath(workspacePath, target.path)}`,
|
|
59
|
+
` code_paths: ${target.codePaths.join(" | ") || "(none)"}`,
|
|
60
|
+
` test_paths: ${target.testPaths.join(" | ") || "(none)"}`,
|
|
61
|
+
` test_commands: ${target.testCommands.join(" | ") || "(none)"}`,
|
|
62
|
+
` invariants: ${target.invariants.join(" | ") || "(none)"}`,
|
|
63
|
+
` acceptance_criteria: ${target.acceptanceCriteria.join(" | ") || "(none)"}`,
|
|
64
|
+
];
|
|
65
|
+
|
|
66
|
+
if (target.benchmarkSidecars.length > 0) {
|
|
67
|
+
lines.push(" benchmark_sidecars:");
|
|
68
|
+
lines.push(target.benchmarkSidecars.map((benchmark) => formatBenchmark(benchmark, workspacePath)).join("\n"));
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
return lines.join("\n");
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
/**
 * Render the prompt given to a coding agent to implement the planned spec
 * deltas. The agent must reply inside LISA_IMPLEMENT_RESULT_START/END
 * markers, or abort inside LISA_ABORT_START/END markers.
 */
export function renderImplementPrompt(plan: SpecImplementationPlan): string {
  // One bullet per changed delta, with its parsed section changes inlined.
  const changedSpecs = plan.diff.deltas
    .map((delta) => {
      const lines = [
        `- ${delta.specId} (${delta.kind}, ${delta.fileChange}) at ${getSpecDiffRelativePath(plan.diff, delta.path)}`,
      ];

      // Sidecar deltas also report which base spec they extend.
      if (delta.kind !== "base" && delta.extendsSpecId) {
        lines.push(` extends: ${delta.extendsSpecId}`);
      }

      lines.push(formatSectionChange(delta));
      return lines.join("\n");
    })
    .join("\n\n");

  const impactedSpecs = plan.impactedSpecs.length > 0
    ? plan.impactedSpecs.map((target) => formatImpactedSpec(target, plan.diff.workspacePath)).join("\n\n")
    : "- No mapped base specs were resolved from the changed deltas.";

  const tasks = plan.tasks.length > 0
    ? plan.tasks
      .map((task) => `- [${task.type}] ${task.description}${task.targets && task.targets.length > 0 ? ` Targets: ${task.targets.join(" | ")}` : ""}`)
      .join("\n")
    : "- No implementation tasks were derived from the changed specs.";

  return `You are implementing code and test updates for Lisa spec deltas.

Return a brief plain-text note only inside these exact markers:
LISA_IMPLEMENT_RESULT_START
Implemented:
- <short bullet>
Open Questions:
- <optional bullet or \"none\">
LISA_IMPLEMENT_RESULT_END

Rules:
- If you discover the task cannot be completed safely with the capabilities available in this session, respond with only:
LISA_ABORT_START
<short reason>
LISA_ABORT_END
- Edit code, tests, and mapped repository artifacts only. Do not modify files under .specs/.
- Prefer the narrowest change that satisfies the spec.
- Start with the smallest end-to-end slice across the nearest mapped files before exploring broader repository context.
- Avoid broad repo scans when the task is clearly scoped to a few CLI, adapter, or test files.
- Treat mapped code and test paths as candidates, not mandatory edit targets.
- If the spec concerns checked-in docs, content, config, or repository artifacts, update those artifacts directly before broader code changes.
- If a mapped path does not exist yet and the spec requires it, create the missing file or directory within that mapped path instead of skipping the work.
- Stay within the mapped code and test paths unless a small supporting edit is required to satisfy a specific acceptance criterion or invariant.
- Do not self-certify completion or claim tests passed; Lisa will run deterministic test commands after this stage.
- Do not return a no-op result when the changed spec is not yet satisfied in the current workspace.
- When a spec introduces a new CLI command or a mapped path that does not exist yet, create the missing module, wire it from the nearest existing CLI entrypoint, and add the narrowest matching tests in the same pass.
- If the requested behavior is absent when you inspect the mapped files, a successful implementation must leave concrete repository edits that add that behavior.
- Acceptance Criteria changes must update or add tests at the narrowest existing seam that proves the requirement.
- Invariant changes must preserve or strengthen regression coverage, preferably in the closest existing test layer.
- Prefer durable test assertions over brittle prose-matching when shorter semantic or file-level checks provide enough evidence.
- If a spec deletion appears below, do not silently remove code. Keep cleanup conservative and explicit.
- Keep changes scoped to the planned tasks below.

Diff range:
- base: ${plan.diff.baseRef}
- head: ${plan.diff.headRef}

Changed specs:
${changedSpecs || "- none"}

Impacted implementation targets:
${impactedSpecs}

Planned tasks:
${tasks}

Deterministic follow-up tests Lisa will run after implementation:
- ${plan.testCommands.join("\n- ") || "No mapped test commands were declared by the impacted specs."}

Respond with the marked completion note only.`;
}
|
|
151
|
+
|
|
152
|
+
/** Inputs required to render the read-only spec conformance-audit prompt. */
interface RenderCheckPromptInput {
  // Workspace root; used to relativize the spec path in the prompt.
  workspacePath: string;
  // The spec document under audit (id, path, status are rendered).
  spec: ParsedSpecDocument;
  // Full raw spec text, embedded (indented) at the end of the prompt.
  specContent: string;
  // Declared code paths, rendered under "Mapped code paths".
  codePaths: string[];
  // Declared test paths, rendered under "Mapped test paths".
  testPaths: string[];
  // Declared test commands, rendered under "Mapped test commands".
  testCommands: string[];
  // Concrete files resolved from codePaths, rendered as "Matched code files".
  matchedCodeFiles: string[];
  // Concrete files resolved from testPaths, rendered as "Matched test files".
  matchedTestFiles: string[];
  // Evidence collected by deterministic checks before the agent runs.
  deterministicEvidence: SpecCheckEvidence[];
  // Issues raised by deterministic checks.
  deterministicIssues: string[];
  // Optional drift warning line inserted into the "Spec under audit" section.
  driftWarning?: string;
}
|
|
165
|
+
|
|
166
|
+
function formatPromptList(values: string[], emptyLabel: string): string {
|
|
167
|
+
return values.length > 0 ? `- ${values.join("\n- ")}` : `- ${emptyLabel}`;
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
/**
 * Render the read-only audit prompt asking an agent to judge whether the
 * implementation conforms to one spec. The agent must answer with JSON
 * between LISA_CHECK_JSON_START/END markers, or abort inside
 * LISA_ABORT_START/END markers.
 */
export function renderCheckPrompt(input: RenderCheckPromptInput): string {
  const specPath = relativePath(input.workspacePath, input.spec.path);
  // Pre-rendered bullet lists; "- none" keeps every section non-empty.
  const deterministicEvidence = input.deterministicEvidence.length > 0
    ? input.deterministicEvidence.map((entry) => `- [${entry.type}] ${entry.detail}`).join("\n")
    : "- none";
  const deterministicIssues = input.deterministicIssues.length > 0
    ? input.deterministicIssues.map((entry) => `- ${entry}`).join("\n")
    : "- none";

  return `You are performing a read-only Lisa spec conformance audit.

Return JSON only inside these exact markers:
LISA_CHECK_JSON_START
{
"status": "PASS | FAIL | UNSURE",
"summary": "short summary",
"evidence": [
{ "type": "code-audit", "detail": "specific supporting detail" }
],
"issues": ["specific drift, gap, or open question"]
}
LISA_CHECK_JSON_END

Rules:
- If you cannot complete a trustworthy read-only audit because required capabilities or files are unavailable, respond with only:
LISA_ABORT_START
<short reason>
LISA_ABORT_END
- Read files only. Do not edit code, tests, specs, or reports.
- Treat the spec as the source of truth.
- Use the deterministic evidence below first, then inspect the mapped code and tests.
- Return PASS only when the implementation and tests appear to satisfy the spec.
- Return FAIL when the spec and implementation clearly drift.
- Return UNSURE when the evidence is incomplete or inconclusive.
- Focus on the declared code paths, test paths, invariants, and acceptance criteria.

Spec under audit:
- id: ${input.spec.id ?? specPath}
- path: ${specPath}
- status: ${input.spec.status ?? "(unspecified)"}
${input.driftWarning ? `- drift warning: ${input.driftWarning}
` : ""}
Mapped code paths:
${formatPromptList(input.codePaths, "none declared")}

Matched code files:
${formatPromptList(input.matchedCodeFiles, "none matched")}

Mapped test paths:
${formatPromptList(input.testPaths, "none declared")}

Matched test files:
${formatPromptList(input.matchedTestFiles, "none matched")}

Mapped test commands:
${formatPromptList(input.testCommands, "none declared")}

Deterministic evidence:
${deterministicEvidence}

Deterministic issues:
${deterministicIssues}

Full spec content:
${indentBlock(input.specContent)}

Respond with the marked JSON payload only.`;
}
|
|
238
|
+
|
|
239
|
+
/** Inputs required to render the spec-import drafting prompt. */
interface RenderImportPromptInput {
  // Workspace root; used to relativize existing document paths.
  workspacePath: string;
  // Paths chosen as the import scope; listed verbatim in the prompt.
  selectedPaths: string[];
  // Documents already in the workspace, listed under "Existing spec workspace".
  existingDocuments: ParsedSpecDocument[];
  // Extension-supplied guidance bullets appended under "Extension guidance".
  extensionGuidance: string[];
  // When true, adds rules forbidding tools/subagents and restricting the
  // agent to the attached files only.
  directContextOnly?: boolean;
}
|
|
246
|
+
|
|
247
|
+
function formatDocumentReference(document: ParsedSpecDocument, workspacePath: string): string {
|
|
248
|
+
return `- ${document.id ?? relativePath(workspacePath, document.path)} (${document.kind}, ${document.status ?? "unknown"}) at ${relativePath(workspacePath, document.path)}`;
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
function formatUncertaintyLevels(): string {
|
|
252
|
+
return (["low", "medium", "high"] as SpecImportUncertaintyLevel[]).join(" | ");
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
/**
 * Render the prompt asking an agent to draft Lisa spec files from an
 * existing codebase. The agent must answer with JSON between
 * LISA_IMPORT_JSON_START/END markers, or abort inside LISA_ABORT_START/END
 * markers.
 */
export function renderImportPrompt(input: RenderImportPromptInput): string {
  const existingDocuments = input.existingDocuments.length > 0
    ? input.existingDocuments.map((document) => formatDocumentReference(document, input.workspacePath)).join("\n")
    : "- none";
  const extensionGuidance = input.extensionGuidance.length > 0
    ? input.extensionGuidance.map((entry) => `- ${entry}`).join("\n")
    : "- none";
  // Extra rules injected only in direct-context mode; empty string otherwise
  // so the template's rule list stays contiguous.
  const directContextRules = input.directContextOnly
    ? "- Do not use tools, subagents, code-review agents, or skills. Work only from the attached files and this prompt.\n"
      + "- If the attached files are insufficient, abort instead of searching for additional repository context.\n"
    : "";

  return `You are drafting Lisa specs from an existing codebase.

Return JSON only inside these exact markers:
LISA_IMPORT_JSON_START
{
"files": [
{ "path": ".specs/backend/example.md", "content": "full markdown or yaml document" }
],
"notes": ["short note grounded in code, tests, or docs"],
"uncertainties": [
{
"path": ".specs/backend/example.md",
"specId": "backend.example",
"level": "${formatUncertaintyLevels()}",
"detail": "what remains uncertain",
"question": "optional follow-up question"
}
]
}
LISA_IMPORT_JSON_END

Rules:
- If you cannot complete the import safely because required capabilities or repository access are unavailable, respond with only:
LISA_ABORT_START
<short reason>
LISA_ABORT_END
- Ensure the payload between the markers is valid JSON with properly escaped strings.
${directContextRules}- Read code, tests, and docs to infer current behavior. Do not edit source files outside \`.specs/\`.
- Import is scoped to the selected paths below, plus directly relevant tests/docs.
- Imported base specs and benchmark sidecars must declare \`status: draft\`.
- Base spec frontmatter must use \`id\` (not \`spec_id\`), \`status\`, \`code_paths\`, and at least one of \`test_paths\` or \`test_commands\`. Optional keys are \`owners\` and \`depends_on\`.
- Benchmark sidecars must use \`id\`, \`kind: benchmark\`, \`extends\`, \`status\`, \`environment\`, \`command\`, and string-valued \`metrics\`. Optional keys include \`required\`, \`baseline\`, and \`noise_tolerance_pct\`.
- Environment configs must use \`name\` and may include object-valued \`runtime\` and \`resources\`, plus string-valued \`notes\`.
- Draft base specs must include Summary, Use Cases, Invariants, Acceptance Criteria, Out of Scope, Open Questions, and Uncertainty sections.
- Use standard Lisa section headings exactly as written above; do not substitute custom headings like \`Title\` for required sections.
- Prefer evidence from tests and docs over unsupported guesses.
- If you find benchmark scripts, perf tests, or explicit latency/throughput targets, draft benchmark sidecars and any needed environment configs.
- Only emit files under \`.specs/backend/\`, \`.specs/frontend/\`, or \`.specs/environments/\`.
- Keep file names stable, concrete, and reviewable.
- Do not invent unsupported frontmatter keys such as \`spec_id\`, \`title\`, or \`sources\`.

Selected import scope:
- ${input.selectedPaths.join("\n- ")}

Existing spec workspace:
${existingDocuments}

Extension guidance:
${extensionGuidance}

Respond with the marked JSON payload only.`;
}
|