edsger 0.43.0 → 0.45.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.local.json +23 -3
- package/.env.local +12 -0
- package/dist/api/release-test-cases.d.ts +7 -0
- package/dist/api/release-test-cases.js +21 -0
- package/dist/api/releases.d.ts +41 -0
- package/dist/api/releases.js +31 -0
- package/dist/api/run-sheets.d.ts +22 -0
- package/dist/api/run-sheets.js +13 -0
- package/dist/commands/release-sync/index.d.ts +5 -0
- package/dist/commands/release-sync/index.js +38 -0
- package/dist/commands/run-sheet/index.d.ts +6 -0
- package/dist/commands/run-sheet/index.js +48 -0
- package/dist/commands/smoke-test/index.d.ts +5 -0
- package/dist/commands/smoke-test/index.js +40 -0
- package/dist/index.js +62 -0
- package/dist/phases/release-sync/__tests__/github.test.d.ts +9 -0
- package/dist/phases/release-sync/__tests__/github.test.js +123 -0
- package/dist/phases/release-sync/__tests__/snapshot.test.d.ts +8 -0
- package/dist/phases/release-sync/__tests__/snapshot.test.js +93 -0
- package/dist/phases/release-sync/github.d.ts +54 -0
- package/dist/phases/release-sync/github.js +101 -0
- package/dist/phases/release-sync/index.d.ts +24 -0
- package/dist/phases/release-sync/index.js +147 -0
- package/dist/phases/release-sync/snapshot.d.ts +27 -0
- package/dist/phases/release-sync/snapshot.js +159 -0
- package/dist/phases/run-sheet/index.d.ts +39 -0
- package/dist/phases/run-sheet/index.js +297 -0
- package/dist/phases/run-sheet/render.d.ts +42 -0
- package/dist/phases/run-sheet/render.js +133 -0
- package/dist/phases/smoke-test/__tests__/agent.test.d.ts +4 -0
- package/dist/phases/smoke-test/__tests__/agent.test.js +85 -0
- package/dist/phases/smoke-test/agent.d.ts +12 -0
- package/dist/phases/smoke-test/agent.js +94 -0
- package/dist/phases/smoke-test/index.d.ts +22 -0
- package/dist/phases/smoke-test/index.js +233 -0
- package/dist/phases/smoke-test/prompts.d.ts +15 -0
- package/dist/phases/smoke-test/prompts.js +35 -0
- package/dist/skills/phase/smoke-test/SKILL.md +80 -0
- package/dist/utils/json-extract.d.ts +6 -0
- package/dist/utils/json-extract.js +44 -0
- package/dist/workspace/__tests__/workspace-manager.test.d.ts +7 -0
- package/dist/workspace/__tests__/workspace-manager.test.js +52 -0
- package/dist/workspace/workspace-manager.d.ts +31 -0
- package/dist/workspace/workspace-manager.js +96 -10
- package/package.json +9 -2
- package/tsconfig.json +2 -1
- package/vitest.config.ts +12 -0
- package/dist/services/lifecycle-agent/__tests__/phase-criteria.test.d.ts +0 -4
- package/dist/services/lifecycle-agent/__tests__/phase-criteria.test.js +0 -133
- package/dist/services/lifecycle-agent/__tests__/transition-rules.test.d.ts +0 -4
- package/dist/services/lifecycle-agent/__tests__/transition-rules.test.js +0 -336
- package/dist/services/lifecycle-agent/index.d.ts +0 -24
- package/dist/services/lifecycle-agent/index.js +0 -25
- package/dist/services/lifecycle-agent/phase-criteria.d.ts +0 -57
- package/dist/services/lifecycle-agent/phase-criteria.js +0 -335
- package/dist/services/lifecycle-agent/transition-rules.d.ts +0 -60
- package/dist/services/lifecycle-agent/transition-rules.js +0 -184
- package/dist/services/lifecycle-agent/types.d.ts +0 -190
- package/dist/services/lifecycle-agent/types.js +0 -12
|
@@ -0,0 +1,297 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Run-sheet generation for a release.
|
|
3
|
+
*
|
|
4
|
+
* Given a release, fetches the product's `run_sheet_template`, clones the
|
|
5
|
+
* repo at the release tag (reusing the workspace layout used by
|
|
6
|
+
* smoke-test), renders placeholders, and upserts the result to the
|
|
7
|
+
* `run_sheets` table.
|
|
8
|
+
*
|
|
9
|
+
* Release tags are immutable, so if an existing run sheet has the same
|
|
10
|
+
* template_snapshot + tag and no clone error, we short-circuit without
|
|
11
|
+
* re-cloning. Pass `{ force: true }` to regenerate anyway.
|
|
12
|
+
*/
|
|
13
|
+
import { closeSync, mkdirSync, openSync, readFileSync, unlinkSync, writeFileSync, } from 'fs';
|
|
14
|
+
import { join } from 'path';
|
|
15
|
+
import { getGitHubConfigByProduct } from '../../api/github.js';
|
|
16
|
+
import { getProduct } from '../../api/products.js';
|
|
17
|
+
import { getRelease } from '../../api/releases.js';
|
|
18
|
+
import { getRunSheetByRelease, upsertRunSheet, } from '../../api/run-sheets.js';
|
|
19
|
+
import { logError, logInfo, logSuccess, logWarning, } from '../../utils/logger.js';
|
|
20
|
+
import { cloneFeatureRepo, ensureWorkspaceDir, getFeatureRepoPath, syncRepoToRef, } from '../../workspace/workspace-manager.js';
|
|
21
|
+
import { fetchCompare } from '../release-sync/github.js';
|
|
22
|
+
import { isSafeGitRef, renderTemplate } from './render.js';
|
|
23
|
+
// GitHub's compare endpoint is paginated; we don't page, so we warn when
// we hit the first-page cap.
const GITHUB_COMPARE_MAX_COMMITS = 250;
/**
 * Best-effort one-line-per-commit log between `base` and `head`.
 *
 * Returns `{ text, truncated }`; `text` is '' when there is no base tag
 * or the compare call fails (failures are logged, never thrown), and
 * `truncated` is true when the compare likely hit the first-page cap.
 */
async function fetchCommitsBetween(owner, repo, base, head, token) {
    if (!base) {
        return { text: '', truncated: false };
    }
    let compare;
    try {
        compare = await fetchCompare(owner, repo, base, head, token);
    }
    catch (err) {
        logWarning(`Could not fetch commits for run sheet: ${err instanceof Error ? err.message : String(err)}`);
        return { text: '', truncated: false };
    }
    const commits = compare.commits ?? [];
    const lines = [];
    for (const c of commits) {
        // First line of the commit message only (the subject line).
        const subject = (c.commit.message || '').split('\n')[0];
        lines.push(`- ${c.sha.slice(0, 7)} ${subject}`);
    }
    return {
        text: lines.join('\n'),
        truncated: commits.length >= GITHUB_COMPARE_MAX_COMMITS,
    };
}
|
|
46
|
+
// Stale locks (e.g. left behind by a crashed or SIGKILLed CLI) are
|
|
47
|
+
// considered abandoned after this many ms.
|
|
48
|
+
const LOCK_STALE_MS = 15 * 60 * 1000;
|
|
49
|
+
function writeLockFile(fd) {
|
|
50
|
+
const payload = { pid: process.pid, ts: Date.now() };
|
|
51
|
+
writeFileSync(fd, JSON.stringify(payload));
|
|
52
|
+
closeSync(fd);
|
|
53
|
+
}
|
|
54
|
+
function tryCreateLock(lockPath) {
|
|
55
|
+
try {
|
|
56
|
+
const fd = openSync(lockPath, 'wx');
|
|
57
|
+
writeLockFile(fd);
|
|
58
|
+
return true;
|
|
59
|
+
}
|
|
60
|
+
catch (err) {
|
|
61
|
+
if (err.code === 'EEXIST') {
|
|
62
|
+
return false;
|
|
63
|
+
}
|
|
64
|
+
throw err;
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
function isLockStale(lockPath) {
|
|
68
|
+
try {
|
|
69
|
+
const raw = readFileSync(lockPath, 'utf-8').trim();
|
|
70
|
+
let ts;
|
|
71
|
+
try {
|
|
72
|
+
ts = JSON.parse(raw).ts;
|
|
73
|
+
}
|
|
74
|
+
catch {
|
|
75
|
+
ts = Number(raw);
|
|
76
|
+
}
|
|
77
|
+
return !Number.isFinite(ts) || Date.now() - ts > LOCK_STALE_MS;
|
|
78
|
+
}
|
|
79
|
+
catch {
|
|
80
|
+
// Lock vanished between exists-check and read — treat as not held.
|
|
81
|
+
return true;
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
/**
 * Atomic-create a `.lock` file alongside the workspace dir. Returns
 * true if we acquired the lock, false if another CLI instance holds a
 * fresh one. Stale locks (> LOCK_STALE_MS old) are stolen via a single
 * unlink + create pair; concurrent stealers race fairly via O_EXCL on
 * the second create.
 */
/** Exported for unit tests; not part of the public CLI surface. */
export function tryAcquireFileLock(lockPath) {
    mkdirSync(join(lockPath, '..'), { recursive: true });
    // Fast path: nobody holds the lock.
    if (tryCreateLock(lockPath)) {
        return true;
    }
    // Contended: a fresh lock wins outright; a stale one may be stolen.
    if (isLockStale(lockPath)) {
        try {
            unlinkSync(lockPath);
        }
        catch {
            // Another stealer removed it first — the O_EXCL create below
            // arbitrates fairly between us.
        }
        return tryCreateLock(lockPath);
    }
    return false;
}
|
|
112
|
+
/** Exported for unit tests; not part of the public CLI surface. */
export function releaseFileLock(lockPath) {
    try {
        unlinkSync(lockPath);
    }
    catch {
        // Best-effort cleanup: the lock may already be gone, and a
        // leftover one self-expires once older than LOCK_STALE_MS.
    }
}
|
|
121
|
+
/**
 * True when `existing` can be served from cache: it has content, was
 * rendered from exactly this template text, targets exactly this tag,
 * and recorded no clone error on the previous run.
 */
function runSheetIsFresh(existing, template, tag) {
    if (!existing || !existing.content) {
        return false;
    }
    const meta = existing.metadata ?? {};
    return (existing.template_snapshot === template &&
        meta.tag === tag &&
        !meta.clone_error);
}
|
|
137
|
+
/**
 * Generate (or reuse) the run sheet for a single release.
 *
 * Flow: load release → validate tag → load product/template → consult
 * freshness cache (unless `force`) → best-effort clone + commit log →
 * render template → upsert to `run_sheets`.
 *
 * @param options `{ releaseId, force, verbose }` — `force` bypasses the
 *   cache short-circuit; `verbose` is threaded through to API helpers.
 * @returns a result object with `status` of 'success' | 'cached' | 'error',
 *   always carrying `releaseId` and a human-readable `summary`.
 */
export async function runRunSheet(options) {
    const { releaseId, force, verbose } = options;
    let release;
    try {
        release = await getRelease(releaseId, verbose);
    }
    catch (err) {
        const message = err instanceof Error ? err.message : String(err);
        return {
            status: 'error',
            releaseId,
            summary: `Failed to load release: ${message}`,
        };
    }
    // The tag is later passed to git (syncRepoToRef), so reject anything
    // that fails the allow-list before touching the filesystem.
    if (!isSafeGitRef(release.tag)) {
        return {
            status: 'error',
            releaseId,
            releaseTag: release.tag,
            summary: `Unsafe release tag: ${release.tag}`,
        };
    }
    // eslint-disable-next-line @typescript-eslint/no-explicit-any -- mcp product shape is open
    const product = (await getProduct(release.product_id, verbose));
    const template = product?.run_sheet_template;
    if (!template || !template.trim()) {
        return {
            status: 'error',
            releaseId,
            releaseTag: release.tag,
            summary: 'Product has no run_sheet_template configured. Set one in product settings.',
        };
    }
    // Short-circuit cache: identical template + tag, no prior clone error.
    if (!force) {
        // Lookup failures fall through to a full regeneration.
        const existing = await getRunSheetByRelease(releaseId, verbose).catch(() => null);
        if (runSheetIsFresh(existing, template, release.tag)) {
            logInfo(`Run sheet for ${release.tag} is already up-to-date (template unchanged).`);
            return {
                status: 'cached',
                releaseId,
                releaseTag: release.tag,
                runSheetId: existing.id,
                summary: 'Cached (no change since last render)',
                missingPlaceholders: existing.metadata?.missing_placeholders ?? [],
                cloneError: null,
                commitsTruncated: Boolean(existing.metadata?.commits_truncated),
            };
        }
    }
    const gh = await getGitHubConfigByProduct(release.product_id, verbose);
    const repoConfigured = gh.configured && gh.token && gh.owner && gh.repo;
    // Clone at tag + fetch commits (both best-effort — we still render with
    // whatever metadata we have).
    let repoDir = null;
    let cloneError = null;
    let commits = '';
    let commitsTruncated = false;
    let lockPath = null;
    if (repoConfigured) {
        const owner = gh.owner;
        const repo = gh.repo;
        const token = gh.token;
        // Serialise concurrent CLI invocations for the *same release* by
        // taking a file lock next to the clone dir. Different releases get
        // different lock files (per-release workspace).
        const workspaceRoot = ensureWorkspaceDir();
        const workspaceName = `run-sheet-${release.id}`;
        const repoPathAhead = getFeatureRepoPath(workspaceRoot, workspaceName);
        lockPath = `${repoPathAhead}.lock`;
        if (!tryAcquireFileLock(lockPath)) {
            return {
                status: 'error',
                releaseId: release.id,
                releaseTag: release.tag,
                summary: 'Another run-sheet generation is in progress for this release. Wait for it to finish or retry.',
            };
        }
        // NOTE(review): the lock is only released in the `finally` of the
        // upsert try below. If renderTemplate were to throw in between, the
        // lock would leak until LOCK_STALE_MS expires it — confirm that is
        // acceptable, or widen the try to start here.
        try {
            // Clone into a per-release directory so two releases of the same
            // product don't stomp each other's checkout during concurrent
            // generation. (smoke-test uses `release-${product_id}` — that races;
            // we don't want to inherit that bug here.)
            const { repoPath } = cloneFeatureRepo(workspaceRoot, workspaceName, owner, repo, token);
            repoDir = repoPath;
            try {
                syncRepoToRef(repoPath, { tag: release.tag }, token);
            }
            catch (err) {
                // Keep repoDir: a partially-synced checkout may still serve
                // file includes; the error is surfaced in the metadata.
                cloneError = `Could not checkout tag ${release.tag}: ${err instanceof Error ? err.message : String(err)}`;
                logWarning(cloneError);
            }
        }
        catch (err) {
            cloneError = err instanceof Error ? err.message : String(err);
            logWarning(`Clone failed: ${cloneError}`);
        }
        const c = await fetchCommitsBetween(owner, repo, release.previous_tag, release.tag, token);
        commits = c.text;
        commitsTruncated = c.truncated;
    }
    else {
        cloneError = 'Product is not linked to a GitHub repository';
    }
    const { rendered, missing } = await renderTemplate(template, {
        name: product?.name ?? 'Unknown product',
        github_repository_full_name: product?.github_repository_full_name ?? null,
    }, {
        tag: release.tag,
        name: release.name,
        body: release.body,
        url: release.url,
        published_at: release.published_at,
        previous_tag: release.previous_tag,
        previous_published_at: release.previous_published_at,
        diff_summary: release.diff_summary,
        diff_stats: (release.diff_stats ?? {}),
    }, repoDir, commits);
    try {
        const saved = await upsertRunSheet({
            release_id: release.id,
            content: rendered,
            title: `Run Sheet — ${product?.name ?? ''} ${release.tag}`.trim(),
            template_snapshot: template,
            metadata: {
                missing_placeholders: missing,
                clone_error: cloneError,
                repo: product?.github_repository_full_name ?? null,
                tag: release.tag,
                commits_truncated: commitsTruncated,
            },
            generated_at: new Date().toISOString(),
        }, verbose);
        logSuccess(`Generated run sheet for ${release.tag}`);
        return {
            status: 'success',
            releaseId: release.id,
            releaseTag: release.tag,
            runSheetId: saved.id,
            summary: `Rendered ${rendered.length} characters`,
            missingPlaceholders: missing,
            cloneError,
            commitsTruncated,
        };
    }
    catch (err) {
        const message = err instanceof Error ? err.message : String(err);
        logError(`Failed to upsert run sheet: ${message}`);
        return {
            status: 'error',
            releaseId: release.id,
            releaseTag: release.tag,
            summary: `Failed to upsert run sheet: ${message}`,
        };
    }
    finally {
        // lockPath is only set when a repo was configured and the lock was
        // acquired; releaseFileLock is best-effort by design.
        if (lockPath) {
            releaseFileLock(lockPath);
        }
    }
}
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
/**
 * Pure template-rendering logic for run sheets.
 *
 * Intentionally identical to `web/src/services/run-sheets/render.ts` — any
 * change here should mirror the web side (and its unit tests) so the CLI
 * and the web `/api/releases/[id]/generate-run-sheet` route produce the
 * exact same output for the same template + release.
 */
/** Per-file ceiling for `{{file:...}}` inclusion, in bytes. */
export declare const MAX_FILE_INCLUDE_BYTES: number;
/** Whole-render ceiling across all included files, in bytes. */
export declare const MAX_TOTAL_FILE_BYTES: number;
/** Product fields the renderer reads. */
export interface TemplateProduct {
    name: string;
    github_repository_full_name: string | null;
}
/** Release fields the renderer reads; nullable fields render as ''. */
export interface TemplateRelease {
    tag: string;
    name: string | null;
    body: string | null;
    url: string | null;
    published_at: string | null;
    previous_tag: string | null;
    previous_published_at: string | null;
    diff_summary: string | null;
    diff_stats: Record<string, unknown>;
}
/** Allow-list validation for a ref before it reaches git. */
export declare function isSafeGitRef(ref: string): boolean;
/** Outcome of a budgeted repo-file read; `content` is null on refusal. */
export interface FileReadResult {
    content: string | null;
    bytes: number;
    reason?: string;
}
export declare function safeReadRepoFile(repoDir: string, relPath: string, remainingBudget: number): Promise<FileReadResult>;
/** Rendered output plus the placeholders that could not be filled. */
export interface RenderResult {
    rendered: string;
    missing: string[];
}
/**
 * Strip Markdown-style backslash escapes inside `{{ ... }}` spans so the
 * Rich-text editor roundtrip doesn't break placeholder matching.
 */
export declare function normalizeTemplate(template: string): string;
export declare function renderTemplate(template: string, product: TemplateProduct, release: TemplateRelease, repoDir: string | null, commits: string): Promise<RenderResult>;
|
|
@@ -0,0 +1,133 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Pure template-rendering logic for run sheets.
|
|
3
|
+
*
|
|
4
|
+
* Intentionally identical to `web/src/services/run-sheets/render.ts` — any
|
|
5
|
+
* change here should mirror the web side (and its unit tests) so the CLI
|
|
6
|
+
* and the web `/api/releases/[id]/generate-run-sheet` route produce the
|
|
7
|
+
* exact same output for the same template + release.
|
|
8
|
+
*/
|
|
9
|
+
import { statSync } from 'fs';
|
|
10
|
+
import { readFile } from 'fs/promises';
|
|
11
|
+
import { join, resolve } from 'path';
|
|
12
|
+
// Per-file and whole-render ceilings for `{{file:...}}` inclusion.
export const MAX_FILE_INCLUDE_BYTES = 256 * 1024;
export const MAX_TOTAL_FILE_BYTES = 1024 * 1024;
/**
 * Conservative allow-list check for a git ref: bounded length, no
 * whitespace, no leading `-`/`.`, no `..` or `@{` sequences, and only a
 * restricted character set.
 */
export function isSafeGitRef(ref) {
    const withinBounds = typeof ref === 'string' && ref.length > 0 && ref.length <= 200;
    if (!withinBounds) {
        return false;
    }
    if (/\s/.test(ref) || /^[-.]/.test(ref)) {
        return false;
    }
    if (ref.includes('..') || ref.includes('@{')) {
        return false;
    }
    return /^[A-Za-z0-9._\-+/@]+$/.test(ref);
}
/**
 * Read `relPath` from inside `repoDir`, refusing paths that escape the
 * repo and files exceeding the per-file or remaining total budget.
 * Never throws: failures come back as `{ content: null, bytes: 0, reason }`.
 * NOTE(review): the containment check assumes '/' separators — confirm
 * behaviour on Windows matches the web-side renderer.
 */
export async function safeReadRepoFile(repoDir, relPath, remainingBudget) {
    if (!relPath || relPath.startsWith('/') || relPath.includes('\0')) {
        return { content: null, bytes: 0, reason: 'invalid path' };
    }
    const root = resolve(repoDir);
    const abs = resolve(join(root, relPath));
    // `abs === root` covers relPath like '.', which stat rejects below.
    const escapes = abs !== root && !abs.startsWith(`${root}/`);
    if (escapes) {
        return { content: null, bytes: 0, reason: 'path escape' };
    }
    let size;
    try {
        const stat = statSync(abs);
        if (!stat.isFile()) {
            return { content: null, bytes: 0, reason: 'not a file' };
        }
        size = stat.size;
    }
    catch {
        return { content: null, bytes: 0, reason: 'not found' };
    }
    if (size > MAX_FILE_INCLUDE_BYTES) {
        return {
            content: null,
            bytes: 0,
            reason: `too large (${size} > ${MAX_FILE_INCLUDE_BYTES} bytes)`,
        };
    }
    if (size > remainingBudget) {
        return {
            content: null,
            bytes: 0,
            reason: 'total file-inclusion budget exhausted',
        };
    }
    try {
        return { content: await readFile(abs, 'utf-8'), bytes: size };
    }
    catch {
        return { content: null, bytes: 0, reason: 'read failed' };
    }
}
|
|
68
|
+
/**
 * Strip Markdown-style backslash escapes inside `{{ ... }}` spans so the
 * Rich-text editor roundtrip doesn't break placeholder matching.
 * Escapes outside placeholder spans are left untouched.
 */
export function normalizeTemplate(template) {
    const unescapeSpan = (inner) => inner.replace(/\\([_.\-\\/])/g, '$1');
    return template.replace(/\{\{([^}]*)\}\}/g, (_whole, inner) => `{{${unescapeSpan(inner)}}}`);
}
|
|
75
|
+
/**
 * Render a run-sheet template: expand `{{file:path}}` includes from the
 * cloned repo (budget-limited), then substitute simple `{{var}}`
 * placeholders from product/release metadata.
 *
 * Unresolvable placeholders are left in place and reported via `missing`.
 * NOTE: this must stay behaviourally identical to the web-side renderer
 * (see the header comment of this file) — do not change semantics here alone.
 *
 * @param template raw template text (normalised before matching)
 * @param product  `{ name, github_repository_full_name }`
 * @param release  release metadata (nullable fields render as '')
 * @param repoDir  cloned-repo root, or null when no checkout is available
 * @param commits  pre-formatted commit list for the `{{commits}}` variable
 * @returns `{ rendered, missing }` — `missing` may contain duplicates
 */
export async function renderTemplate(template, product, release, repoDir, commits) {
    const missing = [];
    const stats = release.diff_stats ?? {};
    const normalized = normalizeTemplate(template);
    // All simple substitutions are plain strings; nullish metadata maps to ''.
    const simpleVars = {
        product_name: product.name,
        release_tag: release.tag,
        release_name: release.name ?? release.tag,
        release_body: release.body ?? '',
        release_url: release.url ?? '',
        previous_tag: release.previous_tag ?? '',
        published_at: release.published_at ?? '',
        previous_published_at: release.previous_published_at ?? '',
        diff_summary: release.diff_summary ?? '',
        files_changed: String(stats.files_changed ?? ''),
        additions: String(stats.additions ?? ''),
        deletions: String(stats.deletions ?? ''),
        commits_count: String(stats.commits_count ?? stats.total_commits ?? ''),
        repository: product.github_repository_full_name ?? '',
        generated_at: new Date().toISOString(),
        commits,
    };
    const fileRegex = /\{\{\s*file:([^}]+?)\s*\}\}/g;
    const fileMatches = [];
    for (const m of normalized.matchAll(fileRegex)) {
        fileMatches.push({ match: m[0], path: m[1].trim() });
    }
    let remainingBudget = MAX_TOTAL_FILE_BYTES;
    // Keyed by the full placeholder text, so repeated identical spellings
    // are read (and budgeted) once. NOTE(review): two differently-spaced
    // spellings of the same path are read separately — confirm intended.
    const fileResults = new Map();
    for (const { match, path } of fileMatches) {
        if (fileResults.has(match)) {
            continue;
        }
        if (!repoDir) {
            missing.push(`file:${path} (no repo)`);
            fileResults.set(match, `<!-- file ${path} unavailable: repo not cloned -->`);
            continue;
        }
        // eslint-disable-next-line no-await-in-loop -- sequential so the byte budget is enforced
        const res = await safeReadRepoFile(repoDir, path, remainingBudget);
        if (res.content === null) {
            missing.push(`file:${path} (${res.reason ?? 'unavailable'})`);
            fileResults.set(match, `<!-- file ${path} unavailable: ${res.reason ?? 'unknown'} -->`);
        }
        else {
            remainingBudget -= res.bytes;
            fileResults.set(match, res.content);
        }
    }
    // File includes first, so included content is not itself re-scanned
    // for simple variables... actually it IS re-scanned by the second
    // replace below — NOTE(review): confirm that is the intended (and
    // web-matching) behaviour for included files containing `{{var}}`.
    let rendered = normalized.replace(fileRegex, (match) => fileResults.get(match) ?? match);
    rendered = rendered.replace(/\{\{\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*\}\}/g, (match, key) => {
        if (key in simpleVars) {
            return simpleVars[key];
        }
        missing.push(key);
        return match;
    });
    return { rendered, missing };
}
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Unit tests for smoke-test agent response parsing.
|
|
3
|
+
*/
|
|
4
|
+
import assert from 'node:assert';
|
|
5
|
+
import { describe, it } from 'node:test';
|
|
6
|
+
import { findBalancedJsonObject } from '../../../utils/json-extract.js';
|
|
7
|
+
import { extractJson } from '../agent.js';
|
|
8
|
+
// Parsing of the model's final answer into a GeneratedSmokeTestPlan:
// plain JSON, fenced JSON, prose-wrapped JSON, and rejection cases.
void describe('extractJson', () => {
    void it('parses a plain JSON object', () => {
        const raw = `{
  "summary": "one thing changed",
  "test_cases": [
    { "name": "login still works", "description": "step 1", "is_critical": true }
  ]
}`;
        const parsed = extractJson(raw);
        assert.strictEqual(parsed.summary, 'one thing changed');
        assert.strictEqual(parsed.test_cases.length, 1);
        assert.strictEqual(parsed.test_cases[0].is_critical, true);
    });
    void it('strips ```json fences', () => {
        const raw = '```json\n{"summary":"x","test_cases":[{"name":"a","description":"b"}]}\n```';
        const parsed = extractJson(raw);
        assert.strictEqual(parsed.summary, 'x');
        assert.strictEqual(parsed.test_cases[0].name, 'a');
    });
    void it('strips unlabeled fences', () => {
        const raw = '```\n{"summary":"x","test_cases":[]}\n```';
        const parsed = extractJson(raw);
        assert.deepStrictEqual(parsed.test_cases, []);
    });
    void it('tolerates leading and trailing prose', () => {
        const raw = `Here you go:
{"summary":"x","test_cases":[{"name":"a","description":"b"}]}

Hope that helps.`;
        const parsed = extractJson(raw);
        assert.strictEqual(parsed.summary, 'x');
    });
    void it('throws when test_cases is missing', () => {
        assert.throws(() => extractJson('{"summary":"x"}'), /test_cases/);
    });
    void it('throws on invalid JSON', () => {
        assert.throws(() => extractJson('not json'));
    });
    void it('picks the first balanced object when prose contains decoy braces', () => {
        const raw = 'Note: { pseudo-json example } but the real answer is:\n' +
            '{"summary":"x","test_cases":[{"name":"a","description":"b"}]}\n' +
            'Let me know if you want changes.';
        const parsed = extractJson(raw);
        assert.strictEqual(parsed.summary, 'x');
        assert.strictEqual(parsed.test_cases[0].name, 'a');
    });
    void it('preserves braces inside JSON string values', () => {
        const raw = '{"summary":"changes to {pricing} and {checkout}","test_cases":[{"name":"n","description":"d"}]}';
        const parsed = extractJson(raw);
        assert.strictEqual(parsed.summary, 'changes to {pricing} and {checkout}');
    });
    void it('throws when test_cases is not an array', () => {
        assert.throws(() => extractJson('{"summary":"x","test_cases":"nope"}'), /test_cases/);
    });
});
|
|
63
|
+
// The brace-matching scanner behind extractJson's fallback path:
// must respect strings, escapes, and nesting, and fail closed.
void describe('findBalancedJsonObject', () => {
    void it('returns null when there is no opening brace', () => {
        assert.strictEqual(findBalancedJsonObject('no braces here'), null);
    });
    void it('returns the first balanced top-level object', () => {
        assert.strictEqual(findBalancedJsonObject('prefix {"a": 1} and {"b": 2} suffix'), '{"a": 1}');
    });
    void it('handles nested braces', () => {
        const text = 'xxx {"a": {"b": {"c": 1}}} yyy';
        assert.strictEqual(findBalancedJsonObject(text), '{"a": {"b": {"c": 1}}}');
    });
    void it('ignores braces inside strings', () => {
        const text = '{"msg": "this has { and } inside", "ok": true}';
        assert.strictEqual(findBalancedJsonObject(text), text);
    });
    void it('handles escaped quotes inside strings', () => {
        const text = '{"msg": "she said \\"hi\\" {not object}"}';
        assert.strictEqual(findBalancedJsonObject(text), text);
    });
    void it('returns null on unbalanced input', () => {
        assert.strictEqual(findBalancedJsonObject('{"a": 1'), null);
    });
});
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import { type EdsgerConfig } from '../../types/index.js';
/** Shape of the JSON plan the model is asked to emit. */
export interface GeneratedSmokeTestPlan {
    summary: string;
    test_cases: {
        name: string;
        description: string;
        is_critical?: boolean;
    }[];
}
/** @internal Exported for unit tests only. */
export declare function extractJson(raw: string): GeneratedSmokeTestPlan;
/**
 * Run an agent query with the given prompts and parse the final answer
 * into a plan; throws when no parsable plan is produced.
 */
export declare function executeSmokeTestQuery(systemPrompt: string, userPrompt: string, config: EdsgerConfig, verbose?: boolean, cwd?: string): Promise<GeneratedSmokeTestPlan>;
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
import { query } from '@anthropic-ai/claude-agent-sdk';
|
|
2
|
+
import { DEFAULT_MODEL } from '../../constants.js';
|
|
3
|
+
import { findBalancedJsonObject } from '../../utils/json-extract.js';
|
|
4
|
+
import { logDebug, logError, logInfo } from '../../utils/logger.js';
|
|
5
|
+
/** Wrap plain text in the SDK's user-message envelope. */
function userMessage(content) {
    const message = { role: 'user', content };
    return { type: 'user', message };
}
/** Single-shot prompt stream: yields exactly one user message. */
// eslint-disable-next-line @typescript-eslint/require-await -- async generator required by SDK interface
async function* makePrompt(text) {
    yield userMessage(text);
}
|
|
12
|
+
/** @internal Exported for unit tests only. */
/**
 * Parse the model's final answer into a plan object: unwrap an optional
 * ``` fence, try the whole body as JSON, then fall back to the first
 * balanced `{...}` span. Throws when nothing parses or the parsed value
 * lacks a `test_cases` array.
 */
export function extractJson(raw) {
    let body = raw.trim();
    const fenced = /```(?:json)?\s*([\s\S]*?)```/.exec(body);
    if (fenced) {
        body = fenced[1].trim();
    }
    let parsed;
    try {
        // Clean JSON round-trips without any slicing.
        parsed = JSON.parse(body);
    }
    catch {
        const candidate = findBalancedJsonObject(body);
        if (!candidate) {
            throw new Error('No JSON object found in model output');
        }
        parsed = JSON.parse(candidate);
    }
    const isPlan = typeof parsed === 'object' &&
        parsed !== null &&
        Array.isArray(parsed.test_cases);
    if (!isPlan) {
        throw new Error('Model response is missing test_cases array');
    }
    return parsed;
}
|
|
39
|
+
// eslint-disable-next-line complexity -- agent loop with message type handling
/**
 * Drive a Claude agent session to produce a smoke-test plan.
 *
 * Streams SDK messages, accumulating assistant text and logging tool use,
 * then parses the final `result` message (falling back to accumulated
 * assistant text) with extractJson.
 *
 * NOTE(review): the `config` parameter is accepted but never read in this
 * body — confirm whether it is dead or consumed elsewhere.
 *
 * @param systemPrompt appended to the claude_code preset system prompt
 * @param userPrompt   the single user turn sent to the agent
 * @param verbose      enables progress/debug logging
 * @param cwd          optional working directory for the agent's tools
 * @returns the parsed plan
 * @throws Error when no parsable plan was produced
 */
export async function executeSmokeTestQuery(systemPrompt, userPrompt, config, verbose, cwd) {
    let lastAssistant = '';
    let plan = null;
    let parseError = null;
    let turnCount = 0;
    if (verbose) {
        logInfo('Connecting to Claude Code for smoke-test generation...');
    }
    for await (const message of query({
        prompt: makePrompt(userPrompt),
        options: {
            systemPrompt: {
                type: 'preset',
                preset: 'claude_code',
                append: systemPrompt,
            },
            model: DEFAULT_MODEL,
            maxTurns: 20,
            permissionMode: 'bypassPermissions',
            ...(cwd ? { cwd } : {}),
        },
    })) {
        if (message.type === 'assistant' && message.message?.content) {
            turnCount++;
            for (const content of message.message.content) {
                if (content.type === 'text') {
                    // Keep all assistant text as a fallback parse source.
                    lastAssistant += `${content.text}\n`;
                    logDebug(content.text, verbose);
                }
                else if (content.type === 'tool_use') {
                    const desc = content.input?.description || content.input?.command || 'Running...';
                    if (verbose) {
                        logInfo(`[Turn ${turnCount}] ${content.name}: ${typeof desc === 'string' ? desc.slice(0, 120) : 'Running...'}`);
                    }
                }
            }
        }
        if (message.type === 'result') {
            // Prefer the SDK's final result text; fall back to what we saw.
            const text = ('result' in message ? message.result : '') || lastAssistant;
            try {
                plan = extractJson(text);
            }
            catch (err) {
                parseError = err instanceof Error ? err.message : String(err);
            }
            if (message.subtype !== 'success') {
                // Non-success results are logged but a parsable plan still wins.
                logError(`Smoke-test query incomplete: ${message.subtype}`);
            }
        }
    }
    if (!plan) {
        throw new Error(`Failed to parse smoke-test plan from model output: ${parseError ?? 'no result message received'}`);
    }
    return plan;
}
|