comfy-qa 1.0.0 → 1.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +73 -6
- package/package.json +18 -3
- package/src/agent/browser-agent.ts +298 -0
- package/src/agent/demo-editor.ts +450 -0
- package/src/agent/demo-research.ts +725 -0
- package/src/agent/orchestrator.ts +268 -0
- package/src/agent/qa-research.ts +813 -0
- package/src/agent/research.ts +221 -0
- package/src/browser/hud.ts +136 -0
- package/src/browser/recorder.ts +131 -0
- package/src/cli.ts +69 -28
- package/src/commands/full.ts +40 -0
- package/src/commands/issue.ts +23 -0
- package/src/commands/pr.ts +23 -0
- package/src/commands/setup.ts +46 -0
- package/src/recorder/narration.ts +176 -0
- package/src/recorder/post-mix.ts +81 -0
- package/src/report/e2e-test.ts +132 -0
- package/src/report/generate.ts +271 -0
- package/src/utils/comfyui.ts +349 -0
- package/src/utils/github.ts +87 -0
- package/src/utils/parse-url.ts +11 -0
- package/src/utils/qa-skill.ts +376 -0
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
import * as fs from "fs";
|
|
2
|
+
import * as path from "path";
|
|
3
|
+
|
|
4
|
+
/**
 * Get the setup prompt text (reusable across setup command and auto-setup).
 *
 * Reads the bundled comfy-qa-setup skill file, strips its YAML frontmatter,
 * and appends repo-specific instructions for the agent.
 *
 * @returns the full prompt text to feed to the agent
 * @throws if the bundled SKILL.md cannot be read from disk
 */
export function getSetupPrompt(): string {
  // import.meta.dir is a Bun-specific API (directory of this module);
  // the skill file is resolved relative to the installed package layout.
  const skillPath = path.join(import.meta.dir, "../../skills/comfy-qa-setup/SKILL.md");
  const skill = fs.readFileSync(skillPath, "utf-8");

  // Strip frontmatter
  // Non-greedy match removes only a leading `--- … ---` block (plus trailing
  // newlines); files without frontmatter pass through unchanged.
  const body = skill.replace(/^---[\s\S]*?---\n*/, "");

  return `${body}

---

## Context: Current Repository

You are running inside a frontend repository. Follow the steps above to:

1. Detect the stack from package.json, lock files, and framework config
2. Determine the backend dependency and whether to use staging or local
3. Install Playwright if not present
4. Create \`playwright.qa.config.ts\`
5. Create \`.claude/skills/comfy-qa/SKILL.md\` with repo-specific details
6. Create \`.claude/skills/comfy-qa/REPRODUCE.md\`
7. Create \`tests/e2e/qa.spec.ts\` with starter smoke tests
8. Update \`.gitignore\` to include \`tmp/\`
9. Verify by running the tests

### Updating an existing setup

If QA files already exist, update them instead of overwriting:

- **playwright.qa.config.ts** — check if port or browser config needs updating
- **SKILL.md** — update if the stack, backend, or key routes changed
- **qa.spec.ts** — add new tests for new routes/features, keep passing tests
- **REPRODUCE.md** — update if reproduction workflow changed

Only modify what's out of date. Don't regenerate files that are already correct.

Now read this repo's package.json and begin.`;
}
|
|
43
|
+
|
|
44
|
+
/**
 * CLI entry point for the `setup` command: prints the setup prompt to stdout.
 *
 * @param _args CLI arguments (currently unused)
 */
export async function commandSetup(_args: string[]): Promise<void> {
  console.log(getSetupPrompt());
}
|
|
@@ -0,0 +1,176 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* TTS narration generator using Gemini.
|
|
3
|
+
* Generates per-segment WAV files, concatenates them into a single track,
|
|
4
|
+
* and returns durations for narration-driven recording timing.
|
|
5
|
+
*
|
|
6
|
+
* Inspired by snomiao/playwright-multi-tab — see docs/making-the-demo-video.md
|
|
7
|
+
*/
|
|
8
|
+
import { $ } from "bun";
|
|
9
|
+
import * as fs from "fs";
|
|
10
|
+
import * as path from "path";
|
|
11
|
+
|
|
12
|
+
// Gemini TTS model id and REST endpoint base (v1beta API surface).
const GEMINI_TTS_MODEL = "gemini-2.5-flash-preview-tts";
const GEMINI_API_BASE = "https://generativelanguage.googleapis.com/v1beta";
|
|
14
|
+
|
|
15
|
+
/** One narration unit: a stable id (also used as the cached WAV filename) plus the text to speak. */
export interface NarrationSegment {
  // Unique per segment; `${id}.wav` is written/reused in the narration dir.
  id: string;
  // Plain text sent to the TTS model.
  text: string;
}
|
|
19
|
+
|
|
20
|
+
/** Result bundle returned by generateNarration: audio artifacts plus timing info. */
export interface NarrationResult {
  /** Per-segment durations in ms, keyed by segment id */
  durations: Map<string, number>;
  /** Concatenated WAV path (all segments joined in order) */
  trackPath: string;
  /** Per-segment WAV files in order */
  segmentPaths: string[];
  /** Meta JSON path with text + duration for subtitle generation */
  metaPath: string;
  /** Total duration in ms */
  totalDurationMs: number;
}
|
|
32
|
+
|
|
33
|
+
/** Wrap raw PCM in a WAV header */
|
|
34
|
+
function pcmToWav(pcm: Buffer, rate = 24000, channels = 1, bits = 16): Buffer {
|
|
35
|
+
const dataLen = pcm.length;
|
|
36
|
+
const byteRate = rate * channels * (bits / 8);
|
|
37
|
+
const blockAlign = channels * (bits / 8);
|
|
38
|
+
const header = Buffer.alloc(44);
|
|
39
|
+
header.write("RIFF", 0);
|
|
40
|
+
header.writeUInt32LE(36 + dataLen, 4);
|
|
41
|
+
header.write("WAVE", 8);
|
|
42
|
+
header.write("fmt ", 12);
|
|
43
|
+
header.writeUInt32LE(16, 16); // PCM chunk size
|
|
44
|
+
header.writeUInt16LE(1, 20); // PCM format
|
|
45
|
+
header.writeUInt16LE(channels, 22);
|
|
46
|
+
header.writeUInt32LE(rate, 24);
|
|
47
|
+
header.writeUInt32LE(byteRate, 28);
|
|
48
|
+
header.writeUInt16LE(blockAlign, 32);
|
|
49
|
+
header.writeUInt16LE(bits, 34);
|
|
50
|
+
header.write("data", 36);
|
|
51
|
+
header.writeUInt32LE(dataLen, 40);
|
|
52
|
+
return Buffer.concat([header, pcm]);
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
/** Get WAV duration in ms by reading the header */
|
|
56
|
+
function getWavDurationMs(wavPath: string): number {
|
|
57
|
+
const buf = fs.readFileSync(wavPath);
|
|
58
|
+
const sampleRate = buf.readUInt32LE(24);
|
|
59
|
+
const channels = buf.readUInt16LE(22);
|
|
60
|
+
const bits = buf.readUInt16LE(34);
|
|
61
|
+
const dataLen = buf.readUInt32LE(40);
|
|
62
|
+
return Math.round((dataLen / (sampleRate * channels * (bits / 8))) * 1000);
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
/** Call Gemini TTS for one text segment */
|
|
66
|
+
async function geminiTTS(text: string, apiKey: string): Promise<Buffer> {
|
|
67
|
+
const url = `${GEMINI_API_BASE}/models/${GEMINI_TTS_MODEL}:generateContent?key=${apiKey}`;
|
|
68
|
+
const body = {
|
|
69
|
+
contents: [{ parts: [{ text }] }],
|
|
70
|
+
generationConfig: {
|
|
71
|
+
responseModalities: ["AUDIO"],
|
|
72
|
+
speechConfig: {
|
|
73
|
+
voiceConfig: { prebuiltVoiceConfig: { voiceName: "Kore" } },
|
|
74
|
+
},
|
|
75
|
+
},
|
|
76
|
+
};
|
|
77
|
+
|
|
78
|
+
const resp = await fetch(url, {
|
|
79
|
+
method: "POST",
|
|
80
|
+
headers: { "Content-Type": "application/json" },
|
|
81
|
+
body: JSON.stringify(body),
|
|
82
|
+
});
|
|
83
|
+
|
|
84
|
+
if (!resp.ok) {
|
|
85
|
+
throw new Error(`Gemini TTS failed: ${resp.status} ${await resp.text()}`);
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
const data: any = await resp.json();
|
|
89
|
+
const b64 = data.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
|
|
90
|
+
if (!b64) throw new Error(`Gemini TTS returned no audio: ${JSON.stringify(data).slice(0, 200)}`);
|
|
91
|
+
return Buffer.from(b64, "base64");
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
/**
 * Generate narration WAVs for all segments, concatenate, and return durations.
 * Uses GEMINI_API_KEY from env. If unavailable, returns empty result (recorder will skip narration).
 *
 * @param segments  ordered narration segments; each id names its cached WAV
 * @param outputDir run directory; a `narration/` subdirectory is created in it
 * @returns timing + artifact paths, or null when narration is skipped
 *          (no API key, no segments, or every TTS call failed)
 */
export async function generateNarration(
  segments: NarrationSegment[],
  outputDir: string
): Promise<NarrationResult | null> {
  const apiKey = process.env.GEMINI_API_KEY;
  if (!apiKey) {
    console.log(` [narration] GEMINI_API_KEY not set — skipping narration`);
    return null;
  }
  if (segments.length === 0) return null;

  const narrationDir = path.join(outputDir, "narration");
  fs.mkdirSync(narrationDir, { recursive: true });

  console.log(` [narration] Generating ${segments.length} TTS segments via Gemini…`);

  const segmentPaths: string[] = [];
  const durations = new Map<string, number>();
  const meta: { id: string; text: string; durationMs: number }[] = [];

  // Generate sequentially to avoid rate limiting
  for (const seg of segments) {
    const segPath = path.join(narrationDir, `${seg.id}.wav`);
    if (fs.existsSync(segPath)) {
      // Reuse cached segment
      // NOTE: the cache key is the id only — if segment text changes but the
      // id stays the same, stale audio is reused; delete narration/ to force
      // regeneration.
      const dur = getWavDurationMs(segPath);
      segmentPaths.push(segPath);
      durations.set(seg.id, dur);
      meta.push({ id: seg.id, text: seg.text, durationMs: dur });
      continue;
    }

    // Retry up to 2 times on failure (Gemini sometimes returns OTHER finishReason)
    let pcm: Buffer | null = null;
    for (let attempt = 0; attempt < 3; attempt++) {
      try {
        pcm = await geminiTTS(seg.text, apiKey);
        break;
      } catch (err) {
        // Only log after the final attempt; earlier failures retry silently.
        if (attempt === 2) {
          console.log(` [narration] Skipped ${seg.id}: ${String(err).slice(0, 80)}`);
        }
      }
    }
    if (!pcm) continue; // Skip this segment but keep going

    const wav = pcmToWav(pcm);
    fs.writeFileSync(segPath, wav);
    const dur = getWavDurationMs(segPath);
    segmentPaths.push(segPath);
    durations.set(seg.id, dur);
    meta.push({ id: seg.id, text: seg.text, durationMs: dur });
    console.log(` [narration] ${seg.id}: ${dur}ms — "${seg.text.slice(0, 50)}..."`);
  }

  if (segmentPaths.length === 0) {
    console.log(` [narration] No segments generated — disabling narration`);
    return null;
  }

  // Concatenate all WAVs into one track
  // Uses ffmpeg's concat demuxer, which requires a `file '<path>'` list file.
  // NOTE(review): a path containing a single quote would break this list —
  // confirm narration dirs never contain quotes.
  const trackPath = path.join(narrationDir, "narration_track.wav");
  const listPath = path.join(narrationDir, "concat-list.txt");
  fs.writeFileSync(
    listPath,
    segmentPaths.map((p) => `file '${path.resolve(p)}'`).join("\n")
  );
  await $`ffmpeg -y -f concat -safe 0 -i ${listPath} -c copy ${trackPath}`.quiet();

  const totalDurationMs = meta.reduce((sum, m) => sum + m.durationMs, 0);

  // Save meta for subtitle generation
  const metaPath = path.join(narrationDir, "meta.json");
  fs.writeFileSync(metaPath, JSON.stringify({ segments: meta, totalDurationMs }, null, 2));

  console.log(` [narration] Track: ${trackPath} (${(totalDurationMs / 1000).toFixed(1)}s)`);

  return { durations, trackPath, segmentPaths, metaPath, totalDurationMs };
}
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Post-mix narration audio onto the recorded video and burn subtitles.
|
|
3
|
+
* Uses ffmpeg adelay filter for sync — single offset = (demo_start - ffmpeg_start) ms.
|
|
4
|
+
*
|
|
5
|
+
* Inspired by snomiao/playwright-multi-tab — see docs/making-the-demo-video.md
|
|
6
|
+
*/
|
|
7
|
+
import { $ } from "bun";
|
|
8
|
+
import * as fs from "fs";
|
|
9
|
+
import * as path from "path";
|
|
10
|
+
|
|
11
|
+
/** Shape of the meta.json written by generateNarration (recorder/narration.ts). */
interface Meta {
  // Ordered narration segments with their measured audio durations.
  segments: { id: string; text: string; durationMs: number }[];
  // Sum of all segment durations in ms.
  totalDurationMs: number;
}
|
|
15
|
+
|
|
16
|
+
/** Format ms as SRT timestamp HH:MM:SS,mmm */
|
|
17
|
+
function srtTime(ms: number): string {
|
|
18
|
+
const h = Math.floor(ms / 3600000);
|
|
19
|
+
const m = Math.floor((ms % 3600000) / 60000);
|
|
20
|
+
const s = Math.floor((ms % 60000) / 1000);
|
|
21
|
+
const msr = ms % 1000;
|
|
22
|
+
return `${String(h).padStart(2, "0")}:${String(m).padStart(2, "0")}:${String(s).padStart(2, "0")},${String(msr).padStart(3, "0")}`;
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
/** Generate SRT subtitle file from meta + initial offset */
|
|
26
|
+
function generateSrt(meta: Meta, offsetMs: number, outPath: string): void {
|
|
27
|
+
const lines: string[] = [];
|
|
28
|
+
let cursor = offsetMs;
|
|
29
|
+
meta.segments.forEach((seg, i) => {
|
|
30
|
+
const start = cursor;
|
|
31
|
+
const end = cursor + seg.durationMs;
|
|
32
|
+
lines.push(String(i + 1));
|
|
33
|
+
lines.push(`${srtTime(start)} --> ${srtTime(end)}`);
|
|
34
|
+
lines.push(seg.text);
|
|
35
|
+
lines.push("");
|
|
36
|
+
cursor = end;
|
|
37
|
+
});
|
|
38
|
+
fs.writeFileSync(outPath, lines.join("\n"));
|
|
39
|
+
}
|
|
40
|
+
|
|
41
|
+
/**
 * Mix audio + subtitles onto video.
 * @param videoPath path to silent recorded video (webm/mp4)
 * @param trackPath narration_track.wav from generateNarration
 * @param metaPath meta.json from generateNarration
 * @param offsetMs delay to apply to audio (when narration starts in video timeline)
 * @param outPath output video path
 */
export async function postMix(
  videoPath: string,
  trackPath: string,
  metaPath: string,
  offsetMs: number,
  outPath: string
): Promise<void> {
  // NOTE(review): parsed without validation — assumed to be the Meta JSON
  // written by generateNarration; confirm callers never pass foreign files.
  const meta: Meta = JSON.parse(fs.readFileSync(metaPath, "utf-8"));

  // Step 1: overlay audio with adelay
  const tmpDir = path.dirname(outPath);
  const audioMixed = path.join(tmpDir, "_audio-mixed.mp4");
  // adelay takes one delay value per channel; "d|d" delays both channels of a
  // stereo stream equally (extra values are ignored for mono input).
  const adelay = `${offsetMs}|${offsetMs}`;

  console.log(` [post-mix] Overlaying audio (adelay=${offsetMs}ms)…`);
  // Re-encodes video to H.264 (yuv420p for broad player support) and narration
  // to AAC; -shortest trims audio that would run past the video.
  await $`ffmpeg -y -i ${videoPath} -i ${trackPath} -filter_complex ${`[1:a]adelay=${adelay}[aout]`} -map 0:v -map [aout] -c:v libx264 -preset fast -pix_fmt yuv420p -c:a aac -shortest ${audioMixed}`.quiet();

  // Step 2: generate SRT
  const srtPath = path.join(tmpDir, "narration.srt");
  generateSrt(meta, offsetMs, srtPath);
  console.log(` [post-mix] Subtitles → ${srtPath}`);

  // Step 3: burn subtitles
  console.log(` [post-mix] Burning subtitles…`);
  // Escape path for ffmpeg subtitle filter
  // (forward slashes + escaped colons; colons are option separators in -vf)
  const escSrt = srtPath.replace(/\\/g, "/").replace(/:/g, "\\:");
  await $`ffmpeg -y -i ${audioMixed} -vf ${`subtitles=${escSrt}:force_style='FontSize=18,Alignment=2,OutlineColour=&H80000000,BorderStyle=4,MarginV=30'`} -c:a copy ${outPath}`.quiet();

  // Cleanup intermediate
  // Best-effort: a leftover _audio-mixed.mp4 is harmless.
  try { fs.unlinkSync(audioMixed); } catch {}

  console.log(` [post-mix] Final video → ${outPath}`);
}
|
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
import type { ResearchResult, TestScenario } from "../agent/research";
|
|
2
|
+
import type { PRInfo, IssueInfo } from "../utils/github";
|
|
3
|
+
|
|
4
|
+
/** Generate a Playwright E2E test file from research results */
|
|
5
|
+
export function generateE2ETest(
|
|
6
|
+
target: PRInfo | IssueInfo,
|
|
7
|
+
targetType: "pr" | "issue",
|
|
8
|
+
research: ResearchResult,
|
|
9
|
+
comfyUrl = "http://localhost:8188"
|
|
10
|
+
): string {
|
|
11
|
+
const num = target.number;
|
|
12
|
+
const title = target.title.replace(/'/g, "\\'");
|
|
13
|
+
|
|
14
|
+
const scenarioBlocks = research.testScenarios
|
|
15
|
+
.map((s, i) => generateScenarioTest(s, i, comfyUrl))
|
|
16
|
+
.join("\n\n");
|
|
17
|
+
|
|
18
|
+
return `// Auto-generated E2E test for ${targetType.toUpperCase()} #${num}
|
|
19
|
+
// ${title}
|
|
20
|
+
// ${target.url}
|
|
21
|
+
//
|
|
22
|
+
// Generated by comfy-qa — run with: bunx playwright test ${targetType}-${num}.e2e.ts
|
|
23
|
+
|
|
24
|
+
import { test, expect, type Page } from "@playwright/test";
|
|
25
|
+
|
|
26
|
+
const COMFY_URL = process.env.COMFY_URL ?? "${comfyUrl}";
|
|
27
|
+
|
|
28
|
+
test.describe("${targetType.toUpperCase()} #${num}: ${title}", () => {
|
|
29
|
+
test.beforeEach(async ({ page }) => {
|
|
30
|
+
await page.goto(COMFY_URL, { waitUntil: "networkidle", timeout: 30000 });
|
|
31
|
+
// Wait for ComfyUI to fully initialize
|
|
32
|
+
await page.waitForTimeout(2000);
|
|
33
|
+
});
|
|
34
|
+
|
|
35
|
+
${scenarioBlocks}
|
|
36
|
+
});
|
|
37
|
+
`;
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
function generateScenarioTest(
|
|
41
|
+
scenario: TestScenario,
|
|
42
|
+
index: number,
|
|
43
|
+
comfyUrl: string
|
|
44
|
+
): string {
|
|
45
|
+
const stepsComment = scenario.steps
|
|
46
|
+
.map((s, i) => ` // Step ${i + 1}: ${s}`)
|
|
47
|
+
.join("\n");
|
|
48
|
+
|
|
49
|
+
const preconditionsComment = scenario.preconditions
|
|
50
|
+
.map((p) => ` // Precondition: ${p}`)
|
|
51
|
+
.join("\n");
|
|
52
|
+
|
|
53
|
+
return ` test("Scenario ${index + 1}: ${scenario.name.replace(/"/g, '\\"')}", async ({ page }) => {
|
|
54
|
+
// ${scenario.description}
|
|
55
|
+
//
|
|
56
|
+
// Preconditions:
|
|
57
|
+
${preconditionsComment}
|
|
58
|
+
//
|
|
59
|
+
// Steps:
|
|
60
|
+
${stepsComment}
|
|
61
|
+
//
|
|
62
|
+
// Expected: ${scenario.expectedOutcome}
|
|
63
|
+
// Playwright hint: ${scenario.playwrightHint}
|
|
64
|
+
|
|
65
|
+
// --- AUTO-GENERATED SKELETON ---
|
|
66
|
+
// This test provides the structure; the AI agent fills in
|
|
67
|
+
// real selectors and assertions at runtime.
|
|
68
|
+
|
|
69
|
+
${generateStepCode(scenario)}
|
|
70
|
+
|
|
71
|
+
// Verify expected outcome
|
|
72
|
+
// TODO: Add assertion for: ${scenario.expectedOutcome}
|
|
73
|
+
});`;
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
function generateStepCode(scenario: TestScenario): string {
|
|
77
|
+
return scenario.steps
|
|
78
|
+
.map((step, i) => {
|
|
79
|
+
const lowered = step.toLowerCase();
|
|
80
|
+
|
|
81
|
+
// Try to generate meaningful Playwright code from step descriptions
|
|
82
|
+
if (lowered.includes("click") && lowered.includes("queue")) {
|
|
83
|
+
return ` // Step ${i + 1}: ${step}
|
|
84
|
+
const queueBtn = page.locator('button:has-text("Queue"), [data-testid="queue-button"]').first();
|
|
85
|
+
if (await queueBtn.isVisible()) await queueBtn.click();`;
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
if (lowered.includes("wait") && (lowered.includes("preview") || lowered.includes("image"))) {
|
|
89
|
+
return ` // Step ${i + 1}: ${step}
|
|
90
|
+
await page.waitForTimeout(3000); // Wait for preview to appear`;
|
|
91
|
+
}
|
|
92
|
+
|
|
93
|
+
if (lowered.includes("switch") && lowered.includes("tab")) {
|
|
94
|
+
return ` // Step ${i + 1}: ${step}
|
|
95
|
+
const newTab = await page.context().newPage();
|
|
96
|
+
await newTab.goto("about:blank");
|
|
97
|
+
await newTab.waitForTimeout(3000);
|
|
98
|
+
await newTab.close();
|
|
99
|
+
await page.bringToFront();`;
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
if (lowered.includes("visibility") || lowered.includes("hidden")) {
|
|
103
|
+
return ` // Step ${i + 1}: ${step}
|
|
104
|
+
await page.evaluate(() => {
|
|
105
|
+
Object.defineProperty(document, "hidden", { value: true, writable: true });
|
|
106
|
+
document.dispatchEvent(new Event("visibilitychange"));
|
|
107
|
+
});
|
|
108
|
+
await page.waitForTimeout(2000);
|
|
109
|
+
await page.evaluate(() => {
|
|
110
|
+
Object.defineProperty(document, "hidden", { value: false, writable: true });
|
|
111
|
+
document.dispatchEvent(new Event("visibilitychange"));
|
|
112
|
+
});`;
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
if (lowered.includes("assert") || lowered.includes("verify") || lowered.includes("check")) {
|
|
116
|
+
return ` // Step ${i + 1}: ${step}
|
|
117
|
+
// TODO: Add specific assertion here
|
|
118
|
+
await page.waitForTimeout(1000);`;
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
if (lowered.includes("scroll")) {
|
|
122
|
+
return ` // Step ${i + 1}: ${step}
|
|
123
|
+
await page.mouse.wheel(0, 300);
|
|
124
|
+
await page.waitForTimeout(500);`;
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
// Generic fallback
|
|
128
|
+
return ` // Step ${i + 1}: ${step}
|
|
129
|
+
await page.waitForTimeout(1000);`;
|
|
130
|
+
})
|
|
131
|
+
.join("\n\n");
|
|
132
|
+
}
|
|
@@ -0,0 +1,271 @@
|
|
|
1
|
+
import * as fs from "fs";
|
|
2
|
+
import * as path from "path";
|
|
3
|
+
import type { ResearchResult, QAChecklistItem } from "../agent/research";
|
|
4
|
+
import type { PRInfo, IssueInfo } from "../utils/github";
|
|
5
|
+
|
|
6
|
+
/** Everything needed to render and save a QA report for one PR or issue. */
export interface ReportContext {
  // The PR or issue under test (number/title/url are read by the generators).
  target: PRInfo | IssueInfo;
  targetType: "pr" | "issue";
  // Agent research output: analysis, checklist, scenarios, risks.
  research: ResearchResult;
  // Directory where report.md / qa-sheet.md / research.json are written.
  outputDir: string;
  // Screenshot file paths captured during the run.
  screenshots: string[];
  // Optional recorded video path; the report links it when present.
  videoPath?: string;
  // Timestamp used in the report footer and QA-sheet date field.
  runAt: Date;
}
|
|
15
|
+
|
|
16
|
+
function isPR(t: PRInfo | IssueInfo): t is PRInfo {
|
|
17
|
+
return "headRefName" in t;
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
function severityBadge(s: string): string {
|
|
21
|
+
const map: Record<string, string> = {
|
|
22
|
+
critical: "🔴 CRITICAL",
|
|
23
|
+
high: "🟠 HIGH",
|
|
24
|
+
medium: "🟡 MEDIUM",
|
|
25
|
+
low: "🟢 LOW",
|
|
26
|
+
};
|
|
27
|
+
return map[s] ?? s.toUpperCase();
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
function statusIcon(s: string): string {
|
|
31
|
+
const map: Record<string, string> = {
|
|
32
|
+
pending: "⬜",
|
|
33
|
+
pass: "✅",
|
|
34
|
+
fail: "❌",
|
|
35
|
+
blocked: "⛔",
|
|
36
|
+
};
|
|
37
|
+
return map[s] ?? "⬜";
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
function priorityBadge(p: string): string {
|
|
41
|
+
const map: Record<string, string> = {
|
|
42
|
+
P0: "🔥 P0",
|
|
43
|
+
P1: "⚡ P1",
|
|
44
|
+
P2: "📋 P2",
|
|
45
|
+
};
|
|
46
|
+
return map[p] ?? p;
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
/**
 * Render the full QA report as a single markdown document.
 *
 * Pulls analysis, checklist, scenarios, risks, and media references out of
 * the ReportContext; pure string building plus path.basename — no I/O.
 *
 * @param ctx report context (target, research, media paths, timestamp)
 * @returns markdown text for report.md
 */
export function generateMarkdownReport(ctx: ReportContext): string {
  const { target, targetType, research, screenshots, videoPath, runAt } = ctx;
  const num = target.number;
  const url = target.url;
  const title = target.title;

  // One table row per checklist item (status icon, priority badge, metadata).
  const checklistTable = research.qaChecklist
    .map(
      (item) =>
        `| ${statusIcon(item.status)} | ${priorityBadge(item.priority)} | ${item.id} | ${item.category} | ${item.description} |`
    )
    .join("\n");

  // Expanded scenario write-ups, separated by horizontal rules.
  const scenarioBlocks = research.testScenarios
    .map(
      (s, i) => `
### Scenario ${i + 1}: ${s.name}

> ${s.description}

**Preconditions:**
${s.preconditions.map((p) => `- ${p}`).join("\n")}

**Steps:**
${s.steps.map((step, j) => `${j + 1}. ${step}`).join("\n")}

**Expected Outcome:** ${s.expectedOutcome}

**Playwright Hint:** \`${s.playwrightHint}\`
`
    )
    .join("\n---\n");

  // Long-form details for each checklist row above.
  const checklistDetails = research.qaChecklist
    .map(
      (item) => `
#### ${item.id} — ${item.description}

- **Category:** ${item.category}
- **Priority:** ${priorityBadge(item.priority)}
- **Status:** ${statusIcon(item.status)} ${item.status}

**Steps:**
${item.steps.map((s, i) => `${i + 1}. ${s}`).join("\n")}

**Expected Result:** ${item.expectedResult}
`
    )
    .join("\n---\n");

  // NOTE(review): only a heading per screenshot is emitted here — no image
  // embed (`![...](...)`); confirm whether that is intentional.
  const screenshotSection =
    screenshots.length > 0
      ? `
## Screenshots

${screenshots
  .map((s) => {
    const rel = path.basename(s);
    return `### ${rel}\n\n`;
  })
  .join("\n")}`
      : "";

  // Video link when a recording exists; otherwise an instruction to re-run.
  const videoSection = videoPath
    ? `
## Video Recording

[Watch QA Session Video](${path.basename(videoPath)})

> Recorded with Comfy-QA Agent HUD overlay showing step-by-step operations.
`
    : `
## Video Recording

> ⚠️ Video recording requires a running ComfyUI instance. Start ComfyUI and re-run with \`--record\`.
`;

  return `# QA Report: ${targetType.toUpperCase()} #${num}

> **${title}**
> ${url}

---

## Executive Summary

${research.summary}

| Field | Value |
|-------|-------|
| Type | ${research.bugType} |
| Severity | ${severityBadge(research.severity)} |
| Affected Area | ${research.affectedArea} |
| Generated At | ${runAt.toISOString()} |
| Target | [${targetType.toUpperCase()} #${num}](${url}) |

---

## Bug Analysis

### Actual Behavior (The Bug)
${research.actualBehavior}

### Expected Behavior
${research.expectedBehavior}

### Root Cause
${research.rootCauseSummary}

---

## Reproduction Steps

${research.reproductionSteps.map((s, i) => `${i + 1}. ${s}`).join("\n")}

---

## QA Checklist

| Status | Priority | ID | Category | Description |
|--------|----------|----|----------|-------------|
${checklistTable}

### Checklist Details

${checklistDetails}

---

## Test Scenarios

${scenarioBlocks}

---

## Risk Assessment

${research.risks.map((r) => `- ⚠️ ${r}`).join("\n")}

${research.relatedFiles.length > 0 ? `
## Related Files

${research.relatedFiles.map((f) => `- \`${f}\``).join("\n")}
` : ""}

${videoSection}

${screenshotSection}

---

*Generated by [Comfy-QA](https://github.com/snomiao/Comfy-QA) · ${runAt.toISOString()}*
`;
}
|
|
203
|
+
|
|
204
|
+
export function generateQASheet(ctx: ReportContext): string {
|
|
205
|
+
const { target, targetType, research } = ctx;
|
|
206
|
+
|
|
207
|
+
const rows = research.qaChecklist
|
|
208
|
+
.map(
|
|
209
|
+
(item) =>
|
|
210
|
+
`| ${item.id} | ${item.description} | ${item.priority} | ${item.category} | ${item.status} | ${item.expectedResult} |`
|
|
211
|
+
)
|
|
212
|
+
.join("\n");
|
|
213
|
+
|
|
214
|
+
return `# QA Test Sheet — ${targetType.toUpperCase()} #${target.number}
|
|
215
|
+
|
|
216
|
+
## ${target.title}
|
|
217
|
+
|
|
218
|
+
**Severity:** ${severityBadge(research.severity)}
|
|
219
|
+
**Area:** ${research.affectedArea}
|
|
220
|
+
**Tester:** _____________
|
|
221
|
+
**Date:** ${ctx.runAt.toISOString().split("T")[0]}
|
|
222
|
+
|
|
223
|
+
---
|
|
224
|
+
|
|
225
|
+
## Test Cases
|
|
226
|
+
|
|
227
|
+
| ID | Description | Priority | Category | Result | Expected Result |
|
|
228
|
+
|----|-------------|----------|----------|--------|-----------------|
|
|
229
|
+
${rows}
|
|
230
|
+
|
|
231
|
+
---
|
|
232
|
+
|
|
233
|
+
## Notes
|
|
234
|
+
|
|
235
|
+
_________________________________
|
|
236
|
+
_________________________________
|
|
237
|
+
_________________________________
|
|
238
|
+
|
|
239
|
+
## Sign-off
|
|
240
|
+
|
|
241
|
+
- [ ] All P0 items passed
|
|
242
|
+
- [ ] All P1 items passed or accepted with known issues
|
|
243
|
+
- [ ] Video evidence recorded
|
|
244
|
+
- [ ] Report filed
|
|
245
|
+
|
|
246
|
+
**QA Engineer:** _________________ **Date:** _________________
|
|
247
|
+
`;
|
|
248
|
+
}
|
|
249
|
+
|
|
250
|
+
export function saveReport(ctx: ReportContext): {
|
|
251
|
+
reportPath: string;
|
|
252
|
+
qaSheetPath: string;
|
|
253
|
+
jsonPath: string;
|
|
254
|
+
} {
|
|
255
|
+
const { outputDir, target, targetType } = ctx;
|
|
256
|
+
fs.mkdirSync(outputDir, { recursive: true });
|
|
257
|
+
|
|
258
|
+
const reportPath = path.join(outputDir, "report.md");
|
|
259
|
+
const qaSheetPath = path.join(outputDir, "qa-sheet.md");
|
|
260
|
+
const jsonPath = path.join(outputDir, "research.json");
|
|
261
|
+
|
|
262
|
+
fs.writeFileSync(reportPath, generateMarkdownReport(ctx));
|
|
263
|
+
fs.writeFileSync(qaSheetPath, generateQASheet(ctx));
|
|
264
|
+
fs.writeFileSync(jsonPath, JSON.stringify({ target, research: ctx.research, runAt: ctx.runAt }, null, 2));
|
|
265
|
+
|
|
266
|
+
console.log(` [report] ${reportPath}`);
|
|
267
|
+
console.log(` [qa-sheet] ${qaSheetPath}`);
|
|
268
|
+
console.log(` [json] ${jsonPath}`);
|
|
269
|
+
|
|
270
|
+
return { reportPath, qaSheetPath, jsonPath };
|
|
271
|
+
}
|