@brunosps00/dev-workflow 0.0.7 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. package/bin/dev-workflow.js +11 -4
  2. package/lib/install-deps.js +42 -0
  3. package/package.json +1 -1
  4. package/scaffold/en/commands/dw-bugfix.md +2 -3
  5. package/scaffold/en/commands/dw-fix-qa.md +1 -2
  6. package/scaffold/en/commands/dw-functional-doc.md +36 -1
  7. package/scaffold/en/commands/dw-review-implementation.md +72 -24
  8. package/scaffold/en/commands/dw-run-qa.md +59 -4
  9. package/scaffold/en/commands/dw-run-task.md +1 -2
  10. package/scaffold/pt-br/commands/dw-bugfix.md +2 -3
  11. package/scaffold/pt-br/commands/dw-fix-qa.md +1 -2
  12. package/scaffold/pt-br/commands/dw-functional-doc.md +36 -1
  13. package/scaffold/pt-br/commands/dw-review-implementation.md +71 -19
  14. package/scaffold/pt-br/commands/dw-run-qa.md +59 -4
  15. package/scaffold/pt-br/commands/dw-run-task.md +1 -2
  16. package/scaffold/skills/agent-browser/SKILL.md +0 -750
  17. package/scaffold/skills/agent-browser/references/authentication.md +0 -303
  18. package/scaffold/skills/agent-browser/references/commands.md +0 -295
  19. package/scaffold/skills/agent-browser/references/profiling.md +0 -120
  20. package/scaffold/skills/agent-browser/references/proxy-support.md +0 -194
  21. package/scaffold/skills/agent-browser/references/session-management.md +0 -193
  22. package/scaffold/skills/agent-browser/references/snapshot-refs.md +0 -219
  23. package/scaffold/skills/agent-browser/references/video-recording.md +0 -173
  24. package/scaffold/skills/agent-browser/templates/authenticated-session.sh +0 -105
  25. package/scaffold/skills/agent-browser/templates/capture-workflow.sh +0 -69
  26. package/scaffold/skills/agent-browser/templates/form-automation.sh +0 -62
@@ -1,6 +1,7 @@
1
1
  #!/usr/bin/env node
2
2
 
3
3
  const { run } = require('../lib/init');
4
+ const installDeps = require('../lib/install-deps');
4
5
 
5
6
  const args = process.argv.slice(2);
6
7
  const command = args[0];
@@ -19,13 +20,15 @@ const HELP_TEXT = `
19
20
  Usage:
20
21
  npx dev-workflow init [--force] [--lang=en|pt-br]
21
22
  npx dev-workflow update [--lang=en|pt-br]
23
+ npx dev-workflow install-deps
22
24
  npx dev-workflow help
23
25
 
24
26
  Commands:
25
- init Scaffold .dw/ (commands, templates, references, scripts, skills, rules, MCPs)
26
- update Update managed files (commands, templates, references, scripts, skills, wrappers, MCPs)
27
- Preserves: .dw/rules/, .dw/spec/, user data
28
- help Show this help message
27
+ init Scaffold .dw/ (commands, templates, references, scripts, skills, rules, MCPs)
28
+ update Update managed files (commands, templates, references, scripts, skills, wrappers, MCPs)
29
+ Preserves: .dw/rules/, .dw/spec/, user data
30
+ install-deps Install system dependencies (Playwright browsers, MCP servers)
31
+ help Show this help message
29
32
 
30
33
  Options:
31
34
  --force Overwrite existing files (init only; update always overwrites managed files)
@@ -37,6 +40,7 @@ const HELP_TEXT = `
37
40
  npx dev-workflow init --lang=pt-br # Portuguese, no prompt
38
41
  npx dev-workflow init --force # Overwrite existing files
39
42
  npx dev-workflow update --lang=en # Update all managed files to latest version
43
+ npx dev-workflow install-deps # Install Playwright browsers and MCP servers
40
44
  `;
41
45
 
42
46
  async function main() {
@@ -47,6 +51,9 @@ async function main() {
47
51
  case 'update':
48
52
  await run({ force: !!flags.force, lang: flags.lang, mode: 'update' });
49
53
  break;
54
+ case 'install-deps':
55
+ installDeps.run();
56
+ break;
50
57
  case 'help':
51
58
  case '--help':
52
59
  case '-h':
@@ -0,0 +1,42 @@
1
+ const { execSync } = require('child_process');
2
+
3
+ function run() {
4
+ console.log('\n dev-workflow install-deps');
5
+ console.log(` ${'='.repeat(40)}\n`);
6
+
7
+ const deps = [
8
+ {
9
+ name: 'Playwright browsers',
10
+ check: 'npx playwright --version',
11
+ install: 'npx playwright install --with-deps',
12
+ },
13
+ {
14
+ name: 'Context7 MCP',
15
+ check: null,
16
+ install: 'npx -y @upstash/context7-mcp --help',
17
+ },
18
+ ];
19
+
20
+ let installed = 0;
21
+ let skipped = 0;
22
+ let failed = 0;
23
+
24
+ for (const dep of deps) {
25
+ process.stdout.write(` Installing ${dep.name}...`);
26
+ try {
27
+ execSync(dep.install, { stdio: 'pipe', timeout: 300000 });
28
+ console.log(' \x1b[32m✓\x1b[0m');
29
+ installed++;
30
+ } catch (err) {
31
+ console.log(' \x1b[31m✗\x1b[0m');
32
+ console.log(` Error: ${err.message.split('\n')[0]}`);
33
+ failed++;
34
+ }
35
+ }
36
+
37
+ console.log(`\n ${'='.repeat(40)}`);
38
+ console.log(` Done! ${installed} installed, ${skipped} skipped, ${failed} failed`);
39
+ console.log();
40
+ }
41
+
42
+ module.exports = { run };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@brunosps00/dev-workflow",
3
- "version": "0.0.7",
3
+ "version": "0.1.1",
4
4
  "description": "AI-driven development workflow commands for any project. Scaffolds a complete PRD-to-PR pipeline with multi-platform AI assistant support.",
5
5
  "bin": {
6
6
  "dev-workflow": "./bin/dev-workflow.js"
@@ -16,7 +16,6 @@
16
16
  When available in the project at `./.agents/skills/`, use these skills as contextual support without replacing this command:
17
17
 
18
18
  - `vercel-react-best-practices`: use when the bug affects React/Next.js and there is suspicion of render, hydration, fetching, waterfall, bundle, or re-render issues
19
- - `agent-browser`: use when the bug requires reproduction in a real browser, persistent session, request inspection, or visual capture
20
19
  - `webapp-testing`: use when the fix requires a reproducible E2E/retest flow in a web app
21
20
  - `security-review`: use when the root cause touches auth, authorization, external input, upload, secrets, SQL, XSS, SSRF, or other sensitive surfaces
22
21
 
@@ -158,7 +157,7 @@
158
157
  - Related error messages
159
158
  - Stack traces
160
159
  - Recently modified files
161
- - If the bug is UI-related or depends on browser flow, supplement collection with `agent-browser` or `webapp-testing`
160
+ - If the bug is UI-related or depends on browser flow, supplement collection with `webapp-testing`
162
161
 
163
162
  ### 3. Clarification Questions (MANDATORY - EXACTLY 3)
164
163
 
@@ -196,7 +195,7 @@
196
195
  - **Probable Cause**: Based on the evidence
197
196
  - **Affected Files**: List of files to modify
198
197
  - **Impact**: Other components that may be affected
199
- - **Skills used**: explicitly record if the analysis used `vercel-react-best-practices`, `agent-browser`, `webapp-testing`, or `security-review`
198
+ - **Skills used**: explicitly record if the analysis used `vercel-react-best-practices`, `webapp-testing`, or `security-review`
200
199
 
201
200
  ### 4.1 Scope Checkpoint (MANDATORY)
202
201
 
@@ -17,7 +17,6 @@ You are an AI assistant specialized in post-QA bug fixing with evidence-driven r
17
17
 
18
18
  When available in the project under `./.agents/skills/`, use these skills as operational support without replacing this command:
19
19
 
20
- - `agent-browser`: support for reproducing bugs with persistent sessions, capturing network data, additional screenshots, and validating fixes browser-first
21
20
  - `webapp-testing`: support for structuring retests, captures, and scripts when complementary to Playwright MCP
22
21
  - `vercel-react-best-practices`: use only if the fix affects React/Next.js frontend and there is risk of rendering, hydration, fetching, or performance regression
23
22
 
@@ -87,7 +86,7 @@ For each fixed bug:
87
86
  - `QA/logs/console-retest.log`
88
87
  - `QA/logs/network-retest.log`
89
88
  7. Record in the QA report which user/profile was used in the retest
90
- 8. If the retest requires persistent auth, request inspection beyond MCP, or more faithful real-browser reproduction, complement with `agent-browser` and record this in the report
89
+ 8. If the retest requires persistent auth, request inspection beyond MCP, or more faithful real-browser reproduction, record this in the report
91
90
 
92
91
  ### 4. Update Artifacts
93
92
 
@@ -37,6 +37,12 @@ Works best with project analyzed by `/dw-analyze-project`
37
37
  <critical>Header and footer of human videos must not compete for useful area with the browser stage. When they exist, they must be outside the browser stage, in a larger composition, preserving the real application viewport intact.</critical>
38
38
  <critical>When the goal is a human tour with centered browser, keep the application in a central stage without fixed side columns, preserving header and footer at full width outside the browser area.</critical>
39
39
  <critical>Browser visual quality is a mandatory requirement. Do not deliver a human tour with viewport or recording downscaled relative to the final resolution. The runner must align viewport and video capture to the final resolution or record an explicit blocker.</critical>
40
+ <critical>Hardcoded subtitles over the product screen are not an acceptable standard when the environment supports a dedicated shell. The preferred and mandatory standard is: `header` at the top with the tour title, `stage` centered for the intact browser, and `footer` at the bottom exclusively for the narrative caption.</critical>
41
+ <critical>Even when the human video is assembled from screenshots rather than recorded navigation, the final composition must maintain the same shell layout: header and footer outside the application area. Do not deliver a fullscreen slideshow with subtitles burned directly over the product content.</critical>
42
+ <critical>In the main human video artifact, the caption must be visible inside the shell's `footer`. A sidecar `.srt` file and an embedded subtitle track may exist as support, but they do not replace the obligation for the main narrative to already appear correctly positioned in the footer of the final composition.</critical>
43
+ <critical>It is invalid to deliver as the main version an MP4 whose caption depends on the player for positioning (`mov_text`, `tx3g`, or similar subtitle track) when this causes the text to appear outside the shell's footer. If there is an auxiliary embedded track, visually validate that the main version remains correct even without the player rendering subtitles.</critical>
44
+ <critical>When the request involves human video with captions, always generate two video artifacts: a `clean` version without captions rendered in the frame, for use with player + sidecar `.srt`; and a `captioned` version with the narrative already burned correctly in the shell's `footer`.</critical>
45
+ <critical>If a previous flow in the workspace already has a better-resolved human recording shell, reuse that visual and structural pattern before improvising a new composition. This reuse is preferable to a simplified solution with captions embedded over the viewport.</critical>
40
46
 
41
47
  ### Video Pacing Requirements
42
48
  <critical>Before and after main actions, insert intentional pauses. As an operational rule: maintain 2 to 3 seconds of permanence on relevant loaded states and at least 1.5 seconds after the visible outcome of each main action before proceeding.</critical>
@@ -49,7 +55,6 @@ Works best with project analyzed by `/dw-analyze-project`
49
55
 
50
56
  When available in the project under `./.agents/skills/`, use these skills as operational support without replacing this command as source of truth:
51
57
 
52
- - `agent-browser`: support for real navigation, request inspection, screenshots, persistent auth, and browser-first reproduction
53
58
  - `webapp-testing`: support for structuring E2E flows, local retests, and evidence collection
54
59
  - `remotion-best-practices`: mandatory support when there is a final human video, captions, composition, transitions, FFmpeg, or Remotion
55
60
  - `humanizer`: mandatory support for reviewing and naturalizing all captions, `.srt` files, descriptive texts, and any human-facing writing before final delivery
@@ -119,6 +124,9 @@ If there is execution:
119
124
  If there is a final human video:
120
125
  - save in `evidence/videos/` with a name that clearly differentiates the final tour from the raw capture
121
126
  - when `ffmpeg` is available, also save the `mp4` version of the final human tour
127
+ - when captions are involved, save two explicit variants:
128
+ - a `clean` version without captions drawn in the frame
129
+ - a `captioned` version with captions drawn in the shell's `footer`
122
130
  - record in `manifest.json` which files are `raw` and which are `human_final`
123
131
 
124
132
  ## Required Flow
@@ -224,6 +232,11 @@ Generate `e2e-runbook.md` in detailed operational style:
224
232
  - when there is a title and caption, reserve header and footer outside the browser stage
225
233
  - when the composition calls for a centered browser, keep the application in a central stage without fixed side columns and without sacrificing full-width header and footer
226
234
  - avoid any artificial reduction of the app viewport to fit overlays
235
+ - avoid burned subtitles directly inside the product viewport when there is a possibility of using an external shell
236
+ - burn the main narrative in the shell's `footer` of the final video; use sidecar `.srt` and embedded track only as complementary artifacts
237
+ - for each final tour with captions, produce:
238
+ - `clean`: no captions in the frame, with separate `.srt` for the player to decide
239
+ - `captioned`: captions already positioned in the shell's `footer`
227
240
  - align video capture to final resolution to avoid sharpness loss in the browser
228
241
  - keep each relevant state on screen long enough for visual reading, especially lists, dialogs, badges, validations, messages, and final results
229
242
  - keep captions on screen long enough for comfortable reading, without switching text before the corresponding step is visually understood
@@ -246,6 +259,28 @@ When producing or reviewing the final tour, apply these rules as baseline:
246
259
  - when comparing before and after states, clearly show both moments
247
260
  - if the recording is too fast for human reading, consider the execution inadequate even if technically correct
248
261
  - if `ffmpeg` is installed, consider incomplete any delivery that leaves only `webm` or another raw format without generating `mp4`
262
+ - consider inadequate any video that uses only overlaid captions on the browser when the project supports a shell with dedicated header/footer
263
+ - consider inadequate any video whose main caption depends on the player's renderer and therefore appears outside the shell's intended `footer`
264
+ - consider incomplete any delivery that provides only one of the variants (`clean` or `captioned`) when the flow requires video with captions
265
+
266
+ ## Mandatory Visual Shell Standard
267
+
268
+ When there is a final human video, adopt as minimum visual standard:
269
+
270
+ - `header` fixed outside the browser with the module or flow name
271
+ - `main` centering a single browser `stage`
272
+ - `footer` fixed outside the browser, reserved for narrative caption or short context
273
+ - `stage` with its own border, radius, and shadow, without cropping the application viewport
274
+ - `stage` width and height explicitly defined and proportional to the final resolution
275
+
276
+ Baseline recommended for `1920x1080` when no better standard exists in the flow itself:
277
+
278
+ - `header`: ~`64px`
279
+ - `footer`: ~`112px`
280
+ - `stage`: ~`1600x900`
281
+
282
+ If a previous flow in the workspace already has a working shell script (e.g., `record-human-tour.cjs`), reuse it as reference. If choosing a different layout, justify explicitly in `manifest.json`.
283
+
249
284
  - Update `manifest.json` with final status, artifacts, and blockers, distinguishing:
250
285
  - MCP evidence
251
286
  - raw execution capture
@@ -7,7 +7,7 @@ You are a specialized implementation reviewer that compares documented requireme
7
7
  - Do NOT use when requirements have not been finalized yet
8
8
 
9
9
  ## Pipeline Position
10
- **Predecessor:** `/dw-run-plan` (auto) or `/dw-run-task` (manual) | **Successor:** `/dw-code-review`
10
+ **Predecessor:** `/dw-run-plan` (auto) or `/dw-run-task` (manual) | **Successor:** `/dw-code-review` (this review auto-fixes gaps before completing)
11
11
 
12
12
  Called by: `/dw-run-plan` at end of all tasks
13
13
 
@@ -175,35 +175,82 @@ Check if the implementation follows project patterns:
175
175
  2. [secondary action]
176
176
  ```
177
177
 
178
- ### 8. Post-Report Decision (Required)
178
+ ### 8. Gap Resolution Loop (Required)
179
179
 
180
- After generating the final report, evaluate the result:
180
+ <critical>Review does NOT end at the first report. If gaps are found, enter an automatic fix-review loop until 100% compliance or explicit BLOCK.</critical>
181
+
182
+ After generating the report, evaluate:
181
183
 
182
- **If there are NO gaps (0 pending, 0 partial, 100% implemented):**
183
- - Present the report to the user
184
- - **DO NOT enter planning mode (EnterPlanMode)**
185
- - **DO NOT dispatch execution agents (Task)**
186
- - **DO NOT create tasks (TaskCreate)**
187
- - **DO NOT propose implementing anything**
188
- - Simply conclude with: "Implementation 100% compliant. No action needed."
189
- - END the review immediately
190
-
191
- **If there ARE gaps (pending > 0 OR partial > 0):**
192
- - Present the report with gaps and recommendations
193
- - List actions needed to resolve each gap
194
- - Wait for user instructions on how to proceed
195
- - **DO NOT enter planning mode automatically**
196
- - **DO NOT execute fixes without explicit user instruction**
197
-
198
- **Compliance Check Decision Flow:**
199
184
  ```dot
200
- digraph compliance {
201
- "Analysis Complete" -> "0 gaps AND 0 partial?";
202
- "0 gaps AND 0 partial?" -> "Report + EXIT" [label="yes"];
203
- "0 gaps AND 0 partial?" -> "Report + List Actions\nWAIT for user" [label="no"];
185
+ digraph review_loop {
186
+ rankdir=TB;
187
+ "Generate Review Report" -> "Gaps found?";
188
+ "Gaps found?" -> "100% Compliant\nExit" [label="no"];
189
+ "Gaps found?" -> "Fix gaps\n(implement missing code)" [label="yes"];
190
+ "Fix gaps\n(implement missing code)" -> "Re-review\nimplementation";
191
+ "Re-review\nimplementation" -> "Still gaps?";
192
+ "Still gaps?" -> "100% Compliant\nExit" [label="no"];
193
+ "Still gaps?" -> "Max cycles\nreached?" [label="yes"];
194
+ "Max cycles\nreached?" -> "Fix gaps\n(implement missing code)" [label="no"];
195
+ "Max cycles\nreached?" -> "BLOCKED\nReport residual gaps" [label="yes (3 cycles)"];
204
196
  }
205
197
  ```
206
198
 
199
+ **Loop rules:**
200
+ 1. After the initial report, if there are gaps (❌ not implemented or ⚠️ partial), enter the loop automatically
201
+ 2. For each cycle:
202
+ a. Fix all identified gaps: implement missing code, complete partial implementations
203
+ b. Follow project patterns from `.dw/rules/` during fixes
204
+ c. Run tests after fixes (`pnpm test` or equivalent)
205
+ d. Re-read the changed files and re-compare against PRD requirements
206
+ e. Update the review report with cycle results
207
+ f. If 100% compliance → exit loop, present final report
208
+ g. If gaps remain → continue next cycle
209
+ 3. **Maximum 3 fix-review cycles.** After 3 cycles, mark review as **BLOCKED** with residual gaps documented
210
+ 4. Each cycle must append a section to the report showing what was fixed and the new compliance status
211
+ 5. Commit fixes after each cycle: `fix(review): implement [requirement] from PRD`
212
+
213
+ **What to fix automatically:**
214
+ - ❌ Requirements not implemented → implement them
215
+ - ⚠️ Requirements partially implemented → complete them
216
+ - 📝 Tasks marked complete but actually incomplete → finish them
217
+
218
+ **What NOT to fix (stop and ask user):**
219
+ - Requirements that contradict each other in the PRD
220
+ - Requirements that need architectural decisions not covered in TechSpec
221
+ - Requirements that depend on external services not available
222
+ - If a fix would take more than the scope of a single task
223
+
224
+ **Cycle report format (append to review report):**
225
+ ```markdown
226
+ ## Fix Cycle [N] — [YYYY-MM-DD]
227
+
228
+ ### Gaps Resolved
229
+ | RF | Description | Action Taken | Status |
230
+ |----|-------------|-------------|--------|
231
+ | RF-XX | [requirement] | [what was implemented] | ✅ |
232
+
233
+ ### Tests
234
+ - `pnpm test`: PASS/FAIL
235
+ - Files changed: [list]
236
+
237
+ ### Remaining Gaps
238
+ - [list or "None"]
239
+
240
+ ### Cycle Result: CONTINUE / COMPLIANT / BLOCKED
241
+ ```
242
+
243
+ **If 100% compliant after any cycle:**
244
+ - Present the final report
245
+ - **DO NOT enter planning mode (EnterPlanMode)**
246
+ - **DO NOT create tasks (TaskCreate)**
247
+ - Conclude with: "Implementation 100% compliant after [N] fix cycles. No further action needed."
248
+
249
+ **If BLOCKED after 3 cycles:**
250
+ - Present the report with residual gaps
251
+ - List what could not be resolved and why
252
+ - Wait for user instructions
253
+
207
254
  ## Status Levels
208
255
 
209
256
  | Icon | Meaning |
@@ -255,4 +302,5 @@ git diff <commit> -- path/to/file
255
302
  <critical>DO NOT APPROVE requirements without concrete evidence in the code</critical>
256
303
  <critical>ANALYZE the actual code, do not trust only the checkboxes in tasks.md</critical>
257
304
  <critical>If 100% of requirements were implemented and there are NO gaps: DO NOT enter plan mode, DO NOT create tasks, DO NOT dispatch agents. Just present the report and END.</critical>
305
+ <critical>If gaps are found, enter the fix-review loop automatically. Do NOT wait for user instructions to fix gaps. Maximum 3 cycles before marking as BLOCKED.</critical>
258
306
  </system_instructions>
@@ -7,7 +7,7 @@ You are an AI assistant specialized in Quality Assurance. Your task is to valida
7
7
  - Do NOT use when requirements have not been defined yet (create PRD first)
8
8
 
9
9
  ## Pipeline Position
10
- **Predecessor:** `/dw-run-plan` or `/dw-run-task` | **Successor:** `/dw-fix-qa` (if bugs) or `/dw-code-review`
10
+ **Predecessor:** `/dw-run-plan` or `/dw-run-task` | **Successor:** `/dw-code-review` (QA auto-fixes bugs internally before completing)
11
11
 
12
12
  <critical>Use the Playwright MCP to execute all E2E tests</critical>
13
13
  <critical>Verify ALL requirements from the PRD and TechSpec before approving</critical>
@@ -20,7 +20,6 @@ You are an AI assistant specialized in Quality Assurance. Your task is to valida
20
20
 
21
21
  When available in the project under `./.agents/skills/`, use these skills as operational support without replacing this command:
22
22
 
23
- - `agent-browser`: support for operational navigation, persistent auth, additional screenshots, request inspection, and session debugging
24
23
  - `webapp-testing`: support for structuring test flows, retests, screenshots, and logs when complementary to Playwright MCP
25
24
  - `vercel-react-best-practices`: use only if the frontend under test is React/Next.js and there is indication of regression related to rendering, fetching, hydration, or perceived performance
26
25
 
@@ -96,7 +95,7 @@ Refer to `.dw/rules/` for project-specific URLs and frameworks.
96
95
  - Verify the application is running on localhost
97
96
  - Use `browser_navigate` from Playwright MCP to access the application
98
97
  - Confirm the page loaded correctly with `browser_snapshot`
99
- - If persistent session, auth import, network inspection beyond MCP, or browser-first reproduction is needed, complement with `agent-browser`
98
+ - If persistent session, auth import, or network inspection beyond MCP is needed, complement with `webapp-testing`
100
99
 
101
100
  ### 3. Menu Page Verification (Required -- Execute BEFORE RF tests)
102
101
 
@@ -163,7 +162,7 @@ For each functional requirement from the PRD:
163
162
  8. Mark as PASSED or FAILED
164
163
  9. Save the Playwright flow script in `{{PRD_PATH}}/QA/scripts/` with standardized name: `RF-XX-[slug].spec.ts` (or `.js`)
165
164
  10. Record in the report which credentials (user/profile) were used in each permission-sensitive flow
166
- 11. When the MCP flow becomes unstable or insufficient for operational evidence, complement with `agent-browser` or `webapp-testing`, recording this explicitly in the report
165
+ 11. When the MCP flow becomes unstable or insufficient for operational evidence, complement with `webapp-testing`, recording this explicitly in the report
167
166
 
168
167
  <critical>It is not enough to validate only the happy path. Each requirement must be exercised against its boundary states and most likely regressions</critical>
169
168
  <critical>If a requirement cannot be fully validated via E2E, QA must be marked as REJECTED or BLOCKED, never APPROVED</critical>
@@ -272,6 +271,62 @@ Generate report in `{{PRD_PATH}}/QA/qa-report.md`:
272
271
  [Final QA assessment]
273
272
  ```
274
273
 
274
+ ### 9. QA Fix-Retest Loop (Automatic)
275
+
276
+ <critical>QA does NOT end at the first report. If bugs are found, enter an automatic fix-retest loop until QA is APPROVED or explicitly BLOCKED.</critical>
277
+
278
+ After generating the initial QA report:
279
+
280
+ ```dot
281
+ digraph qa_loop {
282
+ rankdir=TB;
283
+ "Generate QA Report" -> "Bugs found?";
284
+ "Bugs found?" -> "QA APPROVED\nExit" [label="no"];
285
+ "Bugs found?" -> "Fix bugs\n(follow dw-fix-qa rules)" [label="yes"];
286
+ "Fix bugs\n(follow dw-fix-qa rules)" -> "Retest ALL\nfixed bugs";
287
+ "Retest ALL\nfixed bugs" -> "New/reopened\nbugs?";
288
+ "New/reopened\nbugs?" -> "QA APPROVED\nExit" [label="no"];
289
+ "New/reopened\nbugs?" -> "Max cycles\nreached?" [label="yes"];
290
+ "Max cycles\nreached?" -> "Fix bugs\n(follow dw-fix-qa rules)" [label="no"];
291
+ "Max cycles\nreached?" -> "QA BLOCKED\nReport residual bugs" [label="yes (5 cycles)"];
292
+ }
293
+ ```
294
+
295
+ **Loop rules:**
296
+ 1. After the initial report, if `QA/bugs.md` has bugs with `Status: Open`, enter the loop automatically
297
+ 2. For each cycle:
298
+ a. Fix all open bugs surgically (same rules as `/dw-fix-qa`: no scope creep, minimal impact)
299
+ b. Retest ALL fixed bugs via Playwright MCP with evidence capture
300
+ c. Check for regressions introduced by the fixes
301
+ d. Update `QA/bugs.md` and `QA/qa-report.md` with the cycle results
302
+ e. If all critical/high bugs are closed → **QA APPROVED**, exit loop
303
+ f. If new bugs appeared or fixes failed → continue next cycle
304
+ 3. **Maximum 5 fix-retest cycles.** After 5 cycles, mark QA as **BLOCKED** with residual bugs documented
305
+ 4. Each cycle must update the QA report with a "Cycle N" section showing what was fixed, retested, and the result
306
+ 5. Commit fixes after each successful cycle: `fix(qa): resolve BUG-NN [description]`
307
+
308
+ **Cycle report format (append to qa-report.md):**
309
+ ```markdown
310
+ ## Fix-Retest Cycle [N] — [YYYY-MM-DD]
311
+
312
+ ### Bugs Fixed
313
+ | Bug | Fix Description | Retest | Evidence |
314
+ |-----|----------------|--------|----------|
315
+ | BUG-01 | [what was changed] | PASS/FAIL | `QA/screenshots/BUG-01-cycle-N.png` |
316
+
317
+ ### Regressions Checked
318
+ - [list of related flows retested]
319
+
320
+ ### Cycle Result
321
+ - **Bugs remaining:** [count]
322
+ - **Status:** CONTINUE / APPROVED / BLOCKED
323
+ ```
324
+
325
+ **Red flags — STOP the loop:**
326
+ - Fix requires a new feature (not a bug) → stop, recommend `/dw-create-prd`
327
+ - Fix requires major refactoring → stop, recommend `/dw-refactoring-analysis`
328
+ - Same bug keeps reappearing after 2+ fix attempts → mark as BLOCKED with root cause analysis
329
+
275
330
  ## Quality Checklist
276
331
 
277
332
  - [ ] PRD analyzed and requirements extracted
@@ -20,7 +20,6 @@ When available in the project at `./.agents/skills/`, use these skills as specia
20
20
  |-------|---------|
21
21
  | `vercel-react-best-practices` | Task touches React rendering, hydration, data fetching, bundle, cache, or performance |
22
22
  | `webapp-testing` | Task has interactive frontend needing E2E validation in a real browser |
23
- | `agent-browser` | UI validation requires persistent session, operational navigation inspection, or complementary visual evidence |
24
23
 
25
24
  ## File Locations
26
25
 
@@ -78,7 +77,7 @@ After providing the summary and approach, **begin implementation immediately**:
78
77
  - Follow established project patterns
79
78
  - Ensure all requirements are met
80
79
  - **Run tests**: use the project's test command
81
- - If there is interactive frontend, also validate real behavior with `webapp-testing` or `agent-browser` when doing so reduces the risk of invisible regression in unit tests
80
+ - If there is interactive frontend, also validate real behavior with `webapp-testing` when doing so reduces the risk of invisible regression in unit tests
82
81
 
83
82
  **YOU MUST** start the implementation right after the process above.
84
83
 
@@ -16,7 +16,6 @@
16
16
  Quando disponíveis no projeto em `./.agents/skills/`, use estas skills como suporte contextual sem substituir este comando:
17
17
 
18
18
  - `vercel-react-best-practices`: use quando o bug afeta React/Next.js e há suspeita de problemas de render, hidratação, fetching, waterfall, bundle ou re-render
19
- - `agent-browser`: use quando o bug requer reprodução em navegador real, sessão persistente, inspeção de requests ou captura visual
20
19
  - `webapp-testing`: use quando a correção requer fluxo E2E/reteste reproduzível em uma web app
21
20
  - `security-review`: use quando a causa raiz toca auth, autorização, input externo, upload, secrets, SQL, XSS, SSRF ou outras superfícies sensíveis
22
21
 
@@ -152,7 +151,7 @@
152
151
  - Mensagens de erro relacionadas
153
152
  - Stack traces
154
153
  - Arquivos modificados recentemente
155
- - Se o bug for relacionado a UI ou depender de fluxo no navegador, complemente a coleta com `agent-browser` ou `webapp-testing`
154
+ - Se o bug for relacionado a UI ou depender de fluxo no navegador, complemente a coleta com `webapp-testing`
156
155
 
157
156
  ### 3. Perguntas de Clarificação (OBRIGATÓRIO - EXATAMENTE 3)
158
157
 
@@ -179,7 +178,7 @@
179
178
  - **Causa Provável**: Baseado nas evidências
180
179
  - **Arquivos Afetados**: Lista de arquivos a modificar
181
180
  - **Impacto**: Outros componentes que podem ser afetados
182
- - **Skills utilizadas**: registre explicitamente se a análise usou `vercel-react-best-practices`, `agent-browser`, `webapp-testing` ou `security-review`
181
+ - **Skills utilizadas**: registre explicitamente se a análise usou `vercel-react-best-practices`, `webapp-testing` ou `security-review`
183
182
 
184
183
  ### 4.1 Checkpoint de Escopo (OBRIGATÓRIO)
185
184
 
@@ -17,7 +17,6 @@ Você é um assistente IA especializado em correção de bugs pós-QA com retest
17
17
 
18
18
  Quando disponíveis no projeto em `./.agents/skills/`, use estas skills como suporte operacional sem substituir este comando:
19
19
 
20
- - `agent-browser`: suporte para reproduzir bugs com sessões persistentes, capturar dados de rede, screenshots adicionais e validar correções browser-first
21
20
  - `webapp-testing`: suporte para estruturar retestes, capturas e scripts quando complementar ao Playwright MCP
22
21
  - `vercel-react-best-practices`: use apenas se a correção afetar frontend React/Next.js e houver risco de regressão de renderização, hidratação, fetching ou performance
23
22
 
@@ -87,7 +86,7 @@ Para cada bug corrigido:
87
86
  - `QA/logs/console-retest.log`
88
87
  - `QA/logs/network-retest.log`
89
88
  7. Registrar no relatório de QA qual usuário/perfil foi usado no reteste
90
- 8. Se o reteste exigir auth persistente, inspeção além do MCP, ou reprodução mais fiel em navegador real, complementar com `agent-browser` e registrar no relatório
89
+ 8. Se o reteste exigir auth persistente, inspeção além do MCP, ou reprodução mais fiel em navegador real, registrar essa necessidade e a abordagem adotada no relatório
91
90
 
92
91
  ### 4. Atualização de Artefatos
93
92
 
@@ -37,6 +37,12 @@ Funciona melhor com projeto analisado por `/dw-analyze-project`
37
37
  <critical>Header e footer de vídeos humanos não podem disputar área útil com a tela do browser. Quando eles existirem, devem ficar fora do stage do browser, em uma composição maior, preservando intacta a viewport real da aplicação.</critical>
38
38
  <critical>Quando o objetivo for um tour humano com browser centralizado, mantenha a aplicação em um palco central sem colunas laterais fixas, preservando header e footer em largura total fora da área do browser.</critical>
39
39
  <critical>Qualidade visual do browser é requisito obrigatório. Não entregue tour humano com viewport ou gravação reescalada para baixo em relação à resolução final. O runner deve alinhar viewport e captura de vídeo à resolução final ou registrar bloqueio explícito.</critical>
40
+ <critical>Legenda hardcoded sobre a tela do produto não é padrão aceitável quando o ambiente permitir shell dedicada. O padrão obrigatório é: `header` superior com título do tour, `stage` centralizado para o browser intacto e `footer` inferior exclusivo para a legenda narrativa.</critical>
41
+ <critical>Mesmo quando o vídeo humano for montado a partir de screenshots e não de navegação gravada, a composição final deve manter o mesmo layout de shell: cabeçalho e rodapé fora da área útil da aplicação. Não entregar slideshow fullscreen com subtitles queimadas diretamente sobre o conteúdo do produto.</critical>
42
+ <critical>No artefato principal de vídeo humano, a legenda precisa estar visível dentro do `footer` da shell. Arquivo `.srt` sidecar e faixa de subtitle embutida podem existir como apoio, mas não substituem a obrigação de a narrativa principal já aparecer posicionada corretamente no rodapé da composição final.</critical>
43
+ <critical>É inválido entregar como versão principal um MP4 cuja legenda dependa do player para posicionamento (`mov_text`, `tx3g`, subtitle track similar) quando isso fizer o texto sair do footer da shell. Se houver faixa embutida auxiliar, validar visualmente que a versão principal continua correta mesmo sem o player renderizar subtitles.</critical>
44
+ <critical>Quando o pedido envolver vídeo humano com legenda, gerar sempre dois artefatos de vídeo: um `clean` sem legenda renderizada no quadro, para uso com player + `.srt` sidecar; e um `captioned` com a narrativa já queimada corretamente no `footer` da shell.</critical>
45
+ <critical>Se já existir no workspace um flow anterior com shell de gravação humana melhor resolvida, reutilize esse padrão visual e estrutural antes de improvisar nova composição. Esse reaproveitamento é preferível a uma solução simplificada com legendas embutidas sobre a viewport.</critical>
40
46
 
41
47
  ### Requisitos de Cadência do Vídeo
42
48
  <critical>Antes e depois de ações principais, inserir pausas intencionais. Como regra operacional: manter de 2 a 3 segundos de permanência em estados relevantes já carregados e pelo menos 1,5 segundo após o desfecho visível de cada ação principal antes de seguir.</critical>
@@ -49,7 +55,6 @@ Funciona melhor com projeto analisado por `/dw-analyze-project`
49
55
 
50
56
  Quando disponíveis no projeto em `./.agents/skills/`, use estas skills como apoio operacional, sem substituir este comando como fonte de verdade:
51
57
 
52
- - `agent-browser`: apoio para navegação real, inspeção de requests, screenshots, auth persistente e reprodução browser-first
53
58
  - `webapp-testing`: apoio para estruturar fluxos E2E, retestes locais e coleta de evidências
54
59
  - `remotion-best-practices`: apoio obrigatório quando houver vídeo humano final, legendas, composição, transições, FFmpeg ou Remotion
55
60
  - `humanizer`: apoio obrigatório para revisar e naturalizar todas as legendas, captions `.srt`, textos descritivos e qualquer redação voltada a leitura humana antes da entrega final
@@ -119,6 +124,9 @@ Se houver execução:
119
124
  Se houver vídeo humano final:
120
125
  - salvar em `evidence/videos/` com nome que diferencie claramente o tour final da captura bruta
121
126
  - quando `ffmpeg` estiver disponível, salvar também a versão `mp4` do tour humano final
127
+ - quando houver legendas, salvar também duas variantes explícitas:
128
+ - uma versão `clean` sem legenda desenhada no frame
129
+ - uma versão `captioned` com legenda desenhada no `footer` da shell
122
130
  - registrar no `manifest.json` quais arquivos são `raw` e quais são `human_final`
123
131
 
124
132
  ## Fluxo obrigatório
@@ -224,6 +232,11 @@ Gerar `e2e-runbook.md` no estilo operacional detalhado:
224
232
  - quando houver título e legenda, reservar cabeçalho e rodapé próprios fora do stage do browser
225
233
  - quando a composição pedir browser centralizado, manter a aplicação em um palco central sem colunas laterais fixas e sem sacrificar a largura total do cabeçalho e do rodapé
226
234
  - evitar qualquer redução artificial da viewport do app para encaixar overlays
235
+ - evitar subtitles queimadas diretamente dentro da viewport do produto quando houver possibilidade de usar shell externa
236
+ - queimar a narrativa principal no `footer` da shell do vídeo final; usar `.srt` sidecar e faixa embutida apenas como artefatos complementares
237
+ - para cada tour final com legenda, produzir:
238
+ - `clean`: sem legenda no frame, com `.srt` separado para o player decidir
239
+ - `captioned`: legenda já posicionada no `footer` da shell
227
240
  - alinhar a captura de vídeo à resolução final para evitar perda de nitidez no browser
228
241
  - manter em tela cada estado relevante por tempo suficiente para leitura visual, em especial listas, diálogos, badges, validações, mensagens e resultados finais
229
242
  - manter as legendas em tela por tempo suficiente para leitura confortável, sem trocar texto antes de a etapa correspondente ser compreendida visualmente
@@ -246,6 +259,28 @@ Quando produzir ou revisar o tour final, aplicar estas regras como baseline:
246
259
  - quando houver comparação entre estado anterior e posterior, mostrar claramente os dois momentos
247
260
  - se a gravação ficar rápida demais para leitura humana, considerar a execução inadequada mesmo que tecnicamente correta
248
261
  - se `ffmpeg` estiver instalado, considerar incompleta a entrega que deixar apenas `webm` ou outro bruto sem gerar `mp4`
262
+ - considerar inadequado o vídeo que use apenas captions sobrepostas ao browser quando o projeto permitir shell com header/footer dedicados
263
+ - considerar inadequado o vídeo cuja legenda principal dependa do renderer do player e por isso apareça fora do `footer` previsto na shell
264
+ - considerar incompleta a entrega que disponibilize só uma das variantes (`clean` ou `captioned`) quando o fluxo exigir vídeo com legenda
265
+
266
+ ## Padrão visual obrigatório da shell
267
+
268
+ Quando houver vídeo humano final, adotar como padrão visual mínimo:
269
+
270
+ - `header` fixo fora do browser com o nome do módulo ou fluxo
271
+ - `main` centralizando um `stage` único do browser
272
+ - `footer` fixo fora do browser, reservado para legenda narrativa ou contexto curto
273
+ - `stage` com borda, raio e sombra próprios, sem cortar a viewport da aplicação
274
+ - largura e altura do `stage` definidas explicitamente e proporcionais à resolução final
275
+
276
+ Baseline recomendada para `1920x1080` quando não houver padrão melhor no próprio flow:
277
+
278
+ - `header`: ~`64px`
279
+ - `footer`: ~`112px`
280
+ - `stage`: ~`1600x900`
281
+
282
+ Se já existir no workspace um script de shell funcional (ex: `record-human-tour.cjs`), reutilize-o como referência. Se optar por outro layout, justifique explicitamente no `manifest.json`.
283
+
249
284
  - Atualizar `manifest.json` com status final, artefatos e bloqueios, distinguindo:
250
285
  - evidências MCP
251
286
  - captura bruta de execução