@sentry/warden 0.14.0 → 0.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89) hide show
  1. package/dist/cli/args.d.ts +1 -0
  2. package/dist/cli/args.d.ts.map +1 -1
  3. package/dist/cli/args.js +17 -2
  4. package/dist/cli/args.js.map +1 -1
  5. package/dist/cli/commands/add.d.ts.map +1 -1
  6. package/dist/cli/commands/add.js +25 -33
  7. package/dist/cli/commands/add.js.map +1 -1
  8. package/dist/cli/commands/logs.d.ts.map +1 -1
  9. package/dist/cli/commands/logs.js +4 -11
  10. package/dist/cli/commands/logs.js.map +1 -1
  11. package/dist/cli/commands/setup-app.d.ts.map +1 -1
  12. package/dist/cli/commands/setup-app.js +19 -15
  13. package/dist/cli/commands/setup-app.js.map +1 -1
  14. package/dist/cli/context.d.ts +2 -0
  15. package/dist/cli/context.d.ts.map +1 -1
  16. package/dist/cli/context.js +8 -2
  17. package/dist/cli/context.js.map +1 -1
  18. package/dist/cli/files.d.ts.map +1 -1
  19. package/dist/cli/files.js +27 -30
  20. package/dist/cli/files.js.map +1 -1
  21. package/dist/cli/git.d.ts +8 -3
  22. package/dist/cli/git.d.ts.map +1 -1
  23. package/dist/cli/git.js +24 -13
  24. package/dist/cli/git.js.map +1 -1
  25. package/dist/cli/index.js +10 -0
  26. package/dist/cli/index.js.map +1 -1
  27. package/dist/cli/input.d.ts +7 -0
  28. package/dist/cli/input.d.ts.map +1 -1
  29. package/dist/cli/input.js +13 -2
  30. package/dist/cli/input.js.map +1 -1
  31. package/dist/cli/main.d.ts.map +1 -1
  32. package/dist/cli/main.js +62 -19
  33. package/dist/cli/main.js.map +1 -1
  34. package/dist/cli/output/tasks.d.ts.map +1 -1
  35. package/dist/cli/output/tasks.js +22 -3
  36. package/dist/cli/output/tasks.js.map +1 -1
  37. package/dist/config/writer.d.ts.map +1 -1
  38. package/dist/config/writer.js +18 -0
  39. package/dist/config/writer.js.map +1 -1
  40. package/dist/diff/apply.d.ts +6 -0
  41. package/dist/diff/apply.d.ts.map +1 -0
  42. package/dist/diff/apply.js +44 -0
  43. package/dist/diff/apply.js.map +1 -0
  44. package/dist/diff/index.d.ts +1 -0
  45. package/dist/diff/index.d.ts.map +1 -1
  46. package/dist/diff/index.js +1 -0
  47. package/dist/diff/index.js.map +1 -1
  48. package/dist/evals/index.js +1 -1
  49. package/dist/evals/index.js.map +1 -1
  50. package/dist/output/github-issues.d.ts.map +1 -1
  51. package/dist/output/github-issues.js +15 -57
  52. package/dist/output/github-issues.js.map +1 -1
  53. package/dist/sdk/analyze.d.ts.map +1 -1
  54. package/dist/sdk/analyze.js +24 -5
  55. package/dist/sdk/analyze.js.map +1 -1
  56. package/dist/sdk/auth.d.ts.map +1 -1
  57. package/dist/sdk/auth.js +2 -2
  58. package/dist/sdk/auth.js.map +1 -1
  59. package/dist/sdk/errors.d.ts +3 -1
  60. package/dist/sdk/errors.d.ts.map +1 -1
  61. package/dist/sdk/errors.js +2 -2
  62. package/dist/sdk/errors.js.map +1 -1
  63. package/dist/sdk/fix-quality.d.ts +20 -0
  64. package/dist/sdk/fix-quality.d.ts.map +1 -0
  65. package/dist/sdk/fix-quality.js +167 -0
  66. package/dist/sdk/fix-quality.js.map +1 -0
  67. package/dist/sdk/prepare.d.ts.map +1 -1
  68. package/dist/sdk/prepare.js +5 -0
  69. package/dist/sdk/prepare.js.map +1 -1
  70. package/dist/sentry.d.ts +5 -3
  71. package/dist/sentry.d.ts.map +1 -1
  72. package/dist/sentry.js +37 -11
  73. package/dist/sentry.js.map +1 -1
  74. package/dist/skills/remote.js +1 -1
  75. package/dist/skills/remote.js.map +1 -1
  76. package/dist/utils/exec.d.ts +4 -1
  77. package/dist/utils/exec.d.ts.map +1 -1
  78. package/dist/utils/exec.js +6 -4
  79. package/dist/utils/exec.js.map +1 -1
  80. package/package.json +1 -1
  81. package/skills/warden-sweep/SKILL.md +67 -74
  82. package/skills/warden-sweep/references/patch-prompt.md +72 -0
  83. package/skills/warden-sweep/references/verify-prompt.md +25 -0
  84. package/skills/warden-sweep/scripts/_utils.py +62 -0
  85. package/skills/warden-sweep/scripts/create_issue.py +189 -0
  86. package/skills/warden-sweep/scripts/find_reviewers.py +16 -17
  87. package/skills/warden-sweep/scripts/generate_report.py +20 -25
  88. package/skills/warden-sweep/scripts/organize.py +128 -21
  89. package/skills/warden-sweep/scripts/scan.py +82 -130
@@ -1,6 +1,7 @@
1
1
  ---
2
2
  name: warden-sweep
3
3
  description: Full-repository code sweep. Scans every file with warden, verifies findings via deep tracing, creates draft PRs for validated issues. Use when asked to "sweep the repo", "scan everything", "find all bugs", "full codebase review", "batch code analysis", or run warden across the entire repository.
4
+ disable-model-invocation: true
4
5
  ---
5
6
 
6
7
  # Warden Sweep
@@ -30,9 +31,17 @@ Fetches open warden-labeled PRs, builds file-to-PR dedup index, caches diffs for
30
31
  uv run ${CLAUDE_SKILL_ROOT}/scripts/index_prs.py <sweep-dir>
31
32
  ```
32
33
 
34
+ ### `scripts/create_issue.py`
35
+
36
+ Creates a GitHub tracking issue summarizing sweep results. Run after verification, before patching.
37
+
38
+ ```bash
39
+ uv run ${CLAUDE_SKILL_ROOT}/scripts/create_issue.py <sweep-dir>
40
+ ```
41
+
33
42
  ### `scripts/organize.py`
34
43
 
35
- Tags security findings, labels security PRs, updates finding reports with PR links, generates summary report, finalizes manifest.
44
+ Tags security findings, labels security PRs, updates finding reports with PR links, posts final results to tracking issue, generates summary report, finalizes manifest.
36
45
 
37
46
  ```bash
38
47
  uv run ${CLAUDE_SKILL_ROOT}/scripts/organize.py <sweep-dir>
@@ -87,7 +96,7 @@ Parse the JSON stdout. Save `runId` and `sweepDir` for subsequent phases.
87
96
  ```
88
97
  ## Scan Complete
89
98
 
90
- Scanned **{filesScanned}** files, **{filesErrored}** errors.
99
+ Scanned **{filesScanned}** files, **{filesTimedOut}** timed out, **{filesErrored}** errors.
91
100
 
92
101
  ### Findings ({totalFindings} total)
93
102
 
@@ -99,7 +108,7 @@ Scanned **{filesScanned}** files, **{filesErrored}** errors.
99
108
 
100
109
  Render every finding from the `findings` array. Bold severity for high and above.
101
110
 
102
- **On failure**: If exit code 1, show the error JSON and stop. If exit code 2, show the partial results and note which files errored.
111
+ **On failure**: If exit code 1, show the error JSON and stop. If exit code 2, show the partial results. List timed-out files separately from errored files so users know which can be retried.
103
112
 
104
113
  ---
105
114
 
@@ -111,37 +120,11 @@ Deep-trace each finding using Task subagents to qualify or disqualify.
111
120
 
112
121
  Check if `data/verify/<finding-id>.json` already exists (incrementality). If it does, skip.
113
122
 
114
- Launch a Task subagent (`subagent_type: "general-purpose"`) for each finding. Process findings sequentially (one at a time) to keep output organized.
123
+ Launch a Task subagent (`subagent_type: "general-purpose"`) for each finding. Process findings in parallel batches of up to 8 to improve throughput.
115
124
 
116
125
  **Task prompt for each finding:**
117
126
 
118
- ```
119
- Verify a code analysis finding. Determine if this is a TRUE issue or a FALSE POSITIVE.
120
- Do NOT write or edit any files. Research only.
121
-
122
- ## Finding
123
- - Title: ${TITLE}
124
- - Severity: ${SEVERITY} | Confidence: ${CONFIDENCE}
125
- - Skill: ${SKILL}
126
- - Location: ${FILE_PATH}:${START_LINE}-${END_LINE}
127
- - Description: ${DESCRIPTION}
128
- - Verification hint: ${VERIFICATION}
129
-
130
- ## Instructions
131
- 1. Read the file at the reported location. Examine at least 50 lines of surrounding context.
132
- 2. Trace data flow to/from the flagged code using Grep/Glob.
133
- 3. Check if the issue is mitigated elsewhere (guards, validation, try/catch upstream).
134
- 4. Check if the issue is actually reachable in practice.
135
-
136
- Return your verdict as JSON:
137
- {
138
- "findingId": "${FINDING_ID}",
139
- "verdict": "verified" or "rejected",
140
- "confidence": "high" or "medium" or "low",
141
- "reasoning": "2-3 sentence explanation",
142
- "traceNotes": "What code paths you examined"
143
- }
144
- ```
127
+ Read `${CLAUDE_SKILL_ROOT}/references/verify-prompt.md` for the prompt template. Substitute the finding's values into the `${...}` placeholders.
145
128
 
146
129
  **Process results:**
147
130
 
@@ -195,11 +178,37 @@ Update manifest: set `phases.verify` to `"complete"`.
195
178
 
196
179
  ---
197
180
 
198
- ## Phase 3: Patch
181
+ ## Phase 3: Issue
199
182
 
200
- For each verified finding, create a worktree, fix the code, and open a draft PR.
183
+ Create a tracking issue that ties all PRs together and gives reviewers a single overview.
201
184
 
202
- **Step 0: Index existing PRs** (1 tool call):
185
+ **Run** (1 tool call):
186
+
187
+ ```bash
188
+ uv run ${CLAUDE_SKILL_ROOT}/scripts/create_issue.py ${SWEEP_DIR}
189
+ ```
190
+
191
+ Parse the JSON stdout. Save `issueUrl` and `issueNumber` for Phase 4.
192
+
193
+ **Report** to user:
194
+
195
+ ```
196
+ ## Tracking Issue Created
197
+
198
+ {issueUrl}
199
+ ```
200
+
201
+ **On failure**: Show the error. Continue to Phase 4 (PRs can still be created without a tracking issue).
202
+
203
+ ---
204
+
205
+ ## Phase 4: Patch
206
+
207
+ For each verified finding, create a worktree, fix the code, and open a draft PR. Process findings **sequentially** (one at a time) since parallel subagents cross-contaminate worktrees.
208
+
209
+ **Severity triage**: Patch HIGH and above. For MEDIUM, only patch findings from bug-detection skills (e.g., `code-review`, `security-review`). Skip LOW and INFO findings.
210
+
211
+ **Step 0: Setup** (run once before the loop):
203
212
 
204
213
  ```bash
205
214
  uv run ${CLAUDE_SKILL_ROOT}/scripts/index_prs.py ${SWEEP_DIR}
@@ -207,6 +216,13 @@ uv run ${CLAUDE_SKILL_ROOT}/scripts/index_prs.py ${SWEEP_DIR}
207
216
 
208
217
  Parse the JSON stdout. Use `fileIndex` for dedup checks.
209
218
 
219
+ Determine the default branch and fetch latest so worktrees branch from current upstream:
220
+
221
+ ```bash
222
+ DEFAULT_BRANCH=$(gh repo view --json defaultBranchRef --jq '.defaultBranchRef.name')
223
+ git fetch origin "${DEFAULT_BRANCH}"
224
+ ```
225
+
210
226
  **For each finding in `data/verified.jsonl`:**
211
227
 
212
228
  Check if finding ID already exists in `data/patches.jsonl` (incrementality). If it does, skip.
@@ -224,47 +240,21 @@ Skip the finding only when there is both chunk overlap AND the PR addresses the
224
240
  ```bash
225
241
  BRANCH="warden-sweep/${RUN_ID}/${FINDING_ID}"
226
242
  WORKTREE="${SWEEP_DIR}/worktrees/${FINDING_ID}"
227
- git worktree add "${WORKTREE}" -b "${BRANCH}"
243
+ git worktree add "${WORKTREE}" -b "${BRANCH}" "origin/${DEFAULT_BRANCH}"
228
244
  ```
229
245
 
230
- Each finding branches from the current HEAD to avoid merge conflicts between PRs.
246
+ Each finding branches from the repo's default branch so PRs contain only the fix commit.
231
247
 
232
248
  **Step 2: Generate fix**
233
249
 
234
- Launch a Task subagent (`subagent_type: "general-purpose"`) to apply the fix in the worktree:
250
+ Launch a Task subagent (`subagent_type: "general-purpose"`) to apply the fix in the worktree. Read `${CLAUDE_SKILL_ROOT}/references/patch-prompt.md` for the prompt template. Substitute the finding's values and worktree path into the `${...}` placeholders.
235
251
 
236
- ```
237
- Fix a verified code issue and add test coverage. You are working in a git worktree at: ${WORKTREE}
252
+ **Step 2b: Handle skipped findings**
238
253
 
239
- ## Finding
240
- - Title: ${TITLE}
241
- - File: ${FILE_PATH}:${START_LINE}
242
- - Description: ${DESCRIPTION}
243
- - Verification: ${REASONING}
244
- - Suggested Fix: ${FIX_DESCRIPTION}
245
- ```diff
246
- ${FIX_DIFF}
247
- ```
248
-
249
- ## Instructions
250
- 1. Read the file at the reported location (use the worktree path: ${WORKTREE}/${FILE_PATH}).
251
- 2. Apply the suggested fix. If the diff doesn't apply cleanly, adapt it while preserving intent.
252
- 3. Write or update tests that verify the fix:
253
- - Follow existing test patterns (co-located files, same framework)
254
- - At minimum, write a test that would have caught the original bug
255
- 4. Only modify the fix target and its test file.
256
- 5. Do NOT run tests locally. CI will validate the changes.
257
- 6. Stage and commit with this exact message:
258
-
259
- fix: ${TITLE}
260
-
261
- Warden finding ${FINDING_ID}
262
- Severity: ${SEVERITY}
263
-
264
- Co-Authored-By: Warden <noreply@getsentry.com>
265
-
266
- Report what you changed: files modified, test files added/updated, any notes.
267
- ```
254
+ If the subagent returned `"status": "skipped"` (not `"applied"`), do NOT proceed to Steps 3-4. Instead:
255
+ 1. Record the finding in `data/patches.jsonl` with `"status": "error"` and `"error": "Subagent skipped: ${skipReason}"`
256
+ 2. Clean up the worktree
257
+ 3. Continue to the next finding
268
258
 
269
259
  **Step 3: Find reviewers**
270
260
 
@@ -275,8 +265,7 @@ uv run ${CLAUDE_SKILL_ROOT}/scripts/find_reviewers.py "${FILE_PATH}"
275
265
  **Step 4: Create draft PR**
276
266
 
277
267
  ```bash
278
- cd "${WORKTREE}"
279
- git push -u origin "${BRANCH}"
268
+ cd "${WORKTREE}" && git push -u origin HEAD:"${BRANCH}"
280
269
  ```
281
270
 
282
271
  Create the PR with a 1-2 sentence "What" summary based on the finding and fix, followed by the finding description and verification reasoning:
@@ -298,6 +287,9 @@ ${REASONING}
298
287
 
299
288
  Automated fix for Warden finding ${FINDING_ID} (${SEVERITY}, detected by ${SKILL}).
300
289
 
290
+ <!-- Only include the next line if Phase 3 succeeded and ISSUE_NUMBER is available -->
291
+ Ref #${ISSUE_NUMBER}
292
+
301
293
  > This PR was auto-generated by a Warden Sweep (run ${RUN_ID}).
302
294
  > The finding has been validated through automated deep tracing,
303
295
  > but human confirmation is requested as this is batch work.
@@ -309,7 +301,7 @@ Save the PR URL.
309
301
 
310
302
  **Step 5: Record and cleanup**
311
303
 
312
- Append to `data/patches.jsonl`:
304
+ Append to `data/patches.jsonl` (use `"created"` as status for successful PRs, not the subagent's `"applied"`):
313
305
  ```json
314
306
  {"findingId": "...", "prUrl": "https://...", "branch": "...", "reviewers": ["user1", "user2"], "filesChanged": ["..."], "status": "created|existing|error"}
315
307
  ```
@@ -340,7 +332,7 @@ Update manifest: set `phases.patch` to `"complete"`.
340
332
 
341
333
  ---
342
334
 
343
- ## Phase 4: Organize
335
+ ## Phase 5: Organize
344
336
 
345
337
  **Run** (1 tool call):
346
338
 
@@ -376,8 +368,9 @@ Each phase is incremental. To resume from where you left off:
376
368
  1. Check `data/manifest.json` to see which phases are complete
377
369
  2. For scan: pass `--sweep-dir` to `scan.py`
378
370
  3. For verify: existing `data/verify/<id>.json` files are skipped
379
- 4. For patch: existing entries in `data/patches.jsonl` are skipped
380
- 5. For organize: safe to re-run (idempotent)
371
+ 4. For issue: `create_issue.py` is idempotent (skips if `issueUrl` in manifest)
372
+ 5. For patch: existing entries in `data/patches.jsonl` are skipped
373
+ 6. For organize: safe to re-run (idempotent)
381
374
 
382
375
  ## Output Directory Structure
383
376
 
@@ -0,0 +1,72 @@
1
+ Fix a verified code issue. You are working in a git worktree at: ${WORKTREE}
2
+
3
+ ## Finding
4
+ - Title: ${TITLE}
5
+ - File: ${FILE_PATH}:${START_LINE}
6
+ - Description: ${DESCRIPTION}
7
+ - Verification: ${REASONING}
8
+ - Suggested Fix: ${FIX_DESCRIPTION}
9
+ ```diff
10
+ ${FIX_DIFF}
11
+ ```
12
+
13
+ ## Instructions
14
+
15
+ ### Step 1: Understand the code
16
+ Read the file at ${WORKTREE}/${FILE_PATH}. Read at least 50 lines above and below the reported location. Trace callers and callees of the affected code using Grep/Glob to understand how it is used. Do NOT skip this step.
17
+
18
+ ### Step 2: Apply a minimal fix
19
+ Apply the smallest change that addresses the finding. If the suggested diff doesn't apply cleanly, adapt it while preserving intent. Do NOT refactor surrounding code, rename variables, add comments, or make any change beyond what the finding requires.
20
+
21
+ ### Step 3: Write tests
22
+ Write or update tests that verify the fix:
23
+ - Follow existing test patterns (co-located files, same framework)
24
+ - At minimum, write a test that would have caught the original bug
25
+ - Test the specific edge case, not just the happy path
26
+
27
+ Only modify the fix target and its test file.
28
+
29
+ ### Step 4: Self-review
30
+ Before staging, run `git diff` in the worktree and review every changed line. Verify:
31
+ 1. The change addresses the specific finding described, not something else
32
+ 2. No unrelated code was modified (no drive-by cleanups, no formatting changes)
33
+ 3. Trace through changed code paths: does the fix introduce any new bug, null reference, type error, or broken import?
34
+ 4. Tests exercise the fix (the failure case), not just that the code runs
35
+
36
+ If ANY check fails, fix the problem before proceeding. If the suggested fix is wrong or would introduce a regression you cannot resolve, do NOT commit. Instead, skip to the output step and report why.
37
+
38
+ ### Step 5: Commit
39
+ Do NOT run tests locally. CI will validate the changes.
40
+
41
+ Stage and commit with this exact message:
42
+
43
+ fix: ${TITLE}
44
+
45
+ Warden finding ${FINDING_ID}
46
+ Severity: ${SEVERITY}
47
+
48
+ Co-Authored-By: Warden <noreply@getsentry.com>
49
+
50
+ ### Step 6: Output
51
+ Return ONLY valid JSON (no surrounding text). Use `"status": "applied"` if you committed a fix, or `"status": "skipped"` if you did not.
52
+
53
+ ```json
54
+ {
55
+ "status": "applied",
56
+ "filesChanged": ["src/example.ts"],
57
+ "testFilesChanged": ["src/example.test.ts"],
58
+ "selfReview": "Verified the fix addresses the null check and test covers the failure case",
59
+ "skipReason": null
60
+ }
61
+ ```
62
+
63
+ When skipping:
64
+ ```json
65
+ {
66
+ "status": "skipped",
67
+ "filesChanged": [],
68
+ "testFilesChanged": [],
69
+ "selfReview": null,
70
+ "skipReason": "The suggested fix would introduce a regression in the error handling path"
71
+ }
72
+ ```
@@ -0,0 +1,25 @@
1
+ Verify a code analysis finding. Determine if this is a TRUE issue or a FALSE POSITIVE.
2
+ Do NOT write or edit any files. Research only.
3
+
4
+ ## Finding
5
+ - Title: ${TITLE}
6
+ - Severity: ${SEVERITY} | Confidence: ${CONFIDENCE}
7
+ - Skill: ${SKILL}
8
+ - Location: ${FILE_PATH}:${START_LINE}-${END_LINE}
9
+ - Description: ${DESCRIPTION}
10
+ - Verification hint: ${VERIFICATION}
11
+
12
+ ## Instructions
13
+ 1. Read the file at the reported location. Examine at least 50 lines of surrounding context.
14
+ 2. Trace data flow to/from the flagged code using Grep/Glob.
15
+ 3. Check if the issue is mitigated elsewhere (guards, validation, try/catch upstream).
16
+ 4. Check if the issue is actually reachable in practice.
17
+
18
+ Return your verdict as JSON:
19
+ {
20
+ "findingId": "${FINDING_ID}",
21
+ "verdict": "verified" or "rejected",
22
+ "confidence": "high" or "medium" or "low",
23
+ "reasoning": "2-3 sentence explanation",
24
+ "traceNotes": "What code paths you examined"
25
+ }
@@ -20,6 +20,35 @@ def run_cmd(
20
20
  )
21
21
 
22
22
 
23
def run_cmd_stdout(
    args: list[str], timeout: int = 30, cwd: str | None = None
) -> str | None:
    """Run a command via run_cmd and return its stripped stdout.

    Returns None when the command exits nonzero, times out, or the
    binary is missing.
    """
    try:
        result = run_cmd(args, timeout=timeout, cwd=cwd)
    except (subprocess.TimeoutExpired, FileNotFoundError):
        return None
    if result.returncode != 0:
        return None
    return result.stdout.strip()
32
+
33
+
34
+ def read_json(path: str) -> dict[str, Any] | None:
35
+ """Read a JSON file and return parsed object, or None on failure."""
36
+ if not os.path.exists(path):
37
+ return None
38
+ try:
39
+ with open(path) as f:
40
+ return json.load(f)
41
+ except (json.JSONDecodeError, OSError):
42
+ return None
43
+
44
+
45
def write_json(path: str, data: dict[str, Any]) -> None:
    """Serialize *data* to *path* as 2-space-indented JSON plus a trailing newline."""
    serialized = json.dumps(data, indent=2) + "\n"
    with open(path, "w") as fh:
        fh.write(serialized)
50
+
51
+
23
52
  def read_jsonl(path: str) -> list[dict[str, Any]]:
24
53
  """Read a JSONL file and return list of parsed objects."""
25
54
  entries: list[dict[str, Any]] = []
@@ -35,3 +64,36 @@ def read_jsonl(path: str) -> list[dict[str, Any]]:
35
64
  except json.JSONDecodeError:
36
65
  continue
37
66
  return entries
67
+
68
+
69
def severity_badge(severity: str) -> str:
    """Return a markdown-friendly severity indicator.

    Critical/high are bolded uppercase; medium/low are plain uppercase;
    info stays lowercase; unrecognized values pass through unchanged.
    """
    if severity in ("critical", "high"):
        return f"**{severity.upper()}**"
    if severity in ("medium", "low"):
        return severity.upper()
    if severity == "info":
        return "info"
    return severity
79
+
80
+
81
def pr_number_from_url(pr_url: str) -> str:
    """Extract the PR or issue number from a GitHub URL's last path segment."""
    # rpartition returns ("", "", s) when there is no slash, so a bare
    # number passes through unchanged.
    _, _, last_segment = pr_url.rstrip("/").rpartition("/")
    return last_segment
84
+
85
+
86
def ensure_github_label(name: str, color: str, description: str) -> None:
    """Create a GitHub label if it doesn't exist (idempotent).

    Errors are ignored: the exit status is never checked, so an
    "already exists" failure from gh is silently absorbed, and a
    timeout or missing gh binary is swallowed as well.
    """
    cmd = [
        "gh", "label", "create", name,
        "--color", color,
        "--description", description,
    ]
    try:
        subprocess.run(cmd, capture_output=True, timeout=15)
    except (subprocess.TimeoutExpired, FileNotFoundError):
        pass
@@ -0,0 +1,189 @@
1
+ #!/usr/bin/env python3
2
+ # /// script
3
+ # requires-python = ">=3.9"
4
+ # ///
5
+ """
6
+ Warden Sweep: Create tracking issue.
7
+
8
+ Creates a GitHub issue summarizing the sweep results after verification
9
+ but before patching. Gives every PR a parent to reference and gives
10
+ reviewers a single place to see the full picture.
11
+
12
+ Usage:
13
+ uv run create_issue.py <sweep-dir>
14
+
15
+ Stdout: JSON with issueUrl and issueNumber
16
+ Stderr: Progress lines
17
+
18
+ Idempotent: if issueUrl already exists in manifest, skips creation.
19
+ """
20
+ from __future__ import annotations
21
+
22
+ import argparse
23
+ import json
24
+ import os
25
+ import subprocess
26
+ import sys
27
+ from typing import Any
28
+
29
+ sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
30
+ from _utils import ( # noqa: E402
31
+ ensure_github_label,
32
+ pr_number_from_url,
33
+ read_json,
34
+ read_jsonl,
35
+ severity_badge,
36
+ write_json,
37
+ )
38
+
39
+
40
def build_issue_body(
    run_id: str,
    scan_index: list[dict[str, Any]],
    all_findings: list[dict[str, Any]],
    verified: list[dict[str, Any]],
    rejected: list[dict[str, Any]],
) -> str:
    """Render the tracking-issue markdown body from sweep data.

    Produces a metrics table, an optional verified-findings table, an
    optional list of skills that ran, and a generated-by footer.
    """
    files_scanned = sum(1 for e in scan_index if e.get("status") == "complete")
    error_entries = [e for e in scan_index if e.get("status") == "error"]
    files_timed_out = sum(1 for e in error_entries if e.get("error") == "timeout")
    files_errored = len(error_entries) - files_timed_out

    # Unique skill names across every scan entry.
    skills = {s for entry in scan_index for s in entry.get("skills", [])}

    lines = [
        f"## Warden Sweep `{run_id}`",
        "",
        "| Metric | Count |",
        "|--------|-------|",
        f"| Files scanned | {files_scanned} |",
        f"| Files timed out | {files_timed_out} |",
        f"| Files errored | {files_errored} |",
        f"| Total findings | {len(all_findings)} |",
        f"| Verified | {len(verified)} |",
        f"| Rejected | {len(rejected)} |",
        "",
    ]

    if verified:
        lines += [
            "### Verified Findings",
            "",
            "| Severity | Skill | File | Title |",
            "|----------|-------|------|-------|",
        ]
        for finding in verified:
            badge = severity_badge(finding.get("severity", "info"))
            path = finding.get("file", "")
            line_no = finding.get("startLine")
            location = f"{path}:{line_no}" if line_no else path
            skill_name = finding.get("skill", "")
            title_text = finding.get("title", "")
            lines.append(f"| {badge} | {skill_name} | `{location}` | {title_text} |")
        lines.append("")

    if skills:
        lines += ["### Skills Run", "", ", ".join(sorted(skills)), ""]

    lines.append("> Generated by Warden Sweep. PRs referencing this issue will appear below.")

    return "\n".join(lines) + "\n"
102
+
103
+
104
def create_github_issue(title: str, body: str) -> dict[str, Any]:
    """Create a GitHub issue with the warden label.

    Args:
        title: Issue title.
        body: Issue body markdown.

    Returns:
        Dict with ``issueUrl`` (str) and ``issueNumber`` (int).

    Raises:
        RuntimeError: If ``gh issue create`` fails, or its stdout cannot
            be parsed into an issue number.
    """
    # Label creation is idempotent; safe to call on every run.
    ensure_github_label("warden", "5319E7", "Automated fix from Warden Sweep")

    result = subprocess.run(
        [
            "gh", "issue", "create",
            "--label", "warden",
            "--title", title,
            "--body", body,
        ],
        capture_output=True,
        text=True,
        timeout=30,
    )

    if result.returncode != 0:
        raise RuntimeError(f"gh issue create failed: {result.stderr.strip()}")

    # gh prints the new issue's URL on stdout; the number is its last segment.
    issue_url = result.stdout.strip()
    try:
        issue_number = int(pr_number_from_url(issue_url))
    except (ValueError, IndexError) as err:
        # Chain the cause so the original parse failure stays visible in
        # the traceback instead of the noisy "During handling of the
        # above exception" implicit-context form.
        raise RuntimeError(
            f"Could not parse issue number from gh output: {issue_url}"
        ) from err

    return {"issueUrl": issue_url, "issueNumber": issue_number}
130
+
131
+
132
def main() -> None:
    """CLI entry point: create (or reuse) the sweep tracking issue.

    Emits a JSON object with issueUrl/issueNumber on stdout; progress
    goes to stderr. Exits 1 with an error JSON if the sweep dir is missing.
    """
    parser = argparse.ArgumentParser(
        description="Warden Sweep: Create tracking issue"
    )
    parser.add_argument("sweep_dir", help="Path to the sweep directory")
    opts = parser.parse_args()

    sweep_dir = opts.sweep_dir
    data_dir = os.path.join(sweep_dir, "data")
    manifest_path = os.path.join(data_dir, "manifest.json")

    if not os.path.isdir(sweep_dir):
        # Machine-readable error on stdout; nonzero exit signals hard failure.
        print(json.dumps({"error": f"Sweep directory not found: {sweep_dir}"}))
        sys.exit(1)

    manifest = read_json(manifest_path) or {}

    # Idempotency: a previously recorded issue short-circuits creation.
    existing_url = manifest.get("issueUrl")
    if existing_url:
        print(
            json.dumps(
                {
                    "issueUrl": existing_url,
                    "issueNumber": manifest.get("issueNumber", 0),
                }
            )
        )
        return

    run_id = manifest.get("runId", "unknown")

    def load(name: str) -> list[dict[str, Any]]:
        # All sweep data files live under <sweep-dir>/data as JSONL.
        return read_jsonl(os.path.join(data_dir, name))

    scan_index = load("scan-index.jsonl")
    all_findings = load("all-findings.jsonl")
    verified = load("verified.jsonl")
    rejected = load("rejected.jsonl")

    scanned = sum(1 for entry in scan_index if entry.get("status") == "complete")
    title = f"Warden Sweep {run_id}: {len(verified)} findings across {scanned} files"
    body = build_issue_body(run_id, scan_index, all_findings, verified, rejected)

    print("Creating tracking issue...", file=sys.stderr)
    result = create_github_issue(title, body)
    print(f"Created issue: {result['issueUrl']}", file=sys.stderr)

    # Persist the issue info so later phases (and reruns) can find it.
    manifest["issueUrl"] = result["issueUrl"]
    manifest["issueNumber"] = result["issueNumber"]
    manifest.setdefault("phases", {})["issue"] = "complete"
    write_json(manifest_path, manifest)

    print(json.dumps(result))
186
+
187
+
188
+ if __name__ == "__main__":
189
+ main()
@@ -20,22 +20,11 @@ from __future__ import annotations
20
20
 
21
21
  import argparse
22
22
  import json
23
- import subprocess
23
+ import os
24
24
  import sys
25
25
 
26
-
27
- def run_cmd(args: list[str], timeout: int = 30) -> str | None:
28
- """Run a command and return stdout, or None on failure."""
29
- try:
30
- result = subprocess.run(
31
- args,
32
- capture_output=True,
33
- text=True,
34
- timeout=timeout,
35
- )
36
- return result.stdout.strip() if result.returncode == 0 else None
37
- except (subprocess.TimeoutExpired, FileNotFoundError):
38
- return None
26
+ sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
27
+ from _utils import run_cmd_stdout as run_cmd # noqa: E402
39
28
 
40
29
 
41
30
  def get_top_authors(file_path: str, count: int = 2) -> list[str]:
@@ -86,6 +75,12 @@ def email_to_github_username(email: str) -> str | None:
86
75
  return output if output else None
87
76
 
88
77
 
78
def get_current_github_user() -> str | None:
    """Return the login of the currently authenticated GitHub user, or None."""
    # run_cmd returns None on failure; normalize empty output to None too.
    login = run_cmd(["gh", "api", "/user", "--jq", ".login"])
    return login or None
82
+
83
+
89
84
  def main():
90
85
  parser = argparse.ArgumentParser(
91
86
  description="Find top git contributors for PR reviewer assignment"
@@ -97,7 +92,11 @@ def main():
97
92
  )
98
93
  args = parser.parse_args()
99
94
 
100
- emails = get_top_authors(args.file_path, args.count)
95
+ current_user = get_current_github_user()
96
+
97
+ # Request extra candidates to compensate for self-exclusion
98
+ fetch_count = args.count + 1 if current_user else args.count
99
+ emails = get_top_authors(args.file_path, fetch_count)
101
100
  if not emails:
102
101
  print(json.dumps({"reviewers": [], "note": "No recent authors found"}))
103
102
  return
@@ -105,10 +104,10 @@ def main():
105
104
  reviewers: list[str] = []
106
105
  for email in emails:
107
106
  username = email_to_github_username(email)
108
- if username:
107
+ if username and username != current_user:
109
108
  reviewers.append(username)
110
109
 
111
- print(json.dumps({"reviewers": reviewers}))
110
+ print(json.dumps({"reviewers": reviewers[:args.count]}))
112
111
 
113
112
 
114
113
  if __name__ == "__main__":