@clipboard-health/ai-rules 2.6.0 → 2.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/ghClient.ts +59 -0
- package/lib/prClient.ts +58 -0
- package/package.json +1 -1
- package/scripts/sync.js +24 -3
- package/skills/cognito-user-analysis/SKILL.md +7 -7
- package/skills/cognito-user-analysis/docs/analysis-workflow.md +3 -3
- package/skills/cognito-user-analysis/docs/fix-workflow.md +2 -2
- package/skills/cognito-user-analysis/docs/setup.md +2 -2
- package/skills/cognito-user-analysis/scripts/cognito-analyze-duplicates.sh +35 -19
- package/skills/commit-push-pr/SKILL.md +2 -1
- package/skills/iterate-pr/SKILL.md +3 -3
- package/skills/simplify/SKILL.md +0 -2
- package/skills/unresolved-pr-comments/SKILL.md +1 -1
- package/skills/unresolved-pr-comments/{unresolvedPrComments.ts → scripts/unresolvedPrComments.ts} +7 -2
- package/skills/eng-artifact-review/SKILL.md +0 -89
- package/skills/unresolved-pr-comments/parseNitpicks.spec.ts +0 -480
- /package/skills/unresolved-pr-comments/{parseNitpicks.ts → scripts/parseNitpicks.ts} +0 -0
package/lib/ghClient.ts
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
import { spawnSync, type SpawnSyncReturns } from "node:child_process";
|
|
2
|
+
|
|
3
|
+
/** Shape of the JSON object printed to stdout when a script aborts via {@link outputError}. */
interface ErrorResult {
  error: string;
}
|
|
6
|
+
|
|
7
|
+
export function runGh(args: readonly string[], timeout = 30_000): SpawnSyncReturns<string> {
|
|
8
|
+
return spawnSync("gh", args, { encoding: "utf8", timeout });
|
|
9
|
+
}
|
|
10
|
+
|
|
11
|
+
export function outputError(message: string): never {
|
|
12
|
+
const result: ErrorResult = { error: message };
|
|
13
|
+
console.log(JSON.stringify(result));
|
|
14
|
+
process.exit(1);
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
export function validatePrerequisites(): void {
|
|
18
|
+
if (!isGhCliInstalled()) {
|
|
19
|
+
outputError("gh CLI not found. Install from https://cli.github.com");
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
if (!isGhAuthenticated()) {
|
|
23
|
+
outputError("Not authenticated with GitHub. Run: gh auth login");
|
|
24
|
+
}
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
export function executeGraphQL<T>(query: string, variables: Record<string, string | number>): T {
|
|
28
|
+
const args = ["api", "graphql", "-f", `query=${query}`];
|
|
29
|
+
|
|
30
|
+
for (const [key, value] of Object.entries(variables)) {
|
|
31
|
+
if (typeof value === "number") {
|
|
32
|
+
args.push("-F", `${key}=${value}`);
|
|
33
|
+
} else {
|
|
34
|
+
args.push("-f", `${key}=${value}`);
|
|
35
|
+
}
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
const result = runGh(args);
|
|
39
|
+
if (result.status !== 0) {
|
|
40
|
+
outputError(`GraphQL query failed: ${result.stderr}`);
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
try {
|
|
44
|
+
return JSON.parse(result.stdout) as T;
|
|
45
|
+
} catch {
|
|
46
|
+
outputError(`Failed to parse GraphQL response: ${result.stdout.slice(0, 200)}`);
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
function isGhCliInstalled(): boolean {
|
|
51
|
+
return runGh(["--version"]).status === 0;
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
function isGhAuthenticated(): boolean {
|
|
55
|
+
// `gh auth status` returns non-zero when *any* stored account is invalid,
|
|
56
|
+
// even if the active account (e.g. via GITHUB_TOKEN) works fine.
|
|
57
|
+
// Use `gh api user` to verify actual API access instead.
|
|
58
|
+
return runGh(["api", "user", "--jq", ".login"]).status === 0;
|
|
59
|
+
}
|
package/lib/prClient.ts
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
import { outputError, runGh } from "./ghClient.ts";
|
|
2
|
+
|
|
3
|
+
/** Owner login and repository name identifying a GitHub repository. */
export interface RepoInfo {
  owner: string;
  name: string;
}
|
|
7
|
+
|
|
8
|
+
/**
|
|
9
|
+
* Get PR number from argument or current branch.
|
|
10
|
+
* Exits with error if no PR is found.
|
|
11
|
+
*/
|
|
12
|
+
export function getPrNumber(prNumberArg: string | undefined): number {
|
|
13
|
+
if (prNumberArg) {
|
|
14
|
+
if (!/^\d+$/.test(prNumberArg)) {
|
|
15
|
+
outputError(`Invalid PR number: ${prNumberArg}`);
|
|
16
|
+
}
|
|
17
|
+
return Number.parseInt(prNumberArg, 10);
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
const prNumber = getPrNumberFromCurrentBranch();
|
|
21
|
+
if (!prNumber) {
|
|
22
|
+
outputError("No PR found for current branch. Provide PR number as argument.");
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
return prNumber;
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
/**
|
|
29
|
+
* Get repository owner and name from current git repository.
|
|
30
|
+
* Exits with error if not in a git repo with GitHub remote.
|
|
31
|
+
*/
|
|
32
|
+
export function getRepoInfo(): RepoInfo {
|
|
33
|
+
const result = runGh(["repo", "view", "--json", "owner,name"]);
|
|
34
|
+
if (result.status !== 0) {
|
|
35
|
+
outputError("Could not determine repository. Are you in a git repo with a GitHub remote?");
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
try {
|
|
39
|
+
const parsed = JSON.parse(result.stdout.trim()) as { name: string; owner: { login: string } };
|
|
40
|
+
return { name: parsed.name, owner: parsed.owner.login };
|
|
41
|
+
} catch {
|
|
42
|
+
outputError("Failed to parse repository info from gh CLI output.");
|
|
43
|
+
}
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
function getPrNumberFromCurrentBranch(): number | undefined {
|
|
47
|
+
const result = runGh(["pr", "view", "--json", "number"]);
|
|
48
|
+
if (result.status !== 0) {
|
|
49
|
+
return undefined;
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
try {
|
|
53
|
+
const parsed = JSON.parse(result.stdout.trim()) as { number: number };
|
|
54
|
+
return parsed.number;
|
|
55
|
+
} catch {
|
|
56
|
+
return undefined;
|
|
57
|
+
}
|
|
58
|
+
}
|
package/package.json
CHANGED
package/scripts/sync.js
CHANGED
|
@@ -22,21 +22,25 @@ async function sync() {
|
|
|
22
22
|
process.exit(1);
|
|
23
23
|
}
|
|
24
24
|
const rulesOutput = node_path_1.default.join(PATHS.projectRoot, ".rules");
|
|
25
|
-
const
|
|
25
|
+
const agentsOutput = node_path_1.default.join(PATHS.projectRoot, ".agents");
|
|
26
|
+
const skillsOutput = node_path_1.default.join(agentsOutput, "skills");
|
|
27
|
+
const libraryOutput = node_path_1.default.join(agentsOutput, "lib");
|
|
26
28
|
await Promise.all([
|
|
27
29
|
(0, promises_1.rm)(rulesOutput, { recursive: true, force: true }),
|
|
28
30
|
(0, promises_1.rm)(skillsOutput, { recursive: true, force: true }),
|
|
31
|
+
(0, promises_1.rm)(libraryOutput, { recursive: true, force: true }),
|
|
29
32
|
]);
|
|
30
|
-
const [, skillsCopied] = await Promise.all([
|
|
33
|
+
const [, skillsCopied, libraryCopied] = await Promise.all([
|
|
31
34
|
copyRuleFiles(ruleIds, rulesOutput),
|
|
32
35
|
copySkillFiles(skillsOutput),
|
|
36
|
+
copyLibraryFiles(libraryOutput),
|
|
33
37
|
]);
|
|
34
38
|
const agentsContent = await generateAgentsIndex(ruleIds);
|
|
35
39
|
await (0, promises_1.writeFile)(node_path_1.default.join(PATHS.projectRoot, constants_1.FILES.agents), agentsContent, "utf8");
|
|
36
40
|
await (0, promises_1.writeFile)(node_path_1.default.join(PATHS.projectRoot, constants_1.FILES.claude), "@AGENTS.md\n", "utf8");
|
|
37
41
|
console.log(`✅ @clipboard-health/ai-rules synced ${parsedArguments.profile} (${ruleIds.length} rules)`);
|
|
38
42
|
await appendOverlay(PATHS.projectRoot);
|
|
39
|
-
await formatOutputFiles(PATHS.projectRoot, { skillsCopied });
|
|
43
|
+
await formatOutputFiles(PATHS.projectRoot, { skillsCopied, libCopied: libraryCopied });
|
|
40
44
|
}
|
|
41
45
|
catch (error) {
|
|
42
46
|
// Log error but exit gracefully to avoid breaking installs
|
|
@@ -130,6 +134,20 @@ async function copySkillFiles(skillsOutput) {
|
|
|
130
134
|
throw error;
|
|
131
135
|
}
|
|
132
136
|
}
|
|
137
|
+
// Copy the packaged lib/ directory into the project output directory.
// Returns true when files were copied, false when the package ships no
// lib/ directory (ENOENT); any other copy failure is rethrown.
async function copyLibraryFiles(libraryOutput) {
    const source = node_path_1.default.join(PATHS.packageRoot, "lib");
    try {
        await (0, promises_1.cp)(source, libraryOutput, { recursive: true });
    }
    catch (error) {
        if (error.code === "ENOENT") {
            // Missing source is expected for packages without a lib/ — not an error.
            return false;
        }
        throw error;
    }
    console.log(`📋 Synced lib to .agents/lib/`);
    return true;
}
|
|
133
151
|
async function extractHeading(filePath) {
|
|
134
152
|
try {
|
|
135
153
|
const content = await (0, promises_1.readFile)(filePath, "utf8");
|
|
@@ -227,6 +245,9 @@ async function formatOutputFiles(projectRoot, options) {
|
|
|
227
245
|
if (options.skillsCopied) {
|
|
228
246
|
filesToFormat.push(node_path_1.default.join(projectRoot, ".agents", "skills"));
|
|
229
247
|
}
|
|
248
|
+
if (options.libCopied) {
|
|
249
|
+
filesToFormat.push(node_path_1.default.join(projectRoot, ".agents", "lib"));
|
|
250
|
+
}
|
|
230
251
|
const command = formatter === "oxfmt"
|
|
231
252
|
? ["npx", "oxfmt", ...filesToFormat]
|
|
232
253
|
: ["npx", "prettier", "--write", ...filesToFormat];
|
|
@@ -26,24 +26,24 @@ Analyze and fix duplicate Cognito users in clipboard-production by comparing aga
|
|
|
26
26
|
SKILL_DIR="<path-to-this-skill>"
|
|
27
27
|
|
|
28
28
|
# 1. Verify prerequisites
|
|
29
|
-
$
|
|
29
|
+
"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/check-prerequisites.sh"
|
|
30
30
|
|
|
31
31
|
# 2. Create input file (one sub per line)
|
|
32
32
|
echo "68e1e380-d0c1-7028-4256-3361fd833080" > subs.txt
|
|
33
33
|
|
|
34
34
|
# 3. Pipeline: lookup → find duplicates → analyze → fix
|
|
35
|
-
$
|
|
36
|
-
$
|
|
37
|
-
$
|
|
35
|
+
"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/cognito-lookup.sh" subs.txt results.csv
|
|
36
|
+
"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/cognito-find-duplicates.sh" results.csv duplicates.csv
|
|
37
|
+
"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/cognito-analyze-duplicates.sh" duplicates.csv analysis.csv
|
|
38
38
|
|
|
39
39
|
# 4. Review analysis.csv, then fix (ALWAYS dry-run first!)
|
|
40
|
-
$
|
|
41
|
-
$
|
|
40
|
+
"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/cognito-fix-duplicates.sh" analysis.csv --dry-run
|
|
41
|
+
"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/cognito-fix-duplicates.sh" analysis.csv
|
|
42
42
|
```
|
|
43
43
|
|
|
44
44
|
## Prerequisites
|
|
45
45
|
|
|
46
|
-
Run `scripts/check-prerequisites.sh` to verify. Requirements:
|
|
46
|
+
Run `"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/check-prerequisites.sh"` to verify. Requirements:
|
|
47
47
|
|
|
48
48
|
| Requirement | Setup |
|
|
49
49
|
| ------------------------------------- | ----------------------------------------------------------- |
|
|
@@ -5,7 +5,7 @@ Pipeline: `subs.txt → lookup → find-duplicates → analyze → analysis.csv`
|
|
|
5
5
|
## Step 1: Lookup Users
|
|
6
6
|
|
|
7
7
|
```bash
|
|
8
|
-
scripts/cognito-lookup.sh <input_file> [output_file]
|
|
8
|
+
"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/cognito-lookup.sh" <input_file> [output_file]
|
|
9
9
|
```
|
|
10
10
|
|
|
11
11
|
Converts Cognito subs to user details. Run `--help` for all options.
|
|
@@ -16,7 +16,7 @@ Converts Cognito subs to user details. Run `--help` for all options.
|
|
|
16
16
|
## Step 2: Find Duplicates
|
|
17
17
|
|
|
18
18
|
```bash
|
|
19
|
-
scripts/cognito-find-duplicates.sh <results_csv> [output_file]
|
|
19
|
+
"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/cognito-find-duplicates.sh" <results_csv> [output_file]
|
|
20
20
|
```
|
|
21
21
|
|
|
22
22
|
Searches for other accounts sharing phone or email. Run `--help` for all options.
|
|
@@ -26,7 +26,7 @@ Searches for other accounts sharing phone or email. Run `--help` for all options
|
|
|
26
26
|
## Step 3: Analyze Duplicates
|
|
27
27
|
|
|
28
28
|
```bash
|
|
29
|
-
scripts/cognito-analyze-duplicates.sh <duplicates_csv> [output_file]
|
|
29
|
+
"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/cognito-analyze-duplicates.sh" <duplicates_csv> [output_file]
|
|
30
30
|
```
|
|
31
31
|
|
|
32
32
|
Compares each duplicate against backend API. Run `--help` for all options.
|
|
@@ -5,7 +5,7 @@ Execute fixes after reviewing `analysis.csv`.
|
|
|
5
5
|
## Always Dry-Run First
|
|
6
6
|
|
|
7
7
|
```bash
|
|
8
|
-
scripts/cognito-fix-duplicates.sh analysis.csv --dry-run
|
|
8
|
+
"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/cognito-fix-duplicates.sh" analysis.csv --dry-run
|
|
9
9
|
```
|
|
10
10
|
|
|
11
11
|
Review output to confirm correct users will be deleted/updated.
|
|
@@ -13,7 +13,7 @@ Review output to confirm correct users will be deleted/updated.
|
|
|
13
13
|
## Execute
|
|
14
14
|
|
|
15
15
|
```bash
|
|
16
|
-
scripts/cognito-fix-duplicates.sh analysis.csv
|
|
16
|
+
"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/cognito-fix-duplicates.sh" analysis.csv
|
|
17
17
|
```
|
|
18
18
|
|
|
19
19
|
Run `--help` for all options.
|
|
@@ -3,7 +3,7 @@
|
|
|
3
3
|
## Quick Check
|
|
4
4
|
|
|
5
5
|
```bash
|
|
6
|
-
scripts/check-prerequisites.sh
|
|
6
|
+
"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/check-prerequisites.sh"
|
|
7
7
|
```
|
|
8
8
|
|
|
9
9
|
This validates all requirements and shows how to fix failures.
|
|
@@ -60,7 +60,7 @@ aws cognito-idp list-user-pools \
|
|
|
60
60
|
Pass the pool ID as a parameter to override the default:
|
|
61
61
|
|
|
62
62
|
```bash
|
|
63
|
-
scripts/cognito-lookup.sh subs.txt results.csv cbh-staging-platform us-west-2_XXXXX
|
|
63
|
+
"${CLAUDE_PLUGIN_ROOT:-.agents}/skills/cognito-user-analysis/scripts/cognito-lookup.sh" subs.txt results.csv cbh-staging-platform us-west-2_XXXXX
|
|
64
64
|
```
|
|
65
65
|
|
|
66
66
|
## Troubleshooting
|
|
@@ -67,7 +67,11 @@ if [[ ! -f "$TOKEN_FILE" ]]; then
|
|
|
67
67
|
exit 1
|
|
68
68
|
fi
|
|
69
69
|
|
|
70
|
-
TOKEN=$(
|
|
70
|
+
TOKEN=$(tr -d '\r\n' < "$TOKEN_FILE" | sed -E 's/^[[:space:]]+|[[:space:]]+$//g')
|
|
71
|
+
if [[ -z "$TOKEN" ]]; then
|
|
72
|
+
echo "Error: Token file '$TOKEN_FILE' is empty or whitespace-only" >&2
|
|
73
|
+
exit 1
|
|
74
|
+
fi
|
|
71
75
|
|
|
72
76
|
echo "sub,username,phone,email,cbh_user_id,status,created,last_modified,action,match_score,match_details,backend_phone,backend_email,backend_cbh_user_id,backend_name,duplicate_group" > "$OUTPUT_FILE"
|
|
73
77
|
|
|
@@ -185,32 +189,38 @@ tail -n +2 "$DUPLICATES_FILE" | while IFS=, read -r original_sub original_userna
|
|
|
185
189
|
all_subs+=("${dup_subs[$i]}")
|
|
186
190
|
done
|
|
187
191
|
|
|
188
|
-
backend_response=$(query_backend "$search_value")
|
|
192
|
+
backend_response=$(query_backend "$search_value") || backend_response=""
|
|
189
193
|
backend_phone=""
|
|
190
194
|
backend_email=""
|
|
191
195
|
backend_cbh_user_id=""
|
|
192
196
|
backend_name=""
|
|
193
197
|
|
|
194
|
-
if [[ -
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
198
|
+
if [[ -z "$backend_response" ]]; then
|
|
199
|
+
echo " Error: Backend lookup failed for '$search_value'; skipping group $group_id" >&2
|
|
200
|
+
continue
|
|
201
|
+
fi
|
|
202
|
+
|
|
203
|
+
error_status=$(echo "$backend_response" | jq -r '.statusCode // ""' 2>/dev/null)
|
|
204
|
+
if [[ -n "$error_status" && "$error_status" != "null" ]]; then
|
|
205
|
+
echo " Error: Backend API error: $(echo "$backend_response" | jq -r '.message // "Unknown"'); skipping group $group_id" >&2
|
|
206
|
+
continue
|
|
207
|
+
fi
|
|
208
|
+
|
|
209
|
+
backend_phone=$(echo "$backend_response" | jq -r '.list[0].phone // ""' 2>/dev/null) || backend_phone=""
|
|
210
|
+
backend_email=$(echo "$backend_response" | jq -r '.list[0].email // ""' 2>/dev/null) || backend_email=""
|
|
211
|
+
backend_cbh_user_id=$(echo "$backend_response" | jq -r '.list[0].userId // ""' 2>/dev/null) || backend_cbh_user_id=""
|
|
212
|
+
backend_name=$(echo "$backend_response" | jq -r '.list[0].name // ""' 2>/dev/null) || backend_name=""
|
|
213
|
+
|
|
214
|
+
if [[ -n "$backend_cbh_user_id" && "$backend_cbh_user_id" != "null" ]]; then
|
|
215
|
+
echo " Backend: $backend_name ($backend_email) - userId: $backend_cbh_user_id"
|
|
216
|
+
else
|
|
217
|
+
echo " Error: No backend user found for '$search_value'; skipping group $group_id" >&2
|
|
218
|
+
continue
|
|
210
219
|
fi
|
|
211
220
|
|
|
212
221
|
declare -A user_data
|
|
213
222
|
declare -A user_scores
|
|
223
|
+
resolved_usernames=()
|
|
214
224
|
best_score=-1
|
|
215
225
|
best_username=""
|
|
216
226
|
|
|
@@ -241,6 +251,7 @@ tail -n +2 "$DUPLICATES_FILE" | while IFS=, read -r original_sub original_userna
|
|
|
241
251
|
|
|
242
252
|
user_scores["${username}_score"]="$score"
|
|
243
253
|
user_scores["${username}_details"]="$match_details"
|
|
254
|
+
resolved_usernames+=("$username")
|
|
244
255
|
|
|
245
256
|
echo " User $username: score=$score ($match_details)"
|
|
246
257
|
|
|
@@ -256,9 +267,14 @@ tail -n +2 "$DUPLICATES_FILE" | while IFS=, read -r original_sub original_userna
|
|
|
256
267
|
fi
|
|
257
268
|
done
|
|
258
269
|
|
|
270
|
+
if [[ ${#resolved_usernames[@]} -eq 0 ]]; then
|
|
271
|
+
echo " Error: No Cognito user details resolved for group $group_id; skipping" >&2
|
|
272
|
+
continue
|
|
273
|
+
fi
|
|
274
|
+
|
|
259
275
|
echo " Decision: KEEP $best_username (score: $best_score)"
|
|
260
276
|
|
|
261
|
-
for username in "${
|
|
277
|
+
for username in "${resolved_usernames[@]}"; do
|
|
262
278
|
sub="${user_data["${username}_sub"]}"
|
|
263
279
|
phone="${user_data["${username}_phone"]}"
|
|
264
280
|
email="${user_data["${username}_email"]}"
|
|
@@ -20,4 +20,5 @@ Based on the above changes:
|
|
|
20
20
|
4. Check for an existing PR with `gh pr view`.
|
|
21
21
|
- No PR exists: Create with `gh pr create`. Title = commit subject line. Description = brief explanation of **why**, not what.
|
|
22
22
|
- PR exists: Report the URL and move on.
|
|
23
|
-
5. You have the capability to call multiple tools in a single response. You MUST do all of the above in a single message. Do not use any other tools or do anything else.
|
|
23
|
+
5. You have the capability to call multiple tools in a single response. You MUST do all of the above in a single message. Do not use any other tools or do anything else.
|
|
24
|
+
6. After tool calls complete, send one short final text response with the branch name and PR URL (or the reason PR creation was skipped).
|
|
@@ -28,7 +28,7 @@ Get the PR for the current branch:
|
|
|
28
28
|
|
|
29
29
|
**If PR exists:** Get unresolved comments data:
|
|
30
30
|
|
|
31
|
-
!`node "${CLAUDE_PLUGIN_ROOT}/skills/unresolved-pr-comments/unresolvedPrComments.ts" 2>/dev/null`
|
|
31
|
+
!`node "${CLAUDE_PLUGIN_ROOT:-.agents}/skills/unresolved-pr-comments/scripts/unresolvedPrComments.ts" 2>/dev/null`
|
|
32
32
|
|
|
33
33
|
Parse the JSON output and evaluate exit conditions.
|
|
34
34
|
|
|
@@ -75,7 +75,7 @@ Spawn a Task subagent with `subagent_type: "general-purpose"` using this prompt:
|
|
|
75
75
|
> 4. **Wait for CI**: !`rc=0; timeout 600 gh pr checks --watch || rc=$?; case $rc in 0|1|8|124) ;; *) exit $rc;; esac` (10 minute timeout; exit codes: 0=pass, 1=fail, 8=pending, 124=timeout are expected and handled in next step; other codes like 4=auth error are re-raised)
|
|
76
76
|
> 5. **Check CI Status**: Run `gh pr checks --json name,state,bucket` and parse the output
|
|
77
77
|
> - If any check has `bucket: "fail"`, invoke `core:fix-ci` via the Skill tool. Since you are running autonomously, do NOT wait for user approval — apply the fixes directly. Report what was fixed and exit.
|
|
78
|
-
> 6. **Check Comments**: Run `node "${CLAUDE_PLUGIN_ROOT}/skills/unresolved-pr-comments/unresolvedPrComments.ts"` and parse the JSON output
|
|
78
|
+
> 6. **Check Comments**: Run `node "${CLAUDE_PLUGIN_ROOT:-.agents}/skills/unresolved-pr-comments/scripts/unresolvedPrComments.ts"` and parse the JSON output
|
|
79
79
|
> - If unresolved comments or nitpicks exist:
|
|
80
80
|
> 1. Group comments by file path and read each file once (not per-comment)
|
|
81
81
|
> 2. If a file no longer exists, note the comment may be outdated and skip it
|
|
@@ -92,7 +92,7 @@ After the subagent completes:
|
|
|
92
92
|
|
|
93
93
|
1. Increment iteration counter
|
|
94
94
|
2. If no commits were made this iteration:
|
|
95
|
-
- Get unresolved comments by running: `node "${CLAUDE_PLUGIN_ROOT}/skills/unresolved-pr-comments/unresolvedPrComments.ts"`
|
|
95
|
+
- Get unresolved comments by running: `node "${CLAUDE_PLUGIN_ROOT:-.agents}/skills/unresolved-pr-comments/scripts/unresolvedPrComments.ts"`
|
|
96
96
|
- If unresolved comments remain, exit with: "Comments addressed, awaiting reviewer resolution. Run `/iterate-pr` after reviewer responds."
|
|
97
97
|
3. Report: "Iteration [N]/[max] complete. Checking state..."
|
|
98
98
|
4. Return to Step 2
|
package/skills/simplify/SKILL.md
CHANGED
|
@@ -13,7 +13,7 @@ Fetch and analyze unresolved review comments from a GitHub pull request.
|
|
|
13
13
|
Run the script to fetch PR comment data:
|
|
14
14
|
|
|
15
15
|
```bash
|
|
16
|
-
node "${CLAUDE_PLUGIN_ROOT}/skills/unresolved-pr-comments/unresolvedPrComments.ts" [pr-number]
|
|
16
|
+
node "${CLAUDE_PLUGIN_ROOT:-.agents}/skills/unresolved-pr-comments/scripts/unresolvedPrComments.ts" [pr-number]
|
|
17
17
|
```
|
|
18
18
|
|
|
19
19
|
If no PR number is provided, it uses the PR associated with the current branch.
|
package/skills/unresolved-pr-comments/{unresolvedPrComments.ts → scripts/unresolvedPrComments.ts}
RENAMED
|
@@ -1,6 +1,11 @@
|
|
|
1
1
|
#!/usr/bin/env node
|
|
2
|
-
import {
|
|
3
|
-
|
|
2
|
+
import {
|
|
3
|
+
executeGraphQL,
|
|
4
|
+
outputError,
|
|
5
|
+
runGh,
|
|
6
|
+
validatePrerequisites,
|
|
7
|
+
} from "../../../lib/ghClient.ts";
|
|
8
|
+
import { getPrNumber, getRepoInfo } from "../../../lib/prClient.ts";
|
|
4
9
|
|
|
5
10
|
import {
|
|
6
11
|
extractCodeScanningAlertNumber,
|
|
@@ -1,89 +0,0 @@
|
|
|
1
|
-
---
|
|
2
|
-
name: eng-artifact-review
|
|
3
|
-
description: >
|
|
4
|
-
Review engineering artifacts (PRs, design docs, RFCs) against project rules for violations and recommendations.
|
|
5
|
-
Use this skill whenever the user asks to review a PR, design doc, RFC, ADR, technical spec, or any engineering
|
|
6
|
-
document — even if they just say "review this" or paste a PR URL. Also use when the user asks to check something
|
|
7
|
-
"against the rules" or wants a "code review" of a document.
|
|
8
|
-
argument-hint: "[file-path-or-pr-url]"
|
|
9
|
-
---
|
|
10
|
-
|
|
11
|
-
# Engineering Artifact Reviewer
|
|
12
|
-
|
|
13
|
-
You are a Staff Software Engineer reviewing engineering artifacts. Produce structured, actionable findings.
|
|
14
|
-
|
|
15
|
-
## Arguments
|
|
16
|
-
|
|
17
|
-
- `$ARGUMENTS` - Path to the artifact file or PR URL to review
|
|
18
|
-
|
|
19
|
-
## Procedure
|
|
20
|
-
|
|
21
|
-
1. **Resolve input:**
|
|
22
|
-
- **PR URL or number**: Run `gh pr diff $ARGUMENTS` to get the diff. Run `gh pr view $ARGUMENTS --json title,body` for context. For large PRs, also run `gh pr view $ARGUMENTS --json files` to understand scope before diving in.
|
|
23
|
-
- **File path**: Read the file directly.
|
|
24
|
-
|
|
25
|
-
2. **Load relevant rules:** Read `.rules/` files, but only the ones relevant to the artifact. Use the "When to Read" column in AGENTS.md to decide — a design doc about logging needs `loggingObservability.md`, not `testing.md`. Skip rules that clearly don't apply.
|
|
26
|
-
|
|
27
|
-
3. **Read and analyze the artifact.** Understand what it's doing, what decisions it makes, and what it explicitly addresses. Then note:
|
|
28
|
-
- Flaws, oversights, or gaps
|
|
29
|
-
- Over-engineering or opportunities for simplification
|
|
30
|
-
- Reduced product scope that would enable faster delivery
|
|
31
|
-
- Hard things to change later (data models, public interfaces, high-interest technical debt)
|
|
32
|
-
- Focus on key decisions and critical parts, not cosmetics
|
|
33
|
-
- Play devil's advocate on important tradeoffs
|
|
34
|
-
- Prioritize suggestions that help deliver great features to customers faster
|
|
35
|
-
|
|
36
|
-
4. **Evaluate against rules.** For each relevant rule, determine one of:
|
|
37
|
-
- **VIOLATION**: The artifact clearly fails this rule.
|
|
38
|
-
- **PASS**: The artifact clearly meets this rule.
|
|
39
|
-
- **N/A**: The rule doesn't apply.
|
|
40
|
-
- **NEEDS_JUDGMENT**: You can't determine compliance with confidence. Don't force a call.
|
|
41
|
-
|
|
42
|
-
5. **Filter and rank:** Keep only VIOLATION and NEEDS_JUDGMENT items from rule evaluation, plus recommendations from step 3. Rank by impact, cap at 10. If you find 10+ high-impact violations, the artifact likely needs a rewrite — say so directly rather than listing every issue.
|
|
43
|
-
|
|
44
|
-
## Output
|
|
45
|
-
|
|
46
|
-
```markdown
|
|
47
|
-
# {filename or PR} Review
|
|
48
|
-
|
|
49
|
-
## Summary
|
|
50
|
-
|
|
51
|
-
{2-3 sentences. Lead with the most important finding. If the artifact is solid or needs a rewrite, say so.}
|
|
52
|
-
|
|
53
|
-
## Best practice review
|
|
54
|
-
|
|
55
|
-
Blocking: {count} | Recommendations: {count} | Needs judgment: {count}
|
|
56
|
-
|
|
57
|
-
### Blocking Issues
|
|
58
|
-
|
|
59
|
-
#### {Brief title}
|
|
60
|
-
|
|
61
|
-
**Finding:** {What specifically is wrong. Reference the exact location.}
|
|
62
|
-
**Fix:** {Concrete action. "Add X to Y" not "consider adding X."}
|
|
63
|
-
|
|
64
|
-
### Needs Human Judgment
|
|
65
|
-
|
|
66
|
-
#### {Brief title}
|
|
67
|
-
|
|
68
|
-
**Question:** {What you couldn't determine and why.}
|
|
69
|
-
|
|
70
|
-
### Recommendations
|
|
71
|
-
|
|
72
|
-
#### {Brief title}
|
|
73
|
-
|
|
74
|
-
**Finding:** {What could be improved.}
|
|
75
|
-
**Fix:** {Concrete suggestion.}
|
|
76
|
-
```
|
|
77
|
-
|
|
78
|
-
Omit any section that has zero items. Do not list passed rules unless the user asks.
|
|
79
|
-
|
|
80
|
-
## Guidelines
|
|
81
|
-
|
|
82
|
-
- **Reviewer, not linter:** If the artifact explicitly acknowledges a tradeoff and explains why it departs from a rule, that's good engineering judgment, not a violation.
|
|
83
|
-
- **Don't manufacture findings:** If the artifact is strong, zero blocking issues and one recommendation is a perfectly valid review.
|
|
84
|
-
- **Don't hallucinate references:** If you cite a line, section, or quote, it must exist. Say "the document does not address..." rather than inventing a reference.
|
|
85
|
-
- **Group related violations:** If three violations stem from the same root cause, report one finding with the root cause, not three separate items.
|
|
86
|
-
|
|
87
|
-
## Input
|
|
88
|
-
|
|
89
|
-
Artifact: $ARGUMENTS
|
|
@@ -1,480 +0,0 @@
|
|
|
1
|
-
import assert from "node:assert/strict";
|
|
2
|
-
import { describe, it } from "node:test";
|
|
3
|
-
|
|
4
|
-
import {
|
|
5
|
-
cleanCommentBody,
|
|
6
|
-
extractCodeScanningAlertNumber,
|
|
7
|
-
extractNitpickComments,
|
|
8
|
-
extractNitpicksFromReview,
|
|
9
|
-
extractNitpickSectionContent,
|
|
10
|
-
getLatestCodeRabbitReview,
|
|
11
|
-
parseCommentsFromFileSection,
|
|
12
|
-
type Review,
|
|
13
|
-
} from "./parseNitpicks.ts";
|
|
14
|
-
|
|
15
|
-
describe("cleanCommentBody", () => {
|
|
16
|
-
it("removes details elements", () => {
|
|
17
|
-
const input = "Some text <details><summary>Hidden</summary>Content</details> more text";
|
|
18
|
-
|
|
19
|
-
const actual = cleanCommentBody(input);
|
|
20
|
-
|
|
21
|
-
assert.equal(actual, "Some text more text");
|
|
22
|
-
});
|
|
23
|
-
|
|
24
|
-
it("escapes HTML angle brackets", () => {
|
|
25
|
-
const input = "Use <T> for generics";
|
|
26
|
-
|
|
27
|
-
const actual = cleanCommentBody(input);
|
|
28
|
-
|
|
29
|
-
assert.equal(actual, "Use <T> for generics");
|
|
30
|
-
});
|
|
31
|
-
|
|
32
|
-
it("trims whitespace", () => {
|
|
33
|
-
const input = " content ";
|
|
34
|
-
|
|
35
|
-
const actual = cleanCommentBody(input);
|
|
36
|
-
|
|
37
|
-
assert.equal(actual, "content");
|
|
38
|
-
});
|
|
39
|
-
|
|
40
|
-
it("handles nested details elements", () => {
|
|
41
|
-
const input =
|
|
42
|
-
"text <details><summary>1</summary><details><summary>2</summary>nested</details></details> end";
|
|
43
|
-
|
|
44
|
-
const actual = cleanCommentBody(input);
|
|
45
|
-
|
|
46
|
-
assert.equal(actual, "text end");
|
|
47
|
-
});
|
|
48
|
-
});
|
|
49
|
-
|
|
50
|
-
describe("extractNitpickSectionContent", () => {
|
|
51
|
-
it("extracts content from a simple nitpick section", () => {
|
|
52
|
-
const input = `
|
|
53
|
-
<details>
|
|
54
|
-
<summary>🧹 Nitpick comments (2)</summary><blockquote>
|
|
55
|
-
inner content here
|
|
56
|
-
</blockquote></details>
|
|
57
|
-
`;
|
|
58
|
-
|
|
59
|
-
const actual = extractNitpickSectionContent(input);
|
|
60
|
-
|
|
61
|
-
assert.equal(actual, "\ninner content here\n");
|
|
62
|
-
});
|
|
63
|
-
|
|
64
|
-
it("handles nested blockquotes", () => {
|
|
65
|
-
const input = `
|
|
66
|
-
<details>
|
|
67
|
-
<summary>🧹 Nitpick comments (1)</summary><blockquote>
|
|
68
|
-
<details><summary>file.ts (1)</summary><blockquote>
|
|
69
|
-
nested content
|
|
70
|
-
</blockquote></details>
|
|
71
|
-
</blockquote></details>
|
|
72
|
-
`;
|
|
73
|
-
|
|
74
|
-
const actual = extractNitpickSectionContent(input);
|
|
75
|
-
|
|
76
|
-
assert.ok(actual?.includes("nested content"));
|
|
77
|
-
assert.ok(actual?.includes("<blockquote>"));
|
|
78
|
-
});
|
|
79
|
-
|
|
80
|
-
it("returns undefined when no nitpick section exists", () => {
|
|
81
|
-
const input = "No nitpicks here";
|
|
82
|
-
|
|
83
|
-
const actual = extractNitpickSectionContent(input);
|
|
84
|
-
|
|
85
|
-
assert.equal(actual, undefined);
|
|
86
|
-
});
|
|
87
|
-
|
|
88
|
-
it("handles multiple file sections", () => {
|
|
89
|
-
const input = `
|
|
90
|
-
<details>
|
|
91
|
-
<summary>🧹 Nitpick comments (3)</summary><blockquote>
|
|
92
|
-
<details><summary>file1.ts (1)</summary><blockquote>content1</blockquote></details>
|
|
93
|
-
<details><summary>file2.ts (2)</summary><blockquote>content2</blockquote></details>
|
|
94
|
-
</blockquote></details>
|
|
95
|
-
`;
|
|
96
|
-
|
|
97
|
-
const actual = extractNitpickSectionContent(input);
|
|
98
|
-
|
|
99
|
-
assert.ok(actual?.includes("file1.ts"));
|
|
100
|
-
assert.ok(actual?.includes("file2.ts"));
|
|
101
|
-
assert.ok(actual?.includes("content1"));
|
|
102
|
-
assert.ok(actual?.includes("content2"));
|
|
103
|
-
});
|
|
104
|
-
|
|
105
|
-
it("handles CodeRabbit format with comment marker after", () => {
|
|
106
|
-
const input = `
|
|
107
|
-
<details>
|
|
108
|
-
<summary>🧹 Nitpick comments (1)</summary><blockquote>
|
|
109
|
-
<details><summary>test.ts (1)</summary><blockquote>content</blockquote></details>
|
|
110
|
-
</blockquote></details>
|
|
111
|
-
|
|
112
|
-
<!-- This is an auto-generated comment by CodeRabbit for review status -->
|
|
113
|
-
`;
|
|
114
|
-
|
|
115
|
-
const actual = extractNitpickSectionContent(input);
|
|
116
|
-
|
|
117
|
-
assert.ok(actual?.includes("content"));
|
|
118
|
-
assert.ok(!actual?.includes("auto-generated"));
|
|
119
|
-
});
|
|
120
|
-
});
|
|
121
|
-
|
|
122
|
-
describe("parseCommentsFromFileSection", () => {
|
|
123
|
-
const review: Review = {
|
|
124
|
-
author: { login: "coderabbitai" },
|
|
125
|
-
body: "",
|
|
126
|
-
createdAt: "2024-01-15T10:00:00Z",
|
|
127
|
-
};
|
|
128
|
-
|
|
129
|
-
it("parses a single comment", () => {
|
|
130
|
-
const fileContent = "`42-45`: **Fix naming convention.**\n\nUse camelCase for variables.";
|
|
131
|
-
|
|
132
|
-
const actual = parseCommentsFromFileSection(fileContent, "src/utils.ts", review);
|
|
133
|
-
|
|
134
|
-
assert.equal(actual.length, 1);
|
|
135
|
-
assert.deepEqual(actual[0], {
|
|
136
|
-
author: "coderabbitai",
|
|
137
|
-
body: "Fix naming convention.\n\nUse camelCase for variables.",
|
|
138
|
-
createdAt: "2024-01-15T10:00:00Z",
|
|
139
|
-
file: "src/utils.ts",
|
|
140
|
-
line: "42-45",
|
|
141
|
-
});
|
|
142
|
-
});
|
|
143
|
-
|
|
144
|
-
it("parses multiple comments", () => {
|
|
145
|
-
const fileContent = `\`10\`: **First issue.**
|
|
146
|
-
|
|
147
|
-
Description 1
|
|
148
|
-
|
|
149
|
-
---
|
|
150
|
-
|
|
151
|
-
\`20-25\`: **Second issue.**
|
|
152
|
-
|
|
153
|
-
Description 2`;
|
|
154
|
-
|
|
155
|
-
const actual = parseCommentsFromFileSection(fileContent, "file.ts", review);
|
|
156
|
-
|
|
157
|
-
assert.equal(actual.length, 2);
|
|
158
|
-
assert.equal(actual[0].line, "10");
|
|
159
|
-
assert.equal(actual[1].line, "20-25");
|
|
160
|
-
});
|
|
161
|
-
|
|
162
|
-
it("handles deleted user", () => {
|
|
163
|
-
const reviewWithDeletedUser: Review = {
|
|
164
|
-
author: null,
|
|
165
|
-
body: "",
|
|
166
|
-
createdAt: "2024-01-15T10:00:00Z",
|
|
167
|
-
};
|
|
168
|
-
const fileContent = "`1`: **Issue.**\n\nDescription";
|
|
169
|
-
|
|
170
|
-
const actual = parseCommentsFromFileSection(fileContent, "file.ts", reviewWithDeletedUser);
|
|
171
|
-
|
|
172
|
-
assert.equal(actual[0].author, "deleted-user");
|
|
173
|
-
});
|
|
174
|
-
|
|
175
|
-
it("removes nested details from comment body", () => {
|
|
176
|
-
const fileContent =
|
|
177
|
-
"`1`: **Issue.**\n\nText <details><summary>More</summary>Hidden</details> end";
|
|
178
|
-
|
|
179
|
-
const actual = parseCommentsFromFileSection(fileContent, "file.ts", review);
|
|
180
|
-
|
|
181
|
-
assert.equal(actual[0].body, "Issue.\n\nText end");
|
|
182
|
-
});
|
|
183
|
-
});
|
|
184
|
-
|
|
185
|
-
describe("extractNitpicksFromReview", () => {
|
|
186
|
-
it("extracts nitpicks for dotless filenames", () => {
|
|
187
|
-
const review: Review = {
|
|
188
|
-
author: { login: "coderabbitai" },
|
|
189
|
-
body: `<details>
|
|
190
|
-
<summary>🧹 Nitpick comments (1)</summary><blockquote>
|
|
191
|
-
|
|
192
|
-
<details>
|
|
193
|
-
<summary>Dockerfile (1)</summary><blockquote>
|
|
194
|
-
|
|
195
|
-
\`7\`: **Pin base image.**
|
|
196
|
-
|
|
197
|
-
Use a digest for reproducibility.
|
|
198
|
-
|
|
199
|
-
</blockquote></details>
|
|
200
|
-
|
|
201
|
-
</blockquote></details>`,
|
|
202
|
-
createdAt: "2024-01-15T10:00:00Z",
|
|
203
|
-
};
|
|
204
|
-
|
|
205
|
-
const actual = extractNitpicksFromReview(review);
|
|
206
|
-
|
|
207
|
-
assert.equal(actual.length, 1);
|
|
208
|
-
assert.equal(actual[0].file, "Dockerfile");
|
|
209
|
-
assert.equal(actual[0].line, "7");
|
|
210
|
-
assert.ok(actual[0].body.includes("Pin base image."));
|
|
211
|
-
});
|
|
212
|
-
|
|
213
|
-
it("returns empty array when review has no nitpick section", () => {
|
|
214
|
-
const review: Review = {
|
|
215
|
-
author: { login: "coderabbitai" },
|
|
216
|
-
body: "Just a regular review without nitpicks",
|
|
217
|
-
createdAt: "2024-01-15T10:00:00Z",
|
|
218
|
-
};
|
|
219
|
-
|
|
220
|
-
const actual = extractNitpicksFromReview(review);
|
|
221
|
-
|
|
222
|
-
assert.deepEqual(actual, []);
|
|
223
|
-
});
|
|
224
|
-
|
|
225
|
-
it("extracts nitpicks from a CodeRabbit review", () => {
|
|
226
|
-
const review: Review = {
|
|
227
|
-
author: { login: "coderabbitai" },
|
|
228
|
-
body: `**Actionable comments posted: 1**
|
|
229
|
-
|
|
230
|
-
<details>
|
|
231
|
-
<summary>🧹 Nitpick comments (1)</summary><blockquote>
|
|
232
|
-
|
|
233
|
-
<details>
|
|
234
|
-
<summary>src/utils.ts (1)</summary><blockquote>
|
|
235
|
-
|
|
236
|
-
\`42-45\`: **Consider using const.**
|
|
237
|
-
|
|
238
|
-
The variable is never reassigned.
|
|
239
|
-
|
|
240
|
-
</blockquote></details>
|
|
241
|
-
|
|
242
|
-
</blockquote></details>
|
|
243
|
-
|
|
244
|
-
<!-- This is an auto-generated comment by CodeRabbit -->`,
|
|
245
|
-
createdAt: "2024-01-15T10:00:00Z",
|
|
246
|
-
};
|
|
247
|
-
|
|
248
|
-
const actual = extractNitpicksFromReview(review);
|
|
249
|
-
|
|
250
|
-
assert.equal(actual.length, 1);
|
|
251
|
-
assert.equal(actual[0].file, "src/utils.ts");
|
|
252
|
-
assert.equal(actual[0].line, "42-45");
|
|
253
|
-
assert.ok(actual[0].body.includes("Consider using const."));
|
|
254
|
-
});
|
|
255
|
-
|
|
256
|
-
it("extracts nitpicks from multiple files", () => {
|
|
257
|
-
const review: Review = {
|
|
258
|
-
author: { login: "coderabbitai" },
|
|
259
|
-
body: `<details>
|
|
260
|
-
<summary>🧹 Nitpick comments (2)</summary><blockquote>
|
|
261
|
-
|
|
262
|
-
<details>
|
|
263
|
-
<summary>file1.ts (1)</summary><blockquote>
|
|
264
|
-
|
|
265
|
-
\`10\`: **Issue 1.**
|
|
266
|
-
|
|
267
|
-
Description 1
|
|
268
|
-
|
|
269
|
-
</blockquote></details>
|
|
270
|
-
<details>
|
|
271
|
-
<summary>file2.ts (1)</summary><blockquote>
|
|
272
|
-
|
|
273
|
-
\`20\`: **Issue 2.**
|
|
274
|
-
|
|
275
|
-
Description 2
|
|
276
|
-
|
|
277
|
-
</blockquote></details>
|
|
278
|
-
|
|
279
|
-
</blockquote></details>`,
|
|
280
|
-
createdAt: "2024-01-15T10:00:00Z",
|
|
281
|
-
};
|
|
282
|
-
|
|
283
|
-
const actual = extractNitpicksFromReview(review);
|
|
284
|
-
|
|
285
|
-
assert.equal(actual.length, 2);
|
|
286
|
-
assert.equal(actual[0].file, "file1.ts");
|
|
287
|
-
assert.equal(actual[1].file, "file2.ts");
|
|
288
|
-
});
|
|
289
|
-
|
|
290
|
-
it("extracts nitpicks when comments have nested details elements", () => {
|
|
291
|
-
// Real CodeRabbit format: comments have nested <details> for proposed fixes
|
|
292
|
-
const review: Review = {
|
|
293
|
-
author: { login: "coderabbitai" },
|
|
294
|
-
body: `<details>
|
|
295
|
-
<summary>🧹 Nitpick comments (3)</summary><blockquote>
|
|
296
|
-
|
|
297
|
-
<details>
|
|
298
|
-
<summary>src/api.ts (2)</summary><blockquote>
|
|
299
|
-
|
|
300
|
-
\`47-88\`: **Fix markdown formatting.**
|
|
301
|
-
|
|
302
|
-
Several issues here.
|
|
303
|
-
|
|
304
|
-
<details>
|
|
305
|
-
<summary>📝 Proposed fix</summary>
|
|
306
|
-
|
|
307
|
-
\`\`\`diff
|
|
308
|
-
-old
|
|
309
|
-
+new
|
|
310
|
-
\`\`\`
|
|
311
|
-
|
|
312
|
-
</details>
|
|
313
|
-
|
|
314
|
-
---
|
|
315
|
-
|
|
316
|
-
\`89-107\`: **Fix heading format.**
|
|
317
|
-
|
|
318
|
-
Use proper headings.
|
|
319
|
-
|
|
320
|
-
<details>
|
|
321
|
-
<summary>📝 Proposed fix</summary>
|
|
322
|
-
|
|
323
|
-
\`\`\`diff
|
|
324
|
-
-bad
|
|
325
|
-
+good
|
|
326
|
-
\`\`\`
|
|
327
|
-
|
|
328
|
-
</details>
|
|
329
|
-
|
|
330
|
-
</blockquote></details>
|
|
331
|
-
<details>
|
|
332
|
-
<summary>src/utils.ts (1)</summary><blockquote>
|
|
333
|
-
|
|
334
|
-
\`10\`: **Add type annotation.**
|
|
335
|
-
|
|
336
|
-
Missing type.
|
|
337
|
-
|
|
338
|
-
</blockquote></details>
|
|
339
|
-
|
|
340
|
-
</blockquote></details>`,
|
|
341
|
-
createdAt: "2024-01-15T10:00:00Z",
|
|
342
|
-
};
|
|
343
|
-
|
|
344
|
-
const actual = extractNitpicksFromReview(review);
|
|
345
|
-
|
|
346
|
-
assert.equal(actual.length, 3);
|
|
347
|
-
assert.equal(actual[0].file, "src/api.ts");
|
|
348
|
-
assert.equal(actual[0].line, "47-88");
|
|
349
|
-
assert.equal(actual[1].file, "src/api.ts");
|
|
350
|
-
assert.equal(actual[1].line, "89-107");
|
|
351
|
-
assert.equal(actual[2].file, "src/utils.ts");
|
|
352
|
-
assert.equal(actual[2].line, "10");
|
|
353
|
-
});
|
|
354
|
-
});
|
|
355
|
-
|
|
356
|
-
describe("getLatestCodeRabbitReview", () => {
|
|
357
|
-
it("returns undefined when no reviews exist", () => {
|
|
358
|
-
const actual = getLatestCodeRabbitReview([]);
|
|
359
|
-
|
|
360
|
-
assert.equal(actual, undefined);
|
|
361
|
-
});
|
|
362
|
-
|
|
363
|
-
it("returns undefined when no CodeRabbit reviews with nitpicks exist", () => {
|
|
364
|
-
const reviews: Review[] = [
|
|
365
|
-
{ author: { login: "human" }, body: "LGTM", createdAt: "2024-01-15T10:00:00Z" },
|
|
366
|
-
{
|
|
367
|
-
author: { login: "coderabbitai" },
|
|
368
|
-
body: "No issues found",
|
|
369
|
-
createdAt: "2024-01-15T11:00:00Z",
|
|
370
|
-
},
|
|
371
|
-
];
|
|
372
|
-
|
|
373
|
-
const actual = getLatestCodeRabbitReview(reviews);
|
|
374
|
-
|
|
375
|
-
assert.equal(actual, undefined);
|
|
376
|
-
});
|
|
377
|
-
|
|
378
|
-
it("returns the latest CodeRabbit review with nitpicks", () => {
|
|
379
|
-
const reviews: Review[] = [
|
|
380
|
-
{
|
|
381
|
-
author: { login: "coderabbitai" },
|
|
382
|
-
body: "🧹 Nitpick comments (1) - older",
|
|
383
|
-
createdAt: "2024-01-15T10:00:00Z",
|
|
384
|
-
},
|
|
385
|
-
{
|
|
386
|
-
author: { login: "coderabbitai" },
|
|
387
|
-
body: "🧹 Nitpick comments (2) - newer",
|
|
388
|
-
createdAt: "2024-01-15T12:00:00Z",
|
|
389
|
-
},
|
|
390
|
-
{
|
|
391
|
-
author: { login: "coderabbitai" },
|
|
392
|
-
body: "🧹 Nitpick comments (1) - middle",
|
|
393
|
-
createdAt: "2024-01-15T11:00:00Z",
|
|
394
|
-
},
|
|
395
|
-
];
|
|
396
|
-
|
|
397
|
-
const actual = getLatestCodeRabbitReview(reviews);
|
|
398
|
-
|
|
399
|
-
assert.ok(actual?.body.includes("newer"));
|
|
400
|
-
});
|
|
401
|
-
|
|
402
|
-
it("ignores reviews from other authors", () => {
|
|
403
|
-
const reviews: Review[] = [
|
|
404
|
-
{
|
|
405
|
-
author: { login: "human" },
|
|
406
|
-
body: "🧹 Nitpick comments (1) - not coderabbit",
|
|
407
|
-
createdAt: "2024-01-15T12:00:00Z",
|
|
408
|
-
},
|
|
409
|
-
{
|
|
410
|
-
author: { login: "coderabbitai" },
|
|
411
|
-
body: "🧹 Nitpick comments (1) - coderabbit",
|
|
412
|
-
createdAt: "2024-01-15T10:00:00Z",
|
|
413
|
-
},
|
|
414
|
-
];
|
|
415
|
-
|
|
416
|
-
const actual = getLatestCodeRabbitReview(reviews);
|
|
417
|
-
|
|
418
|
-
assert.ok(actual?.body.includes("coderabbit"));
|
|
419
|
-
assert.equal(actual?.createdAt, "2024-01-15T10:00:00Z");
|
|
420
|
-
});
|
|
421
|
-
});
|
|
422
|
-
|
|
423
|
-
describe("extractNitpickComments", () => {
|
|
424
|
-
it("returns empty array when no reviews exist", () => {
|
|
425
|
-
const actual = extractNitpickComments([]);
|
|
426
|
-
|
|
427
|
-
assert.deepEqual(actual, []);
|
|
428
|
-
});
|
|
429
|
-
|
|
430
|
-
it("extracts nitpicks from the latest CodeRabbit review", () => {
|
|
431
|
-
const reviews: Review[] = [
|
|
432
|
-
{
|
|
433
|
-
author: { login: "coderabbitai" },
|
|
434
|
-
body: `<details>
|
|
435
|
-
<summary>🧹 Nitpick comments (1)</summary><blockquote>
|
|
436
|
-
<details><summary>old.ts (1)</summary><blockquote>
|
|
437
|
-
\`1\`: **Old issue.**
|
|
438
|
-
|
|
439
|
-
Old
|
|
440
|
-
</blockquote></details>
|
|
441
|
-
</blockquote></details>`,
|
|
442
|
-
createdAt: "2024-01-15T10:00:00Z",
|
|
443
|
-
},
|
|
444
|
-
{
|
|
445
|
-
author: { login: "coderabbitai" },
|
|
446
|
-
body: `<details>
|
|
447
|
-
<summary>🧹 Nitpick comments (1)</summary><blockquote>
|
|
448
|
-
<details><summary>new.ts (1)</summary><blockquote>
|
|
449
|
-
\`2\`: **New issue.**
|
|
450
|
-
|
|
451
|
-
New
|
|
452
|
-
</blockquote></details>
|
|
453
|
-
</blockquote></details>`,
|
|
454
|
-
createdAt: "2024-01-15T12:00:00Z",
|
|
455
|
-
},
|
|
456
|
-
];
|
|
457
|
-
|
|
458
|
-
const actual = extractNitpickComments(reviews);
|
|
459
|
-
|
|
460
|
-
assert.equal(actual.length, 1);
|
|
461
|
-
assert.equal(actual[0].file, "new.ts");
|
|
462
|
-
});
|
|
463
|
-
});
|
|
464
|
-
|
|
465
|
-
describe("extractCodeScanningAlertNumber", () => {
|
|
466
|
-
const testCases = [
|
|
467
|
-
{ input: "https://github.com/org/repo/security/code-scanning/123", expected: 123 },
|
|
468
|
-
{ input: "See /code-scanning/456 for details", expected: 456 },
|
|
469
|
-
{ input: "No alert here", expected: undefined },
|
|
470
|
-
{ input: "/code-scanning/", expected: undefined },
|
|
471
|
-
];
|
|
472
|
-
|
|
473
|
-
for (const { input, expected } of testCases) {
|
|
474
|
-
it(`returns ${expected} for "${input.slice(0, 40)}..."`, () => {
|
|
475
|
-
const actual = extractCodeScanningAlertNumber(input);
|
|
476
|
-
|
|
477
|
-
assert.equal(actual, expected);
|
|
478
|
-
});
|
|
479
|
-
}
|
|
480
|
-
});
|
|
File without changes
|