rhachet-roles-ehmpathy 1.15.15 → 1.15.17
- package/dist/domain.roles/mechanic/briefs/practices/code.test/frames.behavior/howto.write-bdd.[lesson].pt2.md +222 -0
- package/dist/domain.roles/mechanic/briefs/practices/work.flow/diagnose/howto.bisect.[lesson].md +74 -0
- package/dist/domain.roles/mechanic/inits/init.claude.permissions.jsonc +16 -5
- package/dist/domain.roles/mechanic/skills/claude.tools/mvsafe.sh +103 -0
- package/dist/domain.roles/mechanic/skills/claude.tools/rmsafe.sh +97 -0
- package/dist/domain.roles/mechanic/skills/gh.workflow.logs.sh +213 -0
- package/package.json +1 -1

package/dist/domain.roles/mechanic/briefs/practices/code.test/frames.behavior/howto.write-bdd.[lesson].pt2.md
ADDED

@@ -0,0 +1,222 @@

# how to write bdd-style acceptance tests

## structure

use `given`, `when`, `then` from `test-fns` to structure tests:

```ts
import { given, when, then, useBeforeAll } from 'test-fns';

describe('featureName', () => {
  given('[case1] scenario description', () => {
    when('[t0] before any changes', () => {
      then('precondition holds', async () => { ... });
      then('another precondition holds', async () => { ... });
    });

    when('[t1] target operation is executed', () => {
      then('expected outcome', async () => { ... });
    });

    when('[t2] alternate operation is executed', () => {
      then('alternate outcome', async () => { ... });
    });
  });
});
```

---

## labels

### `[caseN]` for given blocks

each `given` block should have a unique case label:

```ts
given('[case1] valid inputs', () => { ... });
given('[case2] invalid inputs', () => { ... });
given('[case3] edge case scenario', () => { ... });
```

### `[tN]` for when blocks

each `when` block should have a time index label:

- `[t0]` = precondition checks / before any changes
- `[t1]` = first target operation
- `[t2]` = second target operation
- etc.

```ts
given('[case1] prose-author example repo', () => {
  when('[t0] before any changes', () => {
    then('rules glob matches 2 files', ...);
    then('chapters glob matches 3 files', ...);
  });

  when('[t1] stepReview on clean chapter', () => {
    then('review contains no blockers', ...);
  });

  when('[t2] stepReview on dirty chapter', () => {
    then('review contains blockers', ...);
  });
});
```

---

## principles

### consolidate related tests

don't split related scenarios across multiple `given` blocks:

```ts
// ❌ bad - fragmented
given('[case8] prose-author rule enumeration', () => { ... });
given('[case9] prose-author chapter enumeration', () => { ... });
given('[case10] prose-author review works', () => { ... });

// ✅ good - consolidated
given('[case8] prose-author example repo', () => {
  when('[t0] before any changes', () => {
    then('rules glob matches', ...);
    then('chapters glob matches', ...);
  });
  when('[t1] stepReview on clean chapter', () => { ... });
  when('[t2] stepReview on dirty chapter', () => { ... });
});
```

### when describes state/time, not action

```ts
// ❌ bad - describes action
when('[t0] assets are checked', () => { ... });

// ✅ good - describes state/time
when('[t0] before any changes', () => { ... });
```

### use afterEach for cleanup

```ts
// ❌ bad - inline cleanup
then('creates output file', async () => {
  const result = await doThing();
  await fs.rm(outputPath); // cleanup inside then
  expect(result).toBeDefined();
});

// ✅ good - afterEach cleanup
when('[t1] operation runs', () => {
  const outputPath = path.join(os.tmpdir(), 'output.md');
  afterEach(async () => fs.rm(outputPath, { force: true }));

  then('creates output file', async () => {
    const result = await doThing();
    expect(result).toBeDefined();
  });
});
```

### preconditions shouldn't expect errors

```ts
// ❌ bad - precondition expects error then checks it's not a validation error
then('does not throw validation errors', async () => {
  const error = await getError(doThing());
  expect(error.message).not.toContain('validation');
});

// ✅ good - precondition checks assets directly
then('rules glob matches 2 files', async () => {
  const files = await enumFiles({ glob: 'rules/*.md' });
  expect(files).toHaveLength(2);
});
```

### use useBeforeAll for shared setup

```ts
given('[case1] scenario with shared setup', () => {
  const scene = useBeforeAll(async () => {
    const entity = await createEntity();
    return { entity };
  });

  when('[t1] operation runs', () => {
    then('uses shared entity', async () => {
      const result = await doThing({ id: scene.entity.id });
      expect(result).toBeDefined();
    });
  });
});
```

---

## complete example

```ts
import { given, when, then, useBeforeAll } from 'test-fns';
import * as fs from 'fs/promises';
import * as path from 'path';
import * as os from 'os';

describe('stepReview', () => {
  given('[case1] prose-author example repo', () => {
    when('[t0] before any changes', () => {
      then('rules glob matches 2 prose style rules', async () => {
        const ruleFiles = await enumFilesFromGlob({
          glob: '.agent/**/rules/*.md',
          cwd: ASSETS_PROSE,
        });
        expect(ruleFiles).toHaveLength(2);
      });

      then('chapters glob matches 3 chapters', async () => {
        const chapterFiles = await enumFilesFromGlob({
          glob: 'chapters/*.md',
          cwd: ASSETS_PROSE,
        });
        expect(chapterFiles).toHaveLength(3);
      });
    });

    when('[t1] stepReview on chapter2.fixed.md', () => {
      const outputPath = path.join(os.tmpdir(), 'review-fixed.md');
      afterEach(async () => fs.rm(outputPath, { force: true }));

      then('review contains no blockers', async () => {
        const result = await stepReview({
          rules: '.agent/**/rules/*.md',
          paths: 'chapters/chapter2.fixed.md',
          output: outputPath,
          mode: 'hard',
          cwd: ASSETS_PROSE,
        });
        expect(result.review.formatted.toLowerCase()).not.toContain('blocker');
      });
    });

    when('[t2] stepReview on chapter2.md', () => {
      const outputPath = path.join(os.tmpdir(), 'review-unfixed.md');
      afterEach(async () => fs.rm(outputPath, { force: true }));

      then('review contains blockers for gerund violations', async () => {
        const result = await stepReview({
          rules: '.agent/**/rules/*.md',
          paths: 'chapters/chapter2.md',
          output: outputPath,
          mode: 'hard',
          cwd: ASSETS_PROSE,
        });
        expect(result.review.formatted.toLowerCase()).toContain('blocker');
      });
    });
  });
});
```

package/dist/domain.roles/mechanic/briefs/practices/work.flow/diagnose/howto.bisect.[lesson].md
ADDED

@@ -0,0 +1,74 @@

# howto: diagnose via bisection

## .what

use binary search (bisection) to isolate the root cause of a defect in O(log n) steps instead of O(n)

## .why

- linear search through suspects wastes time
- bisection cuts the search space in half with each test
- works for code changes, data inputs, config options, and time ranges
- essential skill for debugging regressions and intermittent failures

## .how

### the pattern

1. define the search space (lines, inputs, configs, changes)
2. find a known-good state and a known-bad state
3. test the midpoint
4. if midpoint is good → defect is in the second half
5. if midpoint is bad → defect is in the first half
6. repeat until you find the exact boundary
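
the six steps above reduce to a short loop. a minimal sketch in TypeScript (illustrative, not part of the lesson's tooling): `suspects` is the ordered search space and `failsWith` is a hypothetical probe you supply, answering "does the defect reproduce at this point?":

```ts
// bisect an ordered search space down to the first bad suspect
// assumes the space is monotone: all good suspects come before all bad ones
const bisect = async <T>(
  suspects: T[],
  failsWith: (suspect: T) => Promise<boolean>, // true = reproduces the defect
): Promise<T> => {
  let good = -1; // highest index known to be good (virtual "before the list")
  let bad = suspects.length - 1; // lowest index known to be bad (assumed: the last)
  while (bad - good > 1) {
    const mid = Math.floor((good + bad) / 2);
    if (await failsWith(suspects[mid])) bad = mid; // defect is at or before mid
    else good = mid; // defect is after mid
  }
  return suspects[bad]; // the exact boundary: first suspect that fails
};
```

each probe halves the interval between last-known-good and first-known-bad, which is where the O(log n) bound comes from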

### code bisection (for logic errors)

when a function produces wrong output:

```ts
// suspect: 10 lines of logic
const result = complexTransform(input);

// bisect: comment out bottom half, test
// if still broken → defect in top half
// if fixed → defect in bottom half
// repeat until isolated to 1-2 lines
```

### input bisection (for data issues)

when processing fails on large input:

```ts
// 1000 records fail; which one causes it?
const midpoint = Math.floor(records.length / 2);
const firstHalf = records.slice(0, midpoint);
const secondHalf = records.slice(midpoint);

// test each half separately
// defect is in the half that fails
// repeat until you find the single bad record
```

### config bisection (for env issues)

when config changes break behavior:

1. list all config differences between working and broken
2. apply half the changes
3. test → narrow to the half that breaks
4. repeat until isolated to single config key
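
for illustration, the same loop applies when the suspects are config overrides rather than commits. a hypothetical sketch, assuming exactly one override is responsible and that `breaksWith` rebuilds/runs with a subset of the broken env's overrides applied on top of the working config:

```ts
// bisect config differences between a working and a broken environment
const findBadOverride = async (
  overrides: Array<{ key: string; value: string }>,
  breaksWith: (applied: Array<{ key: string; value: string }>) => Promise<boolean>,
): Promise<string> => {
  let candidates = overrides;
  while (candidates.length > 1) {
    const half = candidates.slice(0, Math.ceil(candidates.length / 2));
    // keep whichever half still reproduces the break
    candidates = (await breaksWith(half)) ? half : candidates.slice(half.length);
  }
  return candidates[0].key; // the single config key at fault
};
```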

## .when to use

- regression appeared but unclear which change caused it
- feature works with small data but fails with large data
- behavior differs between environments
- any scenario with an "it used to work" vs "now it's broken" boundary

## .key insight

> the power of bisection: 1000 suspects → 10 tests max

always prefer structured bisection over random guessing or linear elimination

package/dist/domain.roles/mechanic/inits/init.claude.permissions.jsonc
CHANGED

@@ -11,7 +11,9 @@
   "deny": [
     // git write operations - require explicit user request for audit trail
     "Bash(git commit:*)",
-    "Bash(git add
+    "Bash(git add .)",
+    "Bash(git add -A:*)",
+    "Bash(git add --all:*)",
     "Bash(git stash:*)",
     "Bash(git checkout:*)",
     "Bash(git branch -d:*)",
@@ -24,6 +26,10 @@
     "Bash(git reflog delete:*)",
     "Bash(git config:*)",

+    // git mv/rm - use mvsafe.sh and rmsafe.sh instead (repo-constrained)
+    "Bash(git mv:*)",
+    "Bash(git rm:*)",
+
     // "anywrite" commands - CRITICAL SECURITY RISK
     //
     // unlike Claude's native Edit/Write tools which are scoped to the repo,
@@ -141,10 +147,6 @@
     "Bash(mkdir:*)",
     "Bash(pwd)",

-    // git mv/rm are safe - constrained to repo, all changes revertable
-    "Bash(git mv:*)",
-    "Bash(git rm:*)",
-
     // git read-only - all have no write variants
     "Bash(git log:*)",
     "Bash(git status:*)",
@@ -162,6 +164,12 @@
     // cpsafe - safe file copy within git repo (source must be git-tracked)
     "Bash(.agent/repo=ehmpathy/role=mechanic/skills/.skills/claude.tools/cpsafe.sh:*)",

+    // mvsafe - safe file move within git repo
+    "Bash(.agent/repo=ehmpathy/role=mechanic/skills/.skills/claude.tools/mvsafe.sh:*)",
+
+    // rmsafe - safe file removal within git repo
+    "Bash(.agent/repo=ehmpathy/role=mechanic/skills/.skills/claude.tools/rmsafe.sh:*)",
+
     // npm read operations
     "Bash(npm view:*)",
     "Bash(npm list:*)",
@@ -209,6 +217,9 @@
     "Bash(RESNAP=true THOROUGH=true npm run test:integration:*)",
     "Bash(RESNAP=true THOROUGH=true npm run test:acceptance:*)",

+    // test list operation
+    "Bash(npx jest --listTests:*)",
+
     // fix operations
     "Bash(npm run fix:*)",
     "Bash(npm run fix:format:*)",

package/dist/domain.roles/mechanic/skills/claude.tools/mvsafe.sh
ADDED

@@ -0,0 +1,103 @@
#!/usr/bin/env bash
######################################################################
# .what = safe file move within git repo
#
# .why = enables file moving without:
#   - touching files outside the repo
#   - accidental path traversal attacks
#
# this is a controlled alternative to raw mv, which is
# denied in permissions due to security risks.
#
# usage:
#   mvsafe.sh --src "path/to/source" --dest "path/to/dest"
#
# guarantee:
#   - source must be within repo
#   - dest must be within repo
#   - creates parent directories if needed
#   - fail-fast on errors
######################################################################
set -euo pipefail

# parse named arguments
SRC=""
DEST=""

while [[ $# -gt 0 ]]; do
  case $1 in
    --src)
      SRC="$2"
      shift 2
      ;;
    --dest)
      DEST="$2"
      shift 2
      ;;
    *)
      echo "unknown argument: $1"
      echo "usage: mvsafe.sh --src 'source' --dest 'destination'"
      exit 1
      ;;
  esac
done

# validate required args
if [[ -z "$SRC" ]]; then
  echo "error: --src is required"
  exit 1
fi

if [[ -z "$DEST" ]]; then
  echo "error: --dest is required"
  exit 1
fi

# ensure we're in a git repo
if ! git rev-parse --git-dir > /dev/null 2>&1; then
  echo "error: not in a git repository"
  exit 1
fi

# get repo root
REPO_ROOT=$(git rev-parse --show-toplevel)

# resolve absolute paths
SRC_ABS=$(realpath -m "$SRC")
DEST_ABS=$(realpath -m "$DEST")

# validate source is within repo
if [[ "$SRC_ABS" != "$REPO_ROOT"* ]]; then
  echo "error: source must be within the git repository"
  echo "  repo root: $REPO_ROOT"
  echo "  source: $SRC_ABS"
  exit 1
fi

# validate dest is within repo
if [[ "$DEST_ABS" != "$REPO_ROOT"* ]]; then
  echo "error: destination must be within the git repository"
  echo "  repo root: $REPO_ROOT"
  echo "  dest: $DEST_ABS"
  exit 1
fi

# validate source exists
if [[ ! -e "$SRC_ABS" ]]; then
  echo "error: source does not exist: $SRC"
  exit 1
fi

# create parent directories if needed
DEST_DIR=$(dirname "$DEST_ABS")
if [[ ! -d "$DEST_DIR" ]]; then
  echo "creating directory: $DEST_DIR"
  mkdir -p "$DEST_DIR"
fi

# perform the move
mv "$SRC_ABS" "$DEST_ABS"

SRC_REL="${SRC_ABS#$REPO_ROOT/}"
DEST_REL="${DEST_ABS#$REPO_ROOT/}"
echo "moved: $SRC_REL -> $DEST_REL"

package/dist/domain.roles/mechanic/skills/claude.tools/rmsafe.sh
ADDED

@@ -0,0 +1,97 @@
#!/usr/bin/env bash
######################################################################
# .what = safe file removal within git repo
#
# .why = enables file deletion without:
#   - touching files outside the repo
#   - accidental path traversal attacks
#
# this is a controlled alternative to raw rm, which is
# denied in permissions due to security risks.
#
# usage:
#   rmsafe.sh --path "path/to/file"
#   rmsafe.sh --path "path/to/dir" --recursive
#
# guarantee:
#   - path must be within repo
#   - requires --recursive for directories
#   - fail-fast on errors
######################################################################
set -euo pipefail

# parse named arguments
TARGET=""
RECURSIVE=false

while [[ $# -gt 0 ]]; do
  case $1 in
    --path)
      TARGET="$2"
      shift 2
      ;;
    --recursive|-r)
      RECURSIVE=true
      shift
      ;;
    *)
      echo "unknown argument: $1"
      echo "usage: rmsafe.sh --path 'target' [--recursive]"
      exit 1
      ;;
  esac
done

# validate required args
if [[ -z "$TARGET" ]]; then
  echo "error: --path is required"
  exit 1
fi

# ensure we're in a git repo
if ! git rev-parse --git-dir > /dev/null 2>&1; then
  echo "error: not in a git repository"
  exit 1
fi

# get repo root
REPO_ROOT=$(git rev-parse --show-toplevel)

# resolve absolute path
TARGET_ABS=$(realpath -m "$TARGET")

# validate target is within repo
if [[ "$TARGET_ABS" != "$REPO_ROOT"* ]]; then
  echo "error: path must be within the git repository"
  echo "  repo root: $REPO_ROOT"
  echo "  path: $TARGET_ABS"
  exit 1
fi

# prevent deleting repo root itself
if [[ "$TARGET_ABS" == "$REPO_ROOT" ]]; then
  echo "error: cannot delete the repository root"
  exit 1
fi

# validate target exists
if [[ ! -e "$TARGET_ABS" ]]; then
  echo "error: path does not exist: $TARGET"
  exit 1
fi

# check if directory and require --recursive
if [[ -d "$TARGET_ABS" ]] && [[ "$RECURSIVE" != true ]]; then
  echo "error: target is a directory, use --recursive to delete"
  exit 1
fi

# perform the removal
if [[ "$RECURSIVE" == true ]]; then
  rm -rf "$TARGET_ABS"
else
  rm "$TARGET_ABS"
fi

TARGET_REL="${TARGET_ABS#$REPO_ROOT/}"
echo "removed: $TARGET_REL"

package/dist/domain.roles/mechanic/skills/gh.workflow.logs.sh
ADDED

@@ -0,0 +1,213 @@
#!/usr/bin/env bash
######################################################################
# .what = fetch logs from the latest test workflow run on current branch
#
# .why = enables quick access to CI logs without leaving the terminal
#   - diagnose failing tests faster
#   - review workflow output during development
#   - avoid context-switching to browser
#
# usage:
#   gh.workflow.logs.sh --workflow "test"           # failed logs from latest test run
#   gh.workflow.logs.sh --workflow "ci" --full      # show full logs (not just failed)
#   gh.workflow.logs.sh --run-id 12345678           # view specific run by id
#   gh.workflow.logs.sh --workflow "test" --watch   # watch in-progress run
#   gh.workflow.logs.sh --workflow "test" --web     # open in browser instead
#
# guarantee:
#   - uses gh cli (must be authenticated)
#   - defaults to current branch
#   - shows most recent run if no run-id specified
#   - fail-fast on errors
######################################################################
set -euo pipefail

# parse named arguments
WORKFLOW=""
RUN_ID=""
FULL_LOGS=false
WATCH_MODE=false
WEB_MODE=false
BRANCH=""

while [[ $# -gt 0 ]]; do
  case $1 in
    --workflow|-w)
      WORKFLOW="$2"
      shift 2
      ;;
    --run-id|-r)
      RUN_ID="$2"
      shift 2
      ;;
    --full)
      FULL_LOGS=true
      shift
      ;;
    --watch)
      WATCH_MODE=true
      shift
      ;;
    --web)
      WEB_MODE=true
      shift
      ;;
    --branch|-b)
      BRANCH="$2"
      shift 2
      ;;
    --help|-h)
      echo "usage: gh.workflow.logs.sh --workflow <name> [options]"
      echo ""
      echo "required:"
      echo "  --workflow, -w <name>   workflow name (e.g., 'test', 'ci')"
      echo ""
      echo "options:"
      echo "  --run-id, -r <id>       view specific run by id (skips workflow lookup)"
      echo "  --full                  show full logs (default: failed only)"
      echo "  --watch                 watch in-progress run"
      echo "  --web                   open in browser instead of terminal"
      echo "  --branch, -b <name>     use specific branch (default: current)"
      echo "  --help, -h              show this help"
      exit 0
      ;;
    *)
      echo "unknown argument: $1"
      echo "run with --help for usage"
      exit 1
      ;;
  esac
done

# require workflow unless run-id specified
if [[ -z "$WORKFLOW" && -z "$RUN_ID" ]]; then
  echo "error: --workflow is required"
  echo "usage: gh.workflow.logs.sh --workflow <name> [options]"
  echo "run with --help for more info"
  exit 1
fi

# ensure gh cli is available
if ! command -v gh &> /dev/null; then
  echo "error: gh cli is not installed"
  echo "install: https://cli.github.com/"
  exit 1
fi

# ensure we're authenticated
if ! gh auth status &> /dev/null; then
  echo "error: not authenticated with gh cli"
  echo "run: gh auth login"
  exit 1
fi

# ensure we're in a git repo
if ! git rev-parse --git-dir > /dev/null 2>&1; then
  echo "error: not in a git repository"
  exit 1
fi

# get current branch if not specified
if [[ -z "$BRANCH" ]]; then
  BRANCH=$(git branch --show-current)
  if [[ -z "$BRANCH" ]]; then
    echo "error: could not determine current branch (detached HEAD?)"
    exit 1
  fi
fi

echo ":: branch: $BRANCH"

# if run-id specified, use it directly
if [[ -n "$RUN_ID" ]]; then
  echo ":: run-id: $RUN_ID"
else
  # build gh run list command
  LIST_CMD="gh run list --branch $BRANCH --limit 1 --json databaseId,workflowName,status,conclusion,createdAt"

  if [[ -n "$WORKFLOW" ]]; then
    LIST_CMD="$LIST_CMD --workflow $WORKFLOW"
  fi

  # get latest run
  RUNS_JSON=$(eval "$LIST_CMD")

  if [[ "$RUNS_JSON" == "[]" ]]; then
    echo ""
    echo "no workflow runs found for branch: $BRANCH"
    if [[ -n "$WORKFLOW" ]]; then
      echo "with workflow filter: $WORKFLOW"
    fi
    echo ""
    echo "available workflows:"
    gh workflow list
    exit 1
  fi

  # extract run info
  RUN_ID=$(echo "$RUNS_JSON" | jq -r '.[0].databaseId')
  WORKFLOW_NAME=$(echo "$RUNS_JSON" | jq -r '.[0].workflowName')
  STATUS=$(echo "$RUNS_JSON" | jq -r '.[0].status')
  CONCLUSION=$(echo "$RUNS_JSON" | jq -r '.[0].conclusion')
  CREATED_AT=$(echo "$RUNS_JSON" | jq -r '.[0].createdAt')

  echo ":: workflow: $WORKFLOW_NAME"
  echo ":: run-id: $RUN_ID"
  echo ":: status: $STATUS"
  if [[ "$CONCLUSION" != "null" ]]; then
    echo ":: conclusion: $CONCLUSION"
  fi
  echo ":: created: $CREATED_AT"
fi

echo ""

# handle watch mode
if [[ "$WATCH_MODE" == "true" ]]; then
  echo ":: watching run $RUN_ID ..."
  gh run watch "$RUN_ID"
  exit 0
fi

# handle web mode
if [[ "$WEB_MODE" == "true" ]]; then
  echo ":: opening in browser ..."
  gh run view "$RUN_ID" --web
  exit 0
fi

# get repo info for api calls
REPO=$(gh repo view --json nameWithOwner -q '.nameWithOwner')

# get jobs for this run
JOBS_JSON=$(gh api --method GET "repos/$REPO/actions/runs/$RUN_ID/jobs" -q '.jobs')

# view logs
if [[ "$FULL_LOGS" == "true" ]]; then
  echo ":: fetching full logs ..."
  echo ""
  # get all job ids and fetch logs for each
  JOB_IDS=$(echo "$JOBS_JSON" | jq -r '.[].id')
  for JOB_ID in $JOB_IDS; do
    JOB_NAME=$(echo "$JOBS_JSON" | jq -r ".[] | select(.id == $JOB_ID) | .name")
    echo "=== $JOB_NAME ==="
    gh api --method GET "repos/$REPO/actions/jobs/$JOB_ID/logs"
    echo ""
  done
else
  echo ":: fetching failed job logs ..."
  echo ""
  # get only failed job ids
  FAILED_JOB_IDS=$(echo "$JOBS_JSON" | jq -r '.[] | select(.conclusion == "failure") | .id')
  if [[ -z "$FAILED_JOB_IDS" ]]; then
    echo "no failed jobs found"
    exit 0
  fi
  for JOB_ID in $FAILED_JOB_IDS; do
    JOB_NAME=$(echo "$JOBS_JSON" | jq -r ".[] | select(.id == $JOB_ID) | .name")
    echo "=== $JOB_NAME (failed) ==="
    # fetch logs and filter to show failures
    gh api --method GET "repos/$REPO/actions/jobs/$JOB_ID/logs" | grep -E "(FAIL |✕|Error:|Cannot find|##\[error\])" | head -100
    echo ""
  done
fi

package/package.json
CHANGED

@@ -2,7 +2,7 @@
   "name": "rhachet-roles-ehmpathy",
   "author": "ehmpathy",
   "description": "empathetic software construction roles and skills, via rhachet",
-  "version": "1.15.15",
+  "version": "1.15.17",
   "repository": "ehmpathy/rhachet-roles-ehmpathy",
   "homepage": "https://github.com/ehmpathy/rhachet-roles-ehmpathy",
   "keywords": [