rhachet-roles-ehmpathy 1.15.16 → 1.15.18

@@ -0,0 +1,222 @@
1
+ # how to write bdd-style acceptance tests
2
+
3
+ ## structure
4
+
5
+ use `given`, `when`, `then` from `test-fns` to structure tests:
6
+
7
+ ```ts
8
+ import { given, when, then, useBeforeAll } from 'test-fns';
9
+
10
+ describe('featureName', () => {
11
+ given('[case1] scenario description', () => {
12
+ when('[t0] before any changes', () => {
13
+ then('precondition holds', async () => { ... });
14
+ then('another precondition holds', async () => { ... });
15
+ });
16
+
17
+ when('[t1] target operation is executed', () => {
18
+ then('expected outcome', async () => { ... });
19
+ });
20
+
21
+ when('[t2] alternate operation is executed', () => {
22
+ then('alternate outcome', async () => { ... });
23
+ });
24
+ });
25
+ });
26
+ ```
27
+
28
+ ---
29
+
30
+ ## labels
31
+
32
+ ### `[caseN]` for given blocks
33
+
34
+ each `given` block should have a unique case label:
35
+
36
+ ```ts
37
+ given('[case1] valid inputs', () => { ... });
38
+ given('[case2] invalid inputs', () => { ... });
39
+ given('[case3] edge case scenario', () => { ... });
40
+ ```
41
+
42
+ ### `[tN]` for when blocks
43
+
44
+ each `when` block should have a time index label:
45
+
46
+ - `[t0]` = precondition checks / before any changes
47
+ - `[t1]` = first target operation
48
+ - `[t2]` = second target operation
49
+ - etc.
50
+
51
+ ```ts
52
+ given('[case1] prose-author example repo', () => {
53
+ when('[t0] before any changes', () => {
54
+ then('rules glob matches 2 files', ...);
55
+ then('chapters glob matches 3 files', ...);
56
+ });
57
+
58
+ when('[t1] stepReview on clean chapter', () => {
59
+ then('review contains no blockers', ...);
60
+ });
61
+
62
+ when('[t2] stepReview on dirty chapter', () => {
63
+ then('review contains blockers', ...);
64
+ });
65
+ });
66
+ ```
67
+
68
+ ---
69
+
70
+ ## principles
71
+
72
+ ### consolidate related tests
73
+
74
+ don't split related scenarios across multiple `given` blocks:
75
+
76
+ ```ts
77
+ // ❌ bad - fragmented
78
+ given('[case8] prose-author rule enumeration', () => { ... });
79
+ given('[case9] prose-author chapter enumeration', () => { ... });
80
+ given('[case10] prose-author review works', () => { ... });
81
+
82
+ // ✅ good - consolidated
83
+ given('[case8] prose-author example repo', () => {
84
+ when('[t0] before any changes', () => {
85
+ then('rules glob matches', ...);
86
+ then('chapters glob matches', ...);
87
+ });
88
+ when('[t1] stepReview on clean chapter', () => { ... });
89
+ when('[t2] stepReview on dirty chapter', () => { ... });
90
+ });
91
+ ```
92
+
93
+ ### when describes state/time, not action
94
+
95
+ ```ts
96
+ // ❌ bad - describes action
97
+ when('[t0] assets are checked', () => { ... });
98
+
99
+ // ✅ good - describes state/time
100
+ when('[t0] before any changes', () => { ... });
101
+ ```
102
+
103
+ ### use afterEach for cleanup
104
+
105
+ ```ts
106
+ // ❌ bad - inline cleanup
107
+ then('creates output file', async () => {
108
+ const result = await doThing();
109
+ await fs.rm(outputPath); // cleanup inside then
110
+ expect(result).toBeDefined();
111
+ });
112
+
113
+ // ✅ good - afterEach cleanup
114
+ when('[t1] operation runs', () => {
115
+ const outputPath = path.join(os.tmpdir(), 'output.md');
116
+ afterEach(async () => fs.rm(outputPath, { force: true }));
117
+
118
+ then('creates output file', async () => {
119
+ const result = await doThing();
120
+ expect(result).toBeDefined();
121
+ });
122
+ });
123
+ ```
124
+
125
+ ### preconditions shouldn't expect errors
126
+
127
+ ```ts
128
+ // ❌ bad - precondition expects error then checks it's not a validation error
129
+ then('does not throw validation errors', async () => {
130
+ const error = await getError(doThing());
131
+ expect(error.message).not.toContain('validation');
132
+ });
133
+
134
+ // ✅ good - precondition checks assets directly
135
+ then('rules glob matches 2 files', async () => {
136
+ const files = await enumFiles({ glob: 'rules/*.md' });
137
+ expect(files).toHaveLength(2);
138
+ });
139
+ ```
140
+
141
+ ### use useBeforeAll for shared setup
142
+
143
+ ```ts
144
+ given('[case1] scenario with shared setup', () => {
145
+ const scene = useBeforeAll(async () => {
146
+ const entity = await createEntity();
147
+ return { entity };
148
+ });
149
+
150
+ when('[t1] operation runs', () => {
151
+ then('uses shared entity', async () => {
152
+ const result = await doThing({ id: scene.entity.id });
153
+ expect(result).toBeDefined();
154
+ });
155
+ });
156
+ });
157
+ ```
158
+
159
+ ---
160
+
161
+ ## complete example
162
+
163
+ ```ts
164
+ import { given, when, then, useBeforeAll } from 'test-fns';
165
+ import * as fs from 'fs/promises';
166
+ import * as path from 'path';
167
+ import * as os from 'os';
168
+
169
+ describe('stepReview', () => {
170
+ given('[case1] prose-author example repo', () => {
171
+ when('[t0] before any changes', () => {
172
+ then('rules glob matches 2 prose style rules', async () => {
173
+ const ruleFiles = await enumFilesFromGlob({
174
+ glob: '.agent/**/rules/*.md',
175
+ cwd: ASSETS_PROSE,
176
+ });
177
+ expect(ruleFiles).toHaveLength(2);
178
+ });
179
+
180
+ then('chapters glob matches 3 chapters', async () => {
181
+ const chapterFiles = await enumFilesFromGlob({
182
+ glob: 'chapters/*.md',
183
+ cwd: ASSETS_PROSE,
184
+ });
185
+ expect(chapterFiles).toHaveLength(3);
186
+ });
187
+ });
188
+
189
+ when('[t1] stepReview on chapter2.fixed.md', () => {
190
+ const outputPath = path.join(os.tmpdir(), 'review-fixed.md');
191
+ afterEach(async () => fs.rm(outputPath, { force: true }));
192
+
193
+ then('review contains no blockers', async () => {
194
+ const result = await stepReview({
195
+ rules: '.agent/**/rules/*.md',
196
+ paths: 'chapters/chapter2.fixed.md',
197
+ output: outputPath,
198
+ mode: 'hard',
199
+ cwd: ASSETS_PROSE,
200
+ });
201
+ expect(result.review.formatted.toLowerCase()).not.toContain('blocker');
202
+ });
203
+ });
204
+
205
+ when('[t2] stepReview on chapter2.md', () => {
206
+ const outputPath = path.join(os.tmpdir(), 'review-unfixed.md');
207
+ afterEach(async () => fs.rm(outputPath, { force: true }));
208
+
209
+ then('review contains blockers for gerund violations', async () => {
210
+ const result = await stepReview({
211
+ rules: '.agent/**/rules/*.md',
212
+ paths: 'chapters/chapter2.md',
213
+ output: outputPath,
214
+ mode: 'hard',
215
+ cwd: ASSETS_PROSE,
216
+ });
217
+ expect(result.review.formatted.toLowerCase()).toContain('blocker');
218
+ });
219
+ });
220
+ });
221
+ });
222
+ ```
@@ -0,0 +1,74 @@
1
+ # howto: diagnose via bisection
2
+
3
+ ## .what
4
+
5
+ use binary search (bisection) to isolate the root cause of a defect in O(log n) steps instead of O(n)
6
+
7
+ ## .why
8
+
9
+ - linear search through suspects wastes time
10
+ - bisection cuts the search space in half with each test
11
+ - works for code changes, data inputs, config options, and time ranges
12
+ - essential skill for debugging regressions and intermittent failures
13
+
14
+ ## .how
15
+
16
+ ### the pattern
17
+
18
+ 1. define the search space (lines, inputs, configs, changes)
19
+ 2. find a known-good state and a known-bad state
20
+ 3. test the midpoint
21
+ 4. if midpoint is good → defect is in the second half
22
+ 5. if midpoint is bad → defect is in the first half
23
+ 6. repeat until you find the exact boundary (see the sketch below)
24
+
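+ the same loop as a minimal sketch in code, assuming an ordered search space with a monotone boundary (everything before some index is good, everything at or after it is bad); `bisectFirstBad` and `isBad` are hypothetical names, not part of this repo:
+
+ ```ts
+ // find the first index where the defect reproduces, in O(log n) tests
+ const bisectFirstBad = async (input: {
+   size: number; // number of suspects (lines, records, changes, ...)
+   isBad: (index: number) => Promise<boolean>; // your reproduction test
+ }): Promise<number> => {
+   let lo = 0; // lowest index that could still be the first bad one
+   let hi = input.size - 1; // known-bad end of the range
+   while (lo < hi) {
+     const mid = Math.floor((lo + hi) / 2);
+     if (await input.isBad(mid)) hi = mid; // boundary is at or before mid
+     else lo = mid + 1; // boundary is after mid
+   }
+   return lo; // the exact boundary
+ };
+ ```
+
+ e.g., `await bisectFirstBad({ size: changes.length, isBad: (i) => failsWith(changes[i]) })`, where `changes` and `failsWith` are whatever your scenario provides
+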
25
+ ### code bisection (for logic errors)
26
+
27
+ when a function produces wrong output:
28
+
29
+ ```ts
30
+ // suspect: 10 lines of logic
31
+ const result = complexTransform(input);
32
+
33
+ // bisect: comment out bottom half, test
34
+ // if still broken → defect in top half
35
+ // if fixed → defect in bottom half
36
+ // repeat until isolated to 1-2 lines
37
+ ```
38
+
39
+ ### input bisection (for data issues)
40
+
41
+ when processing fails on large input:
42
+
43
+ ```ts
44
+ // 1000 records fail; which one causes it?
45
+ const midpoint = Math.floor(records.length / 2);
46
+ const firstHalf = records.slice(0, midpoint);
47
+ const secondHalf = records.slice(midpoint);
48
+
49
+ // test each half separately
50
+ // defect is in the half that fails
51
+ // repeat until you find the single bad record
52
+ ```
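+
+ a runnable version of that loop, sketched with a hypothetical `reproducesFailure` predicate you supply; assumes a single bad record (if the failure needs an interaction between records, keep both halves as suspects instead):
+
+ ```ts
+ // narrow a failing batch down to the one record that triggers the failure
+ const findBadRecord = async <T>(
+   records: T[],
+   reproducesFailure: (batch: T[]) => Promise<boolean>,
+ ): Promise<T> => {
+   if (records.length === 1) return records[0];
+   const midpoint = Math.floor(records.length / 2);
+   const firstHalf = records.slice(0, midpoint);
+   const secondHalf = records.slice(midpoint);
+   // recurse into whichever half still reproduces the failure
+   return (await reproducesFailure(firstHalf))
+     ? findBadRecord(firstHalf, reproducesFailure)
+     : findBadRecord(secondHalf, reproducesFailure);
+ };
+ ```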
53
+
54
+ ### config bisection (for env issues)
55
+
56
+ when config changes break behavior:
57
+
58
+ 1. list all config differences between working and broken
59
+ 2. apply half the changes
60
+ 3. test → narrow to the half that breaks
61
+ 4. repeat until isolated to a single config key (sketched below)
62
+
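+ a sketch of one step of that loop, assuming flat configs with primitive values; `runSmokeTest` and the other names below are hypothetical:
+
+ ```ts
+ // port half of the differing keys from the broken config onto the
+ // known-good one, retest, and return the surviving suspects
+ const bisectConfigStep = async (input: {
+   working: Record<string, unknown>;
+   broken: Record<string, unknown>;
+   runSmokeTest: (config: Record<string, unknown>) => Promise<boolean>; // true = still works
+ }) => {
+   const diffKeys = Object.keys(input.broken).filter(
+     (key) => input.broken[key] !== input.working[key],
+   );
+   const half = diffKeys.slice(0, Math.ceil(diffKeys.length / 2));
+   const candidate = {
+     ...input.working,
+     ...Object.fromEntries(half.map((key) => [key, input.broken[key]])),
+   };
+   const stillWorks = await input.runSmokeTest(candidate);
+   // if the candidate broke, the culprit is inside `half`; otherwise it's in the rest
+   return { suspects: stillWorks ? diffKeys.slice(half.length) : half };
+ };
+ ```
+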
63
+ ## .when to use
64
+
65
+ - regression appeared but unclear which change caused it
66
+ - feature works with small data but fails with large data
67
+ - behavior differs between environments
68
+ - any scenario with an "it used to work" vs "now it's broken" boundary
69
+
70
+ ## .key insight
71
+
72
+ > the power of bisection: 1000 suspects → 10 tests max
73
+
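+ the arithmetic behind that claim: each test halves the suspect pool, so ⌈log2 n⌉ tests suffice
+
+ ```ts
+ // tests needed to isolate 1 culprit among n suspects
+ const testsNeeded = (n: number) => Math.ceil(Math.log2(n));
+ testsNeeded(1000); // 10
+ ```
+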
74
+ always prefer structured bisection over random guessing or linear elimination
@@ -26,10 +26,6 @@
26
26
  "Bash(git reflog delete:*)",
27
27
  "Bash(git config:*)",
28
28
 
29
- // git mv/rm - use mvsafe.sh and rmsafe.sh instead (repo-constrained)
30
- "Bash(git mv:*)",
31
- "Bash(git rm:*)",
32
-
33
29
  // "anywrite" commands - CRITICAL SECURITY RISK
34
30
  //
35
31
  // unlike Claude's native Edit/Write tools which are scoped to the repo,
@@ -147,6 +143,10 @@
147
143
  "Bash(mkdir:*)",
148
144
  "Bash(pwd)",
149
145
 
146
+ // git mv/rm are safe - constrained to repo, all changes revertable
147
+ "Bash(git mv:*)",
148
+ "Bash(git rm:*)",
149
+
150
150
  // git read-only - all have no write variants
151
151
  "Bash(git log:*)",
152
152
  "Bash(git status:*)",
@@ -0,0 +1,213 @@
1
+ #!/usr/bin/env bash
2
+ ######################################################################
3
+ # .what = fetch logs from the latest test workflow run on current branch
4
+ #
5
+ # .why = enables quick access to CI logs without leaving the terminal
6
+ # - diagnose failing tests faster
7
+ # - review workflow output during development
8
+ # - avoid context-switching to browser
9
+ #
10
+ # usage:
11
+ # gh.workflow.logs.sh --workflow "test" # failed logs from latest test run
12
+ # gh.workflow.logs.sh --workflow "ci" --full # show full logs (not just failed)
13
+ # gh.workflow.logs.sh --run-id 12345678 # view specific run by id
14
+ # gh.workflow.logs.sh --workflow "test" --watch # watch in-progress run
15
+ # gh.workflow.logs.sh --workflow "test" --web # open in browser instead
16
+ #
17
+ # guarantee:
18
+ # - uses gh cli (must be authenticated)
19
+ # - defaults to current branch
20
+ # - shows most recent run if no run-id specified
21
+ # - fail-fast on errors
22
+ ######################################################################
23
+ set -euo pipefail
24
+
25
+ # parse named arguments
26
+ WORKFLOW=""
27
+ RUN_ID=""
28
+ FULL_LOGS=false
29
+ WATCH_MODE=false
30
+ WEB_MODE=false
31
+ BRANCH=""
32
+
33
+ while [[ $# -gt 0 ]]; do
34
+ case $1 in
35
+ --workflow|-w)
36
+ WORKFLOW="$2"
37
+ shift 2
38
+ ;;
39
+ --run-id|-r)
40
+ RUN_ID="$2"
41
+ shift 2
42
+ ;;
43
+ --full)
44
+ FULL_LOGS=true
45
+ shift
46
+ ;;
47
+ --watch)
48
+ WATCH_MODE=true
49
+ shift
50
+ ;;
51
+ --web)
52
+ WEB_MODE=true
53
+ shift
54
+ ;;
55
+ --branch|-b)
56
+ BRANCH="$2"
57
+ shift 2
58
+ ;;
59
+ --help|-h)
60
+ echo "usage: gh.workflow.logs.sh --workflow <name> [options]"
61
+ echo ""
62
+ echo "required:"
63
+ echo " --workflow, -w <name> workflow name (e.g., 'test', 'ci')"
64
+ echo ""
65
+ echo "options:"
66
+ echo " --run-id, -r <id> view specific run by id (skips workflow lookup)"
67
+ echo " --full show full logs (default: failed only)"
68
+ echo " --watch watch in-progress run"
69
+ echo " --web open in browser instead of terminal"
70
+ echo " --branch, -b <name> use specific branch (default: current)"
71
+ echo " --help, -h show this help"
72
+ exit 0
73
+ ;;
74
+ *)
75
+ echo "unknown argument: $1"
76
+ echo "run with --help for usage"
77
+ exit 1
78
+ ;;
79
+ esac
80
+ done
81
+
82
+ # require workflow unless run-id specified
83
+ if [[ -z "$WORKFLOW" && -z "$RUN_ID" ]]; then
84
+ echo "error: --workflow is required"
85
+ echo "usage: gh.workflow.logs.sh --workflow <name> [options]"
86
+ echo "run with --help for more info"
87
+ exit 1
88
+ fi
89
+
90
+ # ensure gh cli is available
91
+ if ! command -v gh &> /dev/null; then
92
+ echo "error: gh cli is not installed"
93
+ echo "install: https://cli.github.com/"
94
+ exit 1
95
+ fi
96
+
97
+ # ensure we're authenticated
98
+ if ! gh auth status &> /dev/null; then
99
+ echo "error: not authenticated with gh cli"
100
+ echo "run: gh auth login"
101
+ exit 1
102
+ fi
103
+
104
+ # ensure we're in a git repo
105
+ if ! git rev-parse --git-dir > /dev/null 2>&1; then
106
+ echo "error: not in a git repository"
107
+ exit 1
108
+ fi
109
+
110
+ # get current branch if not specified
111
+ if [[ -z "$BRANCH" ]]; then
112
+ BRANCH=$(git branch --show-current)
113
+ if [[ -z "$BRANCH" ]]; then
114
+ echo "error: could not determine current branch (detached HEAD?)"
115
+ exit 1
116
+ fi
117
+ fi
118
+
119
+ echo ":: branch: $BRANCH"
120
+
121
+ # if run-id specified, use it directly
122
+ if [[ -n "$RUN_ID" ]]; then
123
+ echo ":: run-id: $RUN_ID"
124
+ else
125
+ # build gh run list command
126
+ LIST_CMD="gh run list --branch $BRANCH --limit 1 --json databaseId,workflowName,status,conclusion,createdAt"
127
+
128
+ if [[ -n "$WORKFLOW" ]]; then
129
+ LIST_CMD="$LIST_CMD --workflow $WORKFLOW"
130
+ fi
131
+
132
+ # get latest run
133
+ RUNS_JSON=$(eval "$LIST_CMD")
134
+
135
+ if [[ "$RUNS_JSON" == "[]" ]]; then
136
+ echo ""
137
+ echo "no workflow runs found for branch: $BRANCH"
138
+ if [[ -n "$WORKFLOW" ]]; then
139
+ echo "with workflow filter: $WORKFLOW"
140
+ fi
141
+ echo ""
142
+ echo "available workflows:"
143
+ gh workflow list
144
+ exit 1
145
+ fi
146
+
147
+ # extract run info
148
+ RUN_ID=$(echo "$RUNS_JSON" | jq -r '.[0].databaseId')
149
+ WORKFLOW_NAME=$(echo "$RUNS_JSON" | jq -r '.[0].workflowName')
150
+ STATUS=$(echo "$RUNS_JSON" | jq -r '.[0].status')
151
+ CONCLUSION=$(echo "$RUNS_JSON" | jq -r '.[0].conclusion')
152
+ CREATED_AT=$(echo "$RUNS_JSON" | jq -r '.[0].createdAt')
153
+
154
+ echo ":: workflow: $WORKFLOW_NAME"
155
+ echo ":: run-id: $RUN_ID"
156
+ echo ":: status: $STATUS"
157
+ if [[ "$CONCLUSION" != "null" ]]; then
158
+ echo ":: conclusion: $CONCLUSION"
159
+ fi
160
+ echo ":: created: $CREATED_AT"
161
+ fi
162
+
163
+ echo ""
164
+
165
+ # handle watch mode
166
+ if [[ "$WATCH_MODE" == "true" ]]; then
167
+ echo ":: watching run $RUN_ID ..."
168
+ gh run watch "$RUN_ID"
169
+ exit 0
170
+ fi
171
+
172
+ # handle web mode
173
+ if [[ "$WEB_MODE" == "true" ]]; then
174
+ echo ":: opening in browser ..."
175
+ gh run view "$RUN_ID" --web
176
+ exit 0
177
+ fi
178
+
179
+ # get repo info for api calls
180
+ REPO=$(gh repo view --json nameWithOwner -q '.nameWithOwner')
181
+
182
+ # get jobs for this run
183
+ JOBS_JSON=$(gh api --method GET "repos/$REPO/actions/runs/$RUN_ID/jobs" -q '.jobs')
184
+
185
+ # view logs
186
+ if [[ "$FULL_LOGS" == "true" ]]; then
187
+ echo ":: fetching full logs ..."
188
+ echo ""
189
+ # get all job ids and fetch logs for each
190
+ JOB_IDS=$(echo "$JOBS_JSON" | jq -r '.[].id')
191
+ for JOB_ID in $JOB_IDS; do
192
+ JOB_NAME=$(echo "$JOBS_JSON" | jq -r ".[] | select(.id == $JOB_ID) | .name")
193
+ echo "=== $JOB_NAME ==="
194
+ gh api --method GET "repos/$REPO/actions/jobs/$JOB_ID/logs"
195
+ echo ""
196
+ done
197
+ else
198
+ echo ":: fetching failed job logs ..."
199
+ echo ""
200
+ # get only failed job ids
201
+ FAILED_JOB_IDS=$(echo "$JOBS_JSON" | jq -r '.[] | select(.conclusion == "failure") | .id')
202
+ if [[ -z "$FAILED_JOB_IDS" ]]; then
203
+ echo "no failed jobs found"
204
+ exit 0
205
+ fi
206
+ for JOB_ID in $FAILED_JOB_IDS; do
207
+ JOB_NAME=$(echo "$JOBS_JSON" | jq -r ".[] | select(.id == $JOB_ID) | .name")
208
+ echo "=== $JOB_NAME (failed) ==="
209
+ # fetch logs and filter to show failures
210
+ # `|| true`: grep exits non-zero when nothing matches; don't abort the loop under pipefail
+ gh api --method GET "repos/$REPO/actions/jobs/$JOB_ID/logs" | grep -E "(FAIL |✕|Error:|Cannot find|##\[error\])" | head -100 || true
211
+ echo ""
212
+ done
213
+ fi
package/package.json CHANGED
@@ -2,7 +2,7 @@
2
2
  "name": "rhachet-roles-ehmpathy",
3
3
  "author": "ehmpathy",
4
4
  "description": "empathetic software construction roles and skills, via rhachet",
5
- "version": "1.15.16",
5
+ "version": "1.15.18",
6
6
  "repository": "ehmpathy/rhachet-roles-ehmpathy",
7
7
  "homepage": "https://github.com/ehmpathy/rhachet-roles-ehmpathy",
8
8
  "keywords": [