@link-assistant/hive-mind 1.59.5 → 1.59.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. package/CHANGELOG.md +158 -0
  2. package/package.json +1 -1
  3. package/src/bidirectional-interactive.lib.mjs +1 -0
  4. package/src/contributing-guidelines.lib.mjs +3 -2
  5. package/src/github-error-reporter.lib.mjs +3 -2
  6. package/src/github-merge-ci-signals.lib.mjs +8 -2
  7. package/src/github-merge-ci.lib.mjs +8 -2
  8. package/src/github-merge-ready-sync.lib.mjs +7 -1
  9. package/src/github-merge-repo-actions.lib.mjs +7 -1
  10. package/src/github-merge.lib.mjs +40 -32
  11. package/src/github-rate-limit.lib.mjs +276 -0
  12. package/src/github.batch.lib.mjs +1 -0
  13. package/src/hive.mjs +2 -2
  14. package/src/hive.recheck.lib.mjs +1 -0
  15. package/src/lib.mjs +30 -4
  16. package/src/limits.lib.mjs +1 -0
  17. package/src/protect-branch.mjs +3 -2
  18. package/src/queue-config.lib.mjs +7 -3
  19. package/src/review.mjs +3 -2
  20. package/src/reviewers-hive.mjs +3 -2
  21. package/src/solve.accept-invite.lib.mjs +7 -1
  22. package/src/solve.auto-continue.lib.mjs +3 -2
  23. package/src/solve.auto-ensure.lib.mjs +3 -2
  24. package/src/solve.auto-merge-helpers.lib.mjs +3 -2
  25. package/src/solve.auto-merge.lib.mjs +40 -2
  26. package/src/solve.auto-pr.lib.mjs +1 -0
  27. package/src/solve.branch-errors.lib.mjs +1 -0
  28. package/src/solve.config.lib.mjs +2 -2
  29. package/src/solve.error-handlers.lib.mjs +1 -0
  30. package/src/solve.execution.lib.mjs +3 -2
  31. package/src/solve.feedback.lib.mjs +1 -0
  32. package/src/solve.mjs +20 -36
  33. package/src/solve.preparation.lib.mjs +1 -0
  34. package/src/solve.progress-monitoring.lib.mjs +1 -0
  35. package/src/solve.repository.lib.mjs +3 -3
  36. package/src/solve.restart-shared.lib.mjs +3 -2
  37. package/src/solve.results.lib.mjs +89 -14
  38. package/src/solve.session.lib.mjs +1 -0
  39. package/src/solve.watch.lib.mjs +39 -2
  40. package/src/telegram-accept-invitations.lib.mjs +7 -1
  41. package/src/token-sanitization.lib.mjs +1 -0
  42. package/src/tool-comments.lib.mjs +12 -1
  43. package/src/youtrack/youtrack-sync.mjs +1 -0
package/CHANGELOG.md CHANGED
@@ -1,5 +1,163 @@
1
1
  # @link-assistant/hive-mind
2
2
 
3
+ ## 1.59.7
4
+
5
+ ### Patch Changes
6
+
7
+ - 4f03aea: fix(solve): post a Working session summary at the end of every working session — issue #1728.
8
+
9
+ `--auto-attach-solution-summary` previously only ran in `solve.mjs`'s top-level flow.
10
+ Iterations inside `--auto-restart-until-mergeable` (`src/solve.auto-merge.lib.mjs`) and
11
+ `--watch` / temporary auto-restart (`src/solve.watch.lib.mjs`) called
12
+ `executeToolIteration()`, uploaded a log comment, and discarded the AI's
13
+ `toolResult.resultSummary` — so when the AI finished an iteration without posting
14
+ a comment, the user saw only the start (`Auto-restart triggered`) and end
15
+ (`Auto-restart-until-mergeable Log`) brackets with no AI conclusions in between.
16
+ Reproduced live on link-foundation/box PR #83 between comment ids
17
+ [`4345164478`](https://github.com/link-foundation/box/pull/83#issuecomment-4345164478)
18
+ and [`4345439482`](https://github.com/link-foundation/box/pull/83#issuecomment-4345439482).
19
+
20
+ Fix: extracted the attach-decision into a single helper
21
+ `maybeAttachWorkingSessionSummary` in `src/solve.results.lib.mjs` that all three
22
+ working-session call sites (`solve.mjs`, `solve.auto-merge.lib.mjs`,
23
+ `solve.watch.lib.mjs`) invoke with their own `iterationStartTime`. Each successful
24
+ iteration now ends with either an AI-authored comment OR an automated
25
+ "Working session summary" comment.
26
+
27
+ Also renamed the comment header from "Solution summary" to "Working session
28
+ summary" because not every working session is a solution draft — many are
29
+ continuation/restart iterations. CLI flag names (`--attach-solution-summary`,
30
+ `--auto-attach-solution-summary`, `--no-auto-attach-solution-summary`) and
31
+ function names are preserved for backwards compatibility. The new header is
32
+ registered in `TOOL_GENERATED_COMMENT_MARKERS` so a previous iteration's summary
33
+ is excluded from the next iteration's "did the AI post anything?" check.
34
+
35
+ Tests: extended `tests/test-solution-summary.mjs` to cover the new helper, the
36
+ header rename, the marker registration, and the per-iteration wiring in
37
+ `solve.auto-merge.lib.mjs` / `solve.watch.lib.mjs`.
38
+
39
+ Case study: `docs/case-studies/issue-1728/`.
40
+
41
+ ## 1.59.6
42
+
43
+ ### Patch Changes
44
+
45
+ - d6d05a0: Fully safeguard against GitHub API rate-limit errors — issue #1726.
46
+
47
+ `/merge` merged a draft PR even though every `gh api` call had been failing
48
+ with `HTTP 403: API rate limit exceeded`. The merge subsystem caught those
49
+ errors silently in `getActiveRepoWorkflows()` and reported _"no CI checks
50
+ and repo has no active workflows — no CI/CD configured"_, which `/merge`
51
+ interpreted as _"all clear"_. Verbose log
52
+ ([`docs/case-studies/issue-1726/data/a4dccea2-a941-4a0c-a50e-60b1ed454e1e.log`](./docs/case-studies/issue-1726/data/a4dccea2-a941-4a0c-a50e-60b1ed454e1e.log),
53
+ lines 40251–40269):
54
+
55
+ ```
56
+ [VERBOSE] /merge: Error fetching workflows for link-foundation/relative-meta-logic:
57
+ Command failed: gh api "repos/link-foundation/relative-meta-logic/actions/workflows" --paginate --slurp
58
+ gh: API rate limit exceeded for user ID 1431904 ... (HTTP 403)
59
+
60
+ [VERBOSE] /merge: PR #100 has no CI checks and repo has no active workflows - no CI/CD configured
61
+ ```
62
+
63
+ Two root causes combined:
64
+ 1. **`getActiveRepoWorkflows()` swallowed exceptions** in
65
+ [`src/github-merge.lib.mjs`](./src/github-merge.lib.mjs) and returned
66
+ `[]`. Rate-limit responses became "this repo has no workflows", which the
67
+ merge gate treated as "no CI configured, safe to merge".
68
+ 2. **No gh API call site had rate-limit retry**. The existing
69
+ `ghCmdRetry`/`ghRetry` helpers only recognised transient TCP/TLS faults,
70
+ so a 403 fell straight through. ~135 raw `$gh ...` and
71
+ ``exec(`gh ...`)`` call sites scattered across `src/solve.*`,
72
+ `src/github-merge.*`, scripts, and reviewers.
73
+
74
+ Fix:
75
+ - **New rate-limit module**
76
+ [`src/github-rate-limit.lib.mjs`](./src/github-rate-limit.lib.mjs) with
77
+ `isRateLimitError`, `parseRateLimitReset`, `fetchNextRateLimitReset`,
78
+ `computeRateLimitWait`, `ghWithRateLimitRetry`, `execGhWithRetry`,
79
+ `wrapDollarWithGhRetry`. Applies the issue's policy:
80
+ `wait = (resetTime − now) + bufferMs (10 min) + random(0..jitterMs) (0..5 min)`,
81
+ reusing `limitReset.bufferMs` / `limitReset.jitterMs` from
82
+ [`src/config.lib.mjs`](./src/config.lib.mjs) (introduced in #1236).
83
+ - **Propagate errors instead of swallowing**. `getActiveRepoWorkflows()`
84
+ no longer wraps the gh call in try/catch that returns `[]`. Errors bubble
85
+ up; the merge gate sees the failure and stops.
86
+ - **Layered retry in legacy helpers**. `ghRetry` and `ghCmdRetry` in
87
+ [`src/lib.mjs`](./src/lib.mjs) check `isRateLimitError` first and delegate
88
+ to `ghWithRateLimitRetry` before applying transient-network retry.
89
+ - **Local `exec` shim** in 7 merge files rebound through
90
+ `ghWithRateLimitRetry` — converts every existing ``exec(`gh ...`)`` site
91
+ without per-call edits.
92
+ - **Wrapped `$` at every entry point** (15 files). `wrapDollarWithGhRetry`
93
+ routes every `$gh ...` through the retry helper while passing non-gh
94
+ commands unchanged.
95
+ - **Marker imports** in 17 callee files that receive `$` as a parameter,
96
+ declaring rate-limit awareness for the ESLint rule.
97
+ - **Queue threshold lowered** from 75% to 50% in
98
+ [`src/queue-config.lib.mjs`](./src/queue-config.lib.mjs).
99
+ - **Custom ESLint rule**
100
+ [`eslint-rules/no-direct-gh-exec.mjs`](./eslint-rules/no-direct-gh-exec.mjs)
101
+ flags any unsafe `gh` exec call site; files that import a known-safe
102
+ wrapper are exempted at file scope.
103
+
104
+ Tests:
105
+ - [`tests/github-rate-limit.test.mjs`](./tests/github-rate-limit.test.mjs)
106
+ — 22 unit tests covering `isRateLimitError` (primary, secondary,
107
+ abuse-detection, stderr, cause-chain), `parseRateLimitReset` (header
108
+ variants), `computeRateLimitWait` (future / null / past reset, jitter
109
+ bounds), `ghWithRateLimitRetry` (success, propagation, retry-then-succeed,
110
+ exhausted retries), `wrapDollarWithGhRetry` (passthrough, retry,
111
+ propagation).
112
+ - [`tests/test-no-direct-gh-exec-rule.mjs`](./tests/test-no-direct-gh-exec-rule.mjs)
113
+ — RuleTester valid/invalid cases.
114
+ - Updated `tests/queue-config.test.mjs` and `tests/limits-display.test.mjs`
115
+ for the 50% threshold.
116
+
117
+ Documentation:
118
+ [`docs/case-studies/issue-1726/`](./docs/case-studies/issue-1726/README.md)
119
+ contains the failing run logs, root-cause analysis, fix breakdown, and
120
+ verification commands.
121
+
122
+ - bb0af8c: Fix `check-file-line-limits` CI failure on `main` after issue #1726 merge.
123
+
124
+ After PR #1726 (rate-limit safeguards) merged into `main`, the
125
+ `check-file-line-limits` job failed because three `.mjs` files crossed the
126
+ 1500-line hard limit:
127
+ - `src/hive.mjs` — 1500 → 1504 lines
128
+ - `src/limits.lib.mjs` — 1497 → 1501 lines
129
+ - `src/solve.repository.lib.mjs` — 1500 → 1501 lines
130
+
131
+ Two root causes combined: (1) the per-file marker block PR #1726 added was 4
132
+ lines (2 comment lines + import + `void`), with no headroom check; (2) ESLint's
133
+ `max-lines` rule was configured with `skipBlankLines: true, skipComments: true`
134
+ while the CI script counts raw `wc -l`, so `npm run lint` passed locally even
135
+ though the CI script would fail. Local lint and CI line-limit had silently
136
+ drifted apart. See
137
+ [`docs/case-studies/issue-1730`](./docs/case-studies/issue-1730/README.md)
138
+ for the timeline, log excerpts, and template comparison.
139
+
140
+ Fix:
141
+ - **Synchronize ESLint `max-lines` with the CI script** in
142
+ [`eslint.config.mjs`](./eslint.config.mjs) by setting `skipBlankLines: false,
143
+ skipComments: false`. Now `npm run lint` catches the failure locally before
144
+ push, restoring the invariant the rule's comment claimed.
145
+ - **Compact the rate-limit marker** introduced by #1726 from 4 lines to 1 line
146
+ in all 17 files. ESLint's existing `varsIgnorePattern: '^_'` means the
147
+ `void _wrapDollarWithGhRetry;` line was redundant; the trailing-comment form
148
+ preserves rate-limit awareness for `no-direct-gh-exec` while saving 3 lines
149
+ per file. Files: `src/hive.mjs`, `src/limits.lib.mjs`,
150
+ `src/{solve.session,solve.preparation,solve.progress-monitoring,solve.error-handlers,solve.feedback,solve.auto-pr,solve.branch-errors,hive.recheck,github.batch,bidirectional-interactive,token-sanitization}.lib.mjs`,
151
+ `src/youtrack/youtrack-sync.mjs`,
152
+ `scripts/{create-github-release,format-github-release,format-release-notes}.mjs`.
153
+ - **Compact `solve.repository.lib.mjs`** wrap pattern from 4 lines to 3 while
154
+ keeping the destructure form so `eslint-rules/no-direct-gh-exec.mjs` still
155
+ recognizes `wrapDollarWithGhRetry` in scope.
156
+
157
+ After the fix, all three previously-failing files are at or below 1500 raw
158
+ lines (1500 / 1498 / 1500) and `npm run lint` would now reject any
159
+ re-introduction of the regression.
160
+
3
161
  ## 1.59.5
4
162
 
5
163
  ### Patch Changes
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@link-assistant/hive-mind",
3
- "version": "1.59.5",
3
+ "version": "1.59.7",
4
4
  "description": "AI-powered issue solver and hive mind for collaborative problem solving",
5
5
  "main": "src/hive.mjs",
6
6
  "type": "module",
@@ -22,6 +22,7 @@
22
22
  * @experimental
23
23
  */
24
24
 
25
+ import { wrapDollarWithGhRetry as _wrapDollarWithGhRetry } from './github-rate-limit.lib.mjs'; // rate-limit marker (#1726): gh API calls flow through $ wrapped by caller
25
26
  // Configuration constants
26
27
  const CONFIG = {
27
28
  // Minimum time between comment checks to avoid rate limiting (in ms)
@@ -9,8 +9,9 @@ if (typeof globalThis.use === 'undefined') {
9
9
  globalThis.use = (await eval(await (await fetch('https://unpkg.com/use-m/use.js')).text())).use;
10
10
  }
11
11
 
12
- const { $ } = await use('command-stream');
13
-
12
+ const { $: __rawDollar$ } = await use('command-stream');
13
+ const { wrapDollarWithGhRetry } = await import('./github-rate-limit.lib.mjs');
14
+ const $ = wrapDollarWithGhRetry(__rawDollar$);
14
15
  /**
15
16
  * Common paths where contributing guidelines might be found
16
17
  */
@@ -13,8 +13,9 @@ if (typeof globalThis.use === 'undefined') {
13
13
  }
14
14
 
15
15
  const fs = (await use('fs')).promises;
16
- const { $ } = await use('command-stream');
17
-
16
+ const { $: __rawDollar$ } = await use('command-stream');
17
+ const { wrapDollarWithGhRetry } = await import('./github-rate-limit.lib.mjs');
18
+ const $ = wrapDollarWithGhRetry(__rawDollar$);
18
19
  const GITHUB_ISSUE_BODY_MAX_SIZE = 60000;
19
20
  const GITHUB_FILE_MAX_SIZE = 10 * 1024 * 1024;
20
21
 
@@ -13,8 +13,14 @@
13
13
 
14
14
  import { promisify } from 'util';
15
15
  import { exec as execCallback } from 'child_process';
16
-
17
- const exec = promisify(execCallback);
16
+ import { ghWithRateLimitRetry } from './github-rate-limit.lib.mjs';
17
+
18
+ const execRaw = promisify(execCallback);
19
+ // Issue #1726: rate-limit safe gh wrapper.
20
+ const exec = (cmd, opts) =>
21
+ ghWithRateLimitRetry(() => execRaw(cmd, opts), {
22
+ label: `gh exec (${cmd.split(/\s+/).slice(0, 3).join(' ')})`,
23
+ });
18
24
 
19
25
  /**
20
26
  * Get the committed date of a specific commit from GitHub API
@@ -11,8 +11,14 @@
11
11
  import { getWorkflowRunsForSha } from './github-merge.lib.mjs';
12
12
  import { promisify } from 'util';
13
13
  import { exec as execCallback } from 'child_process';
14
-
15
- const exec = promisify(execCallback);
14
+ import { ghWithRateLimitRetry } from './github-rate-limit.lib.mjs';
15
+
16
+ const execRaw = promisify(execCallback);
17
+ // Issue #1726: every gh call must be rate-limit safe.
18
+ const exec = (cmd, opts) =>
19
+ ghWithRateLimitRetry(() => execRaw(cmd, opts), {
20
+ label: `gh exec (${cmd.split(/\s+/).slice(0, 3).join(' ')})`,
21
+ });
16
22
 
17
23
  /**
18
24
  * Wait for all workflow runs triggered by a specific commit to complete
@@ -11,8 +11,14 @@
11
11
 
12
12
  import { promisify } from 'util';
13
13
  import { exec as execCallback } from 'child_process';
14
+ import { ghWithRateLimitRetry } from './github-rate-limit.lib.mjs';
14
15
 
15
- const exec = promisify(execCallback);
16
+ const execRaw = promisify(execCallback);
17
+ // Issue #1726: rate-limit safe gh wrapper.
18
+ const exec = (cmd, opts) =>
19
+ ghWithRateLimitRetry(() => execRaw(cmd, opts), {
20
+ label: `gh exec (${cmd.split(/\s+/).slice(0, 3).join(' ')})`,
21
+ });
16
22
 
17
23
  import { extractLinkedIssueNumber } from './github-linking.lib.mjs';
18
24
 
@@ -12,10 +12,16 @@
12
12
  import { promisify } from 'util';
13
13
  import { exec as execCallback } from 'child_process';
14
14
  import { githubLimits } from './config.lib.mjs';
15
+ import { ghWithRateLimitRetry } from './github-rate-limit.lib.mjs';
15
16
  const execRaw = promisify(execCallback);
16
17
  // Issue #1722: raise exec maxBuffer above Node's 1 MB default for paginated gh
17
18
  // API responses (workflow runs can easily exceed that on busy repos).
18
- const exec = (cmd, opts = {}) => execRaw(cmd, { maxBuffer: githubLimits.bufferMaxSize, ...opts });
19
+ // Issue #1726: wrap with rate-limit retry so a 5,000/hr quota hit waits for
20
+ // reset instead of bubbling up as a generic fetch failure.
21
+ const exec = (cmd, opts = {}) =>
22
+ ghWithRateLimitRetry(() => execRaw(cmd, { maxBuffer: githubLimits.bufferMaxSize, ...opts }), {
23
+ label: `gh exec (${cmd.split(/\s+/).slice(0, 3).join(' ')})`,
24
+ });
19
25
 
20
26
  // Statuses we treat as "not yet finished".
21
27
  const ACTIVE_RUN_STATUSES = ['in_progress', 'queued', 'waiting', 'requested', 'pending'];
@@ -18,13 +18,24 @@ const execRaw = promisify(execCallback);
18
18
 
19
19
  import { parseGitHubUrl } from './github.lib.mjs';
20
20
  import { githubLimits } from './config.lib.mjs';
21
+ import { ghWithRateLimitRetry } from './github-rate-limit.lib.mjs';
21
22
 
22
23
  // Issue #1722: gh api `--paginate --slurp` responses for repos with many
23
24
  // historical workflow runs can easily exceed Node's default 1 MB exec buffer
24
25
  // (observed 12.7 MB on this repo's main branch). Default to the configured
25
26
  // githubLimits.bufferMaxSize (10 MB; HIVE_MIND_GITHUB_BUFFER_MAX_SIZE) for all
26
27
  // gh calls in this file.
27
- const exec = (cmd, opts = {}) => execRaw(cmd, { maxBuffer: githubLimits.bufferMaxSize, ...opts });
28
+ //
29
+ // Issue #1726: every gh call in the merge subsystem must be rate-limit safe.
30
+ // Wrapping the local `exec` shim ensures all 25+ call sites pick up retry
31
+ // behaviour without per-call changes. Non-rate-limit errors continue to throw
32
+ // so genuine failures (404, auth, malformed JSON downstream) surface to the
33
+ // caller — they MUST NOT be swallowed as in the original /merge bug where a
34
+ // rate-limit error was silently treated as "no workflows".
35
+ const exec = (cmd, opts = {}) =>
36
+ ghWithRateLimitRetry(() => execRaw(cmd, { maxBuffer: githubLimits.bufferMaxSize, ...opts }), {
37
+ label: `gh exec (${cmd.split(/\s+/).slice(0, 3).join(' ')})`,
38
+ });
28
39
 
29
40
  // Issue #1413: Import ready tag sync, timeline, and label constant from separate module
30
41
  // to keep this file under the 1500 line limit
@@ -1340,40 +1351,37 @@ export async function getWorkflowRunJobsCount(owner, repo, runId, verbose = fals
1340
1351
  * @returns {Promise<{count: number, hasWorkflows: boolean, workflows: Array<{id: number, name: string, state: string, path: string}>}>}
1341
1352
  */
1342
1353
  export async function getActiveRepoWorkflows(owner, repo, verbose = false) {
1343
- try {
1344
- const { stdout } = await exec(`gh api "repos/${owner}/${repo}/actions/workflows" --paginate --slurp`);
1345
- const allWorkflows = JSON.parse(stdout.trim() || '[]')
1346
- .flatMap(page => page.workflows || [])
1347
- .filter(workflow => workflow.state === 'active')
1348
- .map(workflow => ({ id: workflow.id, name: workflow.name, state: workflow.state, path: workflow.path }));
1349
-
1350
- // GitHub Pages workflows only run after merge and never produce PR check-runs.
1351
- const workflows = allWorkflows.filter(wf => !wf.path.startsWith('dynamic/pages/'));
1352
-
1353
- if (verbose) {
1354
- console.log(`[VERBOSE] /merge: Found ${allWorkflows.length} active workflows in ${owner}/${repo} (${workflows.length} PR-relevant after filtering out GitHub Pages deployment workflows)`);
1355
- for (const wf of allWorkflows) {
1356
- const filtered = wf.path.startsWith('dynamic/pages/');
1357
- console.log(`[VERBOSE] /merge: - ${wf.name} (${wf.id}): ${wf.state}, path=${wf.path}${filtered ? ' [excluded: GitHub Pages deployment]' : ''}`);
1358
- }
1359
- }
1354
+ // Issue #1726: this function previously swallowed every error as "no workflows",
1355
+ // including GitHub API rate-limit responses. The /merge command then thought CI
1356
+ // was unconfigured and proceeded as if checks had passed — a hard failure mode
1357
+ // visible in the original case-study log where errors were thrown but the
1358
+ // process exited 0.
1359
+ //
1360
+ // Rate-limit errors are now retried inside the local exec() wrapper. After
1361
+ // retries are exhausted, the error MUST propagate so callers can decide
1362
+ // whether to abort or continue — never default to "no workflows".
1363
+ const { stdout } = await exec(`gh api "repos/${owner}/${repo}/actions/workflows" --paginate --slurp`);
1364
+ const allWorkflows = JSON.parse(stdout.trim() || '[]')
1365
+ .flatMap(page => page.workflows || [])
1366
+ .filter(workflow => workflow.state === 'active')
1367
+ .map(workflow => ({ id: workflow.id, name: workflow.name, state: workflow.state, path: workflow.path }));
1368
+
1369
+ // GitHub Pages workflows only run after merge and never produce PR check-runs.
1370
+ const workflows = allWorkflows.filter(wf => !wf.path.startsWith('dynamic/pages/'));
1360
1371
 
1361
- return {
1362
- count: workflows.length,
1363
- hasWorkflows: workflows.length > 0,
1364
- workflows,
1365
- };
1366
- } catch (error) {
1367
- if (verbose) {
1368
- console.log(`[VERBOSE] /merge: Error fetching workflows for ${owner}/${repo}: ${error.message}`);
1372
+ if (verbose) {
1373
+ console.log(`[VERBOSE] /merge: Found ${allWorkflows.length} active workflows in ${owner}/${repo} (${workflows.length} PR-relevant after filtering out GitHub Pages deployment workflows)`);
1374
+ for (const wf of allWorkflows) {
1375
+ const filtered = wf.path.startsWith('dynamic/pages/');
1376
+ console.log(`[VERBOSE] /merge: - ${wf.name} (${wf.id}): ${wf.state}, path=${wf.path}${filtered ? ' [excluded: GitHub Pages deployment]' : ''}`);
1369
1377
  }
1370
- // On error, assume no workflows (safer: avoids false positives in the no-CI case)
1371
- return {
1372
- count: 0,
1373
- hasWorkflows: false,
1374
- workflows: [],
1375
- };
1376
1378
  }
1379
+
1380
+ return {
1381
+ count: workflows.length,
1382
+ hasWorkflows: workflows.length > 0,
1383
+ workflows,
1384
+ };
1377
1385
  }
1378
1386
 
1379
1387
  // Issue #1690: Re-export CI signal helpers from separate module to keep this file under 1500 lines
@@ -0,0 +1,276 @@
1
+ #!/usr/bin/env node
2
+
3
+ /**
4
+ * GitHub API rate-limit detection and retry utilities.
5
+ *
6
+ * Issue #1726: Hosted runners hit GitHub's 5,000/hr core API quota and bubble
7
+ * the failure up as a generic 403/HTTP error. The wrappers in lib.mjs only
8
+ * recognise transient TCP/TLS faults; rate-limit responses fell through and
9
+ * crashed callers (or worse, were silently swallowed in the merge subsystem
10
+ * making it look like "no workflows / no checks" — see
11
+ * src/github-merge.lib.mjs:getActiveRepoWorkflows in the original log).
12
+ *
13
+ * The retry policy required by the issue:
14
+ * wait = (resetTimestamp - now) + bufferMs (10 min) + random(jitterMs) (0-5 min)
15
+ *
16
+ * `bufferMs` and `jitterMs` already exist in src/config.lib.mjs#limitReset
17
+ * (added in #1236 for Claude limit waits) so we re-use them rather than
18
+ * duplicate constants.
19
+ */
20
+ import { promisify } from 'node:util';
21
+ import { exec as execCb } from 'node:child_process';
22
+
23
+ import { limitReset, retryLimits } from './config.lib.mjs';
24
+
25
+ const exec = promisify(execCb);
26
+
27
+ const RATE_LIMIT_PATTERNS = ['api rate limit exceeded', 'rate limit exceeded', 'you have exceeded a secondary rate limit', 'secondary rate limit', 'abuse detection', 'was submitted too quickly'];
28
+
29
+ /**
30
+ * Pull every plausible string out of a thrown error/result so pattern matches
31
+ * survive whatever shape the upstream caller gave us (Error, exec result with
32
+ * stdout/stderr, command-stream result, plain string, etc.).
33
+ */
34
+ const collectErrorText = error => {
35
+ if (!error) return '';
36
+ if (typeof error === 'string') return error;
37
+ const parts = [];
38
+ if (typeof error.message === 'string') parts.push(error.message);
39
+ if (typeof error.stderr === 'string') parts.push(error.stderr);
40
+ else if (error.stderr && typeof error.stderr.toString === 'function') parts.push(error.stderr.toString());
41
+ if (typeof error.stdout === 'string') parts.push(error.stdout);
42
+ else if (error.stdout && typeof error.stdout.toString === 'function') parts.push(error.stdout.toString());
43
+ if (error.cause) parts.push(collectErrorText(error.cause));
44
+ return parts.join('\n');
45
+ };
46
+
47
+ /**
48
+ * Detect whether `error` represents a GitHub rate-limit response.
49
+ * Recognises both primary (5,000/hr) and secondary (abuse-detection) forms.
50
+ *
51
+ * @param {unknown} error
52
+ * @returns {boolean}
53
+ */
54
+ export const isRateLimitError = error => {
55
+ const text = collectErrorText(error).toLowerCase();
56
+ if (!text) return false;
57
+ return RATE_LIMIT_PATTERNS.some(pattern => text.includes(pattern));
58
+ };
59
+
60
+ /**
61
+ * Extract a `Date` for when the rate-limit window resets, in priority order:
62
+ * 1. `X-RateLimit-Reset` header value (Unix epoch seconds) embedded in the
63
+ * error text — `gh` prints headers when --include is used and graphql
64
+ * surfaces them in the error body.
65
+ * 2. `Retry-After` header (seconds from now).
66
+ * 3. None — caller falls back to a polled `gh api rate_limit` lookup.
67
+ *
68
+ * @param {unknown} error
69
+ * @returns {Date|null}
70
+ */
71
+ export const parseRateLimitReset = error => {
72
+ const text = collectErrorText(error);
73
+ if (!text) return null;
74
+
75
+ const resetMatch = text.match(/x-ratelimit-reset:\s*(\d+)/i);
76
+ if (resetMatch) {
77
+ const epochSeconds = Number(resetMatch[1]);
78
+ if (Number.isFinite(epochSeconds) && epochSeconds > 0) {
79
+ return new Date(epochSeconds * 1000);
80
+ }
81
+ }
82
+
83
+ const retryAfterMatch = text.match(/retry-after:\s*(\d+)/i);
84
+ if (retryAfterMatch) {
85
+ const seconds = Number(retryAfterMatch[1]);
86
+ if (Number.isFinite(seconds) && seconds >= 0) {
87
+ return new Date(Date.now() + seconds * 1000);
88
+ }
89
+ }
90
+
91
+ return null;
92
+ };
93
+
94
+ /**
95
+ * Ask `gh api rate_limit` directly when the error didn't carry a reset header.
96
+ * Returns the most-restrictive (soonest) reset time across the resources we
97
+ * touch (core, search, graphql) so we don't resume into a still-throttled
98
+ * bucket.
99
+ *
100
+ * @returns {Promise<Date|null>}
101
+ */
102
+ export const fetchNextRateLimitReset = async () => {
103
+ try {
104
+ // eslint-disable-next-line gh-rate-limit/no-direct-gh-exec -- this IS the rate-limit helper; calling itself recursively would loop.
105
+ const { stdout } = await exec('gh api rate_limit');
106
+ const data = JSON.parse(stdout);
107
+ const resources = data?.resources || {};
108
+ const candidates = [];
109
+ for (const key of ['core', 'graphql', 'search']) {
110
+ const r = resources[key];
111
+ if (r && Number.isFinite(r.reset) && r.remaining === 0) {
112
+ candidates.push(r.reset);
113
+ }
114
+ }
115
+ if (candidates.length === 0) return null;
116
+ const soonestEpoch = Math.min(...candidates);
117
+ return new Date(soonestEpoch * 1000);
118
+ } catch {
119
+ return null;
120
+ }
121
+ };
122
+
123
+ /**
124
+ * Compute the absolute wait deadline that satisfies issue #1726:
125
+ * reset + bufferMs (default 10 min) + random(0..jitterMs) (default 0-5 min)
126
+ *
127
+ * @param {Date|null} reset
128
+ * @returns {{ waitMs: number, deadline: Date, reset: Date|null, bufferMs: number, jitterMs: number }}
129
+ */
130
+ export const computeRateLimitWait = (reset, now = Date.now()) => {
131
+ const bufferMs = limitReset.bufferMs;
132
+ const jitterMs = Math.floor(Math.random() * (limitReset.jitterMs + 1));
133
+ const resetTime = reset instanceof Date ? reset.getTime() : null;
134
+ const baselineWait = resetTime && resetTime > now ? resetTime - now : 0;
135
+ const waitMs = baselineWait + bufferMs + jitterMs;
136
+ return {
137
+ waitMs,
138
+ deadline: new Date(now + waitMs),
139
+ reset: reset || null,
140
+ bufferMs,
141
+ jitterMs,
142
+ };
143
+ };
144
+
145
+ /**
146
+ * Sleep with optional periodic countdown notifications.
147
+ *
148
+ * @param {number} ms
149
+ * @param {(msg: string) => Promise<void>|void} [log]
150
+ */
151
+ const sleepWithCountdown = async (ms, log) => {
152
+ if (ms <= 0) return;
153
+ if (!log || ms <= 60_000) {
154
+ await new Promise(resolve => setTimeout(resolve, ms));
155
+ return;
156
+ }
157
+ let remaining = ms;
158
+ const timer = setInterval(() => {
159
+ remaining -= 60_000;
160
+ if (remaining > 0) {
161
+ const minutes = Math.round(remaining / 60_000);
162
+ Promise.resolve(log(`⏳ Rate-limit wait: ${minutes} min remaining...`)).catch(() => {});
163
+ }
164
+ }, 60_000);
165
+ try {
166
+ await new Promise(resolve => setTimeout(resolve, ms));
167
+ } finally {
168
+ clearInterval(timer);
169
+ }
170
+ };
171
+
172
+ /**
173
+ * Wrap `fn` so that GitHub rate-limit errors are converted into a sleep until
174
+ * (resetTime + bufferMs + jitterMs) followed by a retry. Non-rate-limit errors
175
+ * are rethrown immediately so we don't mask programming bugs or 404s.
176
+ *
177
+ * @template T
178
+ * @param {() => Promise<T>} fn
179
+ * @param {object} [options]
180
+ * @param {number} [options.maxAttempts] - hard cap on rate-limit retries (default `retryLimits.maxApiRetries`).
181
+ * @param {string} [options.label] - prefix for log messages.
182
+ * @param {(msg: string) => Promise<void>|void} [options.log] - logger. Defaults to console.warn.
183
+ * @returns {Promise<T>}
184
+ */
185
+ export const ghWithRateLimitRetry = async (fn, options = {}) => {
186
+ const maxAttempts = options.maxAttempts ?? retryLimits.maxApiRetries;
187
+ const label = options.label || 'gh';
188
+ const log = options.log || (msg => console.warn(msg));
189
+
190
+ let lastError;
191
+ for (let attempt = 1; attempt <= maxAttempts; attempt++) {
192
+ try {
193
+ return await fn();
194
+ } catch (error) {
195
+ lastError = error;
196
+ if (!isRateLimitError(error)) throw error;
197
+
198
+ if (attempt === maxAttempts) {
199
+ await Promise.resolve(log(`❌ ${label}: rate limit still active after ${attempt} attempts; giving up.`));
200
+ throw error;
201
+ }
202
+
203
+ const reset = parseRateLimitReset(error) || (await fetchNextRateLimitReset());
204
+ const { waitMs, deadline, bufferMs, jitterMs } = computeRateLimitWait(reset);
205
+ const waitMinutes = Math.round(waitMs / 60_000);
206
+ const resetSummary = reset ? `reset at ${reset.toISOString()}` : 'reset time unknown (using buffer + jitter only)';
207
+ await Promise.resolve(log(`⏳ ${label}: GitHub API rate limit hit (attempt ${attempt}/${maxAttempts}). Waiting ${waitMinutes} min (${resetSummary}; buffer ${Math.round(bufferMs / 60_000)} min + jitter ${Math.round(jitterMs / 1000)}s) until ${deadline.toISOString()}.`));
208
+ await sleepWithCountdown(waitMs, log);
209
+ }
210
+ }
211
+ // Unreachable — loop either returns or throws.
212
+ throw lastError;
213
+ };
214
+
215
+ /**
216
+ * Convenience wrapper around child_process.exec that retries on rate-limit
217
+ * errors. Use it for callers that build a `gh` command string and want the
218
+ * existing exec-based ergonomics.
219
+ *
220
+ * @param {string} command
221
+ * @param {object} [options] - forwarded to ghWithRateLimitRetry, plus `execOptions`.
222
+ * @returns {Promise<{stdout: string, stderr: string}>}
223
+ */
224
+ export const execGhWithRetry = async (command, options = {}) => {
225
+ const { execOptions, ...retryOptions } = options;
226
+ return ghWithRateLimitRetry(() => exec(command, execOptions), {
227
+ label: retryOptions.label || `gh exec (${command.split(/\s+/).slice(0, 3).join(' ')})`,
228
+ ...retryOptions,
229
+ });
230
+ };
231
+
232
+ /**
233
+ * Wrap a command-stream `$` tagged-template so every `$gh ...` it issues is
234
+ * retried on rate-limit errors. Returns a callable that delegates to the
235
+ * underlying `$` for non-`gh` commands and through `ghWithRateLimitRetry` for
236
+ * `gh ...` commands.
237
+ *
238
+ * Usage at the top of a file:
239
+ * const { $: rawDollar } = await use('command-stream');
240
+ * const $ = wrapDollarWithGhRetry(rawDollar);
241
+ *
242
+ * @template T
243
+ * @param {(strings: TemplateStringsArray, ...values: unknown[]) => Promise<T>} dollar
244
+ * @param {object} [options] - forwarded to ghWithRateLimitRetry per call.
245
+ * @returns {(strings: TemplateStringsArray, ...values: unknown[]) => Promise<T>}
246
+ */
247
+ export const wrapDollarWithGhRetry = (dollar, options = {}) => {
248
+ const wrapped = (strings, ...values) => {
249
+ // Reconstruct the literal command for inspection (sufficient — leading
250
+ // `gh ` is what we care about).
251
+ let preview = '';
252
+ for (let i = 0; i < strings.length; i++) {
253
+ preview += strings[i];
254
+ if (i < values.length) preview += String(values[i] ?? '');
255
+ }
256
+ const isGh = /^\s*gh(?:\s|$)/.test(preview);
257
+ if (!isGh) return dollar(strings, ...values);
258
+ return ghWithRateLimitRetry(() => dollar(strings, ...values), {
259
+ label: `$gh (${preview.trim().split(/\s+/).slice(0, 3).join(' ')})`,
260
+ ...options,
261
+ });
262
+ };
263
+ // Preserve a reference to the underlying $ for consumers that need it.
264
+ wrapped.raw = dollar;
265
+ return wrapped;
266
+ };
267
+
268
+ export default {
269
+ isRateLimitError,
270
+ parseRateLimitReset,
271
+ fetchNextRateLimitReset,
272
+ computeRateLimitWait,
273
+ ghWithRateLimitRetry,
274
+ execGhWithRetry,
275
+ wrapDollarWithGhRetry,
276
+ };
@@ -11,6 +11,7 @@ if (typeof globalThis.use === 'undefined') {
11
11
  import { log, cleanErrorMessage } from './lib.mjs';
12
12
  import { githubLimits, timeouts } from './config.lib.mjs';
13
13
 
14
+ import { wrapDollarWithGhRetry as _wrapDollarWithGhRetry } from './github-rate-limit.lib.mjs'; // rate-limit marker (#1726): gh API calls flow through $ wrapped by caller
14
15
  /**
15
16
  * Check if a PR body/title indicates it fixes/closes/resolves a specific issue number
16
17
  * GitHub auto-closes issues when PR body contains keywords like "fixes #123", "closes #123", "resolves #123"