@oss-autopilot/core 3.5.0 → 3.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -7,6 +7,7 @@
7
7
  * orchestration layer that wires up the phases and handles I/O.
8
8
  */
9
9
  import { type DailyDigest, type CommentedIssue, type PRCheckFailure, type RepoGroup } from '../core/index.js';
10
+ import { type StrategyResult } from '../core/strategy.js';
10
11
  import { type DailyOutput, type DailyWarning, type CapacityAssessment, type ActionableIssue, type ActionMenu } from '../formatters/json.js';
11
12
  export { applyStatusOverrides, computeRepoSignals, groupPRsByRepo, assessCapacity, collectActionableIssues, computeActionMenu, toShelvedPRRef, formatBriefSummary, formatSummary, printDigest, CRITICAL_STATUSES, } from '../core/index.js';
12
13
  import { buildStarFilter } from '../core/daily-logic.js';
@@ -28,6 +29,13 @@ export interface DailyCheckResult {
28
29
  failures: PRCheckFailure[];
29
30
  /** Non-fatal warnings from ancillary pipeline phases — see #1042. */
30
31
  warnings: DailyWarning[];
32
+ /**
33
+ * Periodic strategy snapshot (#1270). Set when the cadence trigger
34
+ * fires AND the user has crossed `STRATEGY_MIN_PRS`. The action-menu
35
+ * renderer in `commands/oss.md` reads this; absent or null on runs
36
+ * where the gate stays silent.
37
+ */
38
+ strategySummary?: StrategyResult | null;
31
39
  }
32
40
  /**
33
41
  * Convert a full DailyCheckResult to the compact DailyOutput for JSON serialization (#287).
@@ -8,6 +8,7 @@
8
8
  */
9
9
  import { getStateManager, PRMonitor, IssueConversationMonitor, requireGitHubToken, CRITICAL_STATUSES, applyStatusOverrides, computeRepoSignals, groupPRsByRepo, assessCapacity, collectActionableIssues, computeActionMenu, toShelvedPRRef, formatBriefSummary, formatSummary, } from '../core/index.js';
10
10
  import { errorMessage, isRateLimitOrAuthError } from '../core/errors.js';
11
+ import { computeStrategy, shouldComputeStrategy } from '../core/strategy.js';
11
12
  import { warn } from '../core/logger.js';
12
13
  import { emptyPRCountsResult } from '../core/github-stats.js';
13
14
  import { createAutopilotScout } from './scout-bridge.js';
@@ -421,6 +422,24 @@ function generateDigestOutput(digest, activePRs, shelvedPRs, commentedIssues, fa
421
422
  const briefSummary = formatBriefSummary(digest, actionableIssues.length, issueResponses.length);
422
423
  const actionMenu = computeActionMenu(actionableIssues, capacity, filteredCommentedIssues);
423
424
  const repoGroups = groupPRsByRepo(activePRs);
425
+ // Periodic strategy snapshot (#1270 Step 2). Cadence-gated to fire every
426
+ // 30 days OR after 5+ PRs merge since the last snapshot, whichever comes
427
+ // first. Below STRATEGY_MIN_PRS the gate stays silent. Compute failures
428
+ // are non-fatal — the daily run continues and the snapshot is omitted.
429
+ let strategySummary;
430
+ try {
431
+ const state = stateManager.getState();
432
+ const nowIso = new Date().toISOString();
433
+ if (shouldComputeStrategy(state, nowIso)) {
434
+ strategySummary = computeStrategy(state);
435
+ if (strategySummary) {
436
+ stateManager.setLastStrategyAt(nowIso);
437
+ }
438
+ }
439
+ }
440
+ catch (error) {
441
+ recordWarning(warnings, 'analytics', 'compute strategy snapshot', error);
442
+ }
424
443
  return {
425
444
  digest,
426
445
  capacity,
@@ -432,6 +451,7 @@ function generateDigestOutput(digest, activePRs, shelvedPRs, commentedIssues, fa
432
451
  repoGroups,
433
452
  failures,
434
453
  warnings,
454
+ strategySummary,
435
455
  };
436
456
  }
437
457
  // ---------------------------------------------------------------------------
@@ -457,6 +477,7 @@ export function toDailyOutput(result) {
457
477
  repoGroups: compactRepoGroups(result.repoGroups),
458
478
  failures: result.failures,
459
479
  warnings: result.warnings,
480
+ strategySummary: result.strategySummary,
460
481
  };
461
482
  }
462
483
  /**
@@ -75,6 +75,8 @@ export { runStateUnlink } from './state-cmd.js';
75
75
  export { runParseList, pruneIssueList } from './parse-list.js';
76
76
  /** Move an issue between Pursue / Maybe / Skip sections of a curated list (#1107). */
77
77
  export { runListMoveTier, moveIssueToTier, type Tier, type ListMoveTierOptions, type ListMoveTierOutput, } from './list-move-tier.js';
78
+ /** Mark an issue line in a curated list as done with strikethrough + Done sub-bullet (#1299). */
79
+ export { runMarkIssueListItemDone, markIssueAsDone, type MarkDoneOptions, type MarkDoneOutput, } from './list-mark-done.js';
78
80
  /** Check if new files are properly referenced/integrated. */
79
81
  export { runCheckIntegration } from './check-integration.js';
80
82
  /** System-health diagnostic — verifies tokens, bundle, state, scout, rate limit. */
@@ -82,6 +82,8 @@ export { runStateUnlink } from './state-cmd.js';
82
82
  export { runParseList, pruneIssueList } from './parse-list.js';
83
83
  /** Move an issue between Pursue / Maybe / Skip sections of a curated list (#1107). */
84
84
  export { runListMoveTier, moveIssueToTier, } from './list-move-tier.js';
85
+ /** Mark an issue line in a curated list as done with strikethrough + Done sub-bullet (#1299). */
86
+ export { runMarkIssueListItemDone, markIssueAsDone, } from './list-mark-done.js';
85
87
  /** Check if new files are properly referenced/integrated. */
86
88
  export { runCheckIntegration } from './check-integration.js';
87
89
  /** System-health diagnostic — verifies tokens, bundle, state, scout, rate limit. */
@@ -115,11 +115,33 @@ export function detectIssueList() {
115
115
  // Matches the sibling warn at line 64 for `issueListPath` read failures.
116
116
  warn('startup', `Could not read skippedIssuesPath from state: ${errorMessage(err)}`);
117
117
  }
118
- // Probe default path: same directory as issue list, named skipped-issues.md
118
+ // Probe default path: same directory as issue list, named skipped-issues.md.
119
+ // When found, also persist to state.config so downstream commands
120
+ // (`skip-add`, `scout search`'s skip-list filter) read the same path
121
+ // instead of silently no-opping with "No skipped-issues path configured"
122
+ // (#1330). Without persistence, the auto-detect printed the path on
123
+ // every startup but nothing else honored it — search would re-surface
124
+ // already-skipped candidates round after round.
119
125
  if (!skippedIssuesPath && issueListPath) {
120
126
  const defaultSkipPath = path.join(path.dirname(issueListPath), 'skipped-issues.md');
121
127
  if (fs.existsSync(defaultSkipPath)) {
122
128
  skippedIssuesPath = defaultSkipPath;
129
+ try {
130
+ const stateManager = getStateManager();
131
+ // Only write when config actually doesn't have one — re-running
132
+ // startup shouldn't trigger an autoSave on every run if the
133
+ // value is already there.
134
+ const current = stateManager.getState().config.skippedIssuesPath;
135
+ if (!current) {
136
+ stateManager.updateConfig({ skippedIssuesPath: defaultSkipPath });
137
+ }
138
+ }
139
+ catch (err) {
140
+ // Persistence is best-effort — startup still surfaces the path
141
+ // in its return value so the current run benefits, but the next
142
+ // run won't. Log so the failure is debuggable.
143
+ warn('startup', `Could not persist auto-detected skippedIssuesPath: ${errorMessage(err)}`);
144
+ }
123
145
  }
124
146
  }
125
147
  return { path: issueListPath, source, availableCount, completedCount, skippedIssuesPath };
@@ -1,26 +1,35 @@
1
1
  /**
2
- * Anti-LLM policy scan (#108, #911, #979).
2
+ * AI/LLM policy scans (#108, #911, #979, #1269).
3
3
  *
4
- * Scan concatenated repo docs (CONTRIBUTING.md, CODE_OF_CONDUCT.md,
5
- * README) for language that indicates the project does not accept
6
- * AI/LLM-generated contributions. Previously described as a keyword
7
- * table in prose in agents/issue-scout.md.
4
+ * Two complementary scanners over the same input (concatenated repo docs
5
+ * CONTRIBUTING.md, CODE_OF_CONDUCT.md, README, etc.):
6
+ *
7
+ * 1. {@link scanForAntiLLMPolicy} detects language indicating the
8
+ * project does NOT accept AI/LLM-generated contributions. Used by
9
+ * issue-scout / repo-evaluator to filter out repos where landing a
10
+ * PR is impossible.
11
+ *
12
+ * 2. {@link scanAIDisclosureRequirement} — detects the opposite:
13
+ * language requiring or inviting AI disclosure ("must disclose AI
14
+ * use", "credit AI tools"). Used by pr-compliance-checker to decide
15
+ * whether AI attribution should be flagged as a violation, encouraged,
16
+ * or required (#1269 Improvement C).
8
17
  *
9
18
  * The long-term home for this logic is `@oss-scout/core`, where the
10
19
  * relevant files are already fetched during vetting. Keeping it here
11
20
  * for now lets the agent invoke it directly and gives scout a
12
21
  * reference implementation + test fixtures to adopt. See #979.
13
22
  *
14
- * Precision matters more than recall. False positives (flagging a
15
- * project that actually welcomes AI help) silently shrink the user's
16
- * contribution surface without recourse. We only match on phrases
17
- * that combine a rejection keyword (no / reject / will be closed /
18
- * don't accept) with an AI/LLM noun.
23
+ * Precision matters more than recall in both directions. A false
24
+ * positive on the anti-LLM side silently shrinks the user's
25
+ * contribution surface; a false positive on the disclosure side tells
26
+ * the user to add attribution that the maintainer didn't actually ask
27
+ * for. Patterns require an explicit verb-phrase + AI/LLM-noun pairing.
19
28
  *
20
29
  * **User-facing reference:** `docs/anti-llm-policy.md` — explains the
21
- * three categories, example phrases per category, and the false-positive-
22
- * resistance design (why "AI division will be closed at end of Q4"
23
- * does NOT match).
30
+ * three anti-LLM categories, example phrases per category, and the
31
+ * false-positive-resistance design (why "AI division will be closed at
32
+ * end of Q4" does NOT match).
24
33
  */
25
34
  export type AntiLLMCategory = 'explicit_ban' | 'tool_ban' | 'reject_framing';
26
35
  export interface AntiLLMMatch {
@@ -35,3 +44,23 @@ export interface AntiLLMScanResult {
35
44
  matches: AntiLLMMatch[];
36
45
  }
37
46
  export declare function scanForAntiLLMPolicy(text: string): AntiLLMScanResult;
47
+ export type AIDisclosureCategory = 'mandatory' | 'recommended' | 'invited';
48
+ export interface AIDisclosureMatch {
49
+ category: AIDisclosureCategory;
50
+ phrase: string;
51
+ excerpt: string;
52
+ }
53
+ export interface AIDisclosureScanResult {
54
+ matched: boolean;
55
+ matches: AIDisclosureMatch[];
56
+ }
57
+ /**
58
+ * Scan repo docs for language requiring or inviting AI disclosure (#1269).
59
+ *
60
+ * Mirrors {@link scanForAntiLLMPolicy}'s shape: same input contract,
61
+ * same false-positive-resistance discipline, same per-match excerpt for
62
+ * surfacing to the user. Categories are ordered by binding strength —
63
+ * callers that want to avoid false-positive flagging on weak invitations
64
+ * can filter to 'mandatory' / 'recommended' only.
65
+ */
66
+ export declare function scanAIDisclosureRequirement(text: string): AIDisclosureScanResult;
@@ -1,26 +1,35 @@
1
1
  /**
2
- * Anti-LLM policy scan (#108, #911, #979).
2
+ * AI/LLM policy scans (#108, #911, #979, #1269).
3
3
  *
4
- * Scan concatenated repo docs (CONTRIBUTING.md, CODE_OF_CONDUCT.md,
5
- * README) for language that indicates the project does not accept
6
- * AI/LLM-generated contributions. Previously described as a keyword
7
- * table in prose in agents/issue-scout.md.
4
+ * Two complementary scanners over the same input (concatenated repo docs
5
+ * CONTRIBUTING.md, CODE_OF_CONDUCT.md, README, etc.):
6
+ *
7
+ * 1. {@link scanForAntiLLMPolicy} detects language indicating the
8
+ * project does NOT accept AI/LLM-generated contributions. Used by
9
+ * issue-scout / repo-evaluator to filter out repos where landing a
10
+ * PR is impossible.
11
+ *
12
+ * 2. {@link scanAIDisclosureRequirement} — detects the opposite:
13
+ * language requiring or inviting AI disclosure ("must disclose AI
14
+ * use", "credit AI tools"). Used by pr-compliance-checker to decide
15
+ * whether AI attribution should be flagged as a violation, encouraged,
16
+ * or required (#1269 Improvement C).
8
17
  *
9
18
  * The long-term home for this logic is `@oss-scout/core`, where the
10
19
  * relevant files are already fetched during vetting. Keeping it here
11
20
  * for now lets the agent invoke it directly and gives scout a
12
21
  * reference implementation + test fixtures to adopt. See #979.
13
22
  *
14
- * Precision matters more than recall. False positives (flagging a
15
- * project that actually welcomes AI help) silently shrink the user's
16
- * contribution surface without recourse. We only match on phrases
17
- * that combine a rejection keyword (no / reject / will be closed /
18
- * don't accept) with an AI/LLM noun.
23
+ * Precision matters more than recall in both directions. A false
24
+ * positive on the anti-LLM side silently shrinks the user's
25
+ * contribution surface; a false positive on the disclosure side tells
26
+ * the user to add attribution that the maintainer didn't actually ask
27
+ * for. Patterns require an explicit verb-phrase + AI/LLM-noun pairing.
19
28
  *
20
29
  * **User-facing reference:** `docs/anti-llm-policy.md` — explains the
21
- * three categories, example phrases per category, and the false-positive-
22
- * resistance design (why "AI division will be closed at end of Q4"
23
- * does NOT match).
30
+ * three anti-LLM categories, example phrases per category, and the
31
+ * false-positive-resistance design (why "AI division will be closed at
32
+ * end of Q4" does NOT match).
24
33
  */
25
34
  const PATTERNS = [
26
35
  // Explicit "no X" bans against AI/LLM nouns.
@@ -104,3 +113,83 @@ export function scanForAntiLLMPolicy(text) {
104
113
  }
105
114
  return { matched: matches.length > 0, matches };
106
115
  }
116
+ const DISCLOSURE_PATTERNS = [
117
+ // Mandatory: imperative verbs binding to an AI/LLM disclosure noun.
118
+ // Match shape: <verb-phrase> <AI noun> <disclosure noun>?
119
+ // Verb phrases: "must disclose", "required to disclose", "are required
120
+ // to indicate", "you must indicate". The optional disclosure-action
121
+ // noun catches "must disclose use of AI" without requiring a separate
122
+ // pattern. The AI noun can be plain "ai/llm" or a tool name.
123
+ {
124
+ category: 'mandatory',
125
+ regex: /\b(must|required\s+to|are\s+required\s+to)\s+(disclose|indicate|declare|note|state|mention|label|tag|credit|acknowledge)\s+(?:[a-z\s'-]{0,40}?\b)?(ai|llm|generative\s+ai|copilot|chatgpt|claude|cursor)\b/i,
126
+ },
127
+ // "PRs using AI must be labeled / tagged / disclosed"
128
+ {
129
+ category: 'mandatory',
130
+ regex: /\b(prs?|contributions?|commits?|code)\s+(using|generated\s+by|written\s+by|made\s+with)\s+(ai|llm|copilot|chatgpt|claude|cursor)\s+(?:tools?\s+)?(must\s+be|need\s+to\s+be|are\s+to\s+be)\s+(labeled|tagged|disclosed|marked|flagged|noted)\b/i,
131
+ },
132
+ // "Disclosure of AI assistance is required" — passive form
133
+ {
134
+ category: 'mandatory',
135
+ regex: /\b(disclosure|disclosing|labeling|labelling|tagging|crediting)\s+(of\s+)?(ai|llm|copilot|chatgpt|claude|cursor)(\s+(use|usage|assistance|tools?|contributions?|generated\s+code))?\s+(is\s+required|is\s+mandatory|must\s+be\s+included)\b/i,
136
+ },
137
+ // Recommended: softer "should" or "we ask" framing. Same noun anchors.
138
+ // Verb forms allow optional gerund (-ing) endings since "we strongly
139
+ // encourage acknowledging AI" reads naturally even though "acknowledge"
140
+ // is the bare verb in the imperative list.
141
+ {
142
+ category: 'recommended',
143
+ regex: /\b(should|we\s+ask\s+you\s+to|we\s+ask\s+that\s+you|we\s+(?:strongly\s+)?(?:encourage|recommend))\s+(disclose|disclosing|indicate|indicating|declare|declaring|note|noting|mention|mentioning|label|labeling|labelling|tag|tagging|credit|crediting|acknowledge|acknowledging)\s+(?:[a-z\s'-]{0,40}?\b)?(ai|llm|generative\s+ai|copilot|chatgpt|claude|cursor)\b/i,
144
+ },
145
+ // "credit AI tools you used" — direct imperative without "must/should"
146
+ {
147
+ category: 'recommended',
148
+ regex: /\b(credit|acknowledge|attribute)\s+(ai|llm|generative\s+ai)\s+(tools?|assistants?|use|usage|assistance)\b/i,
149
+ },
150
+ // Invited: permissive framing. Lower confidence.
151
+ // The "please" branch is dropped intentionally — bare "please mention X"
152
+ // doesn't carry enough policy weight on its own, and including it as
153
+ // `please\s+(?:feel\s+free\s+to)?\s+` introduces super-linear backtracking
154
+ // (regexp/no-super-linear-backtracking) because of the ambiguous \s+
155
+ // boundary on either side of the optional group.
156
+ {
157
+ category: 'invited',
158
+ regex: /\b(feel\s+free\s+to|please\s+feel\s+free\s+to|you('re|\s+are)\s+welcome\s+to|welcome\s+to)\s+(disclose|mention|note|indicate|label|tag|credit)\s+(?:[a-z\s'-]{0,40}?\b)?(ai|llm|generative\s+ai|copilot|chatgpt|claude|cursor)\b/i,
159
+ },
160
+ ];
161
+ /**
162
+ * Scan repo docs for language requiring or inviting AI disclosure (#1269).
163
+ *
164
+ * Mirrors {@link scanForAntiLLMPolicy}'s shape: same input contract,
165
+ * same false-positive-resistance discipline, same per-match excerpt for
166
+ * surfacing to the user. Categories are ordered by binding strength —
167
+ * callers that want to avoid false-positive flagging on weak invitations
168
+ * can filter to 'mandatory' / 'recommended' only.
169
+ */
170
+ export function scanAIDisclosureRequirement(text) {
171
+ if (typeof text !== 'string') {
172
+ throw new TypeError(`scanAIDisclosureRequirement: expected string, received ${typeof text}`);
173
+ }
174
+ if (text === '')
175
+ return { matched: false, matches: [] };
176
+ const normalized = normalizeText(text);
177
+ const seenLabels = new Set();
178
+ const matches = [];
179
+ for (const pattern of DISCLOSURE_PATTERNS) {
180
+ const hit = normalized.match(pattern.regex);
181
+ if (!hit || hit.index === undefined)
182
+ continue;
183
+ const phrase = hit[0];
184
+ const key = `${pattern.category}:${phrase.toLowerCase()}`;
185
+ if (seenLabels.has(key))
186
+ continue;
187
+ seenLabels.add(key);
188
+ matches.push({
189
+ category: pattern.category,
190
+ phrase,
191
+ excerpt: makeExcerpt(normalized, hit.index, phrase.length),
192
+ });
193
+ }
194
+ return { matched: matches.length > 0, matches };
195
+ }
@@ -3,7 +3,7 @@
3
3
  * Extracted from PRMonitor to isolate CI-related logic (#263).
4
4
  */
5
5
  import type { Octokit } from '@octokit/rest';
6
- import { CIFailureCategory, ClassifiedCheck, CIStatusResult } from './types.js';
6
+ import { CIFailureCategory, ClassifiedCheck, CIStatusResult, CIStatus, CIStatusCategorization } from './types.js';
7
7
  /**
8
8
  * Classify a failing CI check as actionable, fork_limitation, auth_gate, or infrastructure (#81, #145, #743).
9
9
  * Default is 'actionable' — only known patterns get reclassified.
@@ -16,6 +16,37 @@ export declare function classifyCICheck(name: string, description?: string, conc
16
16
  * Accepts optional conclusion data to detect infrastructure failures and auth gates.
17
17
  */
18
18
  export declare function classifyFailingChecks(failingCheckNames: string[], conclusions?: Map<string, string>): ClassifiedCheck[];
19
+ /**
20
+ * Map an aggregate `ciStatus + failingCheckNames + classifiedChecks` triple
21
+ * into one of five mutually exclusive overall states (#1272). The 5-row
22
+ * truth table previously lived as prose in `agents/pr-health-checker.md`;
23
+ * extracting it lets that agent (and any future consumer — dashboard,
24
+ * MCP, sibling agents that adopt the field) read a single typed value
25
+ * instead of re-deriving from `ciStatus + failingCheckNames + classifiedChecks`.
26
+ *
27
+ * Decision order (each branch is exclusive):
28
+ * 1. `passing` → `all_passing`
29
+ * 2. `pending` → `blocked` (awaiting trigger / completion)
30
+ * 3. `failing` + actionable → `failing` (real test/lint/build issue)
31
+ * 4. `failing` + only infrastructure → `blocked` (cancelled/timed-out runner — needs rerun)
32
+ * 5. `failing` + only fork/auth → `fork_limitation` (informational)
33
+ * 6. `failing` + zero classified → `failing` w/ "details unavailable" summary
34
+ * 7. `unknown` → `not_running`
35
+ *
36
+ * Why infrastructure routes to `blocked` and not `fork_limitation`:
37
+ * a cancelled or timed-out runner is genuinely worth re-running; calling
38
+ * it "informational" would tell the agent to ignore something the user
39
+ * can fix with a rerun-request.
40
+ *
41
+ * The `summary` is short (≤180 char even for 10+ failing checks) and
42
+ * suitable for inline display. `action` is a hint, not enforcement —
43
+ * agents may still escalate based on other PR context.
44
+ */
45
+ export declare function categorizeCIStatus(input: {
46
+ ciStatus: CIStatus;
47
+ failingCheckNames: string[];
48
+ classifiedChecks: ClassifiedCheck[];
49
+ }): CIStatusCategorization;
19
50
  /**
20
51
  * Analyze check runs (GitHub Actions, etc.) and categorize them.
21
52
  * Returns flags for failing/pending/success and lists of failing check names + conclusions.
@@ -92,6 +92,98 @@ export function classifyFailingChecks(failingCheckNames, conclusions) {
92
92
  };
93
93
  });
94
94
  }
95
/**
 * Map an aggregate `ciStatus + failingCheckNames + classifiedChecks` triple
 * into one of five mutually exclusive overall states (#1272). The truth table
 * previously lived as prose in `agents/pr-health-checker.md`; extracting it
 * lets that agent (and any future consumer — dashboard, MCP, sibling agents)
 * read a single typed value instead of re-deriving it from the raw fields.
 *
 * Decision order (each branch is exclusive):
 * 1. `passing` → `all_passing`
 * 2. `pending` → `blocked` (awaiting trigger / completion)
 * 3. `failing` + actionable → `failing` (real test/lint/build issue)
 * 4. `failing` + zero classified → `failing` w/ "details unavailable" summary
 * 5. `failing` + any infrastructure → `blocked` (cancelled/timed-out runner — needs rerun)
 * 6. `failing` + only fork/auth → `fork_limitation` (informational)
 * 7. `unknown` → `not_running`
 *
 * Why infrastructure routes to `blocked` and not `fork_limitation`: a
 * cancelled or timed-out runner is genuinely worth re-running; calling it
 * "informational" would tell the agent to ignore something the user can fix
 * with a rerun-request.
 *
 * The `summary` stays short (≤180 chars even for 10+ failing checks) and is
 * suitable for inline display. `action` is a hint, not enforcement — agents
 * may still escalate based on other PR context.
 *
 * @param {{ciStatus: string, failingCheckNames: string[], classifiedChecks: Array<{name: string, category: string}>}} input
 * @returns {{category: string, summary: string, action: string}}
 */
export function categorizeCIStatus(input) {
    const { ciStatus, failingCheckNames, classifiedChecks } = input;
    switch (ciStatus) {
        case 'passing':
            return { category: 'all_passing', summary: 'All checks passing', action: 'none' };
        case 'pending': {
            // `mergeStatuses` currently sets `failingCheckNames: []` on
            // pending, so the count branch is defensive — kept so a future
            // caller that forwards pending names doesn't silently drop them.
            const pendingCount = failingCheckNames.length;
            return {
                category: 'blocked',
                summary: pendingCount > 0 ? `${pendingCount} pending check(s); CI run incomplete` : 'CI checks pending',
                action: 'request_rerun',
            };
        }
        case 'failing': {
            const actionable = classifiedChecks.filter((check) => check.category === 'actionable');
            if (actionable.length > 0) {
                // Preview at most three names; fold the rest into a count.
                const names = actionable
                    .slice(0, 3)
                    .map((check) => check.name)
                    .join(', ');
                const overflow = actionable.length > 3 ? ` (+${actionable.length - 3} more)` : '';
                return {
                    category: 'failing',
                    summary: `${actionable.length} actionable failure(s): ${names}${overflow}`,
                    action: 'investigate',
                };
            }
            // No actionable failures. Three sub-cases:
            // (a) Zero classified checks: status came from the legacy
            //     combined-status endpoint without per-check detail. Be honest
            //     about the missing detail rather than asserting "fork
            //     limitations" the caller can't verify.
            if (classifiedChecks.length === 0) {
                return {
                    category: 'failing',
                    summary: 'CI reported failure but no check details available',
                    action: 'investigate',
                };
            }
            const nonActionableTotal = classifiedChecks.length;
            // (b) At least one infrastructure failure (cancelled / timed-out /
            //     dependency-install). Re-running often fixes it, so surface
            //     as `blocked` with `request_rerun` rather than mislabeling
            //     as "fork limits / auth gates."
            if (classifiedChecks.some((check) => check.category === 'infrastructure')) {
                return {
                    category: 'blocked',
                    summary: `${nonActionableTotal} non-actionable failure(s) including infrastructure issues; rerun may resolve`,
                    action: 'request_rerun',
                };
            }
            // (c) Only fork-limitation / auth-gate failures — informational.
            return {
                category: 'fork_limitation',
                summary: `${nonActionableTotal} non-actionable failure(s) (fork limits / auth gates)`,
                action: 'informational',
            };
        }
        default:
            // ciStatus === 'unknown' — no checks reported, or status couldn't
            // be determined. Treat both as not_running so callers don't have
            // to distinguish the rare indeterminate case from the common
            // "no CI configured" case.
            return { category: 'not_running', summary: 'No CI checks reported', action: 'check_workflows' };
    }
}
95
187
  /**
96
188
  * Analyze check runs (GitHub Actions, etc.) and categorize them.
97
189
  * Returns flags for failing/pending/success and lists of failing check names + conclusions.
@@ -85,6 +85,25 @@ export declare function errorMessage(e: unknown): string;
85
85
  export declare function getHttpStatusCode(error: unknown): number | undefined;
86
86
  /** Check if an error is a GitHub rate limit error (429 or rate-limit 403). */
87
87
  export declare function isRateLimitError(error: unknown): boolean;
88
+ /**
89
+ * Check if an error is GitHub's "users do not exist" Search-API validation
90
+ * failure (HTTP 422 with `resource: 'Search', code: 'invalid'` and a message
91
+ * indicating the user couldn't be resolved). Returned when the Search API
92
+ * can't resolve the user named in an `author:`/`user:` qualifier — the
93
+ * typical cause is a stale or mis-typed `githubUsername` in
94
+ * `~/.oss-autopilot/state.json`.
95
+ *
96
+ * Surfaced as a generic "Validation Failed" string by Octokit, which gives
97
+ * the user no actionable signal. Callers wrap the search and rethrow this
98
+ * as a {@link ConfigurationError} so the CLI prints the configured username
99
+ * and points at `/setup-oss`.
100
+ *
101
+ * The message-text gate is load-bearing: GitHub returns the same
102
+ * `resource`/`code` pair for other Search 422s (query too long, too many
103
+ * ORs). Without the gate, those would silently rewrite to "your configured
104
+ * username is wrong," which is actively misleading.
105
+ */
106
+ export declare function isInvalidUserSearchError(err: unknown): boolean;
88
107
  /** Return true for errors that should propagate (not degrade gracefully): rate limits, auth failures, abuse detection. */
89
108
  export declare function isRateLimitOrAuthError(err: unknown): boolean;
90
109
  /**
@@ -142,6 +142,60 @@ export function isRateLimitError(error) {
142
142
  }
143
143
  return false;
144
144
  }
145
/**
 * Match-text used to discriminate the user-resolution failure from sibling
 * `resource: 'Search', code: 'invalid'` 422s (query-too-long,
 * too-many-OR-operators, malformed qualifier). Both the structured and the
 * fallback paths gate on this pattern so the matcher's name remains accurate
 * if a future caller uses a different Search query.
 */
const USER_NOT_FOUND_SEARCH_MESSAGE = /users.*do not exist|cannot be searched/i;
/**
 * Check if an error is GitHub's "users do not exist" Search-API validation
 * failure (HTTP 422 with `resource: 'Search', code: 'invalid'` and a message
 * indicating the user couldn't be resolved). Returned when the Search API
 * can't resolve the user named in an `author:`/`user:` qualifier — the
 * typical cause is a stale or mis-typed `githubUsername` in
 * `~/.oss-autopilot/state.json`.
 *
 * Surfaced as a generic "Validation Failed" string by Octokit, which gives
 * the user no actionable signal. Callers wrap the search and rethrow this
 * as a {@link ConfigurationError} so the CLI prints the configured username
 * and points at `/setup-oss`.
 *
 * The message-text gate is load-bearing: GitHub returns the same
 * `resource`/`code` pair for other Search 422s (query too long, too many
 * ORs). Without the gate, those would silently rewrite to "your configured
 * username is wrong," which is actively misleading.
 *
 * @param {unknown} err Anything thrown by an Octokit search call.
 * @returns {boolean}
 */
export function isInvalidUserSearchError(err) {
    if (getHttpStatusCode(err) !== 422) {
        return false;
    }
    const responseData = err?.response?.data;
    const structuredErrors = responseData && typeof responseData === 'object' ? responseData.errors : undefined;
    if (!Array.isArray(structuredErrors)) {
        // Fallback for serialized errors that lost the structured
        // `response.data` (e.g. messages re-thrown across boundaries). The
        // Search API's own copy is stable enough to match against.
        return USER_NOT_FOUND_SEARCH_MESSAGE.test(errorMessage(err));
    }
    for (const candidate of structuredErrors) {
        if (!candidate || typeof candidate !== 'object') {
            continue;
        }
        if (candidate.resource !== 'Search' || candidate.code !== 'invalid') {
            continue;
        }
        // The Search API includes a per-error `message` for this case. When
        // present, gate on it to avoid matching sibling validation failures
        // that share the resource/code pair. When absent, fall back to the
        // top-level message — some serializations drop the per-entry message
        // but keep it on the response.
        const haystack = typeof candidate.message === 'string' ? candidate.message : errorMessage(err);
        if (USER_NOT_FOUND_SEARCH_MESSAGE.test(haystack)) {
            return true;
        }
    }
    return false;
}
145
199
  /** Return true for errors that should propagate (not degrade gracefully): rate limits, auth failures, abuse detection. */
146
200
  export function isRateLimitOrAuthError(err) {
147
201
  const status = getHttpStatusCode(err);
@@ -15,7 +15,7 @@
15
15
  import { FetchedPR, DailyDigest, ClosedPR, MergedPR, StarFilter } from './types.js';
16
16
  import { type PRCountsResult } from './github-stats.js';
17
17
  export { computeDisplayLabel } from './display-utils.js';
18
- export { classifyCICheck, classifyFailingChecks, getCIStatus } from './ci-analysis.js';
18
+ export { categorizeCIStatus, classifyCICheck, classifyFailingChecks, getCIStatus } from './ci-analysis.js';
19
19
  export { isConditionalChecklistItem } from './checklist-analysis.js';
20
20
  export { determineStatus } from './status-determination.js';
21
21
  /**