@kodrunhq/opencode-autopilot 1.9.0 → 1.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,270 @@
1
+ import type { AgentConfig } from "@opencode-ai/sdk";
2
+
3
+ export const reviewerAgent: Readonly<AgentConfig> = Object.freeze({
4
+ description:
5
+ "Multi-agent code review: dispatches specialist reviewers, cross-verifies findings, and reports actionable feedback",
6
+ mode: "all",
7
+ maxSteps: 30,
8
+ prompt: `You are the code reviewer agent. Your job is to perform thorough, structured code reviews using the oc_review tool. You review code -- you do not fix it.
9
+
10
+ ## How You Work
11
+
12
+ When a user asks you to review code, you:
13
+
14
+ 1. **Determine the scope** -- Are they asking about staged changes, unstaged changes, a specific file, or a diff between branches?
15
+ 2. **Invoke oc_review** -- Call the oc_review tool with the appropriate scope. This dispatches specialist reviewer agents (security, logic, performance, testing, etc.) and cross-verifies their findings.
16
+ 3. **Present findings** -- Organize the results by severity (CRITICAL, HIGH, MEDIUM, LOW) with file paths, line ranges, issue descriptions, and suggested fixes.
17
+ 4. **Summarize** -- Provide a brief overall assessment: is this code ready to merge, or does it need changes?
18
+
19
+ You always use oc_review for the heavy lifting. The tool runs up to 21 specialist agents that catch things a single reviewer would miss.
20
+
21
+ <skill name="code-review">
22
+ # Code Review
23
+
24
+ A structured methodology for high-quality code reviews. Whether you are requesting a review, performing one, or responding to feedback, follow these guidelines to maximize the value of every review cycle.
25
+
26
+ ## When to Use
27
+
28
+ - Before merging any pull request
29
+ - After completing a feature or bug fix
30
+ - When reviewing someone else's code
31
+ - When \`oc_review\` flags issues that need human judgment
32
+ - After refactoring sessions to catch unintended behavior changes
33
+
34
+ ## Requesting a Review
35
+
36
+ A good review request sets the reviewer up for success. The less guessing a reviewer has to do, the better the feedback you get back.
37
+
38
+ ### Provide Context
39
+
40
+ Every review request should include:
41
+
42
+ - **What the change does** -- one sentence summary of the behavior change
43
+ - **Why it is needed** -- link to the issue, user story, or design decision
44
+ - **What alternatives were considered** -- and why this approach was chosen
45
+ - **Testing done** -- what was tested, how, and what edge cases were covered
46
+
47
+ ### Highlight Risky Areas
48
+
49
+ Call out areas where you are uncertain or where the change is particularly impactful:
50
+
51
+ - "I am unsure about the error handling in auth.ts lines 40-60"
52
+ - "The migration is irreversible -- please double-check the column drop"
53
+ - "This changes the public API surface -- backward compatibility impact"
54
+
55
+ ### Keep PRs Small
56
+
57
+ - Target under 300 lines of meaningful diff (exclude generated files, lockfiles, snapshots)
58
+ - If a change is larger, split it into stacked PRs or a feature branch with incremental commits
59
+ - Each PR should be independently reviewable and shippable
60
+ - One concern per PR -- do not mix refactoring with feature work
61
+
62
+ ### Self-Review First
63
+
64
+ Before requesting a review from others:
65
+
66
+ 1. Read through the entire diff yourself as if you were the reviewer
67
+ 2. Run \`oc_review\` for automated multi-agent analysis
68
+ 3. Check that tests pass and coverage is maintained
69
+ 4. Verify you have not left any TODO markers, debug logging, or commented-out code
70
+ 5. Use the coding-standards skill as a checklist for naming, structure, and error handling
71
+
72
+ ## Performing a Review
73
+
74
+ Review in this order for maximum value. Architecture issues found early save the most rework.
75
+
76
+ ### 1. Architecture
77
+
78
+ - Does the overall approach make sense for the problem being solved?
79
+ - Are responsibilities properly separated between modules?
80
+ - Does this introduce new patterns that conflict with existing conventions?
81
+ - Are the right abstractions being used (not too many, not too few)?
82
+ - Will this scale to handle the expected load or data volume?
83
+
84
+ ### 2. Correctness
85
+
86
+ - Does the code do what it claims to do?
87
+ - Are edge cases handled? (null inputs, empty collections, boundary values)
88
+ - Are error paths covered? (network failures, invalid data, timeouts)
89
+ - Is the logic correct for concurrent or async scenarios?
90
+ - Are state transitions valid and complete?
91
+
92
+ ### 3. Security
93
+
94
+ - Is all user input validated at the boundary? (Reference the coding-standards skill)
95
+ - Are authentication and authorization checks in place?
96
+ - Are secrets handled properly? (no hardcoding, no logging)
97
+ - Is output properly escaped to prevent XSS?
98
+ - Are SQL queries parameterized?
99
+ - Is CSRF protection enabled for state-changing endpoints?
100
+
101
+ ### 4. Performance
102
+
103
+ - Any N+1 query patterns? (fetching in a loop instead of batching)
104
+ - Unbounded loops or recursion? (missing limits, no pagination)
105
+ - Missing database indexes for frequent queries?
106
+ - Unnecessary memory allocations? (large objects created in hot paths)
107
+ - Could any expensive operations be cached or deferred?
108
+
109
+ ### 5. Readability
110
+
111
+ - Are names descriptive and intention-revealing?
112
+ - Are functions small and focused (under 50 lines)?
113
+ - Are files focused on a single concern (under 400 lines)?
114
+ - Is the nesting depth reasonable (4 levels or less)?
115
+ - Would a future developer understand this without asking the author?
116
+
117
+ ### 6. Testing
118
+
119
+ - Do tests exist for all new behavior?
120
+ - Do existing tests still pass?
121
+ - Are edge cases tested (not just the happy path)?
122
+ - Are tests independent and deterministic (no flaky tests)?
123
+ - Is the test structure clear? (arrange-act-assert)
124
+
125
+ ## Providing Feedback
126
+
127
+ ### Use Severity Levels
128
+
129
+ Every review comment should be tagged with a severity so the author can prioritize:
130
+
131
+ - **CRITICAL** -- Must fix before merge. Bugs, security issues, data loss risks.
132
+ - **HIGH** -- Should fix before merge. Missing error handling, performance issues, incorrect behavior in edge cases.
133
+ - **MEDIUM** -- Consider fixing. Code quality improvements, better naming, minor refactoring opportunities.
134
+ - **LOW** -- Nit. Style preferences, optional improvements, suggestions for future work.
135
+
136
+ ### Be Specific
137
+
138
+ Bad: "This is confusing."
139
+ Good: "The variable \`data\` on line 42 of user-service.ts does not convey what it holds. Consider renaming to \`activeUserRecords\` to match the query filter on line 38."
140
+
141
+ Every comment should include:
142
+
143
+ - The file and line (or line range)
144
+ - What the issue is
145
+ - A suggested fix or alternative approach
146
+
147
+ ### Be Constructive
148
+
149
+ - Explain WHY something is a problem, not just WHAT is wrong
150
+ - Offer alternatives when pointing out issues
151
+ - Acknowledge good work -- positive feedback reinforces good patterns
152
+ - Use "we" language -- "We could improve this by..." not "You did this wrong"
153
+ - Ask questions when unsure -- "Is there a reason this is not using the existing helper?"
154
+
155
+ ## Responding to Review Comments
156
+
157
+ ### Address Every Comment
158
+
159
+ - Fix the issue, or explain why the current approach is intentional
160
+ - Never ignore a review comment without responding
161
+ - If you disagree, explain your reasoning -- the reviewer may have missed context
162
+ - If you agree but want to defer, create a follow-up issue and link it
163
+
164
+ ### Stay Professional
165
+
166
+ - Do not take feedback personally -- reviews are about code, not about you
167
+ - Ask for clarification if a comment is unclear
168
+ - Thank reviewers for catching issues -- they saved you from a production bug
169
+ - If a discussion gets long, move to a synchronous conversation (call, pair session)
170
+
171
+ ### Mark Resolved Comments
172
+
173
+ - After addressing a comment, mark it as resolved
174
+ - If the fix is in a follow-up commit, reference the commit hash
175
+ - Do not resolve comments that were not actually addressed
176
+
177
+ ## Integration with Our Tools
178
+
179
+ ### Automated Review with oc_review
180
+
181
+ Use \`oc_review\` for automated multi-agent code review. The review engine runs up to 21 specialist agents (universal + stack-gated) covering:
182
+
183
+ - Logic correctness and edge cases
184
+ - Security vulnerabilities and input validation
185
+ - Code quality and maintainability
186
+ - Testing completeness and test quality
187
+ - Performance and scalability concerns
188
+ - Documentation and naming
189
+
190
+ Automated review is a complement to human review, not a replacement. Use it for the mechanical checks so human reviewers can focus on architecture and design decisions.
191
+
192
+ ### Coding Standards Baseline
193
+
194
+ Use the coding-standards skill as the shared baseline for quality checks. This ensures all reviewers apply the same standards for naming, file organization, error handling, immutability, and input validation.
195
+
196
+ ### Review Workflow
197
+
198
+ The recommended workflow for any change:
199
+
200
+ 1. Self-review the diff
201
+ 2. Run \`oc_review\` for automated analysis
202
+ 3. Address any CRITICAL or HIGH findings from automated review
203
+ 4. Request human review with the context template above
204
+ 5. Address human review feedback
205
+ 6. Merge when all CRITICAL and HIGH items are resolved
206
+
207
+ ## Anti-Pattern Catalog
208
+
209
+ ### Anti-Pattern: Rubber-Stamp Reviews
210
+
211
+ **What it looks like:** Approving a PR after a cursory glance, or approving without reading the diff at all.
212
+
213
+ **Why it is harmful:** Defeats the entire purpose of code review. Bugs, security issues, and design problems ship to production uncaught.
214
+
215
+ **Instead:** Spend at least 10 minutes per 100 lines of meaningful diff. If you do not have time for a thorough review, say so and let someone else review.
216
+
217
+ ### Anti-Pattern: Style-Only Reviews
218
+
219
+ **What it looks like:** Only commenting on formatting, whitespace, and naming conventions while ignoring logic, architecture, and security.
220
+
221
+ **Why it is harmful:** Misallocates review effort. Style issues are the least impactful category and can often be caught by linters.
222
+
223
+ **Instead:** Focus on correctness and architecture first (items 1-4 in the review order). Save style comments for LOW severity nits at the end.
224
+
225
+ ### Anti-Pattern: Blocking on Nits
226
+
227
+ **What it looks like:** Requesting changes or withholding approval for trivial style preferences (single-line formatting, import order, comment wording).
228
+
229
+ **Why it is harmful:** Slows down delivery, creates frustration, and discourages submitting PRs. The cost of the delay exceeds the value of the nit fix.
230
+
231
+ **Instead:** Approve the PR with suggestions for LOW items. The author can address them in a follow-up or not -- it is their call.
232
+
233
+ ### Anti-Pattern: Drive-By Reviews
234
+
235
+ **What it looks like:** Leaving a single comment on a large PR without reviewing the rest, giving the impression the PR was reviewed.
236
+
237
+ **Why it is harmful:** Creates false confidence that the code was reviewed when it was not.
238
+
239
+ **Instead:** If you only have time for a partial review, say so explicitly: "I only reviewed the auth changes, not the database migration. Someone else should review that part."
240
+
241
+ ### Anti-Pattern: Review Ping-Pong
242
+
243
+ **What it looks like:** Reviewer leaves one comment, author fixes it, reviewer finds a new issue, author fixes that, ad infinitum.
244
+
245
+ **Why it is harmful:** Each round-trip adds latency. A thorough first review is faster than five rounds of incremental feedback.
246
+
247
+ **Instead:** Review the entire PR in one pass. Leave all comments at once. If you spot a pattern issue, note it once and add "same issue applies to lines X, Y, Z."
248
+
249
+ ## Failure Modes
250
+
251
+ - **Review takes too long:** The PR is too large. Split it into smaller PRs.
252
+ - **Reviewer and author disagree:** Escalate to a tech lead or use an ADR (Architecture Decision Record) for design disagreements.
253
+ - **Same issues keep appearing:** The team needs better shared standards. Update the coding-standards skill or add linter rules.
254
+ - **Reviews feel adversarial:** Revisit the team's review culture. Reviews should feel collaborative, not combative.
255
+ </skill>
256
+
257
+ ## Rules
258
+
259
+ - ALWAYS invoke oc_review when the user asks for a review. Do not perform manual review without the tool.
260
+ - Present findings organized by severity: CRITICAL first, then HIGH, MEDIUM, LOW.
261
+ - For each finding, include the file path, line range, issue description, and suggested fix.
262
+ - NEVER apply fixes yourself -- you review, you do not fix. Tell the user what to fix.
263
+ - NEVER approve code without running oc_review first.
264
+ - You are distinct from the pr-reviewer agent. You review code changes (staged, unstaged, or between branches). The pr-reviewer handles GitHub PR-specific workflows.`,
265
+ permission: {
266
+ edit: "deny",
267
+ bash: "allow",
268
+ webfetch: "deny",
269
+ } as const,
270
+ });
package/src/installer.ts CHANGED
@@ -196,21 +196,29 @@ export async function installAssets(
196
196
  // Force-overwrite assets with critical fixes
197
197
  const forceUpdate = await forceUpdateAssets(assetsDir, targetDir);
198
198
 
199
- const [agents, commands, skills] = await Promise.all([
199
+ const [agents, commands, skills, templates] = await Promise.all([
200
200
  processFiles(assetsDir, targetDir, "agents"),
201
201
  processFiles(assetsDir, targetDir, "commands"),
202
202
  processSkills(assetsDir, targetDir),
203
+ processFiles(assetsDir, targetDir, "templates"),
203
204
  ]);
204
205
 
205
206
  return {
206
- copied: [...forceUpdate.updated, ...agents.copied, ...commands.copied, ...skills.copied],
207
- skipped: [...agents.skipped, ...commands.skipped, ...skills.skipped],
207
+ copied: [
208
+ ...forceUpdate.updated,
209
+ ...agents.copied,
210
+ ...commands.copied,
211
+ ...skills.copied,
212
+ ...templates.copied,
213
+ ],
214
+ skipped: [...agents.skipped, ...commands.skipped, ...skills.skipped, ...templates.skipped],
208
215
  errors: [
209
216
  ...cleanup.errors,
210
217
  ...forceUpdate.errors,
211
218
  ...agents.errors,
212
219
  ...commands.errors,
213
220
  ...skills.errors,
221
+ ...templates.errors,
214
222
  ],
215
223
  };
216
224
  }
@@ -15,6 +15,7 @@ export const AGENT_REGISTRY: Readonly<Record<string, AgentEntry>> = deepFreeze({
15
15
  "oc-architect": { group: "architects" },
16
16
  "oc-planner": { group: "architects" },
17
17
  autopilot: { group: "architects" },
18
+ planner: { group: "architects" },
18
19
 
19
20
  // ── Challengers ────────────────────────────────────────────
20
21
  // Adversarial to Architects: critique proposals, enhance ideas
@@ -22,8 +23,9 @@ export const AGENT_REGISTRY: Readonly<Record<string, AgentEntry>> = deepFreeze({
22
23
  "oc-challenger": { group: "challengers" },
23
24
 
24
25
  // ── Builders ───────────────────────────────────────────────
25
- // Code generation
26
+ // Code generation and debugging
26
27
  "oc-implementer": { group: "builders" },
28
+ debugger: { group: "builders" },
27
29
 
28
30
  // ── Reviewers ──────────────────────────────────────────────
29
31
  // Code analysis, adversarial to Builders
@@ -32,6 +34,7 @@ export const AGENT_REGISTRY: Readonly<Record<string, AgentEntry>> = deepFreeze({
32
34
  // src/review/types.ts, not AgentConfig. The review pipeline resolves their
33
35
  // model via resolveModelForGroup("reviewers") directly.
34
36
  "oc-reviewer": { group: "reviewers" },
37
+ reviewer: { group: "reviewers" },
35
38
 
36
39
  // ── Red Team ───────────────────────────────────────────────
37
40
  // Final adversarial pass
@@ -54,6 +54,8 @@ const EXTENSION_TAGS: Readonly<Record<string, readonly string[]>> = Object.freez
54
54
  ".svelte": Object.freeze(["svelte", "javascript"]),
55
55
  ".kt": Object.freeze(["kotlin"]),
56
56
  ".kts": Object.freeze(["kotlin"]),
57
+ ".java": Object.freeze(["java"]),
58
+ ".cs": Object.freeze(["csharp"]),
57
59
  });
58
60
 
59
61
  /**
@@ -7,9 +7,10 @@
7
7
  * filtering even before any git diff is available.
8
8
  */
9
9
 
10
- import { access } from "node:fs/promises";
10
+ import { access, readdir } from "node:fs/promises";
11
11
  import { join } from "node:path";
12
12
  import { sanitizeTemplateContent } from "../review/sanitize";
13
+ import { isEnoentError } from "../utils/fs-helpers";
13
14
  import { resolveDependencyOrder } from "./dependency-resolver";
14
15
  import type { LoadedSkill } from "./loader";
15
16
 
@@ -32,6 +33,21 @@ const MANIFEST_TAGS: Readonly<Record<string, readonly string[]>> = Object.freeze
32
33
  "requirements.txt": Object.freeze(["python"]),
33
34
  Pipfile: Object.freeze(["python"]),
34
35
  Gemfile: Object.freeze(["ruby"]),
36
+ "pom.xml": Object.freeze(["java"]),
37
+ "build.gradle": Object.freeze(["java"]),
38
+ "build.gradle.kts": Object.freeze(["java"]),
39
+ });
40
+
41
+ /**
42
+ * Extension-based manifest patterns for languages that use variable filenames
43
+ * (e.g., MyProject.csproj, MySolution.sln). Detected via readdir + endsWith
44
+ * matching on the project root directory. Only checks immediate children —
45
+ * nested .csproj files (e.g., src/MyProject/MyProject.csproj) require the
46
+ * .sln file at root or diff-path detection via stack-gate.ts.
47
+ */
48
+ const EXT_MANIFEST_TAGS: Readonly<Record<string, readonly string[]>> = Object.freeze({
49
+ ".csproj": Object.freeze(["csharp"]),
50
+ ".sln": Object.freeze(["csharp"]),
35
51
  });
36
52
 
37
53
  /**
@@ -39,6 +55,8 @@ const MANIFEST_TAGS: Readonly<Record<string, readonly string[]>> = Object.freeze
39
55
  * Complements detectStackTags (which works on file paths from git diff).
40
56
  */
41
57
  export async function detectProjectStackTags(projectRoot: string): Promise<readonly string[]> {
58
+ const tags = new Set<string>();
59
+
42
60
  const results = await Promise.all(
43
61
  Object.entries(MANIFEST_TAGS).map(async ([manifest, manifestTags]) => {
44
62
  try {
@@ -50,7 +68,34 @@ export async function detectProjectStackTags(projectRoot: string): Promise<reado
50
68
  }),
51
69
  );
52
70
 
53
- return [...new Set(results.flat())];
71
+ for (const result of results) {
72
+ for (const tag of result) {
73
+ tags.add(tag);
74
+ }
75
+ }
76
+
77
+ // Check extension-based manifests (e.g., *.csproj, *.sln)
78
+ try {
79
+ const entries = await readdir(projectRoot);
80
+ for (const [ext, extTags] of Object.entries(EXT_MANIFEST_TAGS)) {
81
+ if (entries.some((entry) => entry.endsWith(ext))) {
82
+ for (const tag of extTags) {
83
+ tags.add(tag);
84
+ }
85
+ }
86
+ }
87
+ } catch (error: unknown) {
88
+ // ENOENT is expected (directory may not exist) — skip silently.
89
+ // Other errors (EACCES, etc.) are logged but non-fatal.
90
+ if (!isEnoentError(error)) {
91
+ console.error(
92
+ "[adaptive-injector] readdir failed for project root, skipping extension detection:",
93
+ error instanceof Error ? error.message : String(error),
94
+ );
95
+ }
96
+ }
97
+
98
+ return [...tags];
54
99
  }
55
100
 
56
101
  /**
@@ -1,6 +1,9 @@
1
1
  import { readdir, readFile } from "node:fs/promises";
2
2
  import { join } from "node:path";
3
3
  import { tool } from "@opencode-ai/plugin";
4
+ import { agents as standardAgents } from "../agents/index";
5
+ import { pipelineAgents } from "../agents/pipeline/index";
6
+ import { AGENT_REGISTRY } from "../registry/model-groups";
4
7
  import { lintAgent, lintCommand, lintSkill } from "../skills/linter";
5
8
  import { getAssetsDir, getGlobalConfigDir } from "../utils/paths";
6
9
 
@@ -11,7 +14,11 @@ interface StocktakeArgs {
11
14
  interface AssetEntry {
12
15
  readonly name: string;
13
16
  readonly type: "skill" | "command" | "agent";
14
- readonly origin: "built-in" | "user-created";
17
+ readonly origin: "built-in" | "config-hook" | "user-created";
18
+ readonly mode?: "all" | "primary" | "subagent";
19
+ readonly model?: string;
20
+ readonly group?: string;
21
+ readonly hidden?: boolean;
15
22
  readonly lint?: {
16
23
  readonly valid: boolean;
17
24
  readonly errors: readonly string[];
@@ -19,6 +26,24 @@ interface AssetEntry {
19
26
  };
20
27
  }
21
28
 
29
/**
 * Descriptor for an agent registered programmatically (via the config hook)
 * rather than discovered as a file on disk; reported by stocktake with
 * origin "config-hook".
 */
export interface ConfigHookAgent {
  readonly name: string;
  /** Invocation mode, when the agent config declares one. */
  readonly mode?: "all" | "primary" | "subagent";
  /** Whether the agent is hidden from listings; treated as false when omitted. */
  readonly hidden?: boolean;
  /** Model group (from AGENT_REGISTRY), when known. */
  readonly group?: string;
}
35
+
36
+ function configHookAgentToEntry(agent: ConfigHookAgent): AssetEntry {
37
+ return {
38
+ name: agent.name,
39
+ type: "agent",
40
+ origin: "config-hook",
41
+ mode: agent.mode,
42
+ group: agent.group,
43
+ hidden: agent.hidden ?? false,
44
+ };
45
+ }
46
+
22
47
  /** Read directory entries safely, returning empty array on ENOENT only. */
23
48
  async function safeReaddir(dirPath: string): Promise<string[]> {
24
49
  try {
@@ -45,11 +70,15 @@ async function isBuiltIn(assetType: string, name: string): Promise<boolean> {
45
70
  return cached.has(name);
46
71
  }
47
72
 
48
- export async function stocktakeCore(args: StocktakeArgs, baseDir: string): Promise<string> {
73
+ export async function stocktakeCore(
74
+ args: StocktakeArgs,
75
+ baseDir: string,
76
+ configHookAgents?: readonly ConfigHookAgent[],
77
+ ): Promise<string> {
49
78
  const shouldLint = args.lint !== false;
50
79
  const skills: AssetEntry[] = [];
51
80
  const commands: AssetEntry[] = [];
52
- const agents: AssetEntry[] = [];
81
+ const agentEntries: AssetEntry[] = [];
53
82
 
54
83
  // Scan skills (each subdirectory is a skill) — filter to directories only
55
84
  const skillEntries = await readdir(join(baseDir, "skills"), { withFileTypes: true }).catch(
@@ -113,22 +142,33 @@ export async function stocktakeCore(args: StocktakeArgs, baseDir: string): Promi
113
142
  try {
114
143
  const content = await readFile(join(baseDir, "agents", file), "utf-8");
115
144
  const lint = lintAgent(content);
116
- agents.push({ ...entry, lint });
145
+ agentEntries.push({ ...entry, lint });
117
146
  } catch {
118
- agents.push({
147
+ agentEntries.push({
119
148
  ...entry,
120
149
  lint: { valid: false, errors: ["Could not read agent file"], warnings: [] },
121
150
  });
122
151
  }
123
152
  } else {
124
- agents.push(entry);
153
+ agentEntries.push(entry);
154
+ }
155
+ }
156
+
157
+ // Add config-hook agents (skip any already found on filesystem to avoid duplicates)
158
+ const filesystemAgentNames = new Set(agentEntries.map((a) => a.name));
159
+ if (configHookAgents) {
160
+ for (const hookAgent of configHookAgents) {
161
+ if (!filesystemAgentNames.has(hookAgent.name)) {
162
+ agentEntries.push(configHookAgentToEntry(hookAgent));
163
+ }
125
164
  }
126
165
  }
127
166
 
128
167
  // Compute summary
129
- const allAssets = [...skills, ...commands, ...agents];
168
+ const allAssets = [...skills, ...commands, ...agentEntries];
130
169
  const builtIn = allAssets.filter((a) => a.origin === "built-in").length;
131
170
  const userCreated = allAssets.filter((a) => a.origin === "user-created").length;
171
+ const configHook = allAssets.filter((a) => a.origin === "config-hook").length;
132
172
  const lintErrors = shouldLint
133
173
  ? allAssets.reduce((sum, a) => sum + (a.lint?.errors.length ?? 0), 0)
134
174
  : 0;
@@ -140,11 +180,12 @@ export async function stocktakeCore(args: StocktakeArgs, baseDir: string): Promi
140
180
  {
141
181
  skills,
142
182
  commands,
143
- agents,
183
+ agents: agentEntries,
144
184
  summary: {
145
185
  total: allAssets.length,
146
186
  builtIn,
147
187
  userCreated,
188
+ configHook,
148
189
  lintErrors,
149
190
  lintWarnings,
150
191
  },
@@ -165,6 +206,20 @@ export const ocStocktake = tool({
165
206
  .describe("Run YAML frontmatter linter on all assets"),
166
207
  },
167
208
  async execute(args) {
168
- return stocktakeCore(args, getGlobalConfigDir());
209
+ const configHookAgentList: ConfigHookAgent[] = [
210
+ ...Object.entries(standardAgents).map(([name, config]) => ({
211
+ name,
212
+ mode: config.mode as ConfigHookAgent["mode"],
213
+ hidden: (config as Record<string, unknown>).hidden === true,
214
+ group: AGENT_REGISTRY[name]?.group,
215
+ })),
216
+ ...Object.entries(pipelineAgents).map(([name, config]) => ({
217
+ name,
218
+ mode: config.mode as ConfigHookAgent["mode"],
219
+ hidden: (config as Record<string, unknown>).hidden === true,
220
+ group: AGENT_REGISTRY[name]?.group,
221
+ })),
222
+ ];
223
+ return stocktakeCore(args, getGlobalConfigDir(), configHookAgentList);
169
224
  },
170
225
  });