oco-claude-plugin 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +189 -0
- package/README.md +65 -0
- package/cli.mjs +413 -0
- package/package.json +28 -0
- package/plugin/agents/codebase-investigator.md +63 -0
- package/plugin/agents/patch-verifier.md +68 -0
- package/plugin/agents/refactor-reviewer.md +81 -0
- package/plugin/hooks/lib/utils.mjs +109 -0
- package/plugin/hooks/post-tool-use.mjs +88 -0
- package/plugin/hooks/pre-tool-use.mjs +100 -0
- package/plugin/hooks/stop.mjs +98 -0
- package/plugin/hooks/user-prompt-submit.cjs +61 -0
- package/plugin/mcp/bridge.js +434 -0
- package/plugin/settings-fragment.json +64 -0
- package/plugin/skills/oco-inspect-repo-area/SKILL.md +61 -0
- package/plugin/skills/oco-investigate-bug/SKILL.md +71 -0
- package/plugin/skills/oco-safe-refactor/SKILL.md +81 -0
- package/plugin/skills/oco-trace-stack/SKILL.md +64 -0
- package/plugin/skills/oco-verify-fix/SKILL.md +82 -0
|
@@ -0,0 +1,434 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* OCO MCP Bridge Server
|
|
4
|
+
*
|
|
5
|
+
* Minimal MCP server that bridges Claude Code to the local OCO runtime.
|
|
6
|
+
* Exposes only composite, high-value tools.
|
|
7
|
+
*
|
|
8
|
+
* Transport: stdio (Claude Code spawns this process)
|
|
9
|
+
* Backend: calls local `oco` CLI binary
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
const { spawn } = require("child_process");
|
|
13
|
+
const readline = require("readline");
|
|
14
|
+
|
|
15
|
+
// Path to the local OCO CLI binary; override with the OCO_BIN env var.
const OCO_BIN = process.env.OCO_BIN || "oco";
// Default workspace root used when a tool call omits `workspace`.
const WORKSPACE = process.env.OCO_WORKSPACE || process.cwd();
|
|
17
|
+
|
|
18
|
+
// --- MCP Protocol Handler ---
|
|
19
|
+
|
|
20
|
+
// Read one JSON-RPC message per line from stdin (MCP stdio transport).
const rl = readline.createInterface({ input: process.stdin });

rl.on("line", (line) => {
  let request;
  try {
    request = JSON.parse(line);
  } catch {
    // Ignore malformed lines (partial writes, stray non-protocol output).
    return;
  }

  handleRequest(request)
    .then((response) => {
      // JSON-RPC notifications carry no `id` and must not receive a response.
      if (request.id !== undefined) {
        process.stdout.write(JSON.stringify(response) + "\n");
      }
    })
    .catch((err) => {
      // Surface unexpected handler failures as a JSON-RPC internal error
      // instead of letting the rejection go unhandled and crash the bridge.
      if (request.id !== undefined) {
        process.stdout.write(
          JSON.stringify(error(request.id, -32603, err.message)) + "\n"
        );
      }
    });
});
|
|
33
|
+
|
|
34
|
+
/**
 * Dispatch a single JSON-RPC request to its handler.
 * Supports the minimal MCP surface: initialize, tools/list, tools/call.
 *
 * @param {{id: *, method: string, params: object}} request - parsed JSON-RPC message
 * @returns {Promise<object>} JSON-RPC response envelope
 */
async function handleRequest(request) {
  const { id, method, params } = request;

  if (method === "initialize") {
    return success(id, {
      protocolVersion: "2024-11-05",
      serverInfo: { name: "oco-bridge", version: "0.1.0" },
      capabilities: {
        tools: { listChanged: false },
      },
    });
  }

  if (method === "tools/list") {
    return success(id, { tools: TOOLS });
  }

  if (method === "tools/call") {
    return handleToolCall(id, params.name, params.arguments || {});
  }

  return error(id, -32601, `Method not found: ${method}`);
}
|
|
57
|
+
|
|
58
|
+
// --- Tool Definitions ---
|
|
59
|
+
|
|
60
|
+
// Tool catalog advertised via tools/list. Each entry follows the MCP tool
// schema (name, description, JSON-Schema inputSchema); dispatch to the
// matching handler happens in handleToolCall().
const TOOLS = [
  // Ranked code search backed by `oco search` (handler: searchCodebase).
  {
    name: "oco.search_codebase",
    description:
      "Composite codebase search: lexical + structural ranking with symbol-aware narrowing. Returns compact ranked results.",
    inputSchema: {
      type: "object",
      properties: {
        query: {
          type: "string",
          description: "Search query (natural language or symbol name)",
        },
        workspace: {
          type: "string",
          description: "Workspace root path (defaults to cwd)",
        },
        limit: {
          type: "integer",
          description: "Max results (default: 10)",
          default: 10,
        },
      },
      required: ["query"],
    },
  },
  // Stack-trace → codebase mapping (handler: traceError).
  {
    name: "oco.trace_error",
    description:
      "Composite error analysis: maps stack trace to codebase, identifies likely root cause regions, suggests next verification step.",
    inputSchema: {
      type: "object",
      properties: {
        stacktrace: {
          type: "string",
          description: "The stack trace or error output to analyze",
        },
        workspace: {
          type: "string",
          description: "Workspace root path",
        },
      },
      required: ["stacktrace"],
    },
  },
  // Build/test/lint/typecheck runner (handler: verifyPatch).
  {
    name: "oco.verify_patch",
    description:
      "Composite verification: detects project type, runs build/test/lint/typecheck, returns structured verdict.",
    inputSchema: {
      type: "object",
      properties: {
        workspace: {
          type: "string",
          description: "Workspace root path",
        },
        checks: {
          type: "array",
          items: { type: "string" },
          description:
            "Specific checks to run (build, test, lint, typecheck). Defaults to all available.",
        },
      },
    },
  },
  // Session-state extraction via `oco trace` (handler: collectFindings).
  {
    name: "oco.collect_findings",
    description:
      "Composite state extraction: current evidence, open questions, unresolved risks, suggested next action from the OCO session.",
    inputSchema: {
      type: "object",
      properties: {
        session_id: {
          type: "string",
          description: "OCO session ID (optional, uses latest if omitted)",
        },
      },
    },
  },
];
|
|
139
|
+
|
|
140
|
+
// --- Tool Handlers ---
|
|
141
|
+
|
|
142
|
+
/**
 * Route a tools/call request to the matching tool implementation.
 * Tool-level failures are reported as successful responses carrying
 * `isError: true`, per the MCP tool-result convention; only an unknown
 * tool name yields a JSON-RPC error.
 */
async function handleToolCall(id, toolName, args) {
  const handler = new Map([
    ["oco.search_codebase", searchCodebase],
    ["oco.trace_error", traceError],
    ["oco.verify_patch", verifyPatch],
    ["oco.collect_findings", collectFindings],
  ]).get(toolName);

  if (handler === undefined) {
    return error(id, -32601, `Unknown tool: ${toolName}`);
  }

  try {
    return await handler(id, args);
  } catch (e) {
    return success(id, {
      content: [{ type: "text", text: `Error: ${e.message}` }],
      isError: true,
    });
  }
}
|
|
163
|
+
|
|
164
|
+
/**
 * Tool: oco.search_codebase — ranked code search via `oco search`.
 * Degrades gracefully to an empty result set when the OCO backend is
 * missing or the search command fails, so the caller can fall back to
 * standard search tools.
 */
async function searchCodebase(id, args) {
  const workspace = args.workspace || WORKSPACE;
  const limit = args.limit || 10;

  const result = await runOco([
    "search",
    args.query,
    "--workspace",
    workspace,
    "--limit",
    String(limit),
    "--format",
    "json",
  ]);

  // Fall back both when the binary could not be spawned (result.error)
  // AND when the CLI itself failed (nonzero exit). The original only
  // checked the former, letting failed runs leak empty/partial stdout
  // through as if it were a valid result payload.
  if (result.error || result.exitCode !== 0) {
    // Graceful degradation: return empty results
    return success(id, {
      content: [
        {
          type: "text",
          text: JSON.stringify({ results: [], note: "OCO backend unavailable, use standard search tools" }),
        },
      ],
    });
  }

  return success(id, {
    content: [{ type: "text", text: result.stdout }],
  });
}
|
|
195
|
+
|
|
196
|
+
/**
 * Tool: oco.trace_error — map a stack trace onto the codebase.
 * Parses file:line frames from the trace, searches the workspace for each
 * referenced file (capped at 5 unique files), and returns the parsed
 * frames plus any codebase matches and a triage suggestion.
 */
async function traceError(id, args) {
  const workspace = args.workspace || WORKSPACE;

  // Parse stack trace to extract file paths and line numbers
  const frames = parseStackTrace(args.stacktrace);

  if (frames.length === 0) {
    return success(id, {
      content: [
        {
          type: "text",
          text: JSON.stringify({
            frames: [],
            note: "Could not parse stack trace. Provide the raw error output.",
          }),
        },
      ],
    });
  }

  // Search for each unique file in the stack trace. The per-file searches
  // are independent, so run them in parallel instead of awaiting serially.
  const fileSet = [...new Set(frames.map((f) => f.file))];

  const searches = await Promise.all(
    fileSet.slice(0, 5).map(async (file) => {
      const search = await runOco([
        "search",
        file,
        "--workspace",
        workspace,
        "--limit",
        "3",
        "--format",
        "json",
      ]);
      // Skip spawn failures, nonzero exits, and empty output alike.
      if (search.error || search.exitCode !== 0 || !search.stdout) {
        return null;
      }
      try {
        return { file, matches: JSON.parse(search.stdout) };
      } catch {
        // Non-JSON output — skip this file rather than failing the call.
        return null;
      }
    })
  );
  const results = searches.filter((entry) => entry !== null);

  return success(id, {
    content: [
      {
        type: "text",
        text: JSON.stringify({
          parsed_frames: frames,
          codebase_matches: results,
          suggestion: "Inspect the deepest application frame first. Check for null access, type errors, or missing validation.",
        }),
      },
    ],
  });
}
|
|
254
|
+
|
|
255
|
+
/**
 * Tool: oco.verify_patch — run build/test/lint/typecheck for the workspace.
 * Commands are detected per project type (see getCheckCommand), run in the
 * requested order, and stopped at the first genuine failure. Returns a
 * structured verdict: FAIL if any check failed, SKIP if nothing could run,
 * PASS otherwise.
 */
async function verifyPatch(id, args) {
  const workspace = args.workspace || WORKSPACE;
  const checks = args.checks || ["build", "test", "lint", "typecheck"];

  const verdicts = {};

  for (const check of checks) {
    const cmd = getCheckCommand(workspace, check);
    if (!cmd) {
      verdicts[check] = { status: "skip", reason: "not available" };
      continue;
    }

    const result = await runShell(cmd.command, cmd.args, { cwd: workspace });

    // A spawn-level failure (e.g. the binary is not installed) is not a
    // code failure — record it as a skip and keep going, instead of
    // reporting a misleading "fail" and aborting the remaining checks.
    if (result.error) {
      verdicts[check] = { status: "skip", reason: `command unavailable: ${result.error}` };
      continue;
    }

    const passed = result.exitCode === 0;
    verdicts[check] = {
      status: passed ? "pass" : "fail",
      // Only include output on failure to avoid leaking noisy stderr warnings
      ...(passed ? {} : { output: truncate((result.stderr + "\n" + result.stdout).trim(), 500) }),
    };

    // Stop on first failure — later checks would only add noise.
    if (!passed) {
      break;
    }
  }

  const entries = Object.values(verdicts);
  const allSkipped = entries.every((v) => v.status === "skip");
  const hasFail = entries.some((v) => v.status === "fail");
  const verdict = hasFail ? "FAIL" : allSkipped ? "SKIP" : "PASS";

  return success(id, {
    content: [
      {
        type: "text",
        text: JSON.stringify({
          verdict,
          checks: verdicts,
          ...(allSkipped && { note: "No verification commands available for this workspace. Manual review required." }),
        }),
      },
    ],
  });
}
|
|
300
|
+
|
|
301
|
+
/**
 * Tool: oco.collect_findings — extract session state via `oco trace`.
 * Returns the CLI's JSON output, or an empty findings structure when the
 * backend is unavailable or the trace command fails.
 */
async function collectFindings(id, args) {
  const sessionId = args.session_id || "latest";

  const result = await runOco([
    "trace",
    sessionId,
    "--format",
    "json",
  ]);

  // Treat a nonzero exit the same as a spawn failure: without this the
  // fallback only covered a missing binary, and a failed `oco trace` run
  // would return empty/partial stdout as if it were valid session data.
  if (result.error || result.exitCode !== 0) {
    return success(id, {
      content: [
        {
          type: "text",
          text: JSON.stringify({
            evidence: [],
            open_questions: [],
            risks: [],
            next_action: "No OCO session data available. Use standard investigation.",
          }),
        },
      ],
    });
  }

  return success(id, {
    content: [{ type: "text", text: result.stdout }],
  });
}
|
|
331
|
+
|
|
332
|
+
// --- Helpers ---
|
|
333
|
+
|
|
334
|
+
/**
 * Extract { file, line } frames from arbitrary error output.
 * Recognizes JS/TS ("at fn (file:line:col)"), Python ('File "x", line N'),
 * Rust ("file.rs:line"), and a generic "file.ext:line" fallback.
 *
 * Fixes over the naive multi-pattern scan: the generic pattern overlaps
 * the specific ones (producing duplicate frames for the same location)
 * and its greedy file match can swallow a leading "(" or quote from
 * "at fn (file:line:col)" text — duplicates are dropped and stray
 * leading bracket/quote characters are stripped.
 *
 * @param {string} text - raw stack trace or error output
 * @returns {{file: string, line: number}[]} unique frames in first-seen order
 */
function parseStackTrace(text) {
  // Common patterns: file:line, file(line), at file:line:col
  const patterns = [
    /at\s+(?:\w+\s+\()?([^:(\s]+):(\d+)/g, // JS/TS: at func (file:line:col)
    /File "([^"]+)", line (\d+)/g, // Python: File "path", line N
    /([^\s]+\.rs):(\d+)/g, // Rust: file.rs:line
    /([^\s]+\.[a-z]+):(\d+)/g, // Generic: file.ext:line
  ];

  const frames = [];
  const seen = new Set();

  for (const pattern of patterns) {
    let match;
    while ((match = pattern.exec(text)) !== null) {
      // Strip bracket/quote characters the generic pattern can capture.
      const file = match[1].replace(/^[("']+/, "");
      const line = parseInt(match[2], 10);
      const key = `${file}:${line}`;
      if (!seen.has(key)) {
        seen.add(key);
        frames.push({ file, line });
      }
    }
  }

  return frames;
}
|
|
353
|
+
|
|
354
|
+
/**
 * Resolve the shell command for a verification check in this workspace.
 * Project type is inferred from marker files (Cargo.toml, package.json,
 * tsconfig.json, pyproject.toml).
 *
 * @param {string} workspace - workspace root directory
 * @param {string} check - one of "build", "test", "lint", "typecheck"
 * @returns {?{command: string, args: string[]}} command spec, or null when
 *   no suitable command is available (caller records a skip).
 */
function getCheckCommand(workspace, check) {
  const fs = require("fs");
  const path = require("path");

  const hasFile = (name) =>
    fs.existsSync(path.join(workspace, name));

  // npm-based checks are only usable when package.json actually declares
  // the script — otherwise `npm run <x>` / `npm test` exits nonzero and
  // surfaces as a bogus "fail" instead of a skip.
  const hasNpmScript = (name) => {
    if (!hasFile("package.json")) return false;
    try {
      const pkg = JSON.parse(
        fs.readFileSync(path.join(workspace, "package.json"), "utf8")
      );
      return Boolean(pkg.scripts && pkg.scripts[name]);
    } catch {
      // Unreadable/invalid package.json: no usable scripts.
      return false;
    }
  };

  switch (check) {
    case "build":
      if (hasFile("Cargo.toml"))
        return { command: "cargo", args: ["build"] };
      if (hasNpmScript("build"))
        return { command: "npm", args: ["run", "build"] };
      return null;
    case "test":
      if (hasFile("Cargo.toml"))
        return { command: "cargo", args: ["test"] };
      if (hasNpmScript("test"))
        return { command: "npm", args: ["test"] };
      if (hasFile("pyproject.toml"))
        return { command: "pytest", args: [] };
      return null;
    case "lint":
      if (hasFile("Cargo.toml"))
        return { command: "cargo", args: ["clippy", "--", "-D", "warnings"] };
      if (hasNpmScript("lint"))
        return { command: "npm", args: ["run", "lint"] };
      return null;
    case "typecheck":
      if (hasFile("Cargo.toml"))
        return { command: "cargo", args: ["check"] };
      if (hasFile("tsconfig.json"))
        return { command: "npx", args: ["tsc", "--noEmit"] };
      if (hasFile("pyproject.toml"))
        return { command: "mypy", args: ["."] };
      return null;
    default:
      return null;
  }
}
|
|
394
|
+
|
|
395
|
+
/**
 * Run the OCO CLI (OCO_BIN) with the given arguments.
 * Never rejects — see runShell for the resolved result shape.
 *
 * @param {string[]} args - CLI arguments passed to the `oco` binary
 * @returns {Promise<{exitCode: number, stdout: string, stderr: string, error: ?string}>}
 */
function runOco(args) {
  return runShell(OCO_BIN, args, {});
}
|
|
398
|
+
|
|
399
|
+
/**
 * Spawn a command and capture its output without ever rejecting.
 * Resolves with { exitCode, stdout, stderr, error }; `error` is non-null
 * only when the process could not be spawned at all (e.g. binary not
 * found), in which case exitCode is -1 and the streams are empty.
 * A 30s timeout is applied so a hung command cannot stall the bridge.
 */
function runShell(command, args, options) {
  return new Promise((resolve) => {
    const child = spawn(command, args, {
      ...options,
      timeout: 30000,
      stdio: ["ignore", "pipe", "pipe"],
    });

    const outChunks = [];
    const errChunks = [];

    child.stdout.on("data", (chunk) => {
      outChunks.push(chunk.toString());
    });
    child.stderr.on("data", (chunk) => {
      errChunks.push(chunk.toString());
    });

    child.on("close", (code) => {
      resolve({
        exitCode: code,
        stdout: outChunks.join(""),
        stderr: errChunks.join(""),
        error: null,
      });
    });

    child.on("error", (spawnErr) => {
      resolve({ exitCode: -1, stdout: "", stderr: "", error: spawnErr.message });
    });
  });
}
|
|
422
|
+
|
|
423
|
+
/**
 * Clamp a string to at most maxLen characters, appending a truncation
 * marker when anything was cut. Null/undefined/empty input yields "".
 */
function truncate(str, maxLen) {
  if (!str) {
    return "";
  }
  if (str.length <= maxLen) {
    return str;
  }
  return `${str.slice(0, maxLen)}\n... (truncated)`;
}
|
|
427
|
+
|
|
428
|
+
/**
 * Build a JSON-RPC 2.0 success envelope for the given request id.
 */
function success(id, result) {
  const envelope = { jsonrpc: "2.0", id, result };
  return envelope;
}
|
|
431
|
+
|
|
432
|
+
/**
 * Build a JSON-RPC 2.0 error envelope for the given request id.
 */
function error(id, code, message) {
  return {
    jsonrpc: "2.0",
    id,
    error: { code, message },
  };
}
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
{
|
|
2
|
+
"hooks": {
|
|
3
|
+
"UserPromptSubmit": [
|
|
4
|
+
{
|
|
5
|
+
"matcher": "",
|
|
6
|
+
"hooks": [
|
|
7
|
+
{
|
|
8
|
+
"type": "command",
|
|
9
|
+
"command": "node .claude/hooks/user-prompt-submit.cjs",
|
|
10
|
+
"timeout": 10
|
|
11
|
+
}
|
|
12
|
+
]
|
|
13
|
+
}
|
|
14
|
+
],
|
|
15
|
+
"PreToolUse": [
|
|
16
|
+
{
|
|
17
|
+
"matcher": "Bash|Edit|Write|MultiEdit",
|
|
18
|
+
"hooks": [
|
|
19
|
+
{
|
|
20
|
+
"type": "command",
|
|
21
|
+
"command": "node .claude/hooks/pre-tool-use.mjs",
|
|
22
|
+
"timeout": 10
|
|
23
|
+
}
|
|
24
|
+
]
|
|
25
|
+
}
|
|
26
|
+
],
|
|
27
|
+
"PostToolUse": [
|
|
28
|
+
{
|
|
29
|
+
"matcher": "",
|
|
30
|
+
"hooks": [
|
|
31
|
+
{
|
|
32
|
+
"type": "command",
|
|
33
|
+
"command": "node .claude/hooks/post-tool-use.mjs",
|
|
34
|
+
"timeout": 10
|
|
35
|
+
}
|
|
36
|
+
]
|
|
37
|
+
}
|
|
38
|
+
],
|
|
39
|
+
"Stop": [
|
|
40
|
+
{
|
|
41
|
+
"matcher": "",
|
|
42
|
+
"hooks": [
|
|
43
|
+
{
|
|
44
|
+
"type": "command",
|
|
45
|
+
"command": "node .claude/hooks/stop.mjs",
|
|
46
|
+
"timeout": 10
|
|
47
|
+
}
|
|
48
|
+
]
|
|
49
|
+
}
|
|
50
|
+
]
|
|
51
|
+
},
|
|
52
|
+
"mcpServers": {
|
|
53
|
+
"oco": {
|
|
54
|
+
"command": "node",
|
|
55
|
+
"args": [".claude/mcp/bridge.js"],
|
|
56
|
+
"env": {
|
|
57
|
+
"OCO_BIN": "oco"
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
},
|
|
61
|
+
"permissions": {
|
|
62
|
+
"allow": ["Bash(oco *)"]
|
|
63
|
+
}
|
|
64
|
+
}
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: oco-inspect-repo-area
|
|
3
|
+
description: Explore and understand a specific area of the repository using OCO-backed code intelligence. Use when the task is exploratory and repo-specific.
|
|
4
|
+
triggers:
|
|
5
|
+
- "explore"
|
|
6
|
+
- "understand"
|
|
7
|
+
- "how does"
|
|
8
|
+
- "what does"
|
|
9
|
+
- "show me the"
|
|
10
|
+
- "explain the"
|
|
11
|
+
- "where is"
|
|
12
|
+
- "codebase"
|
|
13
|
+
- "module"
|
|
14
|
+
- "architecture"
|
|
15
|
+
---
|
|
16
|
+
|
|
17
|
+
# OCO: Inspect Repository Area
|
|
18
|
+
|
|
19
|
+
You are performing a focused exploration of a codebase area. Follow this structured workflow.
|
|
20
|
+
|
|
21
|
+
## Step 1: Identify the Target Area
|
|
22
|
+
|
|
23
|
+
Determine which part of the codebase needs exploration:
|
|
24
|
+
- A module, package, or directory
|
|
25
|
+
- A feature or capability
|
|
26
|
+
- A data flow or interaction pattern
|
|
27
|
+
|
|
28
|
+
## Step 2: Gather Ranked Context (via OCO)
|
|
29
|
+
|
|
30
|
+
Use the `oco.search_codebase` MCP tool if available, otherwise use standard search tools:
|
|
31
|
+
|
|
32
|
+
```
|
|
33
|
+
oco.search_codebase({ query: "<area description>", workspace: "." })
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
This returns ranked, symbol-aware results — prefer these over raw file dumping.
|
|
37
|
+
|
|
38
|
+
## Step 3: Read Key Files Selectively
|
|
39
|
+
|
|
40
|
+
Based on search results, read only the most relevant files. **Do NOT dump entire directories.**
|
|
41
|
+
|
|
42
|
+
Priority order:
|
|
43
|
+
1. Entry points and public API surfaces
|
|
44
|
+
2. Core types and data structures
|
|
45
|
+
3. Key implementation logic
|
|
46
|
+
4. Tests (for behavior documentation)
|
|
47
|
+
|
|
48
|
+
## Step 4: Summarize Before Acting
|
|
49
|
+
|
|
50
|
+
Before taking any action, produce a **compact summary**:
|
|
51
|
+
- Purpose of the area
|
|
52
|
+
- Key types and their relationships
|
|
53
|
+
- Entry points and data flow
|
|
54
|
+
- Potential concerns or complexity hotspots
|
|
55
|
+
|
|
56
|
+
## Rules
|
|
57
|
+
|
|
58
|
+
- Never read more than 10 files without summarizing first
|
|
59
|
+
- Prefer symbol-level inspection over full file reads
|
|
60
|
+
- If an area is complex (>5 files), delegate to the `@codebase-investigator` subagent
|
|
61
|
+
- Report confidence level: high / medium / low
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: oco-investigate-bug
|
|
3
|
+
description: Systematic bug investigation without a full stacktrace. Enforces evidence-first debugging with reproduction and root cause analysis.
|
|
4
|
+
triggers:
|
|
5
|
+
- "debug"
|
|
6
|
+
- "bug"
|
|
7
|
+
- "not working"
|
|
8
|
+
- "broken"
|
|
9
|
+
- "doesn't work"
|
|
10
|
+
- "wrong behavior"
|
|
11
|
+
- "unexpected"
|
|
12
|
+
- "regression"
|
|
13
|
+
---
|
|
14
|
+
|
|
15
|
+
# OCO: Investigate Bug
|
|
16
|
+
|
|
17
|
+
You are investigating a bug without a clear stack trace. Follow strict evidence-based debugging.
|
|
18
|
+
|
|
19
|
+
## Step 1: Understand the Symptom
|
|
20
|
+
|
|
21
|
+
Clarify with the user if needed:
|
|
22
|
+
- **Expected behavior**: What should happen?
|
|
23
|
+
- **Actual behavior**: What happens instead?
|
|
24
|
+
- **Reproduction steps**: How to trigger it?
|
|
25
|
+
- **When it started**: Recent change? Always broken?
|
|
26
|
+
|
|
27
|
+
## Step 2: Narrow the Scope
|
|
28
|
+
|
|
29
|
+
Identify the subsystem:
|
|
30
|
+
1. Search for relevant code using `oco.search_codebase` or standard search
|
|
31
|
+
2. Identify the code path from user action to observed behavior
|
|
32
|
+
3. List candidate files (max 5 initial candidates)
|
|
33
|
+
|
|
34
|
+
## Step 3: Gather Evidence
|
|
35
|
+
|
|
36
|
+
For each candidate:
|
|
37
|
+
1. Read the relevant code section
|
|
38
|
+
2. Look for: edge cases, missing validation, incorrect logic, state corruption, timing issues
|
|
39
|
+
3. Check recent changes (`git log --oneline -10 -- <file>`)
|
|
40
|
+
4. Check tests: do existing tests cover this case?
|
|
41
|
+
|
|
42
|
+
## Step 4: Reproduce or Narrow
|
|
43
|
+
|
|
44
|
+
Before proposing a fix:
|
|
45
|
+
- If tests exist: check if they pass or fail
|
|
46
|
+
- If no tests: describe how to reproduce
|
|
47
|
+
- Narrow to the smallest possible scope
|
|
48
|
+
|
|
49
|
+
## Step 5: Root Cause Analysis
|
|
50
|
+
|
|
51
|
+
State the root cause with evidence:
|
|
52
|
+
- **Root cause**: [description]
|
|
53
|
+
- **Evidence**: [what you found in the code]
|
|
54
|
+
- **Why it wasn't caught**: [missing test, edge case, etc.]
|
|
55
|
+
|
|
56
|
+
## Step 6: Fix Only After Evidence
|
|
57
|
+
|
|
58
|
+
Once root cause is confirmed:
|
|
59
|
+
1. Propose the minimal fix
|
|
60
|
+
2. Explain why the fix addresses the root cause
|
|
61
|
+
3. Identify if new tests are needed
|
|
62
|
+
4. After applying changes, run the verification workflow described in the `oco-verify-fix` skill (build, test, lint, typecheck)
|
|
63
|
+
5. Use `oco.collect_findings` to synthesize current evidence and remaining open questions
|
|
64
|
+
|
|
65
|
+
## Rules
|
|
66
|
+
|
|
67
|
+
- **Never guess at fixes** — evidence first
|
|
68
|
+
- After 2 failed attempts at the same approach, step back and reconsider
|
|
69
|
+
- If scope exceeds 5 files, delegate reading to `@codebase-investigator`
|
|
70
|
+
- For semantic review of proposed patches, delegate to `@patch-verifier`
|
|
71
|
+
- Always document what you ruled out and why
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: oco-safe-refactor
|
|
3
|
+
description: Structured refactoring with impact analysis, staged changes, and verification. Use for renames, restructuring, module extraction.
|
|
4
|
+
triggers:
|
|
5
|
+
- "refactor"
|
|
6
|
+
- "rename"
|
|
7
|
+
- "restructure"
|
|
8
|
+
- "extract"
|
|
9
|
+
- "move to"
|
|
10
|
+
- "split into"
|
|
11
|
+
- "reorganize"
|
|
12
|
+
- "decouple"
|
|
13
|
+
---
|
|
14
|
+
|
|
15
|
+
# OCO: Safe Refactor
|
|
16
|
+
|
|
17
|
+
You are performing a refactoring operation. Follow this staged, verification-gated workflow.
|
|
18
|
+
|
|
19
|
+
## Step 1: Define the Refactoring Scope
|
|
20
|
+
|
|
21
|
+
Clearly state:
|
|
22
|
+
- **What** is being refactored (symbol, module, pattern)
|
|
23
|
+
- **Why** (improve clarity, reduce coupling, fix naming)
|
|
24
|
+
- **Boundary**: what files/modules are affected
|
|
25
|
+
|
|
26
|
+
## Step 2: Impact Analysis
|
|
27
|
+
|
|
28
|
+
Before making any changes:
|
|
29
|
+
|
|
30
|
+
1. **Find all usages** of the target symbol/pattern:
|
|
31
|
+
- Use `oco.search_codebase` or Grep for symbol references
|
|
32
|
+
- Check imports, re-exports, type references, test references
|
|
33
|
+
- Check config files, documentation, comments
|
|
34
|
+
|
|
35
|
+
2. **Map the dependency graph**:
|
|
36
|
+
- What depends on the thing being refactored?
|
|
37
|
+
- What does it depend on?
|
|
38
|
+
- Are there external consumers (API, CLI, exports)?
|
|
39
|
+
|
|
40
|
+
3. **Produce impact summary**:
|
|
41
|
+
- Files affected: [list]
|
|
42
|
+
- Symbols affected: [list]
|
|
43
|
+
- Risk level: low / medium / high
|
|
44
|
+
- Breaking changes: yes / no
|
|
45
|
+
|
|
46
|
+
If impact is high (>10 files or breaking changes), delegate deep analysis to `@refactor-reviewer` subagent.
|
|
47
|
+
|
|
48
|
+
## Step 3: Staged Changes
|
|
49
|
+
|
|
50
|
+
Apply changes in this order:
|
|
51
|
+
1. **Internal implementation** (the core change)
|
|
52
|
+
2. **Direct consumers** (files importing/using the changed entity)
|
|
53
|
+
3. **Indirect consumers** (transitive dependencies)
|
|
54
|
+
4. **Tests** (update to match new structure)
|
|
55
|
+
5. **Documentation/config** (if applicable)
|
|
56
|
+
|
|
57
|
+
**After each stage, verify the build compiles.**
|
|
58
|
+
|
|
59
|
+
## Step 4: Verification
|
|
60
|
+
|
|
61
|
+
Run the full verification suite:
|
|
62
|
+
1. Build: `cargo build` / `npm run build` / equivalent
|
|
63
|
+
2. Type check: `cargo check` / `tsc --noEmit` / equivalent
|
|
64
|
+
3. Tests: `cargo test` / `npm test` / equivalent
|
|
65
|
+
4. Lint: `cargo clippy` / `eslint` / equivalent
|
|
66
|
+
|
|
67
|
+
Follow the verification workflow described in the `oco-verify-fix` skill (build, test, lint, typecheck in order).
|
|
68
|
+
|
|
69
|
+
## Step 5: Review
|
|
70
|
+
|
|
71
|
+
Delegate reviews to the appropriate subagents:
|
|
72
|
+
- `@refactor-reviewer` — check for stale references, breaking changes, and hidden impact
|
|
73
|
+
- `@patch-verifier` — semantic review of the change for correctness and completeness
|
|
74
|
+
|
|
75
|
+
## Rules
|
|
76
|
+
|
|
77
|
+
- Never rename/move without searching for all usages first
|
|
78
|
+
- Never skip the impact analysis step
|
|
79
|
+
- If >10 files change, produce a summary for user review before committing
|
|
80
|
+
- Preserve all existing test coverage
|
|
81
|
+
- Keep each logical change as a separate commit if practical
|