oh-my-customcode 0.40.0 → 0.42.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -13,7 +13,7 @@
13
13
 
14
14
  **[한국어 문서 (Korean)](./README_ko.md)**
15
15
 
16
- 44 agents. 74 skills. 21 rules. One command.
16
+ 44 agents. 75 skills. 21 rules. One command.
17
17
 
18
18
  ```bash
19
19
  npm install -g oh-my-customcode && cd your-project && omcustom init
@@ -138,7 +138,7 @@ Each agent declares its tools, model, memory scope, and limitations in YAML fron
138
138
 
139
139
  ---
140
140
 
141
- ### Skills (74)
141
+ ### Skills (75)
142
142
 
143
143
  | Category | Count | Includes |
144
144
  |----------|-------|----------|
@@ -150,7 +150,7 @@ Each agent declares its tools, model, memory scope, and limitations in YAML fron
150
150
  | Memory | 3 | memory-save, memory-recall, memory-management |
151
151
  | Package | 3 | npm-publish, npm-version, npm-audit |
152
152
  | Optimization | 3 | optimize-analyze, optimize-bundle, optimize-report |
153
- | Security | 2 | cve-triage, jinja2-prompts |
153
+ | Security | 3 | adversarial-review, cve-triage, jinja2-prompts |
154
154
  | Other | 8 | codex-exec, vercel-deploy, skills-sh-search, result-aggregation, writing-clearly-and-concisely, and more |
155
155
 
156
156
  Skills use a 3-tier scope system: `core` (universal), `harness` (agent/skill maintenance), `package` (project-specific).
@@ -257,7 +257,7 @@ your-project/
257
257
  ├── CLAUDE.md # Entry point
258
258
  ├── .claude/
259
259
  │ ├── agents/ # 44 agent definitions
260
- │ ├── skills/ # 74 skill modules
260
+ │ ├── skills/ # 75 skill modules
261
261
  │ ├── rules/ # 21 governance rules (R000-R021)
262
262
  │ ├── hooks/ # 15 lifecycle hook scripts
263
263
  │ ├── schemas/ # Tool input validation schemas
package/dist/cli/index.js CHANGED
@@ -13443,6 +13443,158 @@ function getComponentPath(component) {
13443
13443
  return `.claude/${component}`;
13444
13444
  }
13445
13445
 
13446
+ // src/core/lockfile.ts
13447
+ init_fs();
13448
+ import { createHash } from "node:crypto";
13449
+ import { createReadStream } from "node:fs";
13450
+ import { readdir, stat } from "node:fs/promises";
13451
+ import { join as join5, relative as relative2 } from "node:path";
13452
+ var LOCKFILE_NAME = ".omcustom.lock.json";
13453
+ var LOCKFILE_VERSION = 1;
13454
+ var LOCKFILE_COMPONENTS = [
13455
+ "rules",
13456
+ "agents",
13457
+ "skills",
13458
+ "hooks",
13459
+ "contexts",
13460
+ "ontology",
13461
+ "guides"
13462
+ ];
13463
+ var COMPONENT_PATHS = LOCKFILE_COMPONENTS.map((component) => [getComponentPath(component), component]);
13464
+ function computeFileHash(filePath) {
13465
+ return new Promise((resolve2, reject) => {
13466
+ const hash = createHash("sha256");
13467
+ const stream = createReadStream(filePath);
13468
+ stream.on("error", (err) => {
13469
+ reject(err);
13470
+ });
13471
+ stream.on("data", (chunk) => {
13472
+ hash.update(chunk);
13473
+ });
13474
+ stream.on("end", () => {
13475
+ resolve2(hash.digest("hex"));
13476
+ });
13477
+ });
13478
+ }
13479
+ async function readLockfile(targetDir) {
13480
+ const lockfilePath = join5(targetDir, LOCKFILE_NAME);
13481
+ const exists2 = await fileExists(lockfilePath);
13482
+ if (!exists2) {
13483
+ debug("lockfile.not_found", { path: lockfilePath });
13484
+ return null;
13485
+ }
13486
+ try {
13487
+ const data = await readJsonFile(lockfilePath);
13488
+ if (typeof data !== "object" || data === null || data.lockfileVersion !== LOCKFILE_VERSION) {
13489
+ warn("lockfile.invalid_version", { path: lockfilePath });
13490
+ return null;
13491
+ }
13492
+ const record = data;
13493
+ if (typeof record.files !== "object" || record.files === null) {
13494
+ warn("lockfile.invalid_structure", { path: lockfilePath });
13495
+ return null;
13496
+ }
13497
+ return data;
13498
+ } catch (err) {
13499
+ warn("lockfile.read_failed", { path: lockfilePath, error: String(err) });
13500
+ return null;
13501
+ }
13502
+ }
13503
+ async function writeLockfile(targetDir, lockfile) {
13504
+ const lockfilePath = join5(targetDir, LOCKFILE_NAME);
13505
+ await writeJsonFile(lockfilePath, lockfile);
13506
+ debug("lockfile.written", { path: lockfilePath });
13507
+ }
13508
+ function resolveComponent(relativePath) {
13509
+ const normalized = relativePath.replace(/\\/g, "/");
13510
+ for (const [prefix, component] of COMPONENT_PATHS) {
13511
+ if (normalized === prefix || normalized.startsWith(`${prefix}/`)) {
13512
+ return component;
13513
+ }
13514
+ }
13515
+ return "unknown";
13516
+ }
13517
+ async function collectFiles(dir2, projectRoot, isTopLevel) {
13518
+ const results = [];
13519
+ let entries;
13520
+ try {
13521
+ entries = await readdir(dir2);
13522
+ } catch {
13523
+ return results;
13524
+ }
13525
+ for (const entry of entries) {
13526
+ if (isTopLevel && entry.startsWith(".") && entry !== ".claude") {
13527
+ continue;
13528
+ }
13529
+ const fullPath = join5(dir2, entry);
13530
+ let fileStat;
13531
+ try {
13532
+ fileStat = await stat(fullPath);
13533
+ } catch {
13534
+ continue;
13535
+ }
13536
+ if (fileStat.isDirectory()) {
13537
+ const subFiles = await collectFiles(fullPath, projectRoot, false);
13538
+ results.push(...subFiles);
13539
+ } else if (fileStat.isFile()) {
13540
+ results.push(fullPath);
13541
+ }
13542
+ }
13543
+ return results;
13544
+ }
13545
+ async function generateLockfile(targetDir, generatorVersion, templateVersion) {
13546
+ const files = {};
13547
+ const componentRoots = COMPONENT_PATHS.map(([prefix]) => join5(targetDir, prefix));
13548
+ for (const componentRoot of componentRoots) {
13549
+ const exists2 = await fileExists(componentRoot);
13550
+ if (!exists2) {
13551
+ debug("lockfile.component_dir_missing", { path: componentRoot });
13552
+ continue;
13553
+ }
13554
+ const allFiles = await collectFiles(componentRoot, targetDir, false);
13555
+ for (const absolutePath of allFiles) {
13556
+ const relativePath = relative2(targetDir, absolutePath).replace(/\\/g, "/");
13557
+ let hash;
13558
+ let size;
13559
+ try {
13560
+ hash = await computeFileHash(absolutePath);
13561
+ const fileStat = await stat(absolutePath);
13562
+ size = fileStat.size;
13563
+ } catch (err) {
13564
+ warn("lockfile.hash_failed", { path: absolutePath, error: String(err) });
13565
+ continue;
13566
+ }
13567
+ const component = resolveComponent(relativePath);
13568
+ files[relativePath] = {
13569
+ templateHash: hash,
13570
+ size,
13571
+ component
13572
+ };
13573
+ debug("lockfile.entry_added", { path: relativePath, component });
13574
+ }
13575
+ }
13576
+ return {
13577
+ lockfileVersion: LOCKFILE_VERSION,
13578
+ generatorVersion,
13579
+ generatedAt: new Date().toISOString(),
13580
+ templateVersion,
13581
+ files
13582
+ };
13583
+ }
13584
+ async function generateAndWriteLockfileForDir(targetDir) {
13585
+ try {
13586
+ const packageRoot = getPackageRoot();
13587
+ const manifest = await readJsonFile(join5(packageRoot, "templates", "manifest.json"));
13588
+ const { version: generatorVersion } = await readJsonFile(join5(packageRoot, "package.json"));
13589
+ const lockfile = await generateLockfile(targetDir, generatorVersion, manifest.version);
13590
+ await writeLockfile(targetDir, lockfile);
13591
+ return { fileCount: Object.keys(lockfile.files).length };
13592
+ } catch (err) {
13593
+ const msg = err instanceof Error ? err.message : String(err);
13594
+ return { fileCount: 0, warning: `Lockfile generation failed: ${msg}` };
13595
+ }
13596
+ }
13597
+
13446
13598
  // src/cli/doctor.ts
13447
13599
  async function pathExists(targetPath) {
13448
13600
  try {
@@ -13454,8 +13606,8 @@ async function pathExists(targetPath) {
13454
13606
  }
13455
13607
  async function isDirectory(targetPath) {
13456
13608
  try {
13457
- const stat = await fs.stat(targetPath);
13458
- return stat.isDirectory();
13609
+ const stat2 = await fs.stat(targetPath);
13610
+ return stat2.isDirectory();
13459
13611
  } catch {
13460
13612
  return false;
13461
13613
  }
@@ -13491,8 +13643,8 @@ async function collectSymlinksFromRefsDir(refsDir) {
13491
13643
  for (const entry of entries) {
13492
13644
  const entryPath = path.join(refsDir, entry.name);
13493
13645
  try {
13494
- const stat = await fs.lstat(entryPath);
13495
- if (stat.isSymbolicLink()) {
13646
+ const stat2 = await fs.lstat(entryPath);
13647
+ if (stat2.isSymbolicLink()) {
13496
13648
  symlinks.push(entryPath);
13497
13649
  }
13498
13650
  } catch {}
@@ -13890,6 +14042,45 @@ function readCurrentVersion() {
13890
14042
  return "0.0.0";
13891
14043
  }
13892
14044
  }
14045
+ async function checkLockfileDrift(targetDir) {
14046
+ const lockfile = await readLockfile(targetDir);
14047
+ if (!lockfile) {
14048
+ return null;
14049
+ }
14050
+ const modified = [];
14051
+ const removed = [];
14052
+ for (const [relativePath, entry] of Object.entries(lockfile.files)) {
14053
+ const absolutePath = path.join(targetDir, relativePath);
14054
+ try {
14055
+ const currentHash = await computeFileHash(absolutePath);
14056
+ if (currentHash !== entry.templateHash) {
14057
+ modified.push(relativePath);
14058
+ }
14059
+ } catch {
14060
+ removed.push(relativePath);
14061
+ }
14062
+ }
14063
+ const driftedFiles = [...modified, ...removed];
14064
+ if (driftedFiles.length === 0) {
14065
+ return {
14066
+ name: "Lockfile",
14067
+ status: "pass",
14068
+ message: `Lockfile OK — no drift detected (${Object.keys(lockfile.files).length} files tracked)`,
14069
+ fixable: false
14070
+ };
14071
+ }
14072
+ const details = [
14073
+ ...modified.map((f) => `modified: ${f}`),
14074
+ ...removed.map((f) => `removed: ${f}`)
14075
+ ];
14076
+ return {
14077
+ name: "Lockfile",
14078
+ status: "warn",
14079
+ message: `Lockfile drift detected: ${driftedFiles.length} file(s) changed since install`,
14080
+ fixable: false,
14081
+ details
14082
+ };
14083
+ }
13893
14084
  async function checkFrameworkDrift(targetDir, currentVersion) {
13894
14085
  const result = await checkFrameworkVersion(targetDir, currentVersion);
13895
14086
  if (!result)
@@ -13957,7 +14148,9 @@ async function runAllChecks(targetDir, layout, packageVersion, includeUpdates) {
13957
14148
  ]);
13958
14149
  const frameworkCheck = await checkFrameworkDrift(targetDir, packageVersion);
13959
14150
  const checksWithFramework = frameworkCheck ? [...baseChecks, frameworkCheck] : baseChecks;
13960
- return includeUpdates ? [...checksWithFramework, checkUpdateAvailable(packageVersion)] : checksWithFramework;
14151
+ const lockfileCheck = await checkLockfileDrift(targetDir);
14152
+ const checksWithLockfile = lockfileCheck ? [...checksWithFramework, lockfileCheck] : checksWithFramework;
14153
+ return includeUpdates ? [...checksWithLockfile, checkUpdateAvailable(packageVersion)] : checksWithLockfile;
13961
14154
  }
13962
14155
  async function doctorCommand(options = {}) {
13963
14156
  const targetDir = process.cwd();
@@ -14029,7 +14222,7 @@ import { basename as basename2, join as join7 } from "node:path";
14029
14222
 
14030
14223
  // src/core/file-preservation.ts
14031
14224
  init_fs();
14032
- import { basename, join as join5 } from "node:path";
14225
+ import { basename, join as join6 } from "node:path";
14033
14226
  var DEFAULT_CRITICAL_FILES = ["settings.json", "settings.local.json"];
14034
14227
  var DEFAULT_CRITICAL_DIRECTORIES = ["agent-memory", "agent-memory-local"];
14035
14228
  var PROTECTED_FRAMEWORK_FILES = ["CLAUDE.md", "AGENTS.md"];
@@ -14052,8 +14245,8 @@ function matchesGlobPattern(filePath, pattern) {
14052
14245
  return regex.test(filePath);
14053
14246
  }
14054
14247
  async function extractSingleFile(fileName, rootDir, tempDir, result) {
14055
- const srcPath = join5(rootDir, fileName);
14056
- const destPath = join5(tempDir, fileName);
14248
+ const srcPath = join6(rootDir, fileName);
14249
+ const destPath = join6(tempDir, fileName);
14057
14250
  try {
14058
14251
  if (await fileExists(srcPath)) {
14059
14252
  await copyFile(srcPath, destPath);
@@ -14067,8 +14260,8 @@ async function extractSingleFile(fileName, rootDir, tempDir, result) {
14067
14260
  }
14068
14261
  }
14069
14262
  async function extractSingleDir(dirName, rootDir, tempDir, result) {
14070
- const srcPath = join5(rootDir, dirName);
14071
- const destPath = join5(tempDir, dirName);
14263
+ const srcPath = join6(rootDir, dirName);
14264
+ const destPath = join6(tempDir, dirName);
14072
14265
  try {
14073
14266
  if (await fileExists(srcPath)) {
14074
14267
  await copyDirectory(srcPath, destPath, { overwrite: true, preserveTimestamps: true });
@@ -14105,8 +14298,8 @@ async function restoreCriticalFiles(rootDir, preservation) {
14105
14298
  failures: []
14106
14299
  };
14107
14300
  for (const fileName of preservation.extractedFiles) {
14108
- const preservedPath = join5(preservation.tempDir, fileName);
14109
- const targetPath = join5(rootDir, fileName);
14301
+ const preservedPath = join6(preservation.tempDir, fileName);
14302
+ const targetPath = join6(rootDir, fileName);
14110
14303
  try {
14111
14304
  if (fileName.endsWith(".json")) {
14112
14305
  await mergeJsonFile(preservedPath, targetPath);
@@ -14122,8 +14315,8 @@ async function restoreCriticalFiles(rootDir, preservation) {
14122
14315
  }
14123
14316
  }
14124
14317
  for (const dirName of preservation.extractedDirs) {
14125
- const preservedPath = join5(preservation.tempDir, dirName);
14126
- const targetPath = join5(rootDir, dirName);
14318
+ const preservedPath = join6(preservation.tempDir, dirName);
14319
+ const targetPath = join6(rootDir, dirName);
14127
14320
  try {
14128
14321
  await copyDirectory(preservedPath, targetPath, {
14129
14322
  overwrite: false,
@@ -14422,134 +14615,6 @@ function getDefaultWorkflow() {
14422
14615
  };
14423
14616
  }
14424
14617
 
14425
- // src/core/lockfile.ts
14426
- init_fs();
14427
- import { createHash } from "node:crypto";
14428
- import { createReadStream } from "node:fs";
14429
- import { readdir, stat } from "node:fs/promises";
14430
- import { join as join6, relative as relative2 } from "node:path";
14431
- var LOCKFILE_NAME = ".omcustom.lock.json";
14432
- var LOCKFILE_VERSION = 1;
14433
- var LOCKFILE_COMPONENTS = [
14434
- "rules",
14435
- "agents",
14436
- "skills",
14437
- "hooks",
14438
- "contexts",
14439
- "ontology",
14440
- "guides"
14441
- ];
14442
- var COMPONENT_PATHS = LOCKFILE_COMPONENTS.map((component) => [getComponentPath(component), component]);
14443
- function computeFileHash(filePath) {
14444
- return new Promise((resolve2, reject) => {
14445
- const hash = createHash("sha256");
14446
- const stream = createReadStream(filePath);
14447
- stream.on("error", (err) => {
14448
- reject(err);
14449
- });
14450
- stream.on("data", (chunk) => {
14451
- hash.update(chunk);
14452
- });
14453
- stream.on("end", () => {
14454
- resolve2(hash.digest("hex"));
14455
- });
14456
- });
14457
- }
14458
- async function writeLockfile(targetDir, lockfile) {
14459
- const lockfilePath = join6(targetDir, LOCKFILE_NAME);
14460
- await writeJsonFile(lockfilePath, lockfile);
14461
- debug("lockfile.written", { path: lockfilePath });
14462
- }
14463
- function resolveComponent(relativePath) {
14464
- const normalized = relativePath.replace(/\\/g, "/");
14465
- for (const [prefix, component] of COMPONENT_PATHS) {
14466
- if (normalized === prefix || normalized.startsWith(`${prefix}/`)) {
14467
- return component;
14468
- }
14469
- }
14470
- return "unknown";
14471
- }
14472
- async function collectFiles(dir2, projectRoot, isTopLevel) {
14473
- const results = [];
14474
- let entries;
14475
- try {
14476
- entries = await readdir(dir2);
14477
- } catch {
14478
- return results;
14479
- }
14480
- for (const entry of entries) {
14481
- if (isTopLevel && entry.startsWith(".") && entry !== ".claude") {
14482
- continue;
14483
- }
14484
- const fullPath = join6(dir2, entry);
14485
- let fileStat;
14486
- try {
14487
- fileStat = await stat(fullPath);
14488
- } catch {
14489
- continue;
14490
- }
14491
- if (fileStat.isDirectory()) {
14492
- const subFiles = await collectFiles(fullPath, projectRoot, false);
14493
- results.push(...subFiles);
14494
- } else if (fileStat.isFile()) {
14495
- results.push(fullPath);
14496
- }
14497
- }
14498
- return results;
14499
- }
14500
- async function generateLockfile(targetDir, generatorVersion, templateVersion) {
14501
- const files = {};
14502
- const componentRoots = COMPONENT_PATHS.map(([prefix]) => join6(targetDir, prefix));
14503
- for (const componentRoot of componentRoots) {
14504
- const exists2 = await fileExists(componentRoot);
14505
- if (!exists2) {
14506
- debug("lockfile.component_dir_missing", { path: componentRoot });
14507
- continue;
14508
- }
14509
- const allFiles = await collectFiles(componentRoot, targetDir, false);
14510
- for (const absolutePath of allFiles) {
14511
- const relativePath = relative2(targetDir, absolutePath).replace(/\\/g, "/");
14512
- let hash;
14513
- let size;
14514
- try {
14515
- hash = await computeFileHash(absolutePath);
14516
- const fileStat = await stat(absolutePath);
14517
- size = fileStat.size;
14518
- } catch (err) {
14519
- warn("lockfile.hash_failed", { path: absolutePath, error: String(err) });
14520
- continue;
14521
- }
14522
- const component = resolveComponent(relativePath);
14523
- files[relativePath] = {
14524
- templateHash: hash,
14525
- size,
14526
- component
14527
- };
14528
- debug("lockfile.entry_added", { path: relativePath, component });
14529
- }
14530
- }
14531
- return {
14532
- lockfileVersion: LOCKFILE_VERSION,
14533
- generatorVersion,
14534
- generatedAt: new Date().toISOString(),
14535
- templateVersion,
14536
- files
14537
- };
14538
- }
14539
- async function generateAndWriteLockfileForDir(targetDir) {
14540
- try {
14541
- const packageRoot = getPackageRoot();
14542
- const manifest = await readJsonFile(join6(packageRoot, "templates", "manifest.json"));
14543
- const { version: generatorVersion } = await readJsonFile(join6(packageRoot, "package.json"));
14544
- const lockfile = await generateLockfile(targetDir, generatorVersion, manifest.version);
14545
- await writeLockfile(targetDir, lockfile);
14546
- return { fileCount: Object.keys(lockfile.files).length };
14547
- } catch (err) {
14548
- const msg = err instanceof Error ? err.message : String(err);
14549
- return { fileCount: 0, warning: `Lockfile generation failed: ${msg}` };
14550
- }
14551
- }
14552
-
14553
14618
  // src/core/scope-filter.ts
14554
14619
  function getSkillScope(content) {
14555
14620
  const cleaned = content.replace(/^\uFEFF/, "");
@@ -14936,12 +15001,21 @@ async function generateMCPConfig(targetDir) {
14936
15001
  if (!ontologyExists) {
14937
15002
  return;
14938
15003
  }
15004
+ try {
15005
+ execSync3("uv --version", { stdio: "pipe" });
15006
+ } catch {
15007
+ warn("uv (Python package manager) not found. Install it with: curl -LsSf https://astral.sh/uv/install.sh | sh");
15008
+ warn("Skipping ontology-rag MCP configuration. You can set it up manually later.");
15009
+ return;
15010
+ }
14939
15011
  try {
14940
15012
  execSync3("uv venv .venv", { cwd: targetDir, stdio: "pipe" });
14941
15013
  execSync3('uv pip install "ontology-rag @ git+https://github.com/baekenough/oh-my-customcode.git#subdirectory=packages/ontology-rag"', { cwd: targetDir, stdio: "pipe" });
14942
15014
  } catch (error2) {
14943
15015
  const msg = error2 instanceof Error ? error2.message : String(error2);
14944
- throw new Error(`Failed to setup Python environment: ${msg}`);
15016
+ warn(`Failed to setup ontology-rag: ${msg}`);
15017
+ warn("You can configure the MCP server manually. See: https://github.com/baekenough/oh-my-customcode/tree/develop/packages/ontology-rag");
15018
+ return;
14945
15019
  }
14946
15020
  const config = {
14947
15021
  mcpServers: {
@@ -14975,6 +15049,7 @@ async function generateMCPConfig(targetDir) {
14975
15049
  await writeFile(mcpConfigPath, `${JSON.stringify(config, null, 2)}
14976
15050
  `);
14977
15051
  }
15052
+ info("ontology-rag MCP server configured successfully");
14978
15053
  }
14979
15054
  async function checkUvAvailable() {
14980
15055
  try {
@@ -16078,6 +16153,15 @@ async function initCommand(options) {
16078
16153
  logInstallResultInfo(installResult);
16079
16154
  logSuccessDetails(installedPaths, installResult.skippedComponents);
16080
16155
  await setupMcpConfig(targetDir);
16156
+ console.log("");
16157
+ console.log("Required plugins (install manually):");
16158
+ console.log(" /plugin marketplace add obra/superpowers-marketplace");
16159
+ console.log(" /plugin install superpowers");
16160
+ console.log(" /plugin install superpowers-developing-for-claude-code");
16161
+ console.log(" /plugin install elements-of-style");
16162
+ console.log(" /plugin install context7");
16163
+ console.log("");
16164
+ console.log('See CLAUDE.md "외부 의존성" section for details.');
16081
16165
  return {
16082
16166
  success: true,
16083
16167
  message: i18n.t("cli.init.success"),
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "oh-my-customcode",
3
3
  "workspaces": ["packages/*"],
4
- "version": "0.40.0",
4
+ "version": "0.42.2",
5
5
  "description": "Batteries-included agent harness for Claude Code",
6
6
  "type": "module",
7
7
  "bin": {
@@ -7,6 +7,7 @@ memory: project
7
7
  isolation: sandbox
8
8
  skills:
9
9
  - cve-triage
10
+ - adversarial-review
10
11
  tools:
11
12
  - Read
12
13
  - Write
@@ -0,0 +1,30 @@
1
+ {
2
+ "requiredPlugins": [
3
+ {
4
+ "name": "superpowers",
5
+ "source": "claude-plugins-official",
6
+ "description": "TDD, debugging, collaboration patterns"
7
+ },
8
+ {
9
+ "name": "superpowers-developing-for-claude-code",
10
+ "source": "superpowers-marketplace",
11
+ "description": "Claude Code development documentation"
12
+ },
13
+ {
14
+ "name": "elements-of-style",
15
+ "source": "superpowers-marketplace",
16
+ "description": "Writing clarity guidelines"
17
+ },
18
+ {
19
+ "name": "context7",
20
+ "source": "claude-plugins-official",
21
+ "description": "Library documentation lookup"
22
+ }
23
+ ],
24
+ "marketplaces": [
25
+ {
26
+ "name": "superpowers-marketplace",
27
+ "url": "obra/superpowers-marketplace"
28
+ }
29
+ ]
30
+ }
@@ -6,6 +6,7 @@
6
6
  # Always exits 0 (advisory only)
7
7
 
8
8
  set -euo pipefail
9
+ HOOK_START=$(date +%s%N 2>/dev/null || echo 0)
9
10
 
10
11
  # Dependency check: exit silently if jq not available
11
12
  command -v jq >/dev/null 2>&1 || exit 0
@@ -55,4 +56,9 @@ fi
55
56
 
56
57
  # Pass through
57
58
  echo "$input"
59
+ HOOK_END=$(date +%s%N 2>/dev/null || echo 0)
60
+ if [ "$HOOK_START" != "0" ] && [ "$HOOK_END" != "0" ]; then
61
+ HOOK_MS=$(( (HOOK_END - HOOK_START) / 1000000 ))
62
+ echo "[Hook Perf] $(basename "$0"): ${HOOK_MS}ms" >> "/tmp/.claude-hook-perf-${PPID}.log"
63
+ fi
58
64
  exit 0
@@ -1,5 +1,6 @@
1
1
  #!/bin/bash
2
2
  set -euo pipefail
3
+ HOOK_START=$(date +%s%N 2>/dev/null || echo 0)
3
4
 
4
5
  # Dependency check: exit silently if jq not available
5
6
  command -v jq >/dev/null 2>&1 || exit 0
@@ -86,4 +87,9 @@ fi
86
87
 
87
88
  # Pass through
88
89
  echo "$input"
90
+ HOOK_END=$(date +%s%N 2>/dev/null || echo 0)
91
+ if [ "$HOOK_START" != "0" ] && [ "$HOOK_END" != "0" ]; then
92
+ HOOK_MS=$(( (HOOK_END - HOOK_START) / 1000000 ))
93
+ echo "[Hook Perf] $(basename "$0"): ${HOOK_MS}ms" >> "/tmp/.claude-hook-perf-${PPID}.log"
94
+ fi
89
95
  exit 0
@@ -185,6 +185,10 @@ case "$DRIFT_STATUS" in
185
185
  echo " Skipped (not a git repository)" >&2
186
186
  ;;
187
187
  esac
188
+ echo "" >&2
189
+ echo " [Lockfile Drift]" >&2
190
+ echo " Note: file-level lockfile drift (template hash changes) is checked via 'omcustom doctor'" >&2
191
+ echo " Run 'omcustom doctor' to detect modified/removed template files since install." >&2
188
192
  echo "------------------------------------" >&2
189
193
 
190
194
  # SessionEnd hooks timeout (v2.1.74+)
@@ -1,5 +1,6 @@
1
1
  #!/bin/bash
2
2
  set -euo pipefail
3
+ HOOK_START=$(date +%s%N 2>/dev/null || echo 0)
3
4
 
4
5
  # Dependency check: exit silently if jq not available
5
6
  command -v jq >/dev/null 2>&1 || exit 0
@@ -180,9 +181,19 @@ if [ "$hard_block" = true ]; then
180
181
  echo " Recovery: Step back, re-read the error, and try a fundamentally different approach." >&2
181
182
  echo "=====================================" >&2
182
183
  echo "$input"
184
+ HOOK_END=$(date +%s%N 2>/dev/null || echo 0)
185
+ if [ "$HOOK_START" != "0" ] && [ "$HOOK_END" != "0" ]; then
186
+ HOOK_MS=$(( (HOOK_END - HOOK_START) / 1000000 ))
187
+ echo "[Hook Perf] $(basename "$0"): ${HOOK_MS}ms" >> "/tmp/.claude-hook-perf-${PPID}.log"
188
+ fi
183
189
  exit 1
184
190
  fi
185
191
 
186
192
  # Pass through
187
193
  echo "$input"
194
+ HOOK_END=$(date +%s%N 2>/dev/null || echo 0)
195
+ if [ "$HOOK_START" != "0" ] && [ "$HOOK_END" != "0" ]; then
196
+ HOOK_MS=$(( (HOOK_END - HOOK_START) / 1000000 ))
197
+ echo "[Hook Perf] $(basename "$0"): ${HOOK_MS}ms" >> "/tmp/.claude-hook-perf-${PPID}.log"
198
+ fi
188
199
  exit 0
@@ -85,6 +85,7 @@ skills:
85
85
  description: "Audit agent dependencies and references"
86
86
  user_invocable: true
87
87
  model_invocable: true
88
+ scope: harness
88
89
  summary: "Audit agent dependencies to ensure all skill and guide references are valid"
89
90
  keywords: [audit, dependencies, validation, references]
90
91
  rule_references: []
@@ -103,6 +104,7 @@ skills:
103
104
  description: "Fetch and verify Claude Code official documentation. Use when checking official spec compliance or updating local reference docs."
104
105
  user_invocable: true
105
106
  model_invocable: false
107
+ scope: harness
106
108
  summary: "Maintain up-to-date local copies of Claude Code official documentation"
107
109
  keywords: [claude-code, documentation, spec, compliance, verification]
108
110
  rule_references: []
@@ -112,6 +114,7 @@ skills:
112
114
  description: "Create a new agent with complete structure"
113
115
  user_invocable: true
114
116
  model_invocable: false
117
+ scope: harness
115
118
  summary: "Create a new agent with complete directory structure and validation"
116
119
  keywords: [create, agent, structure, validation]
117
120
  rule_references: []
@@ -203,6 +206,7 @@ skills:
203
206
  description: "Fix broken agent references and symlinks"
204
207
  user_invocable: true
205
208
  model_invocable: false
209
+ scope: harness
206
210
  summary: "Fix broken references, missing symlinks, and agent dependency issues"
207
211
  keywords: [fix, references, symlinks, dependencies]
208
212
  rule_references: []
@@ -230,6 +234,7 @@ skills:
230
234
  description: "Show help information for commands and system"
231
235
  user_invocable: true
232
236
  model_invocable: true
237
+ scope: harness
233
238
  summary: "Show help information for commands, agents, and system rules"
234
239
  keywords: [help, documentation, commands, agents, rules]
235
240
  rule_references: []
@@ -266,6 +271,7 @@ skills:
266
271
  description: "Show all available commands"
267
272
  user_invocable: true
268
273
  model_invocable: true
274
+ scope: harness
269
275
  summary: "Show all available commands with optional filtering and detailed information"
270
276
  keywords: [lists, commands, categories, system]
271
277
  rule_references: []
@@ -302,6 +308,7 @@ skills:
302
308
  description: "Enable/disable OpenTelemetry console monitoring for Claude Code usage tracking"
303
309
  user_invocable: true
304
310
  model_invocable: true
311
+ scope: package
305
312
  summary: "Enable or disable OpenTelemetry console monitoring for usage metrics"
306
313
  keywords: [monitoring, telemetry, otel, metrics, usage]
307
314
  rule_references: []
@@ -311,6 +318,7 @@ skills:
311
318
  description: "Audit npm dependencies for security and updates"
312
319
  user_invocable: true
313
320
  model_invocable: true
321
+ scope: package
314
322
  summary: "Audit npm dependencies for security vulnerabilities and outdated packages"
315
323
  keywords: [npm, audit, security, dependencies, vulnerabilities]
316
324
  rule_references: []
@@ -320,6 +328,7 @@ skills:
320
328
  description: "Publish package to npm registry with pre-checks"
321
329
  user_invocable: true
322
330
  model_invocable: false
331
+ scope: package
323
332
  summary: "Publish package to npm registry with comprehensive pre-publish checks"
324
333
  keywords: [npm, publish, package, registry, validation]
325
334
  rule_references: []
@@ -329,6 +338,7 @@ skills:
329
338
  description: "Manage semantic versions for npm packages"
330
339
  user_invocable: true
331
340
  model_invocable: false
341
+ scope: package
332
342
  summary: "Manage semantic versions for npm packages with changelog and git integration"
333
343
  keywords: [npm, version, semantic, changelog, git]
334
344
  rule_references: []
@@ -447,6 +457,7 @@ skills:
447
457
  description: "Full R017 verification (5+3 rounds) before commit"
448
458
  user_invocable: true
449
459
  model_invocable: false
460
+ scope: harness
450
461
  summary: "Execute full R017 verification with 5 rounds of manager verification and 3 rounds of deep review"
451
462
  keywords: [verification, r017, sync, validation, compliance]
452
463
  rule_references: [R017]
@@ -493,6 +504,7 @@ skills:
493
504
  description: "Show system status and health checks"
494
505
  user_invocable: true
495
506
  model_invocable: true
507
+ scope: harness
496
508
  summary: "Show comprehensive system status including agents, skills, guides, and health checks"
497
509
  keywords: [status, health, system, agents, skills]
498
510
  rule_references: []
@@ -520,6 +532,7 @@ skills:
520
532
  description: "Sync documentation with project structure"
521
533
  user_invocable: true
522
534
  model_invocable: false
535
+ scope: harness
523
536
  summary: "Ensure documentation accurately reflects current project state and agents work together"
524
537
  keywords: [update, documentation, sync, validation, consistency]
525
538
  rule_references: []
@@ -529,6 +542,7 @@ skills:
529
542
  description: "Update agents from external sources (GitHub, docs, etc.)"
530
543
  user_invocable: true
531
544
  model_invocable: false
545
+ scope: harness
532
546
  summary: "Update agents, skills, and guides from external sources to latest versions"
533
547
  keywords: [update, external, github, sources, versioning]
534
548
  rule_references: []
@@ -203,10 +203,10 @@ Use `context: fork` for skills that orchestrate multi-agent workflows. Cap at **
203
203
  | Multi-agent coordination patterns | Single-agent reference skills |
204
204
  | Task decomposition/planning | External tool integrations |
205
205
 
206
- Current skills with `context: fork` (11/12 cap):
206
+ Current skills with `context: fork` (9/12 cap):
207
207
  - secretary-routing, dev-lead-routing, de-lead-routing, qa-lead-routing
208
208
  - dag-orchestration, task-decomposition, worker-reviewer-pipeline, pipeline-guards
209
- - deep-plan, evaluator-optimizer, sauron-watch
209
+ - deep-plan
210
210
 
211
211
  ## Naming
212
212
 
@@ -13,6 +13,7 @@ Available when `CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS=1` or TeamCreate/SendMessag
13
13
  | Scenario | Preferred | Reason |
14
14
  |----------|-----------|--------|
15
15
  | Simple independent subtasks | Agent Tool | Lower cost, no coordination overhead |
16
+ | Sequential-dependency init/scaffolding | Agent Tool | Blocked agents waste tokens polling; single agent faster |
16
17
  | Multi-step with shared state | **Agent Teams** | Shared task list, peer messaging |
17
18
  | Research requiring discussion | **Agent Teams** | Iterative discovery, synthesis |
18
19
  | Cost-sensitive batch ops | Agent Tool | Minimal token overhead |
@@ -46,9 +47,14 @@ Before using Agent tool for 2+ agent tasks, complete this check:
46
47
  ║ ║
47
48
  ║ 4. Are 2+ issues being fixed in the same release batch? ║
48
49
  ║ YES → prefer Agent Teams (coordination benefit) ║
49
- ║ NO → Proceed with Agent tool
50
+ ║ NO → Check #5
51
+ ║ ║
52
+ ║ 5. Are tasks sequentially dependent (init/scaffold)? ║
53
+ ║ YES → prefer Agent Tool (single agent, no coordination) ║
54
+ ║ NO → Continue with Agent Teams ║
50
55
  ║ ║
51
56
  ║ Simple rule: 3+ agents OR review cycle → use Agent Teams ║
57
+ ║ Sequential deps / scaffolding → Agent Tool (single agent) ║
52
58
  ║ 2+ issues in same batch → prefer Agent Teams ║
53
59
  ║ Everything else → Agent tool ║
54
60
  ╚══════════════════════════════════════════════════════════════════╝
@@ -189,6 +195,36 @@ When Agent Teams creates a new agent via mgr-creator:
189
195
  4. New agent joins team immediately
190
196
  5. Team continues with expanded capabilities
191
197
 
198
+ ## Blocked Agent Behavior
199
+
200
+ When a team member is blocked by task dependencies:
201
+
202
+ | Strategy | When | Benefit |
203
+ |----------|------|---------|
204
+ | Deferred spawn | Dependency chain is clear | No wasted tokens; spawn after blocker completes |
205
+ | Silent wait | Agent already spawned, short wait expected | Minimal overhead |
206
+ | Reassign | Agent blocked >2 min with no progress | Reuse agent for unblocked work |
207
+
208
+ ### Prompt Guidelines for Blocked Agents
209
+
210
+ When spawning agents that may be blocked:
211
+ 1. Include explicit instruction: "If your task is blocked, wait silently. Do NOT send periodic status messages."
212
+ 2. Set check interval: "Check TaskList once per minute, not continuously."
213
+ 3. Prefer deferred spawn when the dependency resolution time is unpredictable.
214
+
215
+ ### Anti-Pattern: Idle Polling
216
+
217
+ ```
218
+ ❌ WRONG: Blocked agent sends repeated status messages
219
+ docker-dev: "Task #1 still pending..." (×5 messages, wasting tokens)
220
+
221
+ ✓ CORRECT: Deferred spawn after dependency resolves
222
+ (Task #1 completes) → then spawn docker-dev for Task #3
223
+
224
+ ✓ ALSO CORRECT: Silent wait with infrequent checks
225
+ docker-dev spawned with: "Wait silently if blocked. Check TaskList once per minute."
226
+ ```
227
+
192
228
  ## Lifecycle
193
229
 
194
230
  ```
@@ -0,0 +1,72 @@
1
+ ---
2
+ name: adversarial-review
3
+ description: Adversarial code review using attacker mindset — trust boundary, attack surface, business logic, and defense evaluation
4
+ scope: core
5
+ argument-hint: "<file-or-directory> [--depth quick|thorough]"
6
+ user-invocable: true
7
+ ---
8
+
9
+ # Adversarial Code Review
10
+
11
+ Review code from an attacker's perspective using STRIDE + OWASP frameworks.
12
+
13
+ ## 4-Phase Review Process
14
+
15
+ ### Phase 1: Trust Boundary Analysis
16
+ Identify where trust transitions occur:
17
+ - External input reaching internal logic without validation → **Tampering**
18
+ - Implicit trust between services → **Elevation of Privilege**
19
+ - Shared storage without isolation → **Information Disclosure**
20
+ - Authentication boundaries not clearly marked → **Spoofing**
21
+
22
+ Output: `[TRUST-BOUNDARY]` findings with location, threat type, and current validation level.
23
+
24
+ ### Phase 2: Attack Surface Mapping
25
+ Map all entry points and exposure:
26
+ - Public API endpoints and auth requirements
27
+ - File upload/download paths → Path traversal risk
28
+ - External system calls (URLs, queries) → SSRF/Injection
29
+ - Event handlers and callbacks → Race conditions
30
+ - Error message verbosity → Information Disclosure
31
+
32
+ Output: `[ATTACK-SURFACE]` table with endpoint, exposure level, and mitigation status.
33
+
34
+ ### Phase 3: Business Logic Review
35
+ Analyze logic flaws that static analysis misses:
36
+ - State machine violations (skip steps, replay)
37
+ - Authorization != authentication (authn succeeds but the authz check is missing)
38
+ - Race conditions in multi-step operations
39
+ - Numeric overflow/underflow in financial calculations
40
+ - Default-allow vs default-deny patterns
41
+
42
+ Output: `[LOGIC-FLAW]` findings with exploitation scenario and impact.
43
+
44
+ ### Phase 4: Defense Evaluation
45
+ Assess existing defense mechanisms:
46
+ - Input validation completeness (allowlist vs blocklist)
47
+ - Output encoding consistency
48
+ - Rate limiting and abuse prevention
49
+ - Logging coverage for security events
50
+ - Secret management (hardcoded credentials, env leaks)
51
+
52
+ Output: `[DEFENSE-GAP]` findings with recommendation.
53
+
54
+ ## Output Format
55
+
56
+ For each finding:
57
+ ```
58
+ [CATEGORY] Severity: HIGH|MEDIUM|LOW
59
+ Location: file:line
60
+ Finding: Description
61
+ Attack: How an attacker would exploit this
62
+ Fix: Recommended remediation
63
+ ```
64
+
65
+ ## Depth Modes
66
+ - **quick**: Phase 1 + 2 only (trust boundaries + attack surface)
67
+ - **thorough**: All 4 phases with detailed exploitation scenarios
68
+
69
+ ## Integration
70
+ - Complements `dev-review` (best practices) with an attacker's perspective
71
+ - Works with `sec-codeql-expert` for pattern-based + logic-based coverage
72
+ - Can be chained: `dev-review` → `adversarial-review` for complete coverage
@@ -2,6 +2,7 @@
2
2
  name: deep-plan
3
3
  description: Research-validated planning — research → plan → verify cycle for high-confidence implementation plans
4
4
  scope: core
5
+ context: fork
5
6
  version: 1.0.0
6
7
  user-invocable: true
7
8
  argument-hint: "<topic-or-issue>"
@@ -102,6 +102,7 @@ oh-my-customcode로 구동됩니다.
102
102
  | `/omcustom:audit-agents` | 에이전트 의존성 감사 |
103
103
  | `/omcustom:fix-refs` | 깨진 참조 수정 |
104
104
  | `/omcustom:takeover` | 기존 에이전트/스킬에서 canonical spec 추출 |
105
+ | `/adversarial-review` | 공격자 관점 보안 코드 리뷰 |
105
106
  | `/dev-review` | 코드 베스트 프랙티스 리뷰 |
106
107
  | `/dev-refactor` | 코드 리팩토링 |
107
108
  | `/memory-save` | 세션 컨텍스트를 claude-mem에 저장 |
@@ -130,7 +131,7 @@ project/
130
131
  +-- CLAUDE.md # 진입점
131
132
  +-- .claude/
132
133
  | +-- agents/ # 서브에이전트 정의 (44 파일)
133
- | +-- skills/ # 스킬 (74 디렉토리)
134
+ | +-- skills/ # 스킬 (75 디렉토리)
134
135
  | +-- rules/ # 전역 규칙 (R000-R021)
135
136
  | +-- hooks/ # 훅 스크립트 (보안, 검증, HUD)
136
137
  | +-- contexts/ # 컨텍스트 파일 (ecomode)
@@ -1,5 +1,5 @@
1
1
  {
2
- "version": "0.40.0",
2
+ "version": "0.42.2",
3
3
  "lastUpdated": "2026-03-16T00:00:00.000Z",
4
4
  "components": [
5
5
  {
@@ -18,7 +18,7 @@
18
18
  "name": "skills",
19
19
  "path": ".claude/skills",
20
20
  "description": "Reusable skill modules (includes slash commands)",
21
- "files": 74
21
+ "files": 75
22
22
  },
23
23
  {
24
24
  "name": "guides",