oh-my-customcode 0.39.0 → 0.42.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -13,7 +13,7 @@
13
13
 
14
14
  **[한국어 문서 (Korean)](./README_ko.md)**
15
15
 
16
- 44 agents. 74 skills. 20 rules. One command.
16
+ 44 agents. 75 skills. 21 rules. One command.
17
17
 
18
18
  ```bash
19
19
  npm install -g oh-my-customcode && cd your-project && omcustom init
@@ -138,7 +138,7 @@ Each agent declares its tools, model, memory scope, and limitations in YAML fron
138
138
 
139
139
  ---
140
140
 
141
- ### Skills (74)
141
+ ### Skills (75)
142
142
 
143
143
  | Category | Count | Includes |
144
144
  |----------|-------|----------|
@@ -150,7 +150,7 @@ Each agent declares its tools, model, memory scope, and limitations in YAML fron
150
150
  | Memory | 3 | memory-save, memory-recall, memory-management |
151
151
  | Package | 3 | npm-publish, npm-version, npm-audit |
152
152
  | Optimization | 3 | optimize-analyze, optimize-bundle, optimize-report |
153
- | Security | 2 | cve-triage, jinja2-prompts |
153
+ | Security | 3 | adversarial-review, cve-triage, jinja2-prompts |
154
154
  | Other | 8 | codex-exec, vercel-deploy, skills-sh-search, result-aggregation, writing-clearly-and-concisely, and more |
155
155
 
156
156
  Skills use a 3-tier scope system: `core` (universal), `harness` (agent/skill maintenance), `package` (project-specific).
@@ -203,15 +203,15 @@ All commands are invoked inside the Claude Code conversation.
203
203
 
204
204
  ---
205
205
 
206
- ### Rules (20)
206
+ ### Rules (21)
207
207
 
208
208
  | Priority | Count | Purpose |
209
209
  |----------|-------|---------|
210
- | **MUST** | 13 | Safety, permissions, agent design, identification, orchestration, verification, completion |
210
+ | **MUST** | 14 | Safety, permissions, agent design, identification, orchestration, verification, completion, enforcement |
211
211
  | **SHOULD** | 6 | Interaction, error handling, memory, HUD, ecomode, ontology routing |
212
212
  | **MAY** | 1 | Optimization |
213
213
 
214
- Key rules: R010 (orchestrator never writes files), R009 (parallel execution mandatory), R017 (sauron verification before push), R020 (completion verification before declaring done).
214
+ Key rules: R010 (orchestrator never writes files), R009 (parallel execution mandatory), R017 (sauron verification before push), R020 (completion verification before declaring done), R021 (advisory-first enforcement model).
215
215
 
216
216
  ---
217
217
 
@@ -257,8 +257,8 @@ your-project/
257
257
  ├── CLAUDE.md # Entry point
258
258
  ├── .claude/
259
259
  │ ├── agents/ # 44 agent definitions
260
- │ ├── skills/ # 74 skill modules
261
- │ ├── rules/ # 20 governance rules (R000-R020)
260
+ │ ├── skills/ # 75 skill modules
261
+ │ ├── rules/ # 21 governance rules (R000-R021)
262
262
  │ ├── hooks/ # 15 lifecycle hook scripts
263
263
  │ ├── schemas/ # Tool input validation schemas
264
264
  │ ├── specs/ # Extracted canonical specs
package/dist/cli/index.js CHANGED
@@ -13443,6 +13443,158 @@ function getComponentPath(component) {
13443
13443
  return `.claude/${component}`;
13444
13444
  }
13445
13445
 
13446
+ // src/core/lockfile.ts
13447
+ init_fs();
13448
+ import { createHash } from "node:crypto";
13449
+ import { createReadStream } from "node:fs";
13450
+ import { readdir, stat } from "node:fs/promises";
13451
+ import { join as join5, relative as relative2 } from "node:path";
13452
+ var LOCKFILE_NAME = ".omcustom.lock.json";
13453
+ var LOCKFILE_VERSION = 1;
13454
+ var LOCKFILE_COMPONENTS = [
13455
+ "rules",
13456
+ "agents",
13457
+ "skills",
13458
+ "hooks",
13459
+ "contexts",
13460
+ "ontology",
13461
+ "guides"
13462
+ ];
13463
+ var COMPONENT_PATHS = LOCKFILE_COMPONENTS.map((component) => [getComponentPath(component), component]);
13464
+ function computeFileHash(filePath) {
13465
+ return new Promise((resolve2, reject) => {
13466
+ const hash = createHash("sha256");
13467
+ const stream = createReadStream(filePath);
13468
+ stream.on("error", (err) => {
13469
+ reject(err);
13470
+ });
13471
+ stream.on("data", (chunk) => {
13472
+ hash.update(chunk);
13473
+ });
13474
+ stream.on("end", () => {
13475
+ resolve2(hash.digest("hex"));
13476
+ });
13477
+ });
13478
+ }
13479
+ async function readLockfile(targetDir) {
13480
+ const lockfilePath = join5(targetDir, LOCKFILE_NAME);
13481
+ const exists2 = await fileExists(lockfilePath);
13482
+ if (!exists2) {
13483
+ debug("lockfile.not_found", { path: lockfilePath });
13484
+ return null;
13485
+ }
13486
+ try {
13487
+ const data = await readJsonFile(lockfilePath);
13488
+ if (typeof data !== "object" || data === null || data.lockfileVersion !== LOCKFILE_VERSION) {
13489
+ warn("lockfile.invalid_version", { path: lockfilePath });
13490
+ return null;
13491
+ }
13492
+ const record = data;
13493
+ if (typeof record.files !== "object" || record.files === null) {
13494
+ warn("lockfile.invalid_structure", { path: lockfilePath });
13495
+ return null;
13496
+ }
13497
+ return data;
13498
+ } catch (err) {
13499
+ warn("lockfile.read_failed", { path: lockfilePath, error: String(err) });
13500
+ return null;
13501
+ }
13502
+ }
13503
+ async function writeLockfile(targetDir, lockfile) {
13504
+ const lockfilePath = join5(targetDir, LOCKFILE_NAME);
13505
+ await writeJsonFile(lockfilePath, lockfile);
13506
+ debug("lockfile.written", { path: lockfilePath });
13507
+ }
13508
+ function resolveComponent(relativePath) {
13509
+ const normalized = relativePath.replace(/\\/g, "/");
13510
+ for (const [prefix, component] of COMPONENT_PATHS) {
13511
+ if (normalized === prefix || normalized.startsWith(`${prefix}/`)) {
13512
+ return component;
13513
+ }
13514
+ }
13515
+ return "unknown";
13516
+ }
13517
+ async function collectFiles(dir2, projectRoot, isTopLevel) {
13518
+ const results = [];
13519
+ let entries;
13520
+ try {
13521
+ entries = await readdir(dir2);
13522
+ } catch {
13523
+ return results;
13524
+ }
13525
+ for (const entry of entries) {
13526
+ if (isTopLevel && entry.startsWith(".") && entry !== ".claude") {
13527
+ continue;
13528
+ }
13529
+ const fullPath = join5(dir2, entry);
13530
+ let fileStat;
13531
+ try {
13532
+ fileStat = await stat(fullPath);
13533
+ } catch {
13534
+ continue;
13535
+ }
13536
+ if (fileStat.isDirectory()) {
13537
+ const subFiles = await collectFiles(fullPath, projectRoot, false);
13538
+ results.push(...subFiles);
13539
+ } else if (fileStat.isFile()) {
13540
+ results.push(fullPath);
13541
+ }
13542
+ }
13543
+ return results;
13544
+ }
13545
+ async function generateLockfile(targetDir, generatorVersion, templateVersion) {
13546
+ const files = {};
13547
+ const componentRoots = COMPONENT_PATHS.map(([prefix]) => join5(targetDir, prefix));
13548
+ for (const componentRoot of componentRoots) {
13549
+ const exists2 = await fileExists(componentRoot);
13550
+ if (!exists2) {
13551
+ debug("lockfile.component_dir_missing", { path: componentRoot });
13552
+ continue;
13553
+ }
13554
+ const allFiles = await collectFiles(componentRoot, targetDir, false);
13555
+ for (const absolutePath of allFiles) {
13556
+ const relativePath = relative2(targetDir, absolutePath).replace(/\\/g, "/");
13557
+ let hash;
13558
+ let size;
13559
+ try {
13560
+ hash = await computeFileHash(absolutePath);
13561
+ const fileStat = await stat(absolutePath);
13562
+ size = fileStat.size;
13563
+ } catch (err) {
13564
+ warn("lockfile.hash_failed", { path: absolutePath, error: String(err) });
13565
+ continue;
13566
+ }
13567
+ const component = resolveComponent(relativePath);
13568
+ files[relativePath] = {
13569
+ templateHash: hash,
13570
+ size,
13571
+ component
13572
+ };
13573
+ debug("lockfile.entry_added", { path: relativePath, component });
13574
+ }
13575
+ }
13576
+ return {
13577
+ lockfileVersion: LOCKFILE_VERSION,
13578
+ generatorVersion,
13579
+ generatedAt: new Date().toISOString(),
13580
+ templateVersion,
13581
+ files
13582
+ };
13583
+ }
13584
+ async function generateAndWriteLockfileForDir(targetDir) {
13585
+ try {
13586
+ const packageRoot = getPackageRoot();
13587
+ const manifest = await readJsonFile(join5(packageRoot, "templates", "manifest.json"));
13588
+ const { version: generatorVersion } = await readJsonFile(join5(packageRoot, "package.json"));
13589
+ const lockfile = await generateLockfile(targetDir, generatorVersion, manifest.version);
13590
+ await writeLockfile(targetDir, lockfile);
13591
+ return { fileCount: Object.keys(lockfile.files).length };
13592
+ } catch (err) {
13593
+ const msg = err instanceof Error ? err.message : String(err);
13594
+ return { fileCount: 0, warning: `Lockfile generation failed: ${msg}` };
13595
+ }
13596
+ }
13597
+
13446
13598
  // src/cli/doctor.ts
13447
13599
  async function pathExists(targetPath) {
13448
13600
  try {
@@ -13454,8 +13606,8 @@ async function pathExists(targetPath) {
13454
13606
  }
13455
13607
  async function isDirectory(targetPath) {
13456
13608
  try {
13457
- const stat = await fs.stat(targetPath);
13458
- return stat.isDirectory();
13609
+ const stat2 = await fs.stat(targetPath);
13610
+ return stat2.isDirectory();
13459
13611
  } catch {
13460
13612
  return false;
13461
13613
  }
@@ -13491,8 +13643,8 @@ async function collectSymlinksFromRefsDir(refsDir) {
13491
13643
  for (const entry of entries) {
13492
13644
  const entryPath = path.join(refsDir, entry.name);
13493
13645
  try {
13494
- const stat = await fs.lstat(entryPath);
13495
- if (stat.isSymbolicLink()) {
13646
+ const stat2 = await fs.lstat(entryPath);
13647
+ if (stat2.isSymbolicLink()) {
13496
13648
  symlinks.push(entryPath);
13497
13649
  }
13498
13650
  } catch {}
@@ -13890,6 +14042,45 @@ function readCurrentVersion() {
13890
14042
  return "0.0.0";
13891
14043
  }
13892
14044
  }
14045
+ async function checkLockfileDrift(targetDir) {
14046
+ const lockfile = await readLockfile(targetDir);
14047
+ if (!lockfile) {
14048
+ return null;
14049
+ }
14050
+ const modified = [];
14051
+ const removed = [];
14052
+ for (const [relativePath, entry] of Object.entries(lockfile.files)) {
14053
+ const absolutePath = path.join(targetDir, relativePath);
14054
+ try {
14055
+ const currentHash = await computeFileHash(absolutePath);
14056
+ if (currentHash !== entry.templateHash) {
14057
+ modified.push(relativePath);
14058
+ }
14059
+ } catch {
14060
+ removed.push(relativePath);
14061
+ }
14062
+ }
14063
+ const driftedFiles = [...modified, ...removed];
14064
+ if (driftedFiles.length === 0) {
14065
+ return {
14066
+ name: "Lockfile",
14067
+ status: "pass",
14068
+ message: `Lockfile OK — no drift detected (${Object.keys(lockfile.files).length} files tracked)`,
14069
+ fixable: false
14070
+ };
14071
+ }
14072
+ const details = [
14073
+ ...modified.map((f) => `modified: ${f}`),
14074
+ ...removed.map((f) => `removed: ${f}`)
14075
+ ];
14076
+ return {
14077
+ name: "Lockfile",
14078
+ status: "warn",
14079
+ message: `Lockfile drift detected: ${driftedFiles.length} file(s) changed since install`,
14080
+ fixable: false,
14081
+ details
14082
+ };
14083
+ }
13893
14084
  async function checkFrameworkDrift(targetDir, currentVersion) {
13894
14085
  const result = await checkFrameworkVersion(targetDir, currentVersion);
13895
14086
  if (!result)
@@ -13957,7 +14148,9 @@ async function runAllChecks(targetDir, layout, packageVersion, includeUpdates) {
13957
14148
  ]);
13958
14149
  const frameworkCheck = await checkFrameworkDrift(targetDir, packageVersion);
13959
14150
  const checksWithFramework = frameworkCheck ? [...baseChecks, frameworkCheck] : baseChecks;
13960
- return includeUpdates ? [...checksWithFramework, checkUpdateAvailable(packageVersion)] : checksWithFramework;
14151
+ const lockfileCheck = await checkLockfileDrift(targetDir);
14152
+ const checksWithLockfile = lockfileCheck ? [...checksWithFramework, lockfileCheck] : checksWithFramework;
14153
+ return includeUpdates ? [...checksWithLockfile, checkUpdateAvailable(packageVersion)] : checksWithLockfile;
13961
14154
  }
13962
14155
  async function doctorCommand(options = {}) {
13963
14156
  const targetDir = process.cwd();
@@ -14029,7 +14222,7 @@ import { basename as basename2, join as join7 } from "node:path";
14029
14222
 
14030
14223
  // src/core/file-preservation.ts
14031
14224
  init_fs();
14032
- import { basename, join as join5 } from "node:path";
14225
+ import { basename, join as join6 } from "node:path";
14033
14226
  var DEFAULT_CRITICAL_FILES = ["settings.json", "settings.local.json"];
14034
14227
  var DEFAULT_CRITICAL_DIRECTORIES = ["agent-memory", "agent-memory-local"];
14035
14228
  var PROTECTED_FRAMEWORK_FILES = ["CLAUDE.md", "AGENTS.md"];
@@ -14052,8 +14245,8 @@ function matchesGlobPattern(filePath, pattern) {
14052
14245
  return regex.test(filePath);
14053
14246
  }
14054
14247
  async function extractSingleFile(fileName, rootDir, tempDir, result) {
14055
- const srcPath = join5(rootDir, fileName);
14056
- const destPath = join5(tempDir, fileName);
14248
+ const srcPath = join6(rootDir, fileName);
14249
+ const destPath = join6(tempDir, fileName);
14057
14250
  try {
14058
14251
  if (await fileExists(srcPath)) {
14059
14252
  await copyFile(srcPath, destPath);
@@ -14067,8 +14260,8 @@ async function extractSingleFile(fileName, rootDir, tempDir, result) {
14067
14260
  }
14068
14261
  }
14069
14262
  async function extractSingleDir(dirName, rootDir, tempDir, result) {
14070
- const srcPath = join5(rootDir, dirName);
14071
- const destPath = join5(tempDir, dirName);
14263
+ const srcPath = join6(rootDir, dirName);
14264
+ const destPath = join6(tempDir, dirName);
14072
14265
  try {
14073
14266
  if (await fileExists(srcPath)) {
14074
14267
  await copyDirectory(srcPath, destPath, { overwrite: true, preserveTimestamps: true });
@@ -14105,8 +14298,8 @@ async function restoreCriticalFiles(rootDir, preservation) {
14105
14298
  failures: []
14106
14299
  };
14107
14300
  for (const fileName of preservation.extractedFiles) {
14108
- const preservedPath = join5(preservation.tempDir, fileName);
14109
- const targetPath = join5(rootDir, fileName);
14301
+ const preservedPath = join6(preservation.tempDir, fileName);
14302
+ const targetPath = join6(rootDir, fileName);
14110
14303
  try {
14111
14304
  if (fileName.endsWith(".json")) {
14112
14305
  await mergeJsonFile(preservedPath, targetPath);
@@ -14122,8 +14315,8 @@ async function restoreCriticalFiles(rootDir, preservation) {
14122
14315
  }
14123
14316
  }
14124
14317
  for (const dirName of preservation.extractedDirs) {
14125
- const preservedPath = join5(preservation.tempDir, dirName);
14126
- const targetPath = join5(rootDir, dirName);
14318
+ const preservedPath = join6(preservation.tempDir, dirName);
14319
+ const targetPath = join6(rootDir, dirName);
14127
14320
  try {
14128
14321
  await copyDirectory(preservedPath, targetPath, {
14129
14322
  overwrite: false,
@@ -14422,134 +14615,6 @@ function getDefaultWorkflow() {
14422
14615
  };
14423
14616
  }
14424
14617
 
14425
- // src/core/lockfile.ts
14426
- init_fs();
14427
- import { createHash } from "node:crypto";
14428
- import { createReadStream } from "node:fs";
14429
- import { readdir, stat } from "node:fs/promises";
14430
- import { join as join6, relative as relative2 } from "node:path";
14431
- var LOCKFILE_NAME = ".omcustom.lock.json";
14432
- var LOCKFILE_VERSION = 1;
14433
- var LOCKFILE_COMPONENTS = [
14434
- "rules",
14435
- "agents",
14436
- "skills",
14437
- "hooks",
14438
- "contexts",
14439
- "ontology",
14440
- "guides"
14441
- ];
14442
- var COMPONENT_PATHS = LOCKFILE_COMPONENTS.map((component) => [getComponentPath(component), component]);
14443
- function computeFileHash(filePath) {
14444
- return new Promise((resolve2, reject) => {
14445
- const hash = createHash("sha256");
14446
- const stream = createReadStream(filePath);
14447
- stream.on("error", (err) => {
14448
- reject(err);
14449
- });
14450
- stream.on("data", (chunk) => {
14451
- hash.update(chunk);
14452
- });
14453
- stream.on("end", () => {
14454
- resolve2(hash.digest("hex"));
14455
- });
14456
- });
14457
- }
14458
- async function writeLockfile(targetDir, lockfile) {
14459
- const lockfilePath = join6(targetDir, LOCKFILE_NAME);
14460
- await writeJsonFile(lockfilePath, lockfile);
14461
- debug("lockfile.written", { path: lockfilePath });
14462
- }
14463
- function resolveComponent(relativePath) {
14464
- const normalized = relativePath.replace(/\\/g, "/");
14465
- for (const [prefix, component] of COMPONENT_PATHS) {
14466
- if (normalized === prefix || normalized.startsWith(`${prefix}/`)) {
14467
- return component;
14468
- }
14469
- }
14470
- return "unknown";
14471
- }
14472
- async function collectFiles(dir2, projectRoot, isTopLevel) {
14473
- const results = [];
14474
- let entries;
14475
- try {
14476
- entries = await readdir(dir2);
14477
- } catch {
14478
- return results;
14479
- }
14480
- for (const entry of entries) {
14481
- if (isTopLevel && entry.startsWith(".") && entry !== ".claude") {
14482
- continue;
14483
- }
14484
- const fullPath = join6(dir2, entry);
14485
- let fileStat;
14486
- try {
14487
- fileStat = await stat(fullPath);
14488
- } catch {
14489
- continue;
14490
- }
14491
- if (fileStat.isDirectory()) {
14492
- const subFiles = await collectFiles(fullPath, projectRoot, false);
14493
- results.push(...subFiles);
14494
- } else if (fileStat.isFile()) {
14495
- results.push(fullPath);
14496
- }
14497
- }
14498
- return results;
14499
- }
14500
- async function generateLockfile(targetDir, generatorVersion, templateVersion) {
14501
- const files = {};
14502
- const componentRoots = COMPONENT_PATHS.map(([prefix]) => join6(targetDir, prefix));
14503
- for (const componentRoot of componentRoots) {
14504
- const exists2 = await fileExists(componentRoot);
14505
- if (!exists2) {
14506
- debug("lockfile.component_dir_missing", { path: componentRoot });
14507
- continue;
14508
- }
14509
- const allFiles = await collectFiles(componentRoot, targetDir, false);
14510
- for (const absolutePath of allFiles) {
14511
- const relativePath = relative2(targetDir, absolutePath).replace(/\\/g, "/");
14512
- let hash;
14513
- let size;
14514
- try {
14515
- hash = await computeFileHash(absolutePath);
14516
- const fileStat = await stat(absolutePath);
14517
- size = fileStat.size;
14518
- } catch (err) {
14519
- warn("lockfile.hash_failed", { path: absolutePath, error: String(err) });
14520
- continue;
14521
- }
14522
- const component = resolveComponent(relativePath);
14523
- files[relativePath] = {
14524
- templateHash: hash,
14525
- size,
14526
- component
14527
- };
14528
- debug("lockfile.entry_added", { path: relativePath, component });
14529
- }
14530
- }
14531
- return {
14532
- lockfileVersion: LOCKFILE_VERSION,
14533
- generatorVersion,
14534
- generatedAt: new Date().toISOString(),
14535
- templateVersion,
14536
- files
14537
- };
14538
- }
14539
- async function generateAndWriteLockfileForDir(targetDir) {
14540
- try {
14541
- const packageRoot = getPackageRoot();
14542
- const manifest = await readJsonFile(join6(packageRoot, "templates", "manifest.json"));
14543
- const { version: generatorVersion } = await readJsonFile(join6(packageRoot, "package.json"));
14544
- const lockfile = await generateLockfile(targetDir, generatorVersion, manifest.version);
14545
- await writeLockfile(targetDir, lockfile);
14546
- return { fileCount: Object.keys(lockfile.files).length };
14547
- } catch (err) {
14548
- const msg = err instanceof Error ? err.message : String(err);
14549
- return { fileCount: 0, warning: `Lockfile generation failed: ${msg}` };
14550
- }
14551
- }
14552
-
14553
14618
  // src/core/scope-filter.ts
14554
14619
  function getSkillScope(content) {
14555
14620
  const cleaned = content.replace(/^\uFEFF/, "");
@@ -14936,12 +15001,21 @@ async function generateMCPConfig(targetDir) {
14936
15001
  if (!ontologyExists) {
14937
15002
  return;
14938
15003
  }
15004
+ try {
15005
+ execSync3("uv --version", { stdio: "pipe" });
15006
+ } catch {
15007
+ warn("uv (Python package manager) not found. Install it with: curl -LsSf https://astral.sh/uv/install.sh | sh");
15008
+ warn("Skipping ontology-rag MCP configuration. You can set it up manually later.");
15009
+ return;
15010
+ }
14939
15011
  try {
14940
15012
  execSync3("uv venv .venv", { cwd: targetDir, stdio: "pipe" });
14941
15013
  execSync3('uv pip install "ontology-rag @ git+https://github.com/baekenough/oh-my-customcode.git#subdirectory=packages/ontology-rag"', { cwd: targetDir, stdio: "pipe" });
14942
15014
  } catch (error2) {
14943
15015
  const msg = error2 instanceof Error ? error2.message : String(error2);
14944
- throw new Error(`Failed to setup Python environment: ${msg}`);
15016
+ warn(`Failed to setup ontology-rag: ${msg}`);
15017
+ warn("You can configure the MCP server manually. See: https://github.com/baekenough/oh-my-customcode/tree/develop/packages/ontology-rag");
15018
+ return;
14945
15019
  }
14946
15020
  const config = {
14947
15021
  mcpServers: {
@@ -14975,6 +15049,7 @@ async function generateMCPConfig(targetDir) {
14975
15049
  await writeFile(mcpConfigPath, `${JSON.stringify(config, null, 2)}
14976
15050
  `);
14977
15051
  }
15052
+ info("ontology-rag MCP server configured successfully");
14978
15053
  }
14979
15054
  async function checkUvAvailable() {
14980
15055
  try {
@@ -16078,6 +16153,15 @@ async function initCommand(options) {
16078
16153
  logInstallResultInfo(installResult);
16079
16154
  logSuccessDetails(installedPaths, installResult.skippedComponents);
16080
16155
  await setupMcpConfig(targetDir);
16156
+ console.log("");
16157
+ console.log("Required plugins (install manually):");
16158
+ console.log(" /plugin marketplace add obra/superpowers-marketplace");
16159
+ console.log(" /plugin install superpowers");
16160
+ console.log(" /plugin install superpowers-developing-for-claude-code");
16161
+ console.log(" /plugin install elements-of-style");
16162
+ console.log(" /plugin install context7");
16163
+ console.log("");
16164
+ console.log('See CLAUDE.md "외부 의존성" section for details.');
16081
16165
  return {
16082
16166
  success: true,
16083
16167
  message: i18n.t("cli.init.success"),
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "oh-my-customcode",
3
3
  "workspaces": ["packages/*"],
4
- "version": "0.39.0",
4
+ "version": "0.42.1",
5
5
  "description": "Batteries-included agent harness for Claude Code",
6
6
  "type": "module",
7
7
  "bin": {
@@ -7,6 +7,7 @@ memory: project
7
7
  isolation: sandbox
8
8
  skills:
9
9
  - cve-triage
10
+ - adversarial-review
10
11
  tools:
11
12
  - Read
12
13
  - Write
@@ -0,0 +1,30 @@
1
+ {
2
+ "requiredPlugins": [
3
+ {
4
+ "name": "superpowers",
5
+ "source": "claude-plugins-official",
6
+ "description": "TDD, debugging, collaboration patterns"
7
+ },
8
+ {
9
+ "name": "superpowers-developing-for-claude-code",
10
+ "source": "superpowers-marketplace",
11
+ "description": "Claude Code development documentation"
12
+ },
13
+ {
14
+ "name": "elements-of-style",
15
+ "source": "superpowers-marketplace",
16
+ "description": "Writing clarity guidelines"
17
+ },
18
+ {
19
+ "name": "context7",
20
+ "source": "claude-plugins-official",
21
+ "description": "Library documentation lookup"
22
+ }
23
+ ],
24
+ "marketplaces": [
25
+ {
26
+ "name": "superpowers-marketplace",
27
+ "url": "obra/superpowers-marketplace"
28
+ }
29
+ ]
30
+ }
@@ -6,6 +6,7 @@
6
6
  # Always exits 0 (advisory only)
7
7
 
8
8
  set -euo pipefail
9
+ HOOK_START=$(date +%s%N 2>/dev/null || echo 0)
9
10
 
10
11
  # Dependency check: exit silently if jq not available
11
12
  command -v jq >/dev/null 2>&1 || exit 0
@@ -55,4 +56,9 @@ fi
55
56
 
56
57
  # Pass through
57
58
  echo "$input"
59
+ HOOK_END=$(date +%s%N 2>/dev/null || echo 0)
60
+ if [ "$HOOK_START" != "0" ] && [ "$HOOK_END" != "0" ]; then
61
+ HOOK_MS=$(( (HOOK_END - HOOK_START) / 1000000 ))
62
+ echo "[Hook Perf] $(basename "$0"): ${HOOK_MS}ms" >> "/tmp/.claude-hook-perf-${PPID}.log"
63
+ fi
58
64
  exit 0
@@ -1,5 +1,6 @@
1
1
  #!/bin/bash
2
2
  set -euo pipefail
3
+ HOOK_START=$(date +%s%N 2>/dev/null || echo 0)
3
4
 
4
5
  # Dependency check: exit silently if jq not available
5
6
  command -v jq >/dev/null 2>&1 || exit 0
@@ -86,4 +87,9 @@ fi
86
87
 
87
88
  # Pass through
88
89
  echo "$input"
90
+ HOOK_END=$(date +%s%N 2>/dev/null || echo 0)
91
+ if [ "$HOOK_START" != "0" ] && [ "$HOOK_END" != "0" ]; then
92
+ HOOK_MS=$(( (HOOK_END - HOOK_START) / 1000000 ))
93
+ echo "[Hook Perf] $(basename "$0"): ${HOOK_MS}ms" >> "/tmp/.claude-hook-perf-${PPID}.log"
94
+ fi
89
95
  exit 0
@@ -185,6 +185,10 @@ case "$DRIFT_STATUS" in
185
185
  echo " Skipped (not a git repository)" >&2
186
186
  ;;
187
187
  esac
188
+ echo "" >&2
189
+ echo " [Lockfile Drift]" >&2
190
+ echo " Note: file-level lockfile drift (template hash changes) is checked via 'omcustom doctor'" >&2
191
+ echo " Run 'omcustom doctor' to detect modified/removed template files since install." >&2
188
192
  echo "------------------------------------" >&2
189
193
 
190
194
  # SessionEnd hooks timeout (v2.1.74+)
@@ -1,5 +1,6 @@
1
1
  #!/bin/bash
2
2
  set -euo pipefail
3
+ HOOK_START=$(date +%s%N 2>/dev/null || echo 0)
3
4
 
4
5
  # Dependency check: exit silently if jq not available
5
6
  command -v jq >/dev/null 2>&1 || exit 0
@@ -180,9 +181,19 @@ if [ "$hard_block" = true ]; then
180
181
  echo " Recovery: Step back, re-read the error, and try a fundamentally different approach." >&2
181
182
  echo "=====================================" >&2
182
183
  echo "$input"
184
+ HOOK_END=$(date +%s%N 2>/dev/null || echo 0)
185
+ if [ "$HOOK_START" != "0" ] && [ "$HOOK_END" != "0" ]; then
186
+ HOOK_MS=$(( (HOOK_END - HOOK_START) / 1000000 ))
187
+ echo "[Hook Perf] $(basename "$0"): ${HOOK_MS}ms" >> "/tmp/.claude-hook-perf-${PPID}.log"
188
+ fi
183
189
  exit 1
184
190
  fi
185
191
 
186
192
  # Pass through
187
193
  echo "$input"
194
+ HOOK_END=$(date +%s%N 2>/dev/null || echo 0)
195
+ if [ "$HOOK_START" != "0" ] && [ "$HOOK_END" != "0" ]; then
196
+ HOOK_MS=$(( (HOOK_END - HOOK_START) / 1000000 ))
197
+ echo "[Hook Perf] $(basename "$0"): ${HOOK_MS}ms" >> "/tmp/.claude-hook-perf-${PPID}.log"
198
+ fi
188
199
  exit 0
@@ -85,6 +85,7 @@ skills:
85
85
  description: "Audit agent dependencies and references"
86
86
  user_invocable: true
87
87
  model_invocable: true
88
+ scope: harness
88
89
  summary: "Audit agent dependencies to ensure all skill and guide references are valid"
89
90
  keywords: [audit, dependencies, validation, references]
90
91
  rule_references: []
@@ -103,6 +104,7 @@ skills:
103
104
  description: "Fetch and verify Claude Code official documentation. Use when checking official spec compliance or updating local reference docs."
104
105
  user_invocable: true
105
106
  model_invocable: false
107
+ scope: harness
106
108
  summary: "Maintain up-to-date local copies of Claude Code official documentation"
107
109
  keywords: [claude-code, documentation, spec, compliance, verification]
108
110
  rule_references: []
@@ -112,6 +114,7 @@ skills:
112
114
  description: "Create a new agent with complete structure"
113
115
  user_invocable: true
114
116
  model_invocable: false
117
+ scope: harness
115
118
  summary: "Create a new agent with complete directory structure and validation"
116
119
  keywords: [create, agent, structure, validation]
117
120
  rule_references: []
@@ -203,6 +206,7 @@ skills:
203
206
  description: "Fix broken agent references and symlinks"
204
207
  user_invocable: true
205
208
  model_invocable: false
209
+ scope: harness
206
210
  summary: "Fix broken references, missing symlinks, and agent dependency issues"
207
211
  keywords: [fix, references, symlinks, dependencies]
208
212
  rule_references: []
@@ -230,6 +234,7 @@ skills:
230
234
  description: "Show help information for commands and system"
231
235
  user_invocable: true
232
236
  model_invocable: true
237
+ scope: harness
233
238
  summary: "Show help information for commands, agents, and system rules"
234
239
  keywords: [help, documentation, commands, agents, rules]
235
240
  rule_references: []
@@ -266,6 +271,7 @@ skills:
266
271
  description: "Show all available commands"
267
272
  user_invocable: true
268
273
  model_invocable: true
274
+ scope: harness
269
275
  summary: "Show all available commands with optional filtering and detailed information"
270
276
  keywords: [lists, commands, categories, system]
271
277
  rule_references: []
@@ -302,6 +308,7 @@ skills:
302
308
  description: "Enable/disable OpenTelemetry console monitoring for Claude Code usage tracking"
303
309
  user_invocable: true
304
310
  model_invocable: true
311
+ scope: package
305
312
  summary: "Enable or disable OpenTelemetry console monitoring for usage metrics"
306
313
  keywords: [monitoring, telemetry, otel, metrics, usage]
307
314
  rule_references: []
@@ -311,6 +318,7 @@ skills:
311
318
  description: "Audit npm dependencies for security and updates"
312
319
  user_invocable: true
313
320
  model_invocable: true
321
+ scope: package
314
322
  summary: "Audit npm dependencies for security vulnerabilities and outdated packages"
315
323
  keywords: [npm, audit, security, dependencies, vulnerabilities]
316
324
  rule_references: []
@@ -320,6 +328,7 @@ skills:
320
328
  description: "Publish package to npm registry with pre-checks"
321
329
  user_invocable: true
322
330
  model_invocable: false
331
+ scope: package
323
332
  summary: "Publish package to npm registry with comprehensive pre-publish checks"
324
333
  keywords: [npm, publish, package, registry, validation]
325
334
  rule_references: []
@@ -329,6 +338,7 @@ skills:
329
338
  description: "Manage semantic versions for npm packages"
330
339
  user_invocable: true
331
340
  model_invocable: false
341
+ scope: package
332
342
  summary: "Manage semantic versions for npm packages with changelog and git integration"
333
343
  keywords: [npm, version, semantic, changelog, git]
334
344
  rule_references: []
@@ -447,6 +457,7 @@ skills:
447
457
  description: "Full R017 verification (5+3 rounds) before commit"
448
458
  user_invocable: true
449
459
  model_invocable: false
460
+ scope: harness
450
461
  summary: "Execute full R017 verification with 5 rounds of manager verification and 3 rounds of deep review"
451
462
  keywords: [verification, r017, sync, validation, compliance]
452
463
  rule_references: [R017]
@@ -493,6 +504,7 @@ skills:
493
504
  description: "Show system status and health checks"
494
505
  user_invocable: true
495
506
  model_invocable: true
507
+ scope: harness
496
508
  summary: "Show comprehensive system status including agents, skills, guides, and health checks"
497
509
  keywords: [status, health, system, agents, skills]
498
510
  rule_references: []
@@ -520,6 +532,7 @@ skills:
520
532
  description: "Sync documentation with project structure"
521
533
  user_invocable: true
522
534
  model_invocable: false
535
+ scope: harness
523
536
  summary: "Ensure documentation accurately reflects current project state and agents work together"
524
537
  keywords: [update, documentation, sync, validation, consistency]
525
538
  rule_references: []
@@ -529,6 +542,7 @@ skills:
529
542
  description: "Update agents from external sources (GitHub, docs, etc.)"
530
543
  user_invocable: true
531
544
  model_invocable: false
545
+ scope: harness
532
546
  summary: "Update agents, skills, and guides from external sources to latest versions"
533
547
  keywords: [update, external, github, sources, versioning]
534
548
  rule_references: []
@@ -75,6 +75,19 @@ All members must be spawned in a single message. Partial spawning needs correcti
75
75
  ╚══════════════════════════════════════════════════════════════════╝
76
76
  ```
77
77
 
78
+ ### External Skill Conflict Resolution
79
+
80
+ When an external skill instructs using Agent tool but R018 criteria are met:
81
+
82
+ | Skill says | R018 requires | Resolution |
83
+ |------------|--------------|------------|
84
+ | "Use Agent tool for N tasks" | 3+ agents → Teams | Use Agent Teams, follow skill logic |
85
+ | "Sequential agent spawning" | Independent tasks → parallel | Parallelize per R009 |
86
+ | "Skip coordination" | Shared state → Teams | Use Teams for coordination |
87
+
88
+ **Rule**: External skills define the WORKFLOW. R018 defines the EXECUTION METHOD.
89
+ The skill's steps are followed, but agent spawning uses Teams when criteria are met.
90
+
78
91
  ## Common Violations
79
92
 
80
93
  ```
@@ -23,3 +23,19 @@ Update the relevant rule rather than just acknowledging the violation.
23
23
  | User points out violation | Update rule → Continue |
24
24
  | Self-detected violation | Fix immediately, consider rule update |
25
25
  | Ambiguous situation | Ask user, then update if needed |
26
+
27
+ ## Anti-Patterns
28
+
29
+ | Anti-Pattern | Why It's Wrong | Correct Action |
30
+ |-------------|----------------|----------------|
31
+ | "I'll update the rule later" | Deferred fixes are forgotten | Update rule NOW, before continuing |
32
+ | "This is a one-time exception" | Exceptions become patterns | If the rule is wrong, fix it; if it's right, follow it |
33
+ | "The rule doesn't cover this case" | Missing coverage = rule gap | Add the case to the rule immediately |
34
+ | "Let me finish the task first" | Rule violations compound | Fix rule first (5 min), then continue (prevents N future violations) |
35
+
36
+ ## Timing
37
+
38
+ Rule updates MUST happen:
39
+ - **Before** continuing the original task
40
+ - **In the same session** as the violation
41
+ - **Not** as a separate TODO or follow-up issue
@@ -0,0 +1,42 @@
1
+ # [MUST] Enforcement Policy
2
+
3
+ > **Priority**: MUST | **ID**: R021
4
+
5
+ ## Core Policy
6
+
7
+ oh-my-customcode uses an **advisory-first enforcement model**. Most rules are enforced through prompt engineering (CLAUDE.md, rules/, PostCompact hook) rather than hard-blocking hooks. This is intentional — it preserves agent flexibility while maintaining behavioral standards.
8
+
9
+ ## Enforcement Tiers
10
+
11
+ | Tier | Mechanism | Rules | Behavior |
12
+ |------|-----------|-------|----------|
13
+ | Hard Block | PreToolUse hook, exit 1 | stage-blocker, dev-server tmux | Prevents tool execution |
14
+ | Soft Block | Stop hook prompt | R011 session-end saves | Auto-performs then approves |
15
+ | Advisory | PostToolUse hooks | R007, R008, R009, R010, R018 | Warns via stderr, never blocks |
16
+ | Prompt-based | CLAUDE.md + rules/ + PostCompact | All MUST rules | Behavioral guidance in context |
17
+
18
+ ## Why Advisory-First
19
+
20
+ 1. **Agent flexibility**: Hard blocks can trap agents in unrecoverable states
21
+ 2. **Graceful degradation**: Missing dependencies (jq, etc.) don't break the session
22
+ 3. **Composability**: External skills and internal rules can coexist without deadlocks
23
+ 4. **PostCompact reinforcement**: R007/R008/R009/R010/R018 are re-injected after context compaction
24
+
25
+ ## Hard Enforcement Candidates (Future)
26
+
27
+ If advisory enforcement proves insufficient for specific rules, these are candidates for promotion to hard-block:
28
+
29
+ | Rule | Candidate Hook | Condition for Promotion |
30
+ |------|---------------|------------------------|
31
+ | R010 | git-delegation-guard.sh | If orchestrator-direct-write violations exceed 3/session |
32
+ | R007/R008 | (new hook) | If identification omission rate exceeds 20% |
33
+
34
+ Promotion requires: (1) measured violation rate data, (2) user approval, (3) rollback plan.
35
+
36
+ ## Integration
37
+
38
+ | Rule | Interaction |
39
+ |------|-------------|
40
+ | R010 | git-delegation-guard.sh is advisory; could promote to blocking |
41
+ | R016 | Violations trigger rule updates, not enforcement changes |
42
+ | PostCompact | Re-injects critical rules to combat context compaction amnesia |
@@ -207,17 +207,24 @@ All git operations (commit, push, branch, PR) MUST go through `mgr-gitnerd`. Int
207
207
 
208
208
  ## External Skills vs Internal Rules
209
209
 
210
- ```
211
- Internal rules always take precedence over external skills.
210
+ Internal rules ALWAYS take precedence over external skills.
211
+
212
+ This applies to ALL rule domains, not just git operations:
212
213
 
213
- Translation:
214
- External skill says → Internal rule requires
215
- ─────────────────────────────────────────────────────
216
- "git commit -m ..." Agent(mgr-gitnerd) commit
217
- "git push ..." Agent(mgr-gitnerd) push
218
- "gh pr create ..." Agent(mgr-gitnerd) create PR
219
- "git merge ..." Agent(mgr-gitnerd) merge
214
+ | External skill says | Internal rule requires |
215
+ |---------------------|----------------------|
216
+ | "git commit -m ..." | Agent(mgr-gitnerd) commit (R010) |
217
+ | "run 3 agents sequentially" | Parallel execution if independent (R009) |
218
+ | "use Agent tool for 5 research tasks" | Agent Teams when criteria met (R018) |
219
+ | "skip code review" | Follow project review workflow |
220
+ | "write files directly" | Delegate to specialist subagent (R010) |
220
221
 
222
+ When a skill's workflow conflicts with R009/R010/R018:
223
+ 1. Follow the skill's LOGIC and STEPS
224
+ 2. Replace the EXECUTION method with rule-compliant alternatives
225
+ 3. The skill defines WHAT to do; rules define HOW to execute
226
+
227
+ ```
221
228
  Incorrect:
222
229
  [Using external skill]
223
230
  Main conversation → directly runs "git push"
@@ -0,0 +1,72 @@
1
+ ---
2
+ name: adversarial-review
3
+ description: Adversarial code review using attacker mindset — trust boundary, attack surface, business logic, and defense evaluation
4
+ scope: core
5
+ argument-hint: "<file-or-directory> [--depth quick|thorough]"
6
+ user-invocable: true
7
+ ---
8
+
9
+ # Adversarial Code Review
10
+
11
+ Review code from an attacker's perspective using STRIDE + OWASP frameworks.
12
+
13
+ ## 4-Phase Review Process
14
+
15
+ ### Phase 1: Trust Boundary Analysis
16
+ Identify where trust transitions occur:
17
+ - External input reaching internal logic without validation → **Tampering**
18
+ - Implicit trust between services → **Elevation of Privilege**
19
+ - Shared storage without isolation → **Information Disclosure**
20
+ - Authentication boundaries not clearly marked → **Spoofing**
21
+
22
+ Output: `[TRUST-BOUNDARY]` findings with location, threat type, and current validation level.
23
+
24
+ ### Phase 2: Attack Surface Mapping
25
+ Map all entry points and exposure:
26
+ - Public API endpoints and auth requirements
27
+ - File upload/download paths → Path traversal risk
28
+ - External system calls (URLs, queries) → SSRF/Injection
29
+ - Event handlers and callbacks → Race conditions
30
+ - Error message verbosity → Information Disclosure
31
+
32
+ Output: `[ATTACK-SURFACE]` table with endpoint, exposure level, and mitigation status.
33
+
34
+ ### Phase 3: Business Logic Review
35
+ Analyze logic flaws that static analysis misses:
36
+ - State machine violations (skip steps, replay)
37
+ - Authorization != authentication (authn ok but authz missing)
38
+ - Race conditions in multi-step operations
39
+ - Numeric overflow/underflow in financial calculations
40
+ - Default-allow vs default-deny patterns
41
+
42
+ Output: `[LOGIC-FLAW]` findings with exploitation scenario and impact.
43
+
44
+ ### Phase 4: Defense Evaluation
45
+ Assess existing defense mechanisms:
46
+ - Input validation completeness (allowlist vs blocklist)
47
+ - Output encoding consistency
48
+ - Rate limiting and abuse prevention
49
+ - Logging coverage for security events
50
+ - Secret management (hardcoded credentials, env leaks)
51
+
52
+ Output: `[DEFENSE-GAP]` findings with recommendation.
53
+
54
+ ## Output Format
55
+
56
+ For each finding:
57
+ ```
58
+ [CATEGORY] Severity: HIGH|MEDIUM|LOW
59
+ Location: file:line
60
+ Finding: Description
61
+ Attack: How an attacker would exploit this
62
+ Fix: Recommended remediation
63
+ ```
64
+
65
+ ## Depth Modes
66
+ - **quick**: Phase 1 + 2 only (trust boundaries + attack surface)
67
+ - **thorough**: All 4 phases with detailed exploitation scenarios
68
+
69
+ ## Integration
70
+ - Complements `dev-review` (best practices) with an attacker's perspective
71
+ - Works with `sec-codeql-expert` for pattern-based + logic-based coverage
72
+ - Can be chained: `dev-review` → `adversarial-review` for complete coverage
@@ -0,0 +1,272 @@
1
+ # AI 에이전트 시스템
2
+
3
+ oh-my-customcode로 구동됩니다.
4
+
5
+ ---
6
+ ## 모든 응답 전 반드시 확인
7
+
8
+ 1. 에이전트 식별로 시작하는가? (R007) 2. 도구 호출에 식별 포함? (R008) 3. 2+ 에이전트 스폰 시 R018 체크? → 하나라도 NO면 즉시 수정
9
+
10
+ ---
11
+
12
+ ## 중요: 규칙 적용 범위
13
+
14
+ > **이 규칙들은 상황에 관계없이 항상 적용됩니다:**
15
+
16
+ | 상황 | 규칙 적용? |
17
+ |------|-----------|
18
+ | 이 프로젝트 작업 시 | **예** |
19
+ | 외부 프로젝트 작업 시 | **예** |
20
+ | 컨텍스트 압축 후 | **예** |
21
+ | 간단한 질문 | **예** |
22
+ | 모든 상황 | **예** |
23
+
24
+ ---
25
+
26
+ ## 중요: 세션 연속성
27
+
28
+ > **이 규칙들은 컨텍스트 압축 후에도 항상 적용됩니다.**
29
+
30
+ ```
31
+ "compact conversation" 후 세션이 계속될 때:
32
+ 1. 이 CLAUDE.md를 즉시 다시 읽기
33
+ 2. 모든 강제 규칙 활성 상태 유지
34
+ 3. 이전 컨텍스트 요약이 이 규칙을 대체하지 않음
35
+ 4. 첫 응답은 반드시 에이전트 식별 포함
36
+
37
+ 예외 없음. 변명 없음.
38
+ ```
39
+
40
+ ---
41
+
42
+ ## 중요: 강제 규칙
43
+
44
+ > **이 규칙들은 협상 불가. 위반 = 즉시 수정 필요.**
45
+
46
+ | 규칙 | 핵심 | 위반 시 |
47
+ |------|------|--------|
48
+ | R007 에이전트 식별 | 모든 응답은 `┌─ Agent:` 헤더로 시작 | 즉시 헤더 추가 |
49
+ | R008 도구 식별 | 모든 도구 호출에 `[에이전트명][모델] → Tool:` 접두사 | 즉시 접두사 추가 |
50
+ | R009 병렬 실행 | 독립 작업 2개 이상 → 병렬 에이전트 (최대 4개) | 순차 실행 중단, 병렬로 전환 |
51
+ | R010 오케스트레이터 | 오케스트레이터는 파일 수정 금지 → 서브에이전트에 위임 | 직접 수정 중단, 위임 |
52
+
53
+ ---
54
+
55
+ ## 전역 규칙 (필수 준수)
56
+
57
+ > `.claude/rules/` 참조
58
+
59
+ ### MUST (절대 위반 금지)
60
+ | ID | 규칙 | 설명 |
61
+ |----|------|------|
62
+ | R000 | 언어 정책 | 한국어 입출력, 영어 파일, 위임 모델 |
63
+ | R001 | 안전 규칙 | 금지된 작업, 필수 확인 |
64
+ | R002 | 권한 규칙 | 도구 티어, 파일 접근 범위 |
65
+ | R006 | 에이전트 설계 | 에이전트 구조, 관심사 분리 |
66
+ | R007 | 에이전트 식별 | **강제** - 모든 응답에 에이전트/스킬 표시 |
67
+ | R008 | 도구 식별 | **강제** - 모든 도구 사용 시 에이전트 표시 |
68
+ | R009 | 병렬 실행 | **강제** - 병렬 실행, 대규모 작업 분해 |
69
+ | R010 | 오케스트레이터 조율 | **강제** - 오케스트레이터 조율, 세션 연속성, 직접 실행 금지 |
70
+ | R015 | 의도 투명성 | **강제** - 투명한 에이전트 라우팅 |
71
+ | R016 | 지속적 개선 | **강제** - 위반 발생 시 규칙 업데이트 |
72
+ | R017 | 동기화 검증 | **강제** - 구조 변경 전 검증 |
73
+ | R018 | Agent Teams | **강제(조건부)** - Agent Teams 활성화 시 적합한 작업에 필수 사용 |
74
+ | R020 | 완료 검증 | **강제** - 작업 완료 선언 전 검증 필수 |
75
+ | R021 | Enforcement Policy | **강제** - Advisory-first enforcement model |
76
+
77
+ ### SHOULD (강력 권장)
78
+ | ID | 규칙 | 설명 |
79
+ |----|------|------|
80
+ | R003 | 상호작용 규칙 | 응답 원칙, 상태 형식 |
81
+ | R004 | 오류 처리 | 오류 수준, 복구 전략 |
82
+ | R011 | 메모리 통합 | claude-mem을 통한 세션 지속성 |
83
+ | R012 | HUD 상태줄 | 실시간 상태 표시 |
84
+ | R013 | Ecomode | 배치 작업 토큰 효율성 |
85
+ | R019 | Ontology-RAG 라우팅 | 라우팅 스킬의 ontology-RAG enrichment |
86
+
87
+ ### MAY (선택)
88
+ | ID | 규칙 | 설명 |
89
+ |----|------|------|
90
+ | R005 | 최적화 | 효율성, 토큰 최적화 |
91
+
92
+ ## 커맨드
93
+
94
+ ### 슬래시 커맨드 (스킬 기반)
95
+
96
+ | 커맨드 | 설명 |
97
+ |--------|------|
98
+ | `/omcustom:analysis` | 프로젝트 분석 및 자동 커스터마이징 |
99
+ | `/omcustom:create-agent` | 새 에이전트 생성 |
100
+ | `/omcustom:update-docs` | 프로젝트 구조와 문서 동기화 |
101
+ | `/omcustom:update-external` | 외부 소스에서 에이전트 업데이트 |
102
+ | `/omcustom:audit-agents` | 에이전트 의존성 감사 |
103
+ | `/omcustom:fix-refs` | 깨진 참조 수정 |
104
+ | `/omcustom:takeover` | 기존 에이전트/스킬에서 canonical spec 추출 |
105
+ | `/adversarial-review` | 공격자 관점 보안 코드 리뷰 |
106
+ | `/dev-review` | 코드 베스트 프랙티스 리뷰 |
107
+ | `/dev-refactor` | 코드 리팩토링 |
108
+ | `/memory-save` | 세션 컨텍스트를 claude-mem에 저장 |
109
+ | `/memory-recall` | 메모리 검색 및 리콜 |
110
+ | `/omcustom:monitoring-setup` | OTel 콘솔 모니터링 활성화/비활성화 |
111
+ | `/omcustom:npm-publish` | npm 레지스트리에 패키지 배포 |
112
+ | `/omcustom:npm-version` | 시맨틱 버전 관리 |
113
+ | `/omcustom:npm-audit` | 의존성 감사 |
114
+ | `/omcustom:release-notes` | 릴리즈 노트 생성 (git 히스토리 기반) |
115
+ | `/codex-exec` | Codex CLI 프롬프트 실행 |
116
+ | `/optimize-analyze` | 번들 및 성능 분석 |
117
+ | `/optimize-bundle` | 번들 크기 최적화 |
118
+ | `/optimize-report` | 최적화 리포트 생성 |
119
+ | `/research` | 10-team 병렬 딥 분석 및 교차 검증 |
120
+ | `/deep-plan` | 연구 검증 기반 계획 수립 (research → plan → verify) |
121
+ | `/omcustom:sauron-watch` | 전체 R017 검증 |
122
+ | `/structured-dev-cycle` | 6단계 구조적 개발 사이클 (Plan → Verify → Implement → Verify → Compound → Done) |
123
+ | `/omcustom:lists` | 모든 사용 가능한 커맨드 표시 |
124
+ | `/omcustom:status` | 시스템 상태 표시 |
125
+ | `/omcustom:help` | 도움말 표시 |
126
+
127
+ ## 프로젝트 구조
128
+
129
+ ```
130
+ project/
131
+ +-- CLAUDE.md # 진입점
132
+ +-- .claude/
133
+ | +-- agents/ # 서브에이전트 정의 (44 파일)
134
+ | +-- skills/ # 스킬 (75 디렉토리)
135
+ | +-- rules/ # 전역 규칙 (R000-R021)
136
+ | +-- hooks/ # 훅 스크립트 (보안, 검증, HUD)
137
+ | +-- contexts/ # 컨텍스트 파일 (ecomode)
138
+ +-- guides/ # 레퍼런스 문서 (25 토픽)
139
+ ```
140
+
141
+ ## 오케스트레이션
142
+
143
+ 오케스트레이션은 메인 대화의 라우팅 스킬로 처리됩니다:
144
+ - **secretary-routing**: 매니저 에이전트로 관리 작업 라우팅
145
+ - **dev-lead-routing**: 언어/프레임워크 전문가에게 개발 작업 라우팅
146
+ - **de-lead-routing**: 데이터 엔지니어링 작업을 DE/파이프라인 전문가에게 라우팅
147
+ - **qa-lead-routing**: QA 워크플로우 조율
148
+
149
+ 메인 대화가 유일한 오케스트레이터 역할을 합니다. 서브에이전트는 다른 서브에이전트를 생성할 수 없습니다.
150
+
151
+ ### 동적 에이전트 생성
152
+
153
+ 기존 에이전트 중 작업에 맞는 전문가가 없으면 자동으로 생성합니다:
154
+
155
+ 1. 라우팅 스킬이 매칭 전문가 없음을 감지
156
+ 2. 오케스트레이터가 mgr-creator에 컨텍스트와 함께 위임
157
+ 3. mgr-creator가 관련 skills/guides를 자동 탐색
158
+ 4. 새 에이전트 생성 후 즉시 사용
159
+
160
+ 이것이 oh-my-customcode의 핵심 철학입니다: **"전문가가 없으면? 만들고, 지식을 연결하고, 사용한다."**
161
+
162
+ ## 아키텍처 철학: 컴파일레이션 메타포
163
+
164
+ oh-my-customcode는 소프트웨어 컴파일과 동일한 구조를 따릅니다:
165
+
166
+ | 컴파일 개념 | oh-my-customcode 매핑 | 역할 |
167
+ |------------|----------------------|------|
168
+ | Source code | `.claude/skills/` | 재사용 가능한 지식과 워크플로우 정의 |
169
+ | Build artifacts | `.claude/agents/` | 스킬을 조합한 실행 가능한 전문가 |
170
+ | Compiler | `mgr-sauron` (R017) | 구조 검증 및 정합성 보장 |
171
+ | Spec | `.claude/rules/` | 빌드 규칙과 제약 조건 |
172
+ | Linker | Routing skills | 에이전트를 작업에 연결 |
173
+ | Standard library | `guides/` | 공유 레퍼런스 문서 |
174
+
175
+ 이 메타포는 관심사 분리(R006)의 핵심입니다: 스킬(소스)을 에이전트(빌드 결과물)와 분리하여 독립적 진화를 가능하게 합니다.
176
+
177
+ ## 에이전트 요약
178
+
179
+ | 타입 | 수량 | 에이전트 |
180
+ |------|------|----------|
181
+ | SW Engineer/Language | 6 | lang-golang-expert, lang-python-expert, lang-rust-expert, lang-kotlin-expert, lang-typescript-expert, lang-java21-expert |
182
+ | SW Engineer/Backend | 6 | be-fastapi-expert, be-springboot-expert, be-go-backend-expert, be-express-expert, be-nestjs-expert, be-django-expert |
183
+ | SW Engineer/Frontend | 4 | fe-vercel-agent, fe-vuejs-agent, fe-svelte-agent, fe-flutter-agent |
184
+ | SW Engineer/Tooling | 3 | tool-npm-expert, tool-optimizer, tool-bun-expert |
185
+ | DE Engineer | 6 | de-airflow-expert, de-dbt-expert, de-spark-expert, de-kafka-expert, de-snowflake-expert, de-pipeline-expert |
186
+ | SW Engineer/Database | 3 | db-supabase-expert, db-postgres-expert, db-redis-expert |
187
+ | Security | 1 | sec-codeql-expert |
188
+ | SW Architect | 2 | arch-documenter, arch-speckit-agent |
189
+ | Infra Engineer | 2 | infra-docker-expert, infra-aws-expert |
190
+ | QA Team | 3 | qa-planner, qa-writer, qa-engineer |
191
+ | Manager | 6 | mgr-creator, mgr-updater, mgr-supplier, mgr-gitnerd, mgr-sauron, mgr-claude-code-bible |
192
+ | System | 2 | sys-memory-keeper, sys-naggy |
193
+ | **총계** | **44** | |
194
+
195
+ ## Agent Teams (MUST when enabled)
196
+
197
+ Claude Code의 Agent Teams 기능이 활성화되어 있으면 (`CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS=1`), 적격한 작업에 적극적으로 사용합니다.
198
+
199
+ | 기능 | 서브에이전트 (기본) | Agent Teams |
200
+ |------|---------------------|-------------|
201
+ | 통신 | 호출자에게 결과만 반환 | 피어 투 피어 메시지 |
202
+ | 조율 | 오케스트레이터가 관리 | 공유 작업 목록 |
203
+ | 적합한 작업 | 집중된 작업 | 리서치, 리뷰, 디버깅 |
204
+ | 토큰 비용 | 낮음 | 높음 |
205
+
206
+ **활성화 시, 적격한 협업 작업에 Agent Teams를 반드시 사용해야 합니다 (R018 MUST).**
207
+ 결정 매트릭스는 R018 (MUST-agent-teams.md)을 참조하세요.
208
+ 하이브리드 패턴 (Claude + Codex, 동적 생성 + Teams)이 지원됩니다.
209
+ 단순/비용 민감 작업에는 Task tool + 라우팅 스킬이 폴백으로 유지됩니다.
210
+
211
+ ## 빠른 참조
212
+
213
+ ```bash
214
+ # 프로젝트 분석
215
+ /omcustom:analysis
216
+
217
+ # 모든 커맨드 표시
218
+ /omcustom:lists
219
+
220
+ # 에이전트 관리
221
+ /omcustom:create-agent my-agent
222
+ /omcustom:update-docs
223
+ /omcustom:audit-agents
224
+
225
+ # 코드 리뷰
226
+ /dev-review src/main.go
227
+
228
+ # 메모리 관리
229
+ /memory-save
230
+ /memory-recall authentication
231
+
232
+ # 검증
233
+ /omcustom:sauron-watch
234
+ ```
235
+
236
+ ## 외부 의존성
237
+
238
+ ### 필수 플러그인
239
+
240
+ `/plugin install <이름>`으로 설치:
241
+
242
+ | 플러그인 | 소스 | 용도 |
243
+ |----------|------|------|
244
+ | superpowers | claude-plugins-official | TDD, 디버깅, 협업 패턴 |
245
+ | superpowers-developing-for-claude-code | superpowers-marketplace | Claude Code 개발 문서 |
246
+ | elements-of-style | superpowers-marketplace | 글쓰기 명확성 가이드라인 |
247
+ | obsidian-skills | - | 옵시디언 마크다운 지원 |
248
+ | context7 | claude-plugins-official | 라이브러리 문서 조회 |
249
+
250
+ ### 권장 MCP 서버
251
+
252
+ | 서버 | 용도 |
253
+ |------|------|
254
+ | claude-mem | 세션 메모리 영속성 (Chroma 기반) |
255
+
256
+ ### 설치 명령어
257
+
258
+ ```bash
259
+ # 마켓플레이스 추가
260
+ /plugin marketplace add obra/superpowers-marketplace
261
+
262
+ # 플러그인 설치
263
+ /plugin install superpowers
264
+ /plugin install superpowers-developing-for-claude-code
265
+ /plugin install elements-of-style
266
+
267
+ # MCP 설정 (claude-mem)
268
+ npm install -g claude-mem
269
+ claude-mem setup
270
+ ```
271
+
272
+ <!-- omcustom:git-workflow -->
@@ -1,12 +1,12 @@
1
1
  {
2
- "version": "0.39.0",
2
+ "version": "0.42.1",
3
3
  "lastUpdated": "2026-03-16T00:00:00.000Z",
4
4
  "components": [
5
5
  {
6
6
  "name": "rules",
7
7
  "path": ".claude/rules",
8
8
  "description": "Agent behavior rules and guidelines",
9
- "files": 20
9
+ "files": 21
10
10
  },
11
11
  {
12
12
  "name": "agents",
@@ -18,7 +18,7 @@
18
18
  "name": "skills",
19
19
  "path": ".claude/skills",
20
20
  "description": "Reusable skill modules (includes slash commands)",
21
- "files": 74
21
+ "files": 75
22
22
  },
23
23
  {
24
24
  "name": "guides",