oh-my-customcode 0.55.0 → 0.57.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -13,7 +13,7 @@
13
13
 
14
14
  **[한국어 문서 (Korean)](./README_ko.md)**
15
15
 
16
- 45 agents. 92 skills. 21 rules. One command.
16
+ 45 agents. 93 skills. 21 rules. One command.
17
17
 
18
18
  ```bash
19
19
  npm install -g oh-my-customcode && cd your-project && omcustom init
@@ -146,7 +146,7 @@ Each agent declares its tools, model, memory scope, and limitations in YAML fron
146
146
 
147
147
  ---
148
148
 
149
- ### Skills (92)
149
+ ### Skills (93)
150
150
 
151
151
  | Category | Count | Includes |
152
152
  |----------|-------|----------|
package/dist/cli/index.js CHANGED
@@ -9322,8 +9322,10 @@ var package_default;
9322
9322
  var init_package = __esm(() => {
9323
9323
  package_default = {
9324
9324
  name: "oh-my-customcode",
9325
- workspaces: ["packages/*"],
9326
- version: "0.55.0",
9325
+ workspaces: [
9326
+ "packages/*"
9327
+ ],
9328
+ version: "0.57.0",
9327
9329
  description: "Batteries-included agent harness for Claude Code",
9328
9330
  type: "module",
9329
9331
  bin: {
@@ -9371,7 +9373,7 @@ var init_package = __esm(() => {
9371
9373
  yaml: "^2.8.2"
9372
9374
  },
9373
9375
  devDependencies: {
9374
- "@anthropic-ai/sdk": "^0.78.0",
9376
+ "@anthropic-ai/sdk": "^0.80.0",
9375
9377
  "@biomejs/biome": "^2.3.12",
9376
9378
  "@types/bun": "^1.3.6",
9377
9379
  "@types/js-yaml": "^4.0.9",
@@ -24789,6 +24791,8 @@ var en_default = {
24789
24791
  backupCreated: "Backup created at {{path}}",
24790
24792
  summary: "Update complete: {{updated}} updated, {{skipped}} skipped",
24791
24793
  summaryFailed: "Update failed: {{error}}",
24794
+ hardOption: "Sync namespace (name: field) in unmodified files from upstream",
24795
+ namespaceSynced: "↻ Namespace synced: {{count}} file(s)",
24792
24796
  allOption: "Batch update all outdated projects found by project discovery",
24793
24797
  allScanning: "Scanning for oh-my-customcode projects...",
24794
24798
  allNoneFound: "No oh-my-customcode projects found.",
@@ -25171,6 +25175,8 @@ var ko_default = {
25171
25175
  backupCreated: "백업 생성됨: {{path}}",
25172
25176
  summary: "업데이트 완료: {{updated}}개 업데이트, {{skipped}}개 건너뜀",
25173
25177
  summaryFailed: "업데이트 실패: {{error}}",
25178
+ hardOption: "미수정 파일의 네임스페이스(name: 필드)를 upstream에서 동기화",
25179
+ namespaceSynced: "↻ 네임스페이스 동기화: {{count}}개 파일",
25174
25180
  allOption: "프로젝트 탐색으로 발견된 모든 outdated 프로젝트 일괄 업데이트",
25175
25181
  allScanning: "oh-my-customcode 프로젝트 검색 중...",
25176
25182
  allNoneFound: "oh-my-customcode 프로젝트를 찾을 수 없습니다.",
@@ -25839,6 +25845,7 @@ var MESSAGES = {
25839
25845
  "update.lockfile_regenerated": "Lockfile regenerated ({{files}} files tracked)",
25840
25846
  "update.lockfile_failed": "Failed to regenerate lockfile: {{error}}",
25841
25847
  "update.protected_file_updated": "⟳ Protected file {{file}} in {{component}} updated: {{hint}}",
25848
+ "update.namespace_synced": "Namespace synced: {{file}} ({{component}})",
25842
25849
  "config.load_failed": "Failed to load config: {{error}}",
25843
25850
  "config.not_found": "Config not found at {{path}}, using defaults",
25844
25851
  "config.saved": "Config saved to {{path}}",
@@ -25884,6 +25891,7 @@ var MESSAGES = {
25884
25891
  "update.lockfile_regenerated": "잠금 파일 재생성 완료 ({{files}}개 파일 추적)",
25885
25892
  "update.lockfile_failed": "잠금 파일 재생성 실패: {{error}}",
25886
25893
  "update.protected_file_updated": "⟳ 보호 파일 {{file}} ({{component}}) 업데이트됨: {{hint}}",
25894
+ "update.namespace_synced": "네임스페이스 동기화: {{file}} ({{component}})",
25887
25895
  "config.load_failed": "설정 로드 실패: {{error}}",
25888
25896
  "config.not_found": "{{path}}에 설정 없음, 기본값 사용",
25889
25897
  "config.saved": "설정 저장: {{path}}",
@@ -29925,7 +29933,8 @@ function createUpdateResult() {
29925
29933
  newVersion: "",
29926
29934
  warnings: [],
29927
29935
  syncedRootFiles: [],
29928
- removedDeprecatedFiles: []
29936
+ removedDeprecatedFiles: [],
29937
+ namespaceSynced: []
29929
29938
  };
29930
29939
  }
29931
29940
  async function handleBackupIfRequested(targetDir, backup, result) {
@@ -29950,6 +29959,10 @@ async function processComponentUpdate(targetDir, component, updateCheck, customi
29950
29959
  const preserved = await updateComponent(targetDir, component, customizations, options, config, lockfile);
29951
29960
  result.updatedComponents.push(component);
29952
29961
  result.preservedFiles.push(...preserved);
29962
+ if (options.hard) {
29963
+ const synced = await applyNamespaceSync(targetDir, component, lockfile);
29964
+ result.namespaceSynced.push(...synced);
29965
+ }
29953
29966
  } catch (err) {
29954
29967
  const message = err instanceof Error ? err.message : String(err);
29955
29968
  result.warnings.push(`Failed to update ${component}: ${message}`);
@@ -30401,6 +30414,75 @@ async function removeDeprecatedFiles(targetDir, options) {
30401
30414
  }
30402
30415
  return removed;
30403
30416
  }
30417
+ function extractFrontmatterName(content) {
30418
+ const match = content.match(/^---\n([\s\S]*?)\n---/);
30419
+ if (!match)
30420
+ return null;
30421
+ const nameMatch = match[1].match(/^name:\s*(.+)$/m);
30422
+ if (!nameMatch)
30423
+ return null;
30424
+ return nameMatch[1].trim().replace(/^["']|["']$/g, "");
30425
+ }
30426
+ async function syncNamespaceInFile(targetFilePath, upstreamFilePath) {
30427
+ const targetContent = await readTextFile(targetFilePath);
30428
+ const upstreamContent = await readTextFile(upstreamFilePath);
30429
+ const upstreamName = extractFrontmatterName(upstreamContent);
30430
+ const targetName = extractFrontmatterName(targetContent);
30431
+ if (!upstreamName || !targetName || upstreamName === targetName)
30432
+ return false;
30433
+ const safeUpstreamName = upstreamName.replace(/\$/g, "$$$$");
30434
+ const updated = targetContent.replace(/^(name:\s*).+$/m, `$1${safeUpstreamName}`);
30435
+ if (updated === targetContent)
30436
+ return false;
30437
+ await writeTextFile(targetFilePath, updated);
30438
+ return true;
30439
+ }
30440
+ async function processNamespaceSyncEntry(entry, relPath, fullSrcPath, destPath, componentPath, lockfile) {
30441
+ if (!entry.isFile() || !entry.name.endsWith(".md"))
30442
+ return null;
30443
+ const targetFilePath = join14(destPath, relPath);
30444
+ const lockfileKey = `${componentPath}/${relPath}`.replace(/\\/g, "/");
30445
+ const shouldSkip = await shouldSkipProtectedFile(targetFilePath, lockfileKey, lockfile);
30446
+ if (shouldSkip)
30447
+ return null;
30448
+ if (!await fileExists(targetFilePath))
30449
+ return null;
30450
+ const didSync = await syncNamespaceInFile(targetFilePath, fullSrcPath);
30451
+ return didSync ? `${componentPath}/${relPath}` : null;
30452
+ }
30453
+ async function applyNamespaceSync(targetDir, component, lockfile) {
30454
+ if (!lockfile)
30455
+ return [];
30456
+ const componentPath = getComponentPath2(component);
30457
+ const srcPath = resolveTemplatePath(componentPath);
30458
+ const destPath = join14(targetDir, componentPath);
30459
+ const fs3 = await import("node:fs/promises");
30460
+ const synced = [];
30461
+ const queue = [{ dir: srcPath, relDir: "" }];
30462
+ while (queue.length > 0) {
30463
+ const { dir: dir2, relDir } = queue.shift();
30464
+ let entries;
30465
+ try {
30466
+ entries = await fs3.readdir(dir2, { withFileTypes: true });
30467
+ } catch {
30468
+ continue;
30469
+ }
30470
+ for (const entry of entries) {
30471
+ const relPath = relDir ? `${relDir}/${entry.name}` : entry.name;
30472
+ const fullSrcPath = join14(dir2, entry.name);
30473
+ if (entry.isDirectory()) {
30474
+ queue.push({ dir: fullSrcPath, relDir: relPath });
30475
+ continue;
30476
+ }
30477
+ const syncedPath = await processNamespaceSyncEntry(entry, relPath, fullSrcPath, destPath, componentPath, lockfile);
30478
+ if (syncedPath) {
30479
+ synced.push(syncedPath);
30480
+ info("update.namespace_synced", { file: relPath, component });
30481
+ }
30482
+ }
30483
+ }
30484
+ return synced;
30485
+ }
30404
30486
  function getComponentPath2(component) {
30405
30487
  const layout = getProviderLayout();
30406
30488
  if (component === "guides") {
@@ -30467,7 +30549,8 @@ async function updateSingleProject(targetDir, options) {
30467
30549
  preserveCustomizations: true,
30468
30550
  forceOverwriteAll: options.forceOverwriteAll,
30469
30551
  dryRun: options.dryRun,
30470
- backup: options.backup
30552
+ backup: options.backup,
30553
+ hard: options.hard
30471
30554
  };
30472
30555
  const result = await update(updateOptions);
30473
30556
  printUpdateResults(result);
@@ -30507,7 +30590,8 @@ async function updateAllProjects(options) {
30507
30590
  preserveCustomizations: true,
30508
30591
  forceOverwriteAll: options.forceOverwriteAll,
30509
30592
  dryRun: options.dryRun,
30510
- backup: options.backup
30593
+ backup: options.backup,
30594
+ hard: options.hard
30511
30595
  };
30512
30596
  const result = await update(updateOptions);
30513
30597
  if (result.success) {
@@ -30598,6 +30682,12 @@ function printUpdateResults(result) {
30598
30682
  if (result.preservedFiles.length > 0) {
30599
30683
  console.log(i18n.t("cli.update.preservedFiles", { count: String(result.preservedFiles.length) }));
30600
30684
  }
30685
+ if ((result.namespaceSynced?.length ?? 0) > 0) {
30686
+ console.log(i18n.t("cli.update.namespaceSynced", { count: String(result.namespaceSynced.length) }));
30687
+ for (const file of result.namespaceSynced) {
30688
+ console.log(` ↻ ${file}`);
30689
+ }
30690
+ }
30601
30691
  if (result.backedUpPaths.length > 0) {
30602
30692
  for (const path4 of result.backedUpPaths) {
30603
30693
  console.log(i18n.t("cli.update.backupCreated", { path: path4 }));
@@ -30654,7 +30744,7 @@ function createProgram() {
30654
30744
  program2.command("init").description(i18n.t("cli.init.description")).option("-l, --lang <language>", i18n.t("cli.init.langOption")).option("--domain <domain>", "Install only agents/skills for specific domain (backend, frontend, data-engineering, devops)").option("--yes", "Skip interactive wizard, use defaults").action(async (options) => {
30655
30745
  await initCommand(options);
30656
30746
  });
30657
- program2.command("update").description(i18n.t("cli.update.description")).option("--dry-run", i18n.t("cli.update.dryRunOption")).option("--force", i18n.t("cli.update.forceOption")).option("--force-overwrite-all", i18n.t("cli.update.forceOverwriteAllOption")).option("--backup", i18n.t("cli.update.backupOption")).option("--agents", i18n.t("cli.update.agentsOption")).option("--skills", i18n.t("cli.update.skillsOption")).option("--rules", i18n.t("cli.update.rulesOption")).option("--guides", i18n.t("cli.update.guidesOption")).option("--hooks", i18n.t("cli.update.hooksOption")).option("--contexts", i18n.t("cli.update.contextsOption")).option("--all", i18n.t("cli.update.allOption")).action(async (options) => {
30747
+ program2.command("update").description(i18n.t("cli.update.description")).option("--dry-run", i18n.t("cli.update.dryRunOption")).option("--force", i18n.t("cli.update.forceOption")).option("--force-overwrite-all", i18n.t("cli.update.forceOverwriteAllOption")).option("--hard", i18n.t("cli.update.hardOption")).option("--backup", i18n.t("cli.update.backupOption")).option("--agents", i18n.t("cli.update.agentsOption")).option("--skills", i18n.t("cli.update.skillsOption")).option("--rules", i18n.t("cli.update.rulesOption")).option("--guides", i18n.t("cli.update.guidesOption")).option("--hooks", i18n.t("cli.update.hooksOption")).option("--contexts", i18n.t("cli.update.contextsOption")).option("--all", i18n.t("cli.update.allOption")).action(async (options) => {
30658
30748
  await updateCommand(options);
30659
30749
  });
30660
30750
  program2.command("list").description(i18n.t("cli.list.description")).argument("[type]", i18n.t("cli.list.typeArgument"), "all").option("-f, --format <format>", "Output format: table, json, or simple", "table").option("--verbose", "Show detailed information").action(async (type, options) => {
package/dist/index.js CHANGED
@@ -377,6 +377,7 @@ var MESSAGES = {
377
377
  "update.lockfile_regenerated": "Lockfile regenerated ({{files}} files tracked)",
378
378
  "update.lockfile_failed": "Failed to regenerate lockfile: {{error}}",
379
379
  "update.protected_file_updated": "⟳ Protected file {{file}} in {{component}} updated: {{hint}}",
380
+ "update.namespace_synced": "Namespace synced: {{file}} ({{component}})",
380
381
  "config.load_failed": "Failed to load config: {{error}}",
381
382
  "config.not_found": "Config not found at {{path}}, using defaults",
382
383
  "config.saved": "Config saved to {{path}}",
@@ -422,6 +423,7 @@ var MESSAGES = {
422
423
  "update.lockfile_regenerated": "잠금 파일 재생성 완료 ({{files}}개 파일 추적)",
423
424
  "update.lockfile_failed": "잠금 파일 재생성 실패: {{error}}",
424
425
  "update.protected_file_updated": "⟳ 보호 파일 {{file}} ({{component}}) 업데이트됨: {{hint}}",
426
+ "update.namespace_synced": "네임스페이스 동기화: {{file}} ({{component}})",
425
427
  "config.load_failed": "설정 로드 실패: {{error}}",
426
428
  "config.not_found": "{{path}}에 설정 없음, 기본값 사용",
427
429
  "config.saved": "설정 저장: {{path}}",
@@ -1667,8 +1669,10 @@ import { join as join6 } from "node:path";
1667
1669
  // package.json
1668
1670
  var package_default = {
1669
1671
  name: "oh-my-customcode",
1670
- workspaces: ["packages/*"],
1671
- version: "0.55.0",
1672
+ workspaces: [
1673
+ "packages/*"
1674
+ ],
1675
+ version: "0.57.0",
1672
1676
  description: "Batteries-included agent harness for Claude Code",
1673
1677
  type: "module",
1674
1678
  bin: {
@@ -1716,7 +1720,7 @@ var package_default = {
1716
1720
  yaml: "^2.8.2"
1717
1721
  },
1718
1722
  devDependencies: {
1719
- "@anthropic-ai/sdk": "^0.78.0",
1723
+ "@anthropic-ai/sdk": "^0.80.0",
1720
1724
  "@biomejs/biome": "^2.3.12",
1721
1725
  "@types/bun": "^1.3.6",
1722
1726
  "@types/js-yaml": "^4.0.9",
@@ -1891,7 +1895,8 @@ function createUpdateResult() {
1891
1895
  newVersion: "",
1892
1896
  warnings: [],
1893
1897
  syncedRootFiles: [],
1894
- removedDeprecatedFiles: []
1898
+ removedDeprecatedFiles: [],
1899
+ namespaceSynced: []
1895
1900
  };
1896
1901
  }
1897
1902
  async function handleBackupIfRequested(targetDir, backup, result) {
@@ -1916,6 +1921,10 @@ async function processComponentUpdate(targetDir, component, updateCheck, customi
1916
1921
  const preserved = await updateComponent(targetDir, component, customizations, options, config, lockfile);
1917
1922
  result.updatedComponents.push(component);
1918
1923
  result.preservedFiles.push(...preserved);
1924
+ if (options.hard) {
1925
+ const synced = await applyNamespaceSync(targetDir, component, lockfile);
1926
+ result.namespaceSynced.push(...synced);
1927
+ }
1919
1928
  } catch (err) {
1920
1929
  const message = err instanceof Error ? err.message : String(err);
1921
1930
  result.warnings.push(`Failed to update ${component}: ${message}`);
@@ -2388,6 +2397,75 @@ async function removeDeprecatedFiles(targetDir, options) {
2388
2397
  }
2389
2398
  return removed;
2390
2399
  }
2400
+ function extractFrontmatterName(content) {
2401
+ const match = content.match(/^---\n([\s\S]*?)\n---/);
2402
+ if (!match)
2403
+ return null;
2404
+ const nameMatch = match[1].match(/^name:\s*(.+)$/m);
2405
+ if (!nameMatch)
2406
+ return null;
2407
+ return nameMatch[1].trim().replace(/^["']|["']$/g, "");
2408
+ }
2409
+ async function syncNamespaceInFile(targetFilePath, upstreamFilePath) {
2410
+ const targetContent = await readTextFile(targetFilePath);
2411
+ const upstreamContent = await readTextFile(upstreamFilePath);
2412
+ const upstreamName = extractFrontmatterName(upstreamContent);
2413
+ const targetName = extractFrontmatterName(targetContent);
2414
+ if (!upstreamName || !targetName || upstreamName === targetName)
2415
+ return false;
2416
+ const safeUpstreamName = upstreamName.replace(/\$/g, "$$$$");
2417
+ const updated = targetContent.replace(/^(name:\s*).+$/m, `$1${safeUpstreamName}`);
2418
+ if (updated === targetContent)
2419
+ return false;
2420
+ await writeTextFile(targetFilePath, updated);
2421
+ return true;
2422
+ }
2423
+ async function processNamespaceSyncEntry(entry, relPath, fullSrcPath, destPath, componentPath, lockfile) {
2424
+ if (!entry.isFile() || !entry.name.endsWith(".md"))
2425
+ return null;
2426
+ const targetFilePath = join6(destPath, relPath);
2427
+ const lockfileKey = `${componentPath}/${relPath}`.replace(/\\/g, "/");
2428
+ const shouldSkip = await shouldSkipProtectedFile(targetFilePath, lockfileKey, lockfile);
2429
+ if (shouldSkip)
2430
+ return null;
2431
+ if (!await fileExists(targetFilePath))
2432
+ return null;
2433
+ const didSync = await syncNamespaceInFile(targetFilePath, fullSrcPath);
2434
+ return didSync ? `${componentPath}/${relPath}` : null;
2435
+ }
2436
+ async function applyNamespaceSync(targetDir, component, lockfile) {
2437
+ if (!lockfile)
2438
+ return [];
2439
+ const componentPath = getComponentPath2(component);
2440
+ const srcPath = resolveTemplatePath(componentPath);
2441
+ const destPath = join6(targetDir, componentPath);
2442
+ const fs = await import("node:fs/promises");
2443
+ const synced = [];
2444
+ const queue = [{ dir: srcPath, relDir: "" }];
2445
+ while (queue.length > 0) {
2446
+ const { dir, relDir } = queue.shift();
2447
+ let entries;
2448
+ try {
2449
+ entries = await fs.readdir(dir, { withFileTypes: true });
2450
+ } catch {
2451
+ continue;
2452
+ }
2453
+ for (const entry of entries) {
2454
+ const relPath = relDir ? `${relDir}/${entry.name}` : entry.name;
2455
+ const fullSrcPath = join6(dir, entry.name);
2456
+ if (entry.isDirectory()) {
2457
+ queue.push({ dir: fullSrcPath, relDir: relPath });
2458
+ continue;
2459
+ }
2460
+ const syncedPath = await processNamespaceSyncEntry(entry, relPath, fullSrcPath, destPath, componentPath, lockfile);
2461
+ if (syncedPath) {
2462
+ synced.push(syncedPath);
2463
+ info("update.namespace_synced", { file: relPath, component });
2464
+ }
2465
+ }
2466
+ }
2467
+ return synced;
2468
+ }
2391
2469
  function getComponentPath2(component) {
2392
2470
  const layout = getProviderLayout();
2393
2471
  if (component === "guides") {
package/package.json CHANGED
@@ -1,7 +1,9 @@
1
1
  {
2
2
  "name": "oh-my-customcode",
3
- "workspaces": ["packages/*"],
4
- "version": "0.55.0",
3
+ "workspaces": [
4
+ "packages/*"
5
+ ],
6
+ "version": "0.57.0",
5
7
  "description": "Batteries-included agent harness for Claude Code",
6
8
  "type": "module",
7
9
  "bin": {
@@ -49,7 +51,7 @@
49
51
  "yaml": "^2.8.2"
50
52
  },
51
53
  "devDependencies": {
52
- "@anthropic-ai/sdk": "^0.78.0",
54
+ "@anthropic-ai/sdk": "^0.80.0",
53
55
  "@biomejs/biome": "^2.3.12",
54
56
  "@types/bun": "^1.3.6",
55
57
  "@types/js-yaml": "^4.0.9",
@@ -12,10 +12,33 @@ set -euo pipefail
12
12
  input=$(cat)
13
13
  PPID_FILE="/tmp/.claude-task-outcomes-${PPID}"
14
14
 
15
- # Only attempt collection if outcome file exists and eval-core is available
16
- if [ -f "$PPID_FILE" ] && command -v eval-core >/dev/null 2>&1; then
15
+ # Only attempt collection if outcome file exists
16
+ if [ ! -f "$PPID_FILE" ]; then
17
+ echo "$input"
18
+ exit 0
19
+ fi
20
+
21
+ # Discover eval-core CLI using multiple strategies
22
+ EVAL_CORE=""
23
+
24
+ # Strategy 1: Global CLI installation
25
+ if command -v eval-core >/dev/null 2>&1; then
26
+ EVAL_CORE="eval-core"
27
+ fi
28
+
29
+ # Strategy 2: Workspace package (oh-my-customcode development)
30
+ if [ -z "$EVAL_CORE" ]; then
31
+ SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
32
+ PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
33
+ WORKSPACE_CLI="$PROJECT_ROOT/packages/eval-core/src/cli/index.ts"
34
+ if [ -f "$WORKSPACE_CLI" ] && command -v bun >/dev/null 2>&1; then
35
+ EVAL_CORE="bun run $WORKSPACE_CLI"
36
+ fi
37
+ fi
38
+
39
+ if [ -n "$EVAL_CORE" ]; then
17
40
  echo "[Hook] Collecting eval metrics via eval-core..." >&2
18
- eval-core collect --ppid "$PPID" 2>/dev/null || true
41
+ $EVAL_CORE collect --ppid "$PPID" 2>/dev/null || true
19
42
  fi
20
43
 
21
44
  # Always pass through input and exit 0 (advisory only)
@@ -41,10 +41,10 @@ Implemented in `.claude/hooks/hooks.json` (PreToolUse → Agent/Task matcher).
41
41
  ### Format
42
42
 
43
43
  ```
44
- {Cost} | {project} | {branch} | RL:{rate_limit}% | CTX:{usage}%
44
+ {Cost} | {project} | {branch} | RL:{rate_limit}% | WL:{weekly_limit}% | CTX:{usage}%
45
45
  ```
46
46
 
47
- Example: `$0.05 | my-project | develop | RL:45% | CTX:42%`
47
+ Example: `$0.05 | my-project | develop | RL:45% | WL:72% | CTX:42%`
48
48
 
49
49
  ### Configuration
50
50
 
@@ -70,12 +70,17 @@ Set in `.claude/settings.local.json`. The command receives JSON via stdin with m
70
70
  | Rate Limit | < 50% | Green |
71
71
  | Rate Limit | 50-79% | Yellow |
72
72
  | Rate Limit | >= 80% | Red |
73
+ | Weekly Limit | < 50% | Green |
74
+ | Weekly Limit | 50-79% | Yellow |
75
+ | Weekly Limit | >= 80% | Red |
73
76
  | Context | < 60% | Green |
74
77
  | Context | 60-79% | Yellow |
75
78
  | Context | >= 80% | Red |
76
79
 
77
80
  The `RL:{rate_limit}%` segment only appears when Claude Code v2.1.80+ provides `rate_limits` data. On older versions, this segment is omitted.
78
81
 
82
+ The `WL:{weekly_limit}%` segment shows the 7-day rolling rate limit percentage. Both RL and WL segments are omitted on older versions.
83
+
79
84
  ## Integration
80
85
 
81
86
  Integrates with R007 (Agent ID), R008 (Tool ID), R009 (Parallel).
@@ -0,0 +1,136 @@
1
+ ---
2
+ name: omcustom:auto-improve
3
+ description: Apply verified improvement suggestions from eval-core analysis to omcustom configuration
4
+ scope: harness
5
+ user-invocable: true
6
+ effort: high
7
+ ---
8
+
9
+ # /omcustom:auto-improve — Automated Improvement Workflow
10
+
11
+ ## Purpose
12
+
13
+ Reads improvement suggestions from eval-core analysis, lets the user select which to apply, applies changes in an isolated worktree with sauron verification, and creates a PR for review.
14
+
15
+ ## Usage
16
+
17
+ ```
18
+ /omcustom:auto-improve # Interactive selection from pending suggestions
19
+ ```
20
+
21
+ ## Prerequisites
22
+
23
+ - eval-core analysis data exists (run `/omcustom:improve-report` first if empty)
24
+ - Pending improvement suggestions in `proposed` status
25
+
26
+ ## Workflow
27
+
28
+ ### Step 1: Read Suggestions
29
+
30
+ 1. Run `bun run packages/eval-core/src/cli/index.ts analyze --format json --save` via Bash
31
+ 2. Parse JSON output for improvement suggestions
32
+ 3. If no suggestions: display "No improvement suggestions available" and exit
33
+
34
+ ### Step 2: Display & Select
35
+
36
+ Display numbered list:
37
+ ```
38
+ [Auto-Improve] Available suggestions:
39
+ 1. [HIGH] agent:lang-golang-expert — Escalate model sonnet→opus (3 failures in 5 uses)
40
+ 2. [MED] routing:dev-lead-routing — Add Flutter keyword mapping (2 routing misses)
41
+ 3. [LOW] skill:systematic-debugging — Add timeout guard (1 timeout in 10 uses)
42
+
43
+ Select items: [1,2,3] / "all" / "cancel"
44
+ ```
45
+
46
+ **Self-reference filter**: Exclude items where targetName matches:
47
+ - `omcustom-auto-improve`, `auto-improve`
48
+ - `pipeline-guards`, `evaluator-optimizer`
49
+ - Any item targeting this skill itself
50
+
51
+ ### Step 3: Approve (State Transition)
52
+
53
+ For each selected item:
54
+ 1. Call eval-core API: transition `proposed` → `approved`
55
+ 2. Display: `[Approved] {N} items selected for application`
56
+
57
+ ### Step 4: Worktree Isolation
58
+
59
+ - Use `EnterWorktree` tool with name `auto-improve-{YYYYMMDD}`
60
+ - Creates isolated branch from HEAD
61
+
62
+ ### Step 5: Apply Changes
63
+
64
+ Map each approved item to the appropriate subagent by `targetType`:
65
+
66
+ | targetType | Agent | Action |
67
+ |------------|-------|--------|
68
+ | agent | mgr-creator | Modify agent frontmatter/body |
69
+ | skill | Matching domain expert | Revise skill SKILL.md |
70
+ | routing | general-purpose | Update routing patterns |
71
+ | model-escalation | general-purpose | Update model field in agent frontmatter |
72
+
73
+ Spawn agents in parallel (max 4 per R009). Each agent receives:
74
+ - Action description and evidence data
75
+ - Target file path
76
+ - Specific modification instructions
77
+
78
+ ### Step 6: Verification
79
+
80
+ 1. Delegate to mgr-sauron: full R017 verification
81
+ 2. If **PASS**: proceed to Step 7
82
+ 3. If **FAIL**: display failures, offer options:
83
+ - `fix` → re-apply with sauron feedback (max 2 cycles)
84
+ - `reject` → transition all to `rejected`, ExitWorktree(remove)
85
+ - `manual` → keep worktree for user inspection
86
+
87
+ ### Step 7: PR & Finalize
88
+
89
+ 1. Delegate to mgr-gitnerd: commit + create PR
90
+ - Title: `chore(auto-improve): apply {N} improvement suggestions`
91
+ - Body: table of applied items with evidence
92
+ 2. Transition all items to `applied` with `appliedAt` timestamp and PR URL
93
+ 3. `ExitWorktree(action: "keep")` — keep branch for PR
94
+ 4. Display PR URL to user
95
+
96
+ ## Safety Guards
97
+
98
+ | Guard | Implementation |
99
+ |-------|---------------|
100
+ | Self-reference prevention | Blocklist filter in Step 2 |
101
+ | User approval gate | Step 2 interactive selection |
102
+ | Worktree isolation | Step 4 EnterWorktree |
103
+ | Sauron verification | Step 6 mandatory pass |
104
+ | PR-based merge | Step 7 — no direct push to develop |
105
+ | Max items per run | 20 default, 50 hard cap |
106
+ | Max fix cycles | 2 retries before rejection |
107
+ | Rollback | `git revert` via mgr-gitnerd post-merge |
108
+
109
+ ## Error Handling
110
+
111
+ | Scenario | Action |
112
+ |----------|--------|
113
+ | No suggestions available | Display message, exit |
114
+ | User cancels selection | Exit, no state changes |
115
+ | Sauron verification fails 2x | Reject all, cleanup worktree |
116
+ | Agent application error | Mark individual item as rejected, continue others |
117
+ | EnterWorktree fails | Report error, exit |
118
+
119
+ ## Display Format
120
+
121
+ ```
122
+ [Auto-Improve] Starting improvement workflow
123
+ ├── Suggestions: {N} available ({high}H/{medium}M/{low}L confidence)
124
+ ├── Self-reference filtered: {count} items excluded
125
+ └── Select items to apply: [1,2,3] or "all" or "cancel"
126
+
127
+ [Auto-Improve] Applying {N} improvements in worktree
128
+ ├── Worktree: auto-improve-{date}
129
+ ├── Agents: {count} parallel
130
+ └── Pipeline guards: max 20 items, 2 retry cycles
131
+
132
+ [Auto-Improve] Verification
133
+ ├── Sauron: {PASS|FAIL}
134
+ ├── PR: #{number} created
135
+ └── Status: {N} items → applied
136
+ ```
@@ -22,6 +22,7 @@ Defines mandatory safety constraints for all pipeline, workflow, and iterative e
22
22
  | Timeout per pipeline | 900s | 1800s | worker-reviewer-pipeline |
23
23
  | Max retry count | 2 | 3 | Failure retry strategies |
24
24
  | Max PR improvement items | 20 | 50 | pr-auto-improve |
25
+ | Max auto-improve items | 20 | 50 | omcustom-auto-improve |
25
26
 
26
27
  ## Enforcement
27
28
 
@@ -152,6 +153,7 @@ Guard warnings appear inline:
152
153
  | dag-orchestration | Node count and timeout limits |
153
154
  | worker-reviewer-pipeline | Iteration and pipeline timeout limits |
154
155
  | pr-auto-improve | Improvement item count limits |
156
+ | omcustom-auto-improve | Auto-improve item count limits |
155
157
  | stuck-recovery | Guard triggers feed into stuck detection |
156
158
  | model-escalation | Repeated failures trigger escalation advisory |
157
159
 
@@ -50,10 +50,16 @@ git → mgr-gitnerd
50
50
  verify → mgr-sauron
51
51
  spec → mgr-claude-code-bible
52
52
  memory → sys-memory-keeper
53
- todo → sys-naggy
54
- batch multiple (parallel)
53
+ todo → sys-naggy
54
 + improve-report → omcustom-improve-report (skill invocation)
55
+ auto-improve → omcustom-auto-improve (skill invocation)
56
+ batch → multiple (parallel)
55
57
  ```
56
58
 
59
+ **improve-report keywords**: "improve-report", "improvement", "개선", "개선 리포트", "improve" → invoke `omcustom-improve-report` skill (read-only, no agent delegation needed)
60
+
61
+ **auto-improve keywords**: "auto-improve", "자동 개선", "개선 적용", "apply improvements", "improvement suggestions" → invoke `omcustom-auto-improve` skill (worktree isolation, sauron verification, PR creation)
62
+
57
63
  ### Ontology-RAG Enrichment (R019)
58
64
 
59
65
  If `get_agent_for_task` MCP tool is available, call it with the original query and inject `suggested_skills` into the agent prompt. Skip silently on failure.
@@ -66,16 +66,17 @@ fi
66
66
 
67
67
  # ---------------------------------------------------------------------------
68
68
  # 4. Single jq call — extract all fields as TSV
69
- # Fields: model_name, project_dir, ctx_pct, ctx_size, cost_usd, rl_5h_pct
69
+ # Fields: model_name, project_dir, ctx_pct, ctx_size, cost_usd, rl_5h_pct, rl_7d_pct
70
70
  # ---------------------------------------------------------------------------
71
- IFS=$'\t' read -r model_name project_dir ctx_pct ctx_size cost_usd rl_5h_pct <<< "$(
71
+ IFS=$'\t' read -r model_name project_dir ctx_pct ctx_size cost_usd rl_5h_pct rl_7d_pct <<< "$(
72
72
  printf '%s' "$json" | jq -r '[
73
73
  (.model.display_name // "unknown"),
74
74
  (.workspace.current_dir // ""),
75
75
  (if .context_window.used != null and .context_window.total != null and .context_window.total > 0 then (.context_window.used / .context_window.total * 100) elif .context_window.used_percentage != null then .context_window.used_percentage else 0 end),
76
76
  (.context_window.context_window_size // 0),
77
77
  (.cost.total_cost_usd // 0),
78
- (.rate_limits.five_hour.used_percentage // -1)
78
+ (.rate_limits.five_hour.used_percentage // -1),
79
+ (.rate_limits.seven_day.used_percentage // -1)
79
80
  ] | @tsv'
80
81
  )"
81
82
 
@@ -84,7 +85,7 @@ IFS=$'\t' read -r model_name project_dir ctx_pct ctx_size cost_usd rl_5h_pct <<<
84
85
  # ---------------------------------------------------------------------------
85
86
  COST_BRIDGE_FILE="/tmp/.claude-cost-${PPID}"
86
87
  _tmp="${COST_BRIDGE_FILE}.tmp.$$"
87
- printf '%s\t%s\t%s\t%s\n' "$cost_usd" "$ctx_pct" "$(date +%s)" "$rl_5h_pct" > "$_tmp" 2>/dev/null && mv -f "$_tmp" "$COST_BRIDGE_FILE" 2>/dev/null || true
88
+ printf '%s\t%s\t%s\t%s\t%s\n' "$cost_usd" "$ctx_pct" "$(date +%s)" "$rl_5h_pct" "$rl_7d_pct" > "$_tmp" 2>/dev/null && mv -f "$_tmp" "$COST_BRIDGE_FILE" 2>/dev/null || true
88
89
 
89
90
  # ---------------------------------------------------------------------------
90
91
  # 5. Model display name + color (bash 3.2 compatible case pattern matching)
@@ -270,6 +271,27 @@ if [[ "$rl_5h_int" -ge 0 ]]; then
270
271
  fi
271
272
  fi
272
273
 
274
+ # ---------------------------------------------------------------------------
275
+ # 9c. Weekly rate limit percentage with color (v2.1.80+, optional)
276
+ # ---------------------------------------------------------------------------
277
+ wl_display=""
278
+ wl_color=""
279
+ wl_7d_int="${rl_7d_pct%%.*}"
280
+ if ! [[ "$wl_7d_int" =~ ^-?[0-9]+$ ]]; then
281
+ wl_7d_int=-1
282
+ fi
283
+
284
+ if [[ "$wl_7d_int" -ge 0 ]]; then
285
+ wl_display="WL:${wl_7d_int}%"
286
+ if [[ "$wl_7d_int" -ge 80 ]]; then
287
+ wl_color="${COLOR_CTX_CRIT}" # Red (>= 80%)
288
+ elif [[ "$wl_7d_int" -ge 50 ]]; then
289
+ wl_color="${COLOR_CTX_WARN}" # Yellow (50-79%)
290
+ else
291
+ wl_color="${COLOR_CTX_OK}" # Green (< 50%)
292
+ fi
293
+ fi
294
+
273
295
  # ---------------------------------------------------------------------------
274
296
  # 10. Assemble and output the status line
275
297
  # ---------------------------------------------------------------------------
@@ -293,19 +315,27 @@ if [[ -n "$rl_display" ]]; then
293
315
  rl_segment=" | ${rl_color}${rl_display}${COLOR_RESET}"
294
316
  fi
295
317
 
318
+ # Build the WL segment (with separator) if present
319
+ wl_segment=""
320
+ if [[ -n "$wl_display" ]]; then
321
+ wl_segment=" | ${wl_color}${wl_display}${COLOR_RESET}"
322
+ fi
323
+
296
324
  if [[ -n "$git_branch" ]]; then
297
- printf "${cost_color}%s${COLOR_RESET} | %s | %s%s%s | ${ctx_color}%s${COLOR_RESET}\n" \
325
+ printf "${cost_color}%s${COLOR_RESET} | %s | %s%s%s%s | ${ctx_color}%s${COLOR_RESET}\n" \
298
326
  "$cost_display" \
299
327
  "$project_name" \
300
328
  "$branch_display" \
301
329
  "$pr_segment" \
302
330
  "$rl_segment" \
331
+ "$wl_segment" \
303
332
  "$ctx_display"
304
333
  else
305
- printf "${cost_color}%s${COLOR_RESET} | %s%s%s | ${ctx_color}%s${COLOR_RESET}\n" \
334
+ printf "${cost_color}%s${COLOR_RESET} | %s%s%s%s | ${ctx_color}%s${COLOR_RESET}\n" \
306
335
  "$cost_display" \
307
336
  "$project_name" \
308
337
  "$pr_segment" \
309
338
  "$rl_segment" \
339
+ "$wl_segment" \
310
340
  "$ctx_display"
311
341
  fi
@@ -101,6 +101,7 @@ oh-my-customcode로 구동됩니다.
101
101
  | `/omcustom:update-external` | 외부 소스에서 에이전트 업데이트 |
102
102
  | `/omcustom:audit-agents` | 에이전트 의존성 감사 |
103
103
  | `/omcustom:fix-refs` | 깨진 참조 수정 |
104
+ | `/omcustom:auto-improve` | 개선 사항 자동 적용 워크플로우 |
104
105
  | `/omcustom:improve-report` | eval-core 기반 개선 현황 리포트 |
105
106
  | `/omcustom-takeover` | 기존 에이전트/스킬에서 canonical spec 추출 |
106
107
  | `/adversarial-review` | 공격자 관점 보안 코드 리뷰 |
@@ -137,7 +138,7 @@ project/
137
138
  +-- CLAUDE.md # 진입점
138
139
  +-- .claude/
139
140
  | +-- agents/ # 서브에이전트 정의 (45 파일)
140
- | +-- skills/ # 스킬 (91 디렉토리)
141
+ | +-- skills/ # 스킬 (93 디렉토리)
141
142
  | +-- rules/ # 전역 규칙 (R000-R021)
142
143
  | +-- hooks/ # 훅 스크립트 (보안, 검증, HUD)
143
144
  | +-- contexts/ # 컨텍스트 파일 (ecomode)
@@ -1,5 +1,5 @@
1
1
  {
2
- "version": "0.55.0",
2
+ "version": "0.57.0",
3
3
  "lastUpdated": "2026-03-16T00:00:00.000Z",
4
4
  "components": [
5
5
  {
@@ -18,7 +18,7 @@
18
18
  "name": "skills",
19
19
  "path": ".claude/skills",
20
20
  "description": "Reusable skill modules (includes slash commands)",
21
- "files": 92
21
+ "files": 93
22
22
  },
23
23
  {
24
24
  "name": "guides",