claudekit-codex-sync 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/AGENTS.md +45 -0
- package/README.md +131 -0
- package/bin/ck-codex-sync +12 -0
- package/bin/ck-codex-sync.js +9 -0
- package/docs/code-standards.md +62 -0
- package/docs/codebase-summary.md +83 -0
- package/docs/codex-vs-claude-agents.md +74 -0
- package/docs/installation-guide.md +64 -0
- package/docs/project-overview-pdr.md +44 -0
- package/docs/project-roadmap.md +51 -0
- package/docs/system-architecture.md +106 -0
- package/package.json +16 -0
- package/plans/260222-2051-claudekit-codex-community-sync/phase-01-productization.md +36 -0
- package/plans/260222-2051-claudekit-codex-community-sync/phase-02-core-refactor.md +32 -0
- package/plans/260222-2051-claudekit-codex-community-sync/phase-03-agent-transpiler.md +33 -0
- package/plans/260222-2051-claudekit-codex-community-sync/phase-04-parity-harness.md +43 -0
- package/plans/260222-2051-claudekit-codex-community-sync/phase-05-distribution-npm.md +35 -0
- package/plans/260222-2051-claudekit-codex-community-sync/phase-06-git-clone-docs.md +28 -0
- package/plans/260222-2051-claudekit-codex-community-sync/phase-07-qa-release.md +35 -0
- package/plans/260222-2051-claudekit-codex-community-sync/plan.md +99 -0
- package/plans/260223-0951-refactor-and-upgrade/phase-01-project-structure.md +79 -0
- package/plans/260223-0951-refactor-and-upgrade/phase-02-extract-templates.md +36 -0
- package/plans/260223-0951-refactor-and-upgrade/phase-03-modularize-python.md +107 -0
- package/plans/260223-0951-refactor-and-upgrade/phase-04-live-source-detection.md +76 -0
- package/plans/260223-0951-refactor-and-upgrade/phase-05-agent-toml-config.md +88 -0
- package/plans/260223-0951-refactor-and-upgrade/phase-06-backup-registry.md +58 -0
- package/plans/260223-0951-refactor-and-upgrade/phase-07-tests-docs-push.md +54 -0
- package/plans/260223-0951-refactor-and-upgrade/plan.md +72 -0
- package/reports/brainstorm-260222-2051-claudekit-codex-community-sync.md +113 -0
- package/scripts/bootstrap-claudekit-skill-scripts.sh +150 -0
- package/scripts/claudekit-sync-all.py +1150 -0
- package/scripts/export-claudekit-prompts.sh +221 -0
- package/scripts/normalize-claudekit-for-codex.sh +261 -0
- package/src/claudekit_codex_sync/__init__.py +0 -0
- package/src/claudekit_codex_sync/asset_sync_dir.py +125 -0
- package/src/claudekit_codex_sync/asset_sync_zip.py +140 -0
- package/src/claudekit_codex_sync/bridge_generator.py +33 -0
- package/src/claudekit_codex_sync/cli.py +199 -0
- package/src/claudekit_codex_sync/config_enforcer.py +140 -0
- package/src/claudekit_codex_sync/constants.py +104 -0
- package/src/claudekit_codex_sync/dep_bootstrapper.py +73 -0
- package/src/claudekit_codex_sync/path_normalizer.py +248 -0
- package/src/claudekit_codex_sync/prompt_exporter.py +89 -0
- package/src/claudekit_codex_sync/runtime_verifier.py +32 -0
- package/src/claudekit_codex_sync/source_resolver.py +78 -0
- package/src/claudekit_codex_sync/sync_registry.py +77 -0
- package/src/claudekit_codex_sync/utils.py +130 -0
- package/templates/agents-md.md +45 -0
- package/templates/bridge-docs-init.sh +25 -0
- package/templates/bridge-project-status.sh +49 -0
- package/templates/bridge-resolve-command.py +52 -0
- package/templates/bridge-skill.md +63 -0
- package/templates/command-map.md +44 -0
- package/tests/__init__.py +1 -0
- package/tests/test_config_enforcer.py +44 -0
- package/tests/test_path_normalizer.py +61 -0
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
"""Dependency bootstrap for skills."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import shutil
|
|
6
|
+
import subprocess
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Dict
|
|
9
|
+
|
|
10
|
+
from .utils import eprint, is_excluded_path, run_cmd
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def bootstrap_deps(
    *,
    codex_home: Path,
    include_mcp: bool,
    include_test_deps: bool,
    dry_run: bool,
) -> Dict[str, int]:
    """Bootstrap Python and Node dependencies for skills.

    Creates a shared virtualenv at ``<codex_home>/skills/.venv``, installs
    every ``requirements*.txt`` found under the skills tree into it, then
    runs ``npm install`` for every ``package.json`` (when npm is on PATH).
    Individual install failures are counted, reported, and not fatal.

    Args:
        codex_home: Root of the Codex home directory.
        include_mcp: When False, skip mcp-builder / mcp-management skills.
        include_test_deps: When False, skip requirements files under test dirs.
        dry_run: When True, commands are passed through to run_cmd as dry runs.

    Returns:
        Counters: ``python_ok`` / ``python_fail`` / ``node_ok`` / ``node_fail``.

    Raises:
        SyncError: If ``python3`` is not found on PATH.
    """
    skills_dir = codex_home / "skills"
    venv_dir = skills_dir / ".venv"

    if not shutil.which("python3"):
        from .utils import SyncError
        raise SyncError("python3 not found")

    py_ok = py_fail = node_ok = node_fail = 0

    run_cmd(["python3", "-m", "venv", str(venv_dir)], dry_run=dry_run)
    py_bin = venv_dir / "bin" / "python3"
    run_cmd([str(py_bin), "-m", "pip", "install", "--upgrade", "pip"], dry_run=dry_run)

    for req in sorted(skills_dir.rglob("requirements*.txt")):
        rel = req.relative_to(skills_dir).as_posix()
        if is_excluded_path(req.parts):
            continue
        if not include_test_deps and "/test" in rel:
            continue
        if not include_mcp and ("mcp-builder" in req.parts or "mcp-management" in req.parts):
            continue
        try:
            run_cmd([str(py_bin), "-m", "pip", "install", "-r", str(req)], dry_run=dry_run)
            py_ok += 1
        except subprocess.CalledProcessError:
            py_fail += 1
            eprint(f"python deps failed: {req}")

    npm = shutil.which("npm")
    if npm:
        # NOTE: unlike the Python loop, node packages are filtered only by the
        # exclusion list and the MCP opt-out -- there is no test-dir filter.
        for pkg in sorted(skills_dir.rglob("package.json")):
            if is_excluded_path(pkg.parts):
                continue
            if not include_mcp and ("mcp-builder" in pkg.parts or "mcp-management" in pkg.parts):
                continue
            try:
                run_cmd([npm, "install", "--prefix", str(pkg.parent)], dry_run=dry_run)
                node_ok += 1
            except subprocess.CalledProcessError:
                node_fail += 1
                eprint(f"node deps failed: {pkg.parent}")
    else:
        eprint("npm not found; skipping Node dependency bootstrap")

    return {
        "python_ok": py_ok,
        "python_fail": py_fail,
        "node_ok": node_ok,
        "node_fail": node_fail,
    }
|
|
@@ -0,0 +1,248 @@
|
|
|
1
|
+
"""Path normalization for Codex compatibility."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import re
|
|
6
|
+
import shutil
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
from .constants import (
|
|
10
|
+
AGENT_TOML_REPLACEMENTS,
|
|
11
|
+
CLAUDE_SYNTAX_ADAPTATIONS,
|
|
12
|
+
SKILL_MD_REPLACEMENTS,
|
|
13
|
+
)
|
|
14
|
+
from .utils import apply_replacements, load_template, write_text_if_changed
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def normalize_files(
    *,
    codex_home: Path,
    include_mcp: bool,
    dry_run: bool,
) -> int:
    """Normalize paths in skill files and claudekit files.

    Applies SKILL_MD_REPLACEMENTS to every SKILL.md and claudekit markdown
    file, patches the copywriting helper script, seeds the default writing
    style from its reference fallback, and upserts the command-map doc.
    Returns the number of files changed (or that would change in dry-run).
    """
    changed = 0
    skills_dir = codex_home / "skills"
    claudekit_dir = codex_home / "claudekit"

    def _apply_skill_replacements(md_path: Path) -> None:
        # Shared rewrite step for both markdown passes below.
        nonlocal changed
        rel_name = md_path.relative_to(codex_home).as_posix()
        before = md_path.read_text(encoding="utf-8", errors="ignore")
        after = apply_replacements(before, SKILL_MD_REPLACEMENTS)
        if after == before:
            return
        changed += 1
        print(f"normalize: {rel_name}")
        if not dry_run:
            md_path.write_text(after, encoding="utf-8")

    for skill_md in sorted(skills_dir.rglob("SKILL.md")):
        if ".system" in skill_md.parts:
            continue
        rel = skill_md.relative_to(codex_home).as_posix()
        if not include_mcp and any(m in rel for m in ("/mcp-builder/", "/mcp-management/")):
            continue
        _apply_skill_replacements(skill_md)

    for doc_md in sorted(claudekit_dir.rglob("*.md")):
        _apply_skill_replacements(doc_md)

    copy_script = skills_dir / "copywriting" / "scripts" / "extract-writing-styles.py"
    if patch_copywriting_script(copy_script, dry_run=dry_run):
        changed += 1
        print("normalize: skills/copywriting/scripts/extract-writing-styles.py")

    # Seed the default writing style from the reference copy when missing.
    default_style = skills_dir / "copywriting" / "assets" / "writing-styles" / "default.md"
    fallback_style = skills_dir / "copywriting" / "references" / "writing-styles.md"
    if fallback_style.exists() and not default_style.exists():
        changed += 1
        print("add: skills/copywriting/assets/writing-styles/default.md")
        if not dry_run:
            default_style.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy2(fallback_style, default_style)

    command_map = codex_home / "claudekit" / "commands" / "codex-command-map.md"
    if write_text_if_changed(command_map, load_template("command-map.md"), dry_run=dry_run):
        changed += 1
        print("upsert: claudekit/commands/codex-command-map.md")

    return changed
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def convert_agents_md_to_toml(*, codex_home: Path, dry_run: bool) -> int:
    """Convert ClaudeKit agent .md files to Codex .toml format.

    Each ``agents/*.md`` file carrying YAML frontmatter is translated into a
    sibling ``<slug>.toml`` with the mapped Codex model, reasoning effort,
    sandbox mode, and the markdown body as ``developer_instructions``.

    Args:
        codex_home: Root of the Codex home directory.
        dry_run: When True, report conversions without writing files.

    Returns:
        Number of agent files converted (counted in dry-run too).
    """
    from .constants import (
        CLAUDE_MODEL_REASONING_EFFORT,
        CLAUDE_TO_CODEX_MODELS,
        READ_ONLY_AGENT_ROLES,
    )

    agents_dir = codex_home / "agents"
    if not agents_dir.exists():
        return 0

    converted = 0
    for md_file in sorted(agents_dir.glob("*.md")):
        text = md_file.read_text(encoding="utf-8")
        # Only files with "---"-fenced YAML frontmatter are convertible.
        if not text.startswith("---"):
            continue
        parts = text.split("---", 2)
        if len(parts) < 3:
            continue

        frontmatter = parts[1].strip()
        body = parts[2].strip()

        # Extract the model: field (last occurrence wins, as before).
        claude_model = ""
        for line in frontmatter.splitlines():
            m = re.match(r"^model:\s*(.+)$", line)
            if m:
                claude_model = m.group(1).strip().strip("'\"")

        # Map the Claude model to a Codex model plus reasoning effort.
        codex_model = CLAUDE_TO_CODEX_MODELS.get(claude_model, "gpt-5.3-codex")
        effort = CLAUDE_MODEL_REASONING_EFFORT.get(claude_model, "high")

        # Read-only roles get a restricted sandbox.
        slug = md_file.stem.replace("-", "_")
        if slug in READ_ONLY_AGENT_ROLES:
            sandbox = "read-only"
        else:
            sandbox = "workspace-write"

        # Build the TOML document.
        toml_lines = []
        if codex_model:
            toml_lines.append(f'model = "{codex_model}"')
            toml_lines.append(f'model_reasoning_effort = "{effort}"')
        toml_lines.append(f'sandbox_mode = "{sandbox}"')
        toml_lines.append("")
        # Escape the body for a TOML multi-line basic string: backslashes
        # first (they introduce escape sequences in TOML basic strings),
        # then embedded triple quotes.  The previous code emitted FOUR
        # escaped quotes per `"""` (corrupting the instructions) and left
        # backslashes unescaped (producing invalid TOML).
        safe_body = body.replace("\\", "\\\\").replace('"""', '\\"\\"\\"')
        toml_lines.append(f'developer_instructions = """\n{safe_body}\n"""')

        toml_content = "\n".join(toml_lines) + "\n"
        toml_file = agents_dir / f"{slug}.toml"

        if not dry_run:
            toml_file.write_text(toml_content, encoding="utf-8")
        converted += 1
        print(f"convert: agents/{md_file.name} → agents/{slug}.toml ({codex_model}, {sandbox})")

    return converted
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def normalize_agent_tomls(*, codex_home: Path, dry_run: bool) -> int:
    """Normalize paths and models in agent TOML files.

    Converts any remaining markdown agents first, then rewrites each TOML:
    path replacements, Claude-syntax adaptations, activation of commented
    Claude model lines as Codex equivalents, and read-only sandbox
    enforcement for the configured roles.  Returns the changed-file count.
    """
    from .constants import (
        CLAUDE_MODEL_REASONING_EFFORT,
        CLAUDE_TO_CODEX_MODELS,
        READ_ONLY_AGENT_ROLES,
    )

    agents_dir = codex_home / "agents"
    if not agents_dir.exists():
        return 0

    # Ensure every markdown agent has a TOML counterpart before normalizing.
    convert_agents_md_to_toml(codex_home=codex_home, dry_run=dry_run)

    changed = 0
    for toml_path in sorted(agents_dir.glob("*.toml")):
        before = toml_path.read_text(encoding="utf-8")
        after = apply_replacements(
            apply_replacements(before, AGENT_TOML_REPLACEMENTS),
            CLAUDE_SYNTAX_ADAPTATIONS,
        )

        # Activate commented-out Claude models as Codex equivalents.
        for claude_name, codex_model in CLAUDE_TO_CODEX_MODELS.items():
            commented = rf'^#\s*model\s*=\s*"{claude_name}"\s*$'
            if not re.search(commented, after, re.MULTILINE):
                continue
            if codex_model:
                effort = CLAUDE_MODEL_REASONING_EFFORT.get(claude_name, "high")
                active = f'model = "{codex_model}"\nmodel_reasoning_effort = "{effort}"'
            else:
                active = ""
            after = re.sub(commented, active, after, flags=re.MULTILINE)

        # Read-only roles must never retain a writable sandbox.
        if toml_path.stem in READ_ONLY_AGENT_ROLES:
            after = re.sub(
                r'^sandbox_mode\s*=\s*"workspace-write"',
                'sandbox_mode = "read-only"',
                after,
                flags=re.MULTILINE,
            )

        if after != before:
            changed += 1
            if not dry_run:
                toml_path.write_text(after, encoding="utf-8")
    return changed
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
def patch_copywriting_script(copy_script: Path, *, dry_run: bool) -> bool:
    """Patch copywriting script for Codex compatibility.

    Rewrites two known upstream blocks in extract-writing-styles.py:
    the ``find_project_root`` helper (so it prefers a directory that
    actually contains assets/writing-styles) and the module-level path
    constants (so ai-multimodal scripts are probed under both the project's
    .claude tree and CODEX_HOME).

    Args:
        copy_script: Path to the upstream script to patch in place.
        dry_run: When True, detect the change but do not write the file.

    Returns:
        True if the script was (or would be) modified; False when the
        script is absent or already carries the patch.

    Raises:
        SyncError: If either expected upstream pattern is not found --
            i.e. upstream changed shape and this patch needs updating.
    """
    from .utils import SyncError

    if not copy_script.exists():
        return False

    text = copy_script.read_text(encoding="utf-8")
    original = text
    # Idempotency marker: the CODEX_HOME constant only exists after our patch.
    if "CODEX_HOME = Path(os.environ.get('CODEX_HOME'" in text:
        return False

    # Replacement implementation injected over the upstream helper.
    new_func = """def find_project_root(start_dir: Path) -> Path:
    \"\"\"Find project root by preferring a directory that contains assets/writing-styles.\"\"\"
    search_chain = [start_dir] + list(start_dir.parents)
    for parent in search_chain:
        if (parent / 'assets' / 'writing-styles').exists():
            return parent
    for parent in search_chain:
        if (parent / 'SKILL.md').exists():
            return parent
    for parent in search_chain:
        if (parent / '.codex').exists() or (parent / '.claude').exists():
            return parent
    return start_dir
"""

    # Non-greedy match of the upstream function body, up to its final
    # ``return start_dir`` line; count=1 patches only the first occurrence.
    text, count_func = re.subn(
        r"def find_project_root\(start_dir: Path\) -> Path:\n(?:    .*\n)+?    return start_dir\n",
        new_func,
        text,
        count=1,
    )

    # Replacement for the module-level constants: adds CODEX_HOME and probes
    # both candidate locations for the ai-multimodal scripts, falling back to
    # the CODEX_HOME location when neither exists.
    new_block = """PROJECT_ROOT = find_project_root(Path(__file__).parent)
STYLES_DIR = PROJECT_ROOT / 'assets' / 'writing-styles'
CODEX_HOME = Path(os.environ.get('CODEX_HOME', str(Path.home() / '.codex')))

_ai_multimodal_candidates = [
    PROJECT_ROOT / '.claude' / 'skills' / 'ai-multimodal' / 'scripts',
    CODEX_HOME / 'skills' / 'ai-multimodal' / 'scripts',
]
AI_MULTIMODAL_SCRIPTS = next((p for p in _ai_multimodal_candidates if p.exists()), _ai_multimodal_candidates[-1])
"""

    text, count_block = re.subn(
        r"PROJECT_ROOT = find_project_root\(Path\(__file__\)\.parent\)\nSTYLES_DIR = PROJECT_ROOT / 'assets' / 'writing-styles'\nAI_MULTIMODAL_SCRIPTS = PROJECT_ROOT / '.claude' / 'skills' / 'ai-multimodal' / 'scripts'\n",
        new_block,
        text,
        count=1,
    )

    # Fail loudly when upstream drifted, rather than silently skipping.
    if count_func == 0 or count_block == 0:
        raise SyncError("copywriting patch failed: upstream pattern changed")

    if text == original:
        return False
    if not dry_run:
        copy_script.write_text(text, encoding="utf-8")
    return True
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
"""Prompt export functionality."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Dict, Set
|
|
7
|
+
|
|
8
|
+
from .constants import PROMPT_MANIFEST, PROMPT_REPLACEMENTS
|
|
9
|
+
from .utils import apply_replacements, load_manifest, save_manifest, write_bytes_if_changed
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def ensure_frontmatter(content: str, command_path: str) -> str:
    """Ensure content has YAML frontmatter, synthesizing a minimal header."""
    has_frontmatter = content.lstrip().startswith("---")
    if has_frontmatter:
        return content
    header = f"---\ndescription: ClaudeKit compatibility prompt for /{command_path}\n---\n\n"
    return header + content
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def export_prompts(
    *,
    codex_home: Path,
    include_mcp: bool,
    dry_run: bool,
) -> Dict[str, int]:
    """Export prompts from claudekit/commands to prompts directory.

    Command markdown files are flattened (path separators become dashes),
    passed through PROMPT_REPLACEMENTS, given frontmatter, and written
    under ``<codex_home>/prompts``.  A manifest records what we generated
    so stale outputs can be removed later, and any pre-existing file we
    did not generate is left alone and counted as a collision.
    """
    from .utils import SyncError

    commands_dir = codex_home / "claudekit" / "commands"
    prompts_dir = codex_home / "prompts"
    manifest_path = prompts_dir / PROMPT_MANIFEST

    if not commands_dir.exists():
        if not dry_run:
            raise SyncError(f"Prompt source directory not found: {commands_dir}")
        print(f"skip: prompt export dry-run requires existing {commands_dir}")
        return {"added": 0, "updated": 0, "skipped": 0, "removed": 0, "collisions": 0, "total_generated": 0}

    old_manifest = load_manifest(manifest_path)
    generated: Set[str] = set()
    added = updated = skipped = removed = collisions = 0

    if not dry_run:
        prompts_dir.mkdir(parents=True, exist_ok=True)

    for cmd_file in sorted(commands_dir.rglob("*.md")):
        rel = cmd_file.relative_to(commands_dir).as_posix()

        # The generated command map and (optionally) MCP prompts stay put.
        unwanted = cmd_file.name == "codex-command-map.md" or (
            cmd_file.name == "use-mcp.md" and not include_mcp
        )
        if unwanted:
            skipped += 1
            print(f"skip: {rel}")
            continue

        stem = rel[:-3]  # strip the ".md" suffix
        prompt_name = stem.replace("/", "-") + ".md"
        dst = prompts_dir / prompt_name

        body = apply_replacements(
            cmd_file.read_text(encoding="utf-8", errors="ignore"),
            PROMPT_REPLACEMENTS,
        )
        payload = ensure_frontmatter(body, stem).encode("utf-8")

        # Never clobber a file we did not generate ourselves.
        if dst.exists() and prompt_name not in old_manifest:
            collisions += 1
            print(f"skip(collision): {prompt_name}")
            continue

        generated.add(prompt_name)
        changed, is_new = write_bytes_if_changed(dst, payload, mode=0o644, dry_run=dry_run)
        if changed and is_new:
            added += 1
            print(f"add: {prompt_name} <= {rel}")
        elif changed:
            updated += 1
            print(f"update: {prompt_name} <= {rel}")

    # Remove outputs generated by a previous run but not by this one.
    for stale_name in sorted(old_manifest - generated):
        stale_path = prompts_dir / stale_name
        if stale_path.exists():
            removed += 1
            print(f"remove(stale): {stale_name}")
            if not dry_run:
                stale_path.unlink()

    save_manifest(manifest_path, generated, dry_run=dry_run)
    return {
        "added": added,
        "updated": updated,
        "skipped": skipped,
        "removed": removed,
        "collisions": collisions,
        "total_generated": len(generated),
    }
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
"""Runtime verification."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Dict, Optional, Any
|
|
7
|
+
|
|
8
|
+
from .utils import run_cmd
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def verify_runtime(*, codex_home: Path, dry_run: bool) -> Dict[str, Any]:
    """Verify runtime health after sync.

    Smoke-tests the ``codex`` CLI, optionally runs the copywriting helper
    through the skills virtualenv, and reports prompt/skill counts.
    Dry runs skip verification entirely.
    """
    if dry_run:
        return {"skipped": True}

    # If this raises, the codex binary itself is broken or missing.
    run_cmd(["codex", "--help"], dry_run=False)

    script = codex_home / "skills" / "copywriting" / "scripts" / "extract-writing-styles.py"
    venv_python = codex_home / "skills" / ".venv" / "bin" / "python3"
    script_verified = script.exists() and venv_python.exists()
    if script_verified:
        run_cmd([str(venv_python), str(script), "--list"], dry_run=False)

    return {
        "codex_help": "ok",
        "copywriting": "ok" if script_verified else "skipped",
        "prompts": len(list((codex_home / "prompts").glob("*.md"))),
        "skills": len(list((codex_home / "skills").rglob("SKILL.md"))),
    }
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
"""Source resolution for ClaudeKit zip or live directory."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import os
|
|
6
|
+
import tempfile
|
|
7
|
+
import zipfile
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Dict, List, Optional, Tuple
|
|
10
|
+
|
|
11
|
+
from .utils import SyncError
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def find_latest_zip(explicit_zip: Optional[Path]) -> Path:
    """Find the latest ClaudeKit zip file.

    An explicitly supplied zip wins; otherwise the newest
    ``claudekit-*/*.zip`` under the system temp directories is used.
    """
    if explicit_zip:
        resolved = explicit_zip.expanduser().resolve()
        if not resolved.exists():
            raise SyncError(f"Zip not found: {resolved}")
        return resolved

    search_roots = {Path("/tmp"), Path(tempfile.gettempdir())}
    found: List[Path] = []
    for root in search_roots:
        if root.exists():
            found.extend(root.glob("claudekit-*/*.zip"))

    if not found:
        raise SyncError("No ClaudeKit zip found. Expected /tmp/claudekit-*/*.zip")

    # Newest modification time wins.
    return max(found, key=lambda z: z.stat().st_mtime).resolve()
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def detect_claude_source() -> Path:
    """Auto-detect Claude Code installation directory."""
    home_candidates = (
        Path.home() / ".claude",
        Path(os.environ.get("USERPROFILE", "")) / ".claude",
    )
    for candidate in home_candidates:
        # A valid installation must contain a skills/ directory.
        if candidate.exists() and (candidate / "skills").is_dir():
            return candidate
    raise SyncError("Claude Code not found. Use --source-dir to specify.")
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def validate_source(source: Path) -> Dict[str, bool]:
    """Validate source directory structure."""
    required_dirs = ("skills", "agents", "commands", "rules")
    report = {name: (source / name).is_dir() for name in required_dirs}
    report["claude_md"] = (source / "CLAUDE.md").is_file()
    return report
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def collect_skill_entries(zf: zipfile.ZipFile) -> Dict[str, List[Tuple[str, str]]]:
    """Collect skill entries from zip file.

    Maps each skill name to a list of (archive name, path inside skill)
    pairs, considering only files under ``.claude/skills/``.
    """
    prefix = ".claude/skills/"
    by_skill: Dict[str, List[Tuple[str, str]]] = {}
    for member in zf.namelist():
        if member.endswith("/"):
            continue  # directory entry
        if not member.startswith(prefix):
            continue
        pieces = member[len(prefix):].split("/", 1)
        if len(pieces) == 2:
            skill_name, inner_path = pieces
            by_skill.setdefault(skill_name, []).append((member, inner_path))
    return by_skill
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def zip_mode(info: zipfile.ZipInfo) -> Optional[int]:
    """Extract Unix mode from ZipInfo, or None when no mode is recorded."""
    # Unix permission bits live in the high 16 bits of external_attr.
    mode = (info.external_attr >> 16) & 0o777
    return mode or None
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
"""Sync registry with SHA-256 checksums and backup support."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from datetime import datetime, timezone
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Any, Dict, Optional
|
|
9
|
+
|
|
10
|
+
from .utils import compute_hash, create_backup
|
|
11
|
+
|
|
12
|
+
# Registry file kept at the root of the Codex home directory.
REGISTRY_FILE = ".claudekit-sync-registry.json"


def load_registry(codex_home: Path) -> Dict[str, Any]:
    """Load sync registry from disk, or return a fresh empty registry."""
    registry_path = codex_home / REGISTRY_FILE
    if registry_path.exists():
        return json.loads(registry_path.read_text(encoding="utf-8"))
    return {
        "version": 1,
        "lastSync": None,
        "sourceDir": None,
        "entries": {},
    }
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def save_registry(codex_home: Path, registry: Dict[str, Any]) -> None:
    """Stamp lastSync and persist the registry as pretty-printed JSON."""
    registry["lastSync"] = datetime.now(timezone.utc).isoformat()
    payload = json.dumps(registry, indent=2)
    (codex_home / REGISTRY_FILE).write_text(payload, encoding="utf-8")
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def check_user_edit(entry: Dict[str, str], target: Path) -> bool:
    """Check if user has modified the file since last sync."""
    if not target.exists():
        return False
    recorded = entry.get("targetHash", "")
    return compute_hash(target) != recorded
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def update_entry(
    registry: Dict[str, Any],
    rel_path: str,
    source: Path,
    target: Path,
) -> None:
    """Record post-sync source/target hashes for *rel_path* in the registry."""
    registry["entries"][rel_path] = {
        # Missing files hash to the empty string by convention.
        "sourceHash": compute_hash(source) if source.exists() else "",
        "targetHash": compute_hash(target) if target.exists() else "",
        "syncedAt": datetime.now(timezone.utc).isoformat(),
    }
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def maybe_backup(
    registry: Dict[str, Any],
    rel_path: str,
    target: Path,
    respect_edits: bool,
) -> Optional[Path]:
    """Backup *target* when the user has edited it and edits are respected.

    Returns the backup path, or None when no backup was needed (target
    missing, no registry entry, respect disabled, or no user edit).
    """
    if not target.exists() or not respect_edits:
        return None
    entry = registry.get("entries", {}).get(rel_path)
    if not entry or not check_user_edit(entry, target):
        return None
    backup = create_backup(target)
    print(f"backup(user-edit): {rel_path} -> {backup.name}")
    return backup