@demig0d2/skills 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +111 -0
- package/bin/cli.js +313 -0
- package/package.json +44 -0
- package/skills/book-writer/SKILL.md +1396 -0
- package/skills/book-writer/references/kdp_specs.md +139 -0
- package/skills/book-writer/scripts/kdp_check.py +255 -0
- package/skills/book-writer/scripts/toc_extract.py +151 -0
- package/skills/book-writer/scripts/word_count.py +196 -0
- package/skills/chapter-auditor/SKILL.md +231 -0
- package/skills/chapter-auditor/scripts/score_report.py +237 -0
- package/skills/concept-expander/SKILL.md +170 -0
- package/skills/concept-expander/scripts/validate_concept.py +255 -0
- package/skills/continuity-tracker/SKILL.md +251 -0
- package/skills/continuity-tracker/references/log_schema.md +149 -0
- package/skills/continuity-tracker/scripts/conflict_check.py +179 -0
- package/skills/continuity-tracker/scripts/log_manager.py +258 -0
- package/skills/humanizer/SKILL.md +632 -0
- package/skills/humanizer/references/patterns_quick_ref.md +71 -0
- package/skills/humanizer/scripts/dna_scan.py +168 -0
- package/skills/humanizer/scripts/scan_ai_patterns.py +279 -0
- package/skills/overhaul/SKILL.md +697 -0
- package/skills/overhaul/references/upgrade_checklist.md +81 -0
- package/skills/overhaul/scripts/changelog_gen.py +183 -0
- package/skills/overhaul/scripts/skill_parser.py +265 -0
- package/skills/overhaul/scripts/version_bump.py +128 -0
- package/skills/research-aggregator/SKILL.md +194 -0
- package/skills/research-aggregator/references/thinkers_reference.md +104 -0
- package/skills/research-aggregator/scripts/bank_formatter.py +206 -0
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
# Overhaul — Upgrade Quality Checklist
|
|
2
|
+
|
|
3
|
+
Run this checklist before delivering any upgraded skill. Every item must pass.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## PRE-DELIVERY CHECKLIST
|
|
8
|
+
|
|
9
|
+
### Frontmatter
|
|
10
|
+
- [ ] Version number bumped (patch for minor fixes, minor for new sections, major for rebuild)
|
|
11
|
+
- [ ] Description rewritten to reflect upgraded capabilities — not the old skill's description
|
|
12
|
+
- [ ] Description includes at least 3 specific trigger phrases
|
|
13
|
+
- [ ] Description is under 300 characters
|
|
14
|
+
- [ ] Description is "pushy" — errs toward triggering rather than waiting
|
|
15
|
+
|
|
16
|
+
### Structure
|
|
17
|
+
- [ ] Every distinct behavior has a named `## SECTION` heading
|
|
18
|
+
- [ ] Every multi-step process is numbered (not bulleted)
|
|
19
|
+
- [ ] Every output has a concrete template (not just described)
|
|
20
|
+
- [ ] Every module has a quality gate or verification checkpoint
|
|
21
|
+
- [ ] Every identified conflict has an explicit protection rule BEFORE the general rule
|
|
22
|
+
- [ ] Every missing dimension from Module 4 has a dedicated handler section
|
|
23
|
+
|
|
24
|
+
### Content
|
|
25
|
+
- [ ] All conflicts identified in Module 2 are resolved
|
|
26
|
+
- [ ] All compressed items from Module 3 are fully expanded
|
|
27
|
+
- [ ] All missing dimensions from Module 4 have handlers
|
|
28
|
+
- [ ] Integration gaps from Module 5 are addressed
|
|
29
|
+
- [ ] Output format is a concrete template
|
|
30
|
+
|
|
31
|
+
### The Final Test
|
|
32
|
+
- [ ] Would someone who used the original skill feel this is a **different generation** — not just a cleaner version?
|
|
33
|
+
|
|
34
|
+
If the answer to the final test is NO → go back and find the missing dimension that was overlooked.
|
|
35
|
+
|
|
36
|
+
---
|
|
37
|
+
|
|
38
|
+
## VERSION BUMP GUIDE
|
|
39
|
+
|
|
40
|
+
| Change Type | Version Bump | Example |
|
|
41
|
+
|---|---|---|
|
|
42
|
+
| Typo fix, minor wording | Patch (0.0.X) | 1.0.0 → 1.0.1 |
|
|
43
|
+
| New section, expanded content, edge cases | Minor (0.X.0) | 1.0.0 → 1.1.0 |
|
|
44
|
+
| Generational rebuild, new modules, overhaul | Major (X.0.0) | 1.0.0 → 2.0.0 |
|
|
45
|
+
| No version in original | Assign 2.0.0 | — → 2.0.0 |
|
|
46
|
+
|
|
47
|
+
---
|
|
48
|
+
|
|
49
|
+
## UPGRADE SEVERITY GUIDE
|
|
50
|
+
|
|
51
|
+
| Vectors Found | Severity | Expected Version Bump |
|
|
52
|
+
|---|---|---|
|
|
53
|
+
| 1–2 vectors | Minor upgrade | Minor (0.X.0) |
|
|
54
|
+
| 3–4 vectors | Major upgrade | Major (X.0.0) |
|
|
55
|
+
| 5–6 vectors | Generational | Major (X.0.0) |
|
|
56
|
+
|
|
57
|
+
---
|
|
58
|
+
|
|
59
|
+
## THE 6 DIAGNOSTIC MODULES — QUICK REFERENCE
|
|
60
|
+
|
|
61
|
+
| Module | What It Finds | Why It Matters |
|
|
62
|
+
|---|---|---|
|
|
63
|
+
| 1. Intent Reconstruction | Gap between stated and actual behavior | First upgrade vector |
|
|
64
|
+
| 2. Conflict Detection | Instructions that cancel each other | Silent failures at runtime |
|
|
65
|
+
| 3. Compression Audit | Logic compressed too thinly into a single line | Missing edge-case handling |
|
|
66
|
+
| 4. Missing Dimension Scan | Categories not addressed at all | The highest-value module |
|
|
67
|
+
| 5. Integration Analysis | Ecosystem awareness, triggering accuracy | Skills that never fire |
|
|
68
|
+
| 6. Output Standard Check | Format precision, quality definition | Inconsistent output |
|
|
69
|
+
|
|
70
|
+
---
|
|
71
|
+
|
|
72
|
+
## COMMON MISSING DIMENSIONS (found repeatedly across skills)
|
|
73
|
+
|
|
74
|
+
These are frequently missing — check for them in every upgrade:
|
|
75
|
+
|
|
76
|
+
1. **What happens when input is too thin?** (Most skills have no "seed too vague" handler)
|
|
77
|
+
2. **What happens when a required step is skipped?** (Most pipelines assume linear execution)
|
|
78
|
+
3. **Is there a quality gate before the final output?** (Most skills just output without self-checking)
|
|
79
|
+
4. **Are protected constructions defined?** (Skills that strip content often destroy intentional choices)
|
|
80
|
+
5. **Is the output format a template or just described?** (Described outputs are inconsistent across runs)
|
|
81
|
+
6. **What is the recovery path when a module fails?** (Most skills have no error → retry path)
|
|
@@ -0,0 +1,183 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
changelog_gen.py — Generate a changelog by diffing two SKILL.md versions
|
|
4
|
+
|
|
5
|
+
Compares old and new skill files. Detects added sections, removed sections,
|
|
6
|
+
expanded content, and version changes.
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
python changelog_gen.py <old_SKILL.md> <new_SKILL.md>
|
|
10
|
+
python changelog_gen.py <old_SKILL.md> <new_SKILL.md> --json
|
|
11
|
+
python changelog_gen.py <old_SKILL.md> <new_SKILL.md> --markdown
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import sys
|
|
15
|
+
import re
|
|
16
|
+
import json
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def extract_sections(text: str) -> dict:
    """Split markdown into {heading_title: body_text}.

    Headings of level 1-3 (#, ##, ###) each open a new section; any text
    before the first heading is stored under the key "__preamble__".
    Duplicate titles overwrite earlier ones (last heading wins).
    """
    result = {}
    title = "__preamble__"
    buffer = []

    for raw in text.splitlines():
        match = re.match(r"^(#{1,3})\s+(.+)", raw)
        if match is None:
            buffer.append(raw)
            continue
        # Close out the running section, then start a fresh one.
        result[title] = "\n".join(buffer).strip()
        title = match.group(2).strip()
        buffer = []

    result[title] = "\n".join(buffer).strip()
    return result
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def get_version(text: str) -> str:
    """Pull the `version:` value out of a skill file ("unknown" if absent)."""
    found = re.search(r"^version:\s*(.+)$", text, re.MULTILINE)
    if found is None:
        return "unknown"
    return found.group(1).strip()
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def get_description(text: str) -> str:
    """Return the first 200 chars of the `description:` frontmatter value.

    The lazy match stops at the next `key:` line or a `---` delimiter;
    returns "" when no description can be located (e.g. at end of file
    with neither terminator present).
    """
    found = re.search(r"^description:\s*\|?\s*(.+?)(?=\n\w+:|---)", text, re.DOTALL | re.MULTILINE)
    if found is None:
        return ""
    return found.group(1).strip()[:200]
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def diff_skills(old_path: str, new_path: str) -> dict:
    """Diff two SKILL.md files and summarize section-level changes.

    Sections are bucketed as added / removed / expanded (word count grew
    by more than 30%) / modified (content changed without crossing the
    growth threshold) / unchanged.  The "__preamble__" pseudo-section is
    excluded from all buckets.  Returns a plain dict suitable for JSON.
    """
    old_text = Path(old_path).read_text(encoding="utf-8")
    new_text = Path(new_path).read_text(encoding="utf-8")

    before = extract_sections(old_text)
    after = extract_sections(new_text)

    before_titles = set(before)
    after_titles = set(after)

    added = sorted(after_titles - before_titles - {"__preamble__"})
    removed = sorted(before_titles - after_titles - {"__preamble__"})

    expanded = []
    modified = []
    unchanged = []

    for title in sorted(before_titles & after_titles):
        if title == "__preamble__":
            continue
        words_before = len(before[title].split())
        words_after = len(after[title].split())
        entry = {"section": title, "old_words": words_before, "new_words": words_after}
        if words_before == 0 and words_after == 0:
            unchanged.append(title)
        elif words_after > words_before * 1.3:
            # >30% growth in word count counts as "expanded" (checked
            # before plain modification, so growth takes precedence).
            expanded.append(entry)
        elif before[title] != after[title]:
            modified.append(entry)
        else:
            unchanged.append(title)

    total_before = len(old_text.split())
    total_after = len(new_text.split())

    return {
        "old_file": old_path,
        "new_file": new_path,
        "version_change": f"{get_version(old_text)} → {get_version(new_text)}",
        "word_count_change": f"{total_before:,} → {total_after:,} words ({total_after - total_before:+,})",
        "added": added,
        "removed": removed,
        "expanded": expanded,
        "modified": modified,
        "unchanged_count": len(unchanged),
    }
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def print_changelog(diff: dict):
    """Pretty-print a diff_skills() result to stdout as a boxed text report."""
    rule = "═" * 60
    print(f"\n{rule}")
    print(f" CHANGELOG")
    print(f" Version: {diff['version_change']}")
    print(f" Size: {diff['word_count_change']}")
    print(f"{rule}\n")

    added = diff["added"]
    if added:
        print(f" ADDED ({len(added)} new sections):")
        for title in added:
            print(f" + {title}")
        print()

    expanded = diff["expanded"]
    if expanded:
        print(f" EXPANDED ({len(expanded)} sections grown significantly):")
        for entry in expanded:
            print(f" ↑ {entry['section']} ({entry['old_words']} → {entry['new_words']} words)")
        print()

    modified = diff["modified"]
    if modified:
        print(f" MODIFIED ({len(modified)} sections changed):")
        for entry in modified:
            print(f" ~ {entry['section']} ({entry['old_words']} → {entry['new_words']} words)")
        print()

    removed = diff["removed"]
    if removed:
        print(f" REMOVED ({len(removed)} sections):")
        for title in removed:
            print(f" - {title}")
        print()

    print(f" Unchanged sections: {diff['unchanged_count']}")
    print(f"\n{rule}\n")
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
def print_markdown_changelog(diff: dict):
    """Emit a diff_skills() result as a markdown changelog on stdout.

    Each non-empty bucket becomes an `### Heading` followed by bullet
    lines and a trailing blank line; empty buckets are skipped entirely.
    """
    print(f"## Changelog — {diff['version_change']}\n")
    print(f"**Size**: {diff['word_count_change']}\n")

    def emit(header, bullets):
        # Print one "### Heading" bucket, skipping empty ones.
        if bullets:
            print(header)
            for bullet in bullets:
                print(bullet)
            print()

    emit("### Added", [f"- {title}" for title in diff["added"]])
    emit(
        "### Expanded",
        [
            f"- {e['section']} ({e['old_words']} → {e['new_words']} words)"
            for e in diff["expanded"]
        ],
    )
    emit("### Modified", [f"- {e['section']}" for e in diff["modified"]])
    emit("### Removed", [f"- {title}" for title in diff["removed"]])
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
if __name__ == "__main__":
    # CLI entry: diff two skill files and print a report in the chosen format.
    if len(sys.argv) < 3:
        print("Usage: python changelog_gen.py <old_SKILL.md> <new_SKILL.md> [--json|--markdown]")
        sys.exit(1)

    old_arg, new_arg = sys.argv[1], sys.argv[2]
    output_mode = sys.argv[3] if len(sys.argv) > 3 else "--report"

    # Fail fast (on the first missing file) before attempting the diff.
    for candidate in (old_arg, new_arg):
        if not Path(candidate).exists():
            print(f"Error: File not found: {candidate}", file=sys.stderr)
            sys.exit(1)

    summary = diff_skills(old_arg, new_arg)

    if output_mode == "--json":
        print(json.dumps(summary, indent=2))
    elif output_mode == "--markdown":
        print_markdown_changelog(summary)
    else:
        # Any other (or absent) flag falls back to the boxed text report.
        print_changelog(summary)
|
|
@@ -0,0 +1,265 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
skill_parser.py — Parse and analyze a SKILL.md file
|
|
4
|
+
|
|
5
|
+
Extracts frontmatter, sections, version, word count, and structural
|
|
6
|
+
metrics. Used by the overhaul skill as the first diagnostic step.
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
python skill_parser.py <SKILL.md>
|
|
10
|
+
python skill_parser.py <SKILL.md> --json
|
|
11
|
+
python skill_parser.py <SKILL.md> --sections
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import sys
|
|
15
|
+
import re
|
|
16
|
+
import json
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
|
|
19
|
+
# ─── Parser ───────────────────────────────────────────────────────────────────
|
|
20
|
+
|
|
21
|
+
def parse_frontmatter(text: str) -> tuple[dict, str]:
    """Extract YAML frontmatter and return (meta, body).

    Frontmatter is the block between a leading ``---`` line and the next
    ``---`` that begins a line.  Fix over the original: the closing
    delimiter must start a line, so a literal "---" inside a value
    (e.g. in a description) no longer truncates the frontmatter early.

    Returns ({}, text) when no well-formed frontmatter is present.
    Values are parsed naively (``key: value`` per line, quotes stripped);
    a block-scalar ``description: |`` is re-captured whole afterwards.
    """
    match = re.match(r"^---[ \t]*\n(.*?)\n---[ \t]*(?:\n|$)", text, re.DOTALL)
    if not match:
        return {}, text

    yaml_block = match.group(1).strip()
    body = text[match.end():].strip()

    meta = {}
    for line in yaml_block.splitlines():
        if ":" in line:
            key, _, value = line.partition(":")
            meta[key.strip()] = value.strip().strip('"').strip("'")

    # A multiline description (`description: |`) spans several lines; grab
    # everything up to the next top-level key or the end of the block.
    desc_match = re.search(r"description:\s*\|(.+?)(?=\n\w+:|$)", yaml_block, re.DOTALL)
    if desc_match:
        meta["description"] = desc_match.group(1).strip()

    return meta, body
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def extract_sections(body: str) -> list:
    """Extract all ## and ### sections with their content.

    Returns a list of dicts shaped {"level", "title", "content",
    "word_count", "subsections"}.  Text before the first ## heading is
    dropped; each ### heading opens a subsection of the current ## section,
    and subsequent lines accrue to the most recent subsection (or to the
    section itself when no subsection has started yet).
    """
    sections = []
    active = None  # the ## section currently being filled, if any

    for raw in body.splitlines():
        top = re.match(r"^## (.+)", raw)
        nested = re.match(r"^### (.+)", raw)

        if top:
            if active is not None:
                sections.append(active)
            active = {
                "level": 2,
                "title": top.group(1).strip(),
                "content": "",
                "word_count": 0,
                "subsections": [],
            }
            continue

        if active is None:
            # Preamble lines (and stray ### before any ##) are ignored.
            continue

        if nested:
            active["subsections"].append({
                "level": 3,
                "title": nested.group(1).strip(),
                "content": "",
                "word_count": 0,
            })
        elif active["subsections"]:
            active["subsections"][-1]["content"] += raw + "\n"
        else:
            active["content"] += raw + "\n"

    if active is not None:
        sections.append(active)

    # Fill in word counts once the full structure is known.
    for section in sections:
        section["word_count"] = len(section["content"].split())
        for subsection in section.get("subsections", []):
            subsection["word_count"] = len(subsection["content"].split())

    return sections
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def detect_issues(meta: dict, body: str, sections: list) -> list:
    """Detect potential skill quality issues.

    Runs a fixed sequence of heuristic checks over the frontmatter and
    body: version present, description length, output format, error
    handling, quality gates, thin sections, and examples.  Returns a list
    of {"severity", "issue", "fix"} dicts in check order.
    """
    issues = []
    body_lower = body.lower()

    def add(severity, issue, fix):
        # Append one finding in the uniform record shape.
        issues.append({"severity": severity, "issue": issue, "fix": fix})

    # --- Frontmatter checks ---------------------------------------------
    if "version" not in meta:
        add("low", "No version number in frontmatter",
            "Add `version: 1.0.0` to frontmatter")

    desc = meta.get("description", "")
    if len(desc) < 50:
        add("medium",
            f"Description too short ({len(desc)} chars) — won't trigger reliably",
            "Expand description to include trigger phrases and use cases")
    if len(desc) > 400:
        add("low",
            f"Description very long ({len(desc)} chars) — may be truncated",
            "Trim to under 300 chars while keeping key trigger phrases")

    # --- Body checks (keyword heuristics) -------------------------------
    output_keywords = ["output", "format", "deliver", "result", "produce"]
    in_titles = any(
        any(kw in sec["title"].lower() for kw in output_keywords)
        for sec in sections
    )
    in_body = any(kw in body_lower for kw in output_keywords)
    if not in_titles and not in_body:
        add("high", "No output format defined",
            "Add an OUTPUT section with a concrete template")

    error_keywords = ["error", "fail", "if not", "missing", "invalid", "edge case"]
    if not any(kw in body_lower for kw in error_keywords):
        add("medium", "No error handling or edge case coverage",
            "Add an ERROR HANDLING section for common failure modes")

    quality_keywords = ["verify", "check", "validate", "quality", "review", "audit", "pass"]
    if not any(kw in body_lower for kw in quality_keywords):
        add("medium", "No quality gate or self-verification step",
            "Add a verification checklist before final output")

    # --- Per-section thinness check -------------------------------------
    for sec in sections:
        if sec["word_count"] < 20 and sec["title"] not in ("", "Quick Reference"):
            add("medium",
                f'Section "{sec["title"]}" is very thin ({sec["word_count"]} words) — likely over-compressed',
                f'Expand "{sec["title"]}" with concrete instructions, examples, or rules')

    example_keywords = ["example", "before:", "after:", "e.g.", "for instance", "sample"]
    if not any(kw in body_lower for kw in example_keywords):
        add("low", "No examples in skill body",
            "Add at least one before/after example or sample output")

    return issues
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def analyze_skill(filepath: str) -> dict:
    """Parse a SKILL.md file and return a full structural analysis dict.

    Exits the process with status 1 (after printing to stderr) when the
    file does not exist.  The returned dict mirrors what the CLI prints:
    identity, size metrics, per-section summaries, and detected issues.
    """
    path = Path(filepath)
    if not path.exists():
        print(f"Error: File not found: {filepath}", file=sys.stderr)
        sys.exit(1)

    text = path.read_text(encoding="utf-8")
    meta, body = parse_frontmatter(text)
    sections = extract_sections(body)
    issues = detect_issues(meta, body, sections)

    description = meta.get("description", "")
    section_summaries = [
        {
            "title": sec["title"],
            "level": sec["level"],
            "word_count": sec["word_count"],
            "subsection_count": len(sec.get("subsections", [])),
        }
        for sec in sections
    ]

    # Key order is kept stable so --json output stays diff-friendly.
    return {
        "file": str(path),
        "name": meta.get("name", "unknown"),
        "version": meta.get("version", "unversioned"),
        "description": description,
        "description_length": len(description),
        "total_words": len(text.split()),
        "total_lines": len(text.splitlines()),
        "section_count": len(sections),
        "sections": section_summaries,
        "issues": issues,
        "issue_count": len(issues),
        "meta": meta,
    }
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
def print_analysis(result: dict):
    """Render an analyze_skill() result as a human-readable report on stdout.

    Issues are listed high → medium → low; a checkmark line is printed
    instead when no issues were found.
    """
    rule = "═" * 60
    severity_rank = {"high": 0, "medium": 1, "low": 2}
    severity_icon = {"high": "✗", "medium": "⚠", "low": "○"}

    print(f"\n{rule}")
    print(f" SKILL ANALYSIS: {result['name']} v{result['version']}")
    print(f" File: {result['file']}")
    print(f"{rule}")
    print(f" Words: {result['total_words']:,} | Lines: {result['total_lines']:,} | Sections: {result['section_count']}")
    print(f" Description: {result['description_length']} chars")

    print(f"\n SECTIONS:")
    for sec in result["sections"]:
        indent = " " if sec["level"] == 2 else " "
        marker = "##" if sec["level"] == 2 else "###"
        print(f" {indent}{marker} {sec['title']:<40} {sec['word_count']:>5} words")

    if not result["issues"]:
        print(f"\n ✓ No structural issues detected.")
    else:
        print(f"\n ISSUES DETECTED ({result['issue_count']}):")
        ordered = sorted(result["issues"], key=lambda item: severity_rank[item["severity"]])
        for issue in ordered:
            icon = severity_icon[issue["severity"]]
            print(f"\n {icon} [{issue['severity'].upper()}] {issue['issue']}")
            print(f" Fix: {issue['fix']}")

    print(f"\n{rule}\n")
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
if __name__ == "__main__":
    # CLI entry: analyze one skill file and print in the chosen format.
    if len(sys.argv) < 2:
        print("Usage: python skill_parser.py <SKILL.md> [--json|--sections]")
        sys.exit(1)

    target = sys.argv[1]
    output_mode = sys.argv[2] if len(sys.argv) > 2 else "--report"

    analysis = analyze_skill(target)

    if output_mode == "--json":
        print(json.dumps(analysis, indent=2))
    elif output_mode == "--sections":
        # Outline view: indent by heading level, echo the markdown markers.
        for sec in analysis["sections"]:
            print(f"{' ' * (sec['level'] - 1)}{'#' * sec['level']} {sec['title']} ({sec['word_count']} words)")
    else:
        print_analysis(analysis)
|
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
version_bump.py — Bump version number in a SKILL.md file
|
|
4
|
+
|
|
5
|
+
Usage:
|
|
6
|
+
python version_bump.py <SKILL.md> # auto-bump patch
|
|
7
|
+
python version_bump.py <SKILL.md> --minor # bump minor
|
|
8
|
+
python version_bump.py <SKILL.md> --major # bump major
|
|
9
|
+
python version_bump.py <SKILL.md> --set 2.1.0 # set specific version
|
|
10
|
+
python version_bump.py <SKILL.md> --dry-run # preview only
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import sys
|
|
14
|
+
import re
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def parse_version(v: str) -> tuple:
    """Parse "X.Y.Z" into an int 3-tuple.

    Raises ValueError when the string does not have exactly three
    dot-separated parts (or when a part is not an integer).
    """
    pieces = v.strip().split(".")
    if len(pieces) != 3:
        raise ValueError(f"Invalid version format: {v}")
    major, minor, patch = (int(piece) for piece in pieces)
    return (major, minor, patch)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def format_version(major: int, minor: int, patch: int) -> str:
    """Join three version components into a "major.minor.patch" string."""
    return ".".join(str(part) for part in (major, minor, patch))
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def bump_version(current: str, bump_type: str) -> str:
    """Return `current` bumped by semver rules.

    "major" resets minor+patch, "minor" resets patch; any other value is
    treated as a patch bump.  Propagates ValueError from parse_version.
    """
    major, minor, patch = parse_version(current)
    bumped = {
        "major": (major + 1, 0, 0),
        "minor": (major, minor + 1, 0),
    }.get(bump_type, (major, minor, patch + 1))
    return format_version(*bumped)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def update_skill_version(filepath: str, new_version: str, dry_run: bool = False) -> dict:
    """Rewrite the `version:` frontmatter line of a SKILL.md file.

    If no `version:` line exists, one is inserted after the `name:` line.
    Fix over the original: when the file has neither a `version:` nor a
    `name:` line, a minimal frontmatter block is prepended — the original
    silently wrote the file back unchanged while reporting success.

    Returns {"file", "old_version", "new_version", "dry_run", "written"};
    with dry_run=True nothing is written to disk.  Exits with status 1
    when the file does not exist.
    """
    path = Path(filepath)
    if not path.exists():
        print(f"Error: File not found: {filepath}", file=sys.stderr)
        sys.exit(1)

    text = path.read_text(encoding="utf-8")

    # NOTE(review): this matches the first `version:` line anywhere in the
    # file, not only inside frontmatter — assumed acceptable for SKILL.md.
    version_match = re.search(r"^version:\s*(.+)$", text, re.MULTILINE)
    old_version = version_match.group(1).strip() if version_match else None

    if version_match:
        new_text = re.sub(
            r"^(version:\s*)(.+)$",
            f"\\g<1>{new_version}",
            text,
            count=1,
            flags=re.MULTILINE,
        )
    else:
        # Insert the version right after the `name:` frontmatter line.
        new_text, inserted = re.subn(
            r"^(name:\s*.+)$",
            f"\\g<1>\nversion: {new_version}",
            text,
            count=1,
            flags=re.MULTILINE,
        )
        if inserted == 0:
            # No anchor line at all — prepend a minimal frontmatter block
            # so the version is recorded rather than silently dropped.
            new_text = f"---\nversion: {new_version}\n---\n\n{text}"

    result = {
        "file": str(path),
        "old_version": old_version or "none",
        "new_version": new_version,
        "dry_run": dry_run,
    }

    if not dry_run:
        path.write_text(new_text, encoding="utf-8")
        result["written"] = True
    else:
        result["written"] = False

    return result
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
if __name__ == "__main__":
    # CLI entry: bump (or set) the version of one SKILL.md file.
    if len(sys.argv) < 2:
        print("Usage: python version_bump.py <SKILL.md> [--major|--minor|--patch|--set X.Y.Z] [--dry-run]")
        sys.exit(1)

    filepath = sys.argv[1]
    args = sys.argv[2:]

    bump_type = "patch"  # default when no bump flag is given
    set_version = None
    dry_run = "--dry-run" in args

    if "--major" in args:
        bump_type = "major"
    elif "--minor" in args:
        bump_type = "minor"
    elif "--set" in args:
        idx = args.index("--set")
        if idx + 1 < len(args):
            set_version = args[idx + 1]
        else:
            # Fix: a dangling --set previously fell through silently to a
            # patch bump; report the misuse instead.
            print("Error: --set requires a version argument (e.g. --set 2.1.0)", file=sys.stderr)
            sys.exit(1)

    # Fix: check existence before reading — the original raised an
    # unhandled FileNotFoundError here, before update_skill_version()
    # could print its clean error message.
    path = Path(filepath)
    if not path.exists():
        print(f"Error: File not found: {filepath}", file=sys.stderr)
        sys.exit(1)

    # Read the current version (default 1.0.0 when absent).
    text = path.read_text(encoding="utf-8")
    version_match = re.search(r"^version:\s*(.+)$", text, re.MULTILINE)
    current = version_match.group(1).strip() if version_match else "1.0.0"

    if set_version:
        new_version = set_version
    else:
        try:
            new_version = bump_version(current, bump_type)
        except ValueError:
            # Unparseable existing version → assign 2.0.0 per the upgrade guide.
            new_version = "2.0.0"

    result = update_skill_version(filepath, new_version, dry_run)

    action = "Would update" if dry_run else "Updated"
    print(f"\n {action}: {result['file']}")
    print(f" Version: {result['old_version']} → {result['new_version']}")
    if dry_run:
        print(f" (dry run — no changes written)\n")
    else:
        print(f" ✓ Written\n")
|