neo-skill 0.1.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.shared/skill-creator/data/domains.json +56 -0
- package/.shared/skill-creator/data/output-patterns.csv +8 -0
- package/.shared/skill-creator/data/resource-patterns.csv +8 -0
- package/.shared/skill-creator/data/skill-reasoning.csv +11 -0
- package/.shared/skill-creator/data/trigger-patterns.csv +8 -0
- package/.shared/skill-creator/data/validation-rules.csv +11 -0
- package/.shared/skill-creator/data/workflow-patterns.csv +6 -0
- package/.shared/skill-creator/scripts/generate.py +300 -0
- package/.shared/skill-creator/scripts/package.py +140 -0
- package/.shared/skill-creator/scripts/search.py +231 -0
- package/.shared/skill-creator/scripts/validate.py +213 -0
- package/LICENSE +21 -0
- package/README.md +117 -0
- package/bin/omni-skill.js +55 -0
- package/bin/skill-creator.js +55 -0
- package/package.json +25 -0
- package/skills/review-gate/references/review-gate.md +228 -0
- package/skills/review-gate/skillspec.json +131 -0
- package/skills/skill-creator/references/output-patterns.md +82 -0
- package/skills/skill-creator/references/pre-delivery-checklist.md +70 -0
- package/skills/skill-creator/references/requirement-collection.md +80 -0
- package/skills/skill-creator/references/skill-system-design.md +112 -0
- package/skills/skill-creator/references/sources.md +5 -0
- package/skills/skill-creator/references/workflow-step-editing.md +103 -0
- package/skills/skill-creator/references/workflows.md +28 -0
- package/skills/skill-creator/scripts/init_skill.py +34 -0
- package/skills/skill-creator/scripts/package_skill.py +34 -0
- package/skills/skill-creator/scripts/validate_skill.py +35 -0
- package/skills/skill-creator/skillspec.json +117 -0
- package/src/omni_skill/__init__.py +1 -0
- package/src/omni_skill/cli.py +270 -0
- package/src/skill_creator/__init__.py +1 -0
- package/src/skill_creator/cli.py +278 -0
- package/src/skill_creator/packaging/package.py +30 -0
- package/src/skill_creator/packaging/ziputil.py +26 -0
- package/src/skill_creator/spec/model.py +111 -0
- package/src/skill_creator/spec/render.py +108 -0
- package/src/skill_creator/spec/validate.py +18 -0
- package/src/skill_creator/targets/claude.py +53 -0
- package/src/skill_creator/targets/common.py +46 -0
- package/src/skill_creator/targets/cursor.py +34 -0
- package/src/skill_creator/targets/github_skills.py +40 -0
- package/src/skill_creator/targets/windsurf.py +123 -0
- package/src/skill_creator/util/frontmatter.py +24 -0
- package/src/skill_creator/util/fs.py +32 -0
|
@@ -0,0 +1,231 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Skill Creator Search Tool
|
|
4
|
+
|
|
5
|
+
Search skill patterns, templates, and reasoning rules.
|
|
6
|
+
Inspired by ui-ux-pro-max's multi-domain search architecture.
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
python3 .shared/skill-creator/scripts/search.py "<query>" --domain <domain>
|
|
10
|
+
python3 .shared/skill-creator/scripts/search.py "<query>" --skill-system
|
|
11
|
+
python3 .shared/skill-creator/scripts/search.py "<query>" --stack windsurf
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
import argparse
|
|
17
|
+
import csv
|
|
18
|
+
import json
|
|
19
|
+
import sys
|
|
20
|
+
from pathlib import Path
|
|
21
|
+
from typing import Any, Dict, List, Optional
|
|
22
|
+
|
|
23
|
+
SCRIPT_DIR = Path(__file__).parent
|
|
24
|
+
DATA_DIR = SCRIPT_DIR.parent / "data"
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def load_json(path: Path) -> Dict[str, Any]:
    """Read *path* as UTF-8 text and parse it as JSON."""
    raw = path.read_text(encoding="utf-8")
    return json.loads(raw)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def load_csv(path: Path) -> List[Dict[str, str]]:
    """Read a CSV file into a list of row dicts (header row supplies keys)."""
    with path.open(encoding="utf-8") as handle:
        return [dict(row) for row in csv.DictReader(handle)]
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def search_csv(data: List[Dict[str, str]], query: str, fields: Optional[List[str]] = None) -> List[Dict[str, str]]:
    """Rank rows of *data* by keyword overlap with *query*.

    The query is lowercased and split on whitespace; a row scores one
    point per keyword found (case-insensitively) in the concatenation of
    the selected fields. Rows with no hits are dropped; ties keep their
    input order (stable sort).
    """
    keywords = query.lower().split()
    scored: List[tuple] = []

    for row in data:
        columns = fields if fields else list(row.keys())
        haystack = " ".join(str(row.get(col, "")) for col in columns).lower()
        hits = sum(1 for kw in keywords if kw in haystack)
        if hits > 0:
            scored.append((hits, row))

    scored.sort(key=lambda pair: -pair[0])
    return [row for _, row in scored]
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def search_domain(domain: str, query: str, max_results: int = 5) -> List[Dict[str, str]]:
    """Search one configured domain's CSV for *query*.

    Exits with status 1 (after printing to stderr) when the domain is
    unknown or its data file is missing — this is a CLI helper.
    """
    config = load_json(DATA_DIR / "domains.json")
    domains = config["domains"]

    if domain not in domains:
        print(f"Unknown domain: {domain}", file=sys.stderr)
        print(f"Available: {', '.join(domains.keys())}", file=sys.stderr)
        sys.exit(1)

    data_file = DATA_DIR / domains[domain]["data_file"]
    if not data_file.exists():
        print(f"Data file not found: {data_file}", file=sys.stderr)
        sys.exit(1)

    rows = load_csv(data_file)
    return search_csv(rows, query)[:max_results]
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def search_skill_system(query: str, project_name: Optional[str] = None) -> Dict[str, Any]:
    """
    Build a complete skill-system recommendation for *query*.

    Every configured domain is searched (top 3 hits each), then reasoning
    rules whose ``condition`` column matches the query are attached.
    """
    config = load_json(DATA_DIR / "domains.json")
    rules = load_csv(DATA_DIR / config["reasoning_file"])

    # Top hits per domain.
    per_domain: Dict[str, List[Dict[str, str]]] = {}
    for name in config["domains"]:
        per_domain[name] = search_domain(name, query, max_results=3)

    matched_rules = search_csv(rules, query, ["condition"])

    def _top(domain: str) -> Dict[str, str]:
        # First hit for the domain, or {} when there were none.
        hits = per_domain.get(domain)
        return hits[0] if hits else {}

    return {
        "project": project_name or "Untitled Skill",
        "query": query,
        "workflow_pattern": _top("workflow"),
        "output_pattern": _top("output"),
        "resource_strategy": _top("resource"),
        "trigger_examples": [r.get("example_triggers", "") for r in per_domain.get("trigger", [])[:3]],
        "applicable_rules": [
            {"condition": r["condition"], "recommendation": r["recommendation"], "rationale": r["rationale"]}
            for r in matched_rules[:5]
        ],
    }
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
def search_stack(stack: str, query: str) -> Dict[str, Any]:
    """Return guideline metadata for one target stack.

    Exits with status 1 (after printing to stderr) on an unknown stack.
    """
    config = load_json(DATA_DIR / "domains.json")
    stacks = config["stacks"]

    if stack not in stacks:
        print(f"Unknown stack: {stack}", file=sys.stderr)
        print(f"Available: {', '.join(stacks.keys())}", file=sys.stderr)
        sys.exit(1)

    info = stacks[stack]
    return {
        "stack": stack,
        "query": query,
        "description": info["description"],
        "format": info["format"],
        "output_path": info["output_path"],
        "priority": info["priority"],
    }
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
def format_domain_results(domain: str, results: List[Dict[str, str]]) -> str:
    """Render domain search hits as a small Markdown report."""
    if not results:
        return f"No results found for domain '{domain}'"

    out = [f"## {domain.upper()} Domain Results", ""]
    for idx, row in enumerate(results, 1):
        title = row.get("name", row.get("id", "Unknown"))
        out.append(f"### {idx}. {title}")
        # Show every non-empty column except the identifying ones.
        out.extend(
            f"- **{key}**: {value}"
            for key, value in row.items()
            if key not in ("id", "name") and value
        )
        out.append("")

    return "\n".join(out)
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def format_skill_system(rec: Dict[str, Any]) -> str:
    """Render a skill-system recommendation as a boxed text report."""
    top = "╔════════════════════════════════════════════════════════════════╗"
    sep = "╠════════════════════════════════════════════════════════════════╣"
    bottom = "╚════════════════════════════════════════════════════════════════╝"

    out = [
        top,
        f"║ SKILL SYSTEM: {rec['project']:<47} ║",
        sep,
        f"║ Query: {rec['query']:<54} ║",
        sep,
    ]

    # Workflow pattern section (only when a hit was found).
    workflow = rec.get("workflow_pattern", {})
    if workflow:
        out.append(f"║ WORKFLOW: {workflow.get('name', 'N/A'):<51} ║")
        out.append(f"║ {workflow.get('description', '')[:58]:<58} ║")

    # Output pattern section.
    output = rec.get("output_pattern", {})
    if output:
        out.append(f"║ OUTPUT: {output.get('name', 'N/A'):<53} ║")
        out.append(f"║ {output.get('description', '')[:58]:<58} ║")

    # Resource strategy section.
    resources = rec.get("resource_strategy", {})
    if resources:
        out.append(f"║ RESOURCES: {resources.get('name', 'N/A'):<50} ║")

    out.append(sep)
    out.append("║ APPLICABLE RULES: ║")
    for rule in rec.get("applicable_rules", [])[:3]:
        condition = rule.get("condition", "")[:20]
        advice = rule.get("recommendation", "")[:35]
        out.append(f"║ • {condition}: {advice:<38} ║")

    out.append(bottom)
    return "\n".join(out)
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
def main() -> None:
    """CLI entry point: dispatch to skill-system, stack, or domain search."""
    parser = argparse.ArgumentParser(
        description="Search skill patterns, templates, and reasoning rules"
    )
    parser.add_argument("query", help="Search query")
    parser.add_argument("--domain", "-d", help="Search specific domain")
    parser.add_argument("--skill-system", "-s", action="store_true", help="Generate complete skill system")
    parser.add_argument("--stack", help="Get stack-specific guidelines")
    parser.add_argument("--project", "-p", help="Project name (for --skill-system)")
    parser.add_argument("--max-results", "-n", type=int, default=5, help="Max results per domain")
    parser.add_argument("--format", "-f", choices=["text", "json"], default="text", help="Output format")

    args = parser.parse_args()
    as_json = args.format == "json"

    if args.skill_system:
        rec = search_skill_system(args.query, args.project)
        if as_json:
            print(json.dumps(rec, indent=2, ensure_ascii=False))
        else:
            print(format_skill_system(rec))
    elif args.stack:
        info = search_stack(args.stack, args.query)
        if as_json:
            print(json.dumps(info, indent=2, ensure_ascii=False))
        else:
            print(f"Stack: {info['stack']}")
            print(f"Format: {info['format']}")
            print(f"Output: {info['output_path']}")
    elif args.domain:
        hits = search_domain(args.domain, args.query, args.max_results)
        if as_json:
            print(json.dumps(hits, indent=2, ensure_ascii=False))
        else:
            print(format_domain_results(args.domain, hits))
    else:
        # No mode flag: briefly search every configured domain.
        config = load_json(DATA_DIR / "domains.json")
        for domain in config["domains"]:
            hits = search_domain(domain, args.query, max_results=2)
            if hits:
                print(format_domain_results(domain, hits))


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,213 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Skill Validator - Validate skillspec.json and generated outputs
|
|
4
|
+
|
|
5
|
+
Usage:
|
|
6
|
+
python3 .shared/skill-creator/scripts/validate.py skills/<skill>/skillspec.json
|
|
7
|
+
python3 .shared/skill-creator/scripts/validate.py skills/<skill>/skillspec.json --strict
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import argparse
|
|
13
|
+
import csv
|
|
14
|
+
import json
|
|
15
|
+
import re
|
|
16
|
+
import sys
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
from typing import Any, Dict, List, Tuple
|
|
19
|
+
|
|
20
|
+
SCRIPT_DIR = Path(__file__).parent
|
|
21
|
+
DATA_DIR = SCRIPT_DIR.parent / "data"
|
|
22
|
+
REPO_ROOT = SCRIPT_DIR.parent.parent.parent
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def load_validation_rules() -> List[Dict[str, str]]:
    """Load validation rules from data/validation-rules.csv.

    Returns an empty list when the CSV is absent so validation can still
    run without rule metadata.
    """
    rules_path = DATA_DIR / "validation-rules.csv"
    if not rules_path.exists():
        return []
    with rules_path.open(encoding="utf-8") as handle:
        return [row for row in csv.DictReader(handle)]
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def validate_kebab_case(name: str) -> bool:
    """Return True when *name* is valid kebab-case.

    Valid means: starts with a lowercase letter, lowercase alphanumeric
    segments, single hyphens between segments, no trailing hyphen.
    """
    pattern = r"^[a-z][a-z0-9]*(-[a-z0-9]+)*$"
    return re.match(pattern, name) is not None
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def validate_spec(spec: Dict[str, Any], spec_path: Path) -> List[Tuple[str, str, str]]:
    """
    Validate skillspec.json structure and content.

    Args:
        spec: Parsed skillspec.json contents.
        spec_path: Path the spec was loaded from (reserved for future
            path-relative checks; currently unused).

    Returns:
        List of (severity, rule_id, message) tuples where severity is
        "error" or "warning".
    """
    errors: List[Tuple[str, str, str]] = []

    # name: required and kebab-case.
    name = spec.get("name", "")
    if not name:
        errors.append(("error", "name_required", "name is required"))
    elif not validate_kebab_case(name):
        errors.append(("error", "name_kebab", f"name '{name}' is not valid kebab-case"))

    # description: required; short descriptions trigger poorly.
    desc = spec.get("description", "")
    if not desc:
        errors.append(("error", "description_required", "description is required"))
    elif len(desc) < 20:
        errors.append(("warning", "description_short", "description is very short (<20 chars)"))

    # questions: hard cap keeps interactive collection short.
    questions = spec.get("questions", [])
    if len(questions) > 10:
        errors.append(("error", "questions_max_10", f"questions must be <= 10 (got {len(questions)})"))

    # triggers: recommend enough examples for reliable activation.
    triggers = spec.get("triggers", [])
    if len(triggers) < 3:
        errors.append(("warning", "triggers_min_3", f"recommend at least 3 triggers (got {len(triggers)})"))

    # workflow: required, with a known type and non-empty steps.
    workflow = spec.get("workflow", {})
    if not workflow:
        errors.append(("error", "workflow_required", "workflow is required"))
    else:
        workflow_type = workflow.get("type", "")
        if workflow_type not in ("sequential", "conditional"):
            # Fix: was a placeholder-less f-string; now reports the bad value.
            errors.append(("error", "workflow_type_invalid",
                           f"workflow.type must be sequential or conditional (got '{workflow_type}')"))

        steps = workflow.get("steps", [])
        if not steps:
            errors.append(("error", "steps_not_empty", "workflow.steps must not be empty"))
        else:
            for i, step in enumerate(steps):
                if not step.get("id"):
                    errors.append(("error", "step_id_required", f"step[{i}].id is required"))
                if not step.get("title"):
                    errors.append(("error", "step_title_required", f"step[{i}].title is required"))

                # Long inline notes belong in references/ instead.
                notes = step.get("notes", "")
                if len(notes) > 500:
                    errors.append(("warning", "notes_max_length",
                                   f"step[{i}].notes is long ({len(notes)} chars) - consider using references/"))

    # freedom_level: constrained vocabulary (defaults to "low").
    freedom = spec.get("freedom_level", "low")
    if freedom not in ("low", "medium", "high"):
        errors.append(("error", "freedom_level_invalid", "freedom_level must be low, medium, or high"))

    return errors
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def validate_claude_output(skill_name: str, repo_root: Path) -> List[Tuple[str, str, str]]:
    """Validate the generated Claude SKILL.md's strict YAML frontmatter.

    Claude accepts only the frontmatter keys {name, description}; any
    other key is an error. A missing SKILL.md yields no findings (it may
    simply not have been generated yet).
    """
    findings: List[Tuple[str, str, str]] = []

    skill_md = repo_root / ".claude" / "skills" / skill_name / "SKILL.md"
    if not skill_md.exists():
        # Not generated yet - nothing to check.
        return findings

    content = skill_md.read_text(encoding="utf-8")

    if not content.startswith("---\n"):
        findings.append(("error", "claude_frontmatter_missing", "Claude SKILL.md missing YAML frontmatter"))
        return findings

    end = content.find("\n---\n", 4)
    if end == -1:
        findings.append(("error", "claude_frontmatter_unterminated", "Claude SKILL.md frontmatter not closed"))
        return findings

    frontmatter = content[4:end]

    # Collect top-level keys: the text before the first ':' on each line.
    keys = set()
    for line in frontmatter.splitlines():
        head, colon, _ = line.partition(":")
        if colon and head.strip():
            keys.add(head.strip())

    extra_keys = keys - {"name", "description"}
    if extra_keys:
        findings.append(("error", "claude_frontmatter_strict",
                         f"Claude frontmatter has extra keys (strict mode): {extra_keys}"))

    return findings
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def validate_banned_files(skill_name: str, repo_root: Path) -> List[Tuple[str, str, str]]:
    """Flag repo-level files (README, LICENSE, ...) found inside
    generated skill bundles, where they must not appear."""
    findings: List[Tuple[str, str, str]] = []
    banned = {"README.md", "CHANGELOG.md", "INSTALL.md", "LICENSE", "LICENSE.md"}

    bundle_dirs = (
        repo_root / ".claude" / "skills" / skill_name,
        repo_root / ".github" / "skills" / skill_name,
    )
    for bundle in bundle_dirs:
        if not bundle.exists():
            continue
        findings.extend(
            ("error", "no_banned_files", f"Banned file in skill bundle: {p}")
            for p in bundle.rglob("*")
            if p.is_file() and p.name in banned
        )

    return findings
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def main() -> None:
    """CLI entry point: validate a skillspec plus any generated outputs.

    Exit status: 0 on success (warnings allowed unless --strict),
    1 on errors, missing spec, or --strict with warnings.
    """
    parser = argparse.ArgumentParser(description="Validate skillspec.json and outputs")
    parser.add_argument("spec", help="Path to skillspec.json")
    parser.add_argument("--strict", action="store_true", help="Treat warnings as errors")
    parser.add_argument("--repo-root", "-r", default=str(REPO_ROOT), help="Repository root")
    args = parser.parse_args()

    spec_path = Path(args.spec).resolve()
    repo_root = Path(args.repo_root).resolve()

    if not spec_path.exists():
        print(f"ERROR: Spec not found: {spec_path}", file=sys.stderr)
        sys.exit(1)

    spec = json.loads(spec_path.read_text(encoding="utf-8"))
    skill_name = spec.get("name", "")

    # Collect findings from the spec itself and from generated outputs.
    findings: List[Tuple[str, str, str]] = []
    findings.extend(validate_spec(spec, spec_path))
    findings.extend(validate_claude_output(skill_name, repo_root))
    findings.extend(validate_banned_files(skill_name, repo_root))

    error_count = 0
    warning_count = 0
    for severity, rule_id, message in findings:
        tag = "ERROR" if severity == "error" else "WARN"
        print(f"[{tag}] {rule_id}: {message}")
        if severity == "error":
            error_count += 1
        else:
            warning_count += 1

    if not findings:
        print("✓ Validation passed")
        sys.exit(0)

    if error_count or (args.strict and warning_count):
        sys.exit(1)

    print("✓ Validation passed with warnings")
    sys.exit(0)


if __name__ == "__main__":
    main()
|
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 liuminxin45
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
# neo-skill(多 AI 助手技能生成器)
|
|
2
|
+
|
|
3
|
+
一个确定性的 **skill-creator** 仓库。
|
|
4
|
+
|
|
5
|
+
**GitHub**: https://github.com/liuminxin45/neo-skill
|
|
6
|
+
|
|
7
|
+
## 安装
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
# 全局安装
|
|
11
|
+
npm install -g neo-skill
|
|
12
|
+
```
|
|
13
|
+
|
|
14
|
+
**前置依赖**:需要安装 Python 3.8+(命令行执行依赖 Python)
|
|
15
|
+
|
|
16
|
+
## 功能说明
|
|
17
|
+
- 使用 canonical `skills/<skill>/skillspec.json` 作为单一真源(single source of truth)
|
|
18
|
+
- 为多个 AI 助手生成入口文件:
|
|
19
|
+
- Claude: `.claude/skills/<skill>/SKILL.md` + `resources/`
|
|
20
|
+
- Windsurf: `.windsurf/workflows/<skill>.md`
|
|
21
|
+
- Cursor: `.cursor/commands/<skill>.md`
|
|
22
|
+
- GitHub / VS Code Skills: `.github/skills/<skill>/SKILL.md` + resources
|
|
23
|
+
- 校验生成的 `SKILL.md` 是否符合 Claude 严格的元数据规则
|
|
24
|
+
- 打包 Claude `.skill`(zip 格式,符合正确的根目录结构)
|
|
25
|
+
|
|
26
|
+
## 支持的 Skills
|
|
27
|
+
|
|
28
|
+
| Skill 名称 | 描述 | 来源 |
|
|
29
|
+
|-----------|------|------|
|
|
30
|
+
| **skill-creator** | 对话式收集需求,生成可在多 AI Assistant 运行的技能包 | 内置 |
|
|
31
|
+
| **review-gate** | 建立架构与工程化 PR Review 规范,提供可执行的 Review Checklist | 内置 |
|
|
32
|
+
|
|
33
|
+
**触发示例**:
|
|
34
|
+
- skill-creator: "我想做一个 skill"、"帮我生成 SKILL.md"、"把我的 prompt 工作流变成 skill"
|
|
35
|
+
- review-gate: "我想建立 PR Review 架构规范检查点"、"帮我生成 PR Review Checklist 模板"、"软评审代码"
|
|
36
|
+
|
|
37
|
+
## 快速开始
|
|
38
|
+
|
|
39
|
+
### 典型使用场景
|
|
40
|
+
|
|
41
|
+
**场景 1:在 neo-skill 仓库内开发/维护 skill**
|
|
42
|
+
```bash
|
|
43
|
+
# 从仓库根目录执行,初始化指定 AI 助手的技能文件
|
|
44
|
+
omni-skill init --ai claude
|
|
45
|
+
omni-skill init --ai windsurf
|
|
46
|
+
omni-skill init --ai all # 初始化所有支持的 AI 助手
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
**场景 2:在 neo-skill 仓库内更新并重新生成所有 IDE 入口文件**
|
|
50
|
+
在 neo-skill 仓库根目录执行:
|
|
51
|
+
|
|
52
|
+
```bash
|
|
53
|
+
omni-skill update
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
行为说明:
|
|
57
|
+
- 根据上次 init 时保存的 AI 目标,重新同步/生成所有入口文件
|
|
58
|
+
|
|
59
|
+
**场景 3:在其他项目中使用 neo-skill 的 skill**
|
|
60
|
+
将 neo-skill 仓库克隆到你的项目中(例如 `vendor/neo-skill/`),然后根据你使用的 IDE,复制对应的入口文件到项目根目录:
|
|
61
|
+
- **Windsurf**:复制 `.windsurf/workflows/<skill>.md` 和 `.windsurf/workflows/data/`
|
|
62
|
+
- **Cursor**:复制 `.cursor/commands/<skill>.md`
|
|
63
|
+
- **Claude Desktop**:复制 `.claude/skills/<skill>/`
|
|
64
|
+
- **GitHub / VS Code Skills**:复制 `.github/skills/<skill>/`
|
|
65
|
+
|
|
66
|
+
### 推荐用法
|
|
67
|
+
|
|
68
|
+
```bash
|
|
69
|
+
# 初始化指定 AI 助手
|
|
70
|
+
omni-skill init --ai claude
|
|
71
|
+
omni-skill init --ai windsurf --ai cursor # 可指定多个
|
|
72
|
+
|
|
73
|
+
# 初始化所有支持的 AI 助手
|
|
74
|
+
omni-skill init --ai all
|
|
75
|
+
|
|
76
|
+
# 更新(基于上次 init 的配置)
|
|
77
|
+
omni-skill update
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
### 关于 npm 安装后的 init 行为
|
|
81
|
+
|
|
82
|
+
当你通过 `npm install -g neo-skill` 安装后:
|
|
83
|
+
|
|
84
|
+
- **命令来源**:`omni-skill` 和 `skill-creator` 命令来自全局 npm 包(内部调用 Python)
|
|
85
|
+
- **同步内容**:`omni-skill init` 会把包内的技能/资源内容同步到当前项目目录(覆盖式同步):
|
|
86
|
+
- `skills/`
|
|
87
|
+
- `.shared/skill-creator/`
|
|
88
|
+
- 以及指定 AI 对应的目录(如 `.claude/skills/`、`.windsurf/workflows/` 等)
|
|
89
|
+
|
|
90
|
+
**核心行为:**
|
|
91
|
+
- 根据 `--ai` 参数生成对应 IDE 的入口文件
|
|
92
|
+
- 不同 IDE 可以共存,切换 IDE 时无需重新生成
|
|
93
|
+
- 生成的文件都是从 canonical `skills/<skill>/skillspec.json` 渲染而来
|
|
94
|
+
|
|
95
|
+
**参数说明:**
|
|
96
|
+
- `init`:初始化技能文件
|
|
97
|
+
- `update`:基于上次 init 保存的配置重新同步
|
|
98
|
+
- `--ai <target>`:指定目标 AI 助手(可重复使用)
|
|
99
|
+
|
|
100
|
+
**支持的 AI 助手:**
|
|
101
|
+
claude, cursor, windsurf, antigravity, copilot, kiro, codex, qoder, roocode, gemini, trae, opencode, continue, all
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
## Canonical 与生成文件
|
|
105
|
+
|
|
106
|
+
### Canonical(可编辑)
|
|
107
|
+
- `skills/<skill>/skillspec.json`
|
|
108
|
+
- `skills/<skill>/references/**`
|
|
109
|
+
- `skills/<skill>/scripts/**`(可选)
|
|
110
|
+
- `skills/<skill>/assets/**`(可选)
|
|
111
|
+
|
|
112
|
+
### 生成文件(不要手动编辑)
|
|
113
|
+
- `.windsurf/workflows/<skill>.md`
|
|
114
|
+
- `.windsurf/workflows/data/<skill>/**`(从 `skills/<skill>/assets/windsurf-workflow-data` 同步)
|
|
115
|
+
- `.claude/skills/<skill>/**`
|
|
116
|
+
- `.cursor/commands/<skill>.md`
|
|
117
|
+
- `.github/skills/<skill>/**`
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
#!/usr/bin/env node

const { spawnSync } = require("child_process");
const path = require("path");

// Launch the bundled Python CLI (omni_skill.cli) with the package's src/
// directory on PYTHONPATH, trying interpreter candidates until one works.
function main() {
  const packageRoot = path.resolve(__dirname, "..");
  const pythonSrc = path.join(packageRoot, "src");

  const env = { ...process.env };
  env.PYTHONPATH = env.PYTHONPATH
    ? `${pythonSrc}${path.delimiter}${env.PYTHONPATH}`
    : pythonSrc;

  const moduleArgs = ["-m", "omni_skill.cli", ...process.argv.slice(2)];

  // Interpreter candidates, most specific first; order differs per platform.
  const candidates = [];
  if (env.OMNI_SKILL_PYTHON) {
    candidates.push({ cmd: env.OMNI_SKILL_PYTHON, extraArgs: [] });
  }
  if (process.platform === "win32") {
    candidates.push(
      { cmd: "python", extraArgs: [] },
      { cmd: "python3", extraArgs: [] },
      { cmd: "py", extraArgs: ["-3"] },
    );
  } else {
    candidates.push(
      { cmd: "python3", extraArgs: [] },
      { cmd: "python", extraArgs: [] },
    );
  }

  let lastError = null;
  for (const candidate of candidates) {
    const result = spawnSync(candidate.cmd, [...candidate.extraArgs, ...moduleArgs], {
      stdio: "inherit",
      env,
    });
    if (result.error) {
      lastError = result.error;
      const message = String(result.error.message || "").toLowerCase();
      if (message.includes("enoent")) {
        // Interpreter binary not found - try the next candidate.
        continue;
      }
      console.error(result.error);
      process.exit(1);
    }
    // 9009 is cmd.exe's "command not recognized" exit code on Windows.
    if (typeof result.status === "number" && result.status === 9009) {
      continue;
    }
    process.exit(result.status == null ? 1 : result.status);
  }

  console.error(
    "Python not found. Please install Python 3 and ensure 'python' works, or set OMNI_SKILL_PYTHON to your interpreter path.",
  );
  if (lastError) {
    console.error(lastError);
  }
  process.exit(127);
}

main();
|