flight-rules 0.15.4 → 0.15.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/adapter.d.ts +4 -1
- package/dist/commands/adapter.js +17 -11
- package/package.json +1 -1
- package/payload/AGENTS.md +1 -1
- package/payload/skills/skill-improve.md +206 -0
|
@@ -6,7 +6,10 @@ export declare function copyCommandsWithConflictHandling(sourceDir: string, dest
|
|
|
6
6
|
skipped: string[];
|
|
7
7
|
}>;
|
|
8
8
|
/**
|
|
9
|
-
* Copy skill files to a destination directory with conflict handling
|
|
9
|
+
* Copy skill files to a destination directory with conflict handling.
|
|
10
|
+
* Source skills are flat .md files (e.g., web-prototype.md).
|
|
11
|
+
* They are deployed as directories containing SKILL.md (e.g., web-prototype/SKILL.md),
|
|
12
|
+
* which is the format Claude Code expects.
|
|
10
13
|
*/
|
|
11
14
|
export declare function copySkillsWithConflictHandling(sourceDir: string, destDir: string, skipPrompts?: boolean): Promise<{
|
|
12
15
|
copied: string[];
|
package/dist/commands/adapter.js
CHANGED
|
@@ -141,7 +141,10 @@ async function promptForConflict(filename, showBatchOptions) {
|
|
|
141
141
|
return action;
|
|
142
142
|
}
|
|
143
143
|
/**
|
|
144
|
-
* Copy skill files to a destination directory with conflict handling
|
|
144
|
+
* Copy skill files to a destination directory with conflict handling.
|
|
145
|
+
* Source skills are flat .md files (e.g., web-prototype.md).
|
|
146
|
+
* They are deployed as directories containing SKILL.md (e.g., web-prototype/SKILL.md),
|
|
147
|
+
* which is the format Claude Code expects.
|
|
145
148
|
*/
|
|
146
149
|
export async function copySkillsWithConflictHandling(sourceDir, destDir, skipPrompts = false) {
|
|
147
150
|
const copied = [];
|
|
@@ -152,44 +155,47 @@ export async function copySkillsWithConflictHandling(sourceDir, destDir, skipPro
|
|
|
152
155
|
const files = readdirSync(sourceDir).filter(f => f.endsWith('.md'));
|
|
153
156
|
let batchAction = null;
|
|
154
157
|
for (const file of files) {
|
|
158
|
+
const skillName = file.replace(/\.md$/, '');
|
|
155
159
|
const srcPath = join(sourceDir, file);
|
|
156
|
-
const
|
|
160
|
+
const destSkillDir = join(destDir, skillName);
|
|
161
|
+
const destPath = join(destSkillDir, 'SKILL.md');
|
|
157
162
|
if (existsSync(destPath)) {
|
|
158
163
|
if (skipPrompts) {
|
|
159
164
|
cpSync(srcPath, destPath);
|
|
160
|
-
copied.push(
|
|
165
|
+
copied.push(skillName);
|
|
161
166
|
continue;
|
|
162
167
|
}
|
|
163
168
|
if (batchAction === 'replace_all') {
|
|
164
169
|
cpSync(srcPath, destPath);
|
|
165
|
-
copied.push(
|
|
170
|
+
copied.push(skillName);
|
|
166
171
|
continue;
|
|
167
172
|
}
|
|
168
173
|
else if (batchAction === 'skip_all') {
|
|
169
|
-
skipped.push(
|
|
174
|
+
skipped.push(skillName);
|
|
170
175
|
continue;
|
|
171
176
|
}
|
|
172
|
-
const action = await promptForConflict(
|
|
177
|
+
const action = await promptForConflict(skillName, files.length > 1);
|
|
173
178
|
if (action === 'replace_all') {
|
|
174
179
|
batchAction = 'replace_all';
|
|
175
180
|
cpSync(srcPath, destPath);
|
|
176
|
-
copied.push(
|
|
181
|
+
copied.push(skillName);
|
|
177
182
|
}
|
|
178
183
|
else if (action === 'skip_all') {
|
|
179
184
|
batchAction = 'skip_all';
|
|
180
|
-
skipped.push(
|
|
185
|
+
skipped.push(skillName);
|
|
181
186
|
}
|
|
182
187
|
else if (action === 'replace') {
|
|
183
188
|
cpSync(srcPath, destPath);
|
|
184
|
-
copied.push(
|
|
189
|
+
copied.push(skillName);
|
|
185
190
|
}
|
|
186
191
|
else {
|
|
187
|
-
skipped.push(
|
|
192
|
+
skipped.push(skillName);
|
|
188
193
|
}
|
|
189
194
|
}
|
|
190
195
|
else {
|
|
196
|
+
ensureDir(destSkillDir);
|
|
191
197
|
cpSync(srcPath, destPath);
|
|
192
|
-
copied.push(
|
|
198
|
+
copied.push(skillName);
|
|
193
199
|
}
|
|
194
200
|
}
|
|
195
201
|
return { copied, skipped };
|
package/package.json
CHANGED
package/payload/AGENTS.md
CHANGED
|
@@ -0,0 +1,206 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: skill-improve
|
|
3
|
+
description: >
|
|
4
|
+
Research-driven skill improvement. Analyzes an existing skill for weaknesses, then
|
|
5
|
+
searches the internet extensively for popular similar skills, prompts, and techniques
|
|
6
|
+
from the community (GitHub repos, awesome-lists, prompt engineering resources, official
|
|
7
|
+
Anthropic docs) to find ideas worth incorporating. Produces a prioritized improvement
|
|
8
|
+
report and implements approved changes. Use this skill whenever the user says things like
|
|
9
|
+
"improve this skill", "make this skill better", "enhance skill", "optimize skill",
|
|
10
|
+
"research better approaches for this skill", "find ways to improve this skill",
|
|
11
|
+
"what are other people doing for skills like this", "benchmark this skill against others",
|
|
12
|
+
or "level up this skill". Also trigger when the user wants to compare their skill against
|
|
13
|
+
community alternatives or find inspiration from popular skills in the ecosystem.
|
|
14
|
+
---
|
|
15
|
+
|
|
16
|
+
# Skill Improver
|
|
17
|
+
|
|
18
|
+
Improve an existing skill by combining internal analysis with extensive internet research.
|
|
19
|
+
The goal is to find concrete, actionable improvements — not generic advice — by studying
|
|
20
|
+
what the best similar skills in the ecosystem actually do.
|
|
21
|
+
|
|
22
|
+
## How it works
|
|
23
|
+
|
|
24
|
+
1. **Understand the skill** — Read it deeply, understand its purpose and mechanics
|
|
25
|
+
2. **Analyze internally** — Find structural and content issues
|
|
26
|
+
3. **Research externally** — Search the internet for similar popular skills and techniques
|
|
27
|
+
4. **Compare and synthesize** — Extract the best ideas from what you found
|
|
28
|
+
5. **Report** — Present prioritized recommendations to the user
|
|
29
|
+
6. **Implement** — Apply the approved improvements
|
|
30
|
+
|
|
31
|
+
## Step 1: Understand the target skill
|
|
32
|
+
|
|
33
|
+
Ask the user which skill to improve if not already specified. Then read the skill file
|
|
34
|
+
thoroughly. Before doing anything else, write a brief summary of:
|
|
35
|
+
|
|
36
|
+
- What the skill does
|
|
37
|
+
- How it's structured (sections, flow, resources)
|
|
38
|
+
- What triggers it (the description field)
|
|
39
|
+
- What tools/techniques it uses
|
|
40
|
+
- Any bundled resources (scripts, references, assets)
|
|
41
|
+
|
|
42
|
+
Share this summary with the user to confirm you understand the skill correctly. Misunderstanding
|
|
43
|
+
the skill's purpose will lead to bad recommendations.
|
|
44
|
+
|
|
45
|
+
## Step 2: Internal analysis
|
|
46
|
+
|
|
47
|
+
Examine the skill against these quality dimensions. Be specific — cite line numbers or
|
|
48
|
+
quote passages when noting issues.
|
|
49
|
+
|
|
50
|
+
### Structure and clarity
|
|
51
|
+
- Is the flow logical? Would someone following it step-by-step get good results?
|
|
52
|
+
- Are instructions clear enough that a model can follow them without guessing?
|
|
53
|
+
- Is there unnecessary repetition or bloat that could be cut?
|
|
54
|
+
- Are there gaps where the model would need to improvise?
|
|
55
|
+
|
|
56
|
+
### Description quality
|
|
57
|
+
- Does the description cover the right trigger phrases?
|
|
58
|
+
- Is it specific enough to avoid false triggers?
|
|
59
|
+
- Is it "pushy" enough to trigger when it should? (Skills tend to under-trigger)
|
|
60
|
+
|
|
61
|
+
### Instruction effectiveness
|
|
62
|
+
- Are there heavy-handed MUST/NEVER/ALWAYS directives that could be replaced with
|
|
63
|
+
explanations of *why* something matters?
|
|
64
|
+
- Are examples included where they'd help?
|
|
65
|
+
- Is the skill explaining the reasoning behind its instructions, or just giving orders?
|
|
66
|
+
|
|
67
|
+
### Progressive disclosure
|
|
68
|
+
- Is the SKILL.md under 500 lines? If longer, should content move to reference files?
|
|
69
|
+
- Are bundled resources referenced clearly with guidance on when to read them?
|
|
70
|
+
|
|
71
|
+
### Robustness
|
|
72
|
+
- Does it handle edge cases the user might encounter?
|
|
73
|
+
- Are there implicit assumptions that could fail in different contexts?
|
|
74
|
+
|
|
75
|
+
Produce a concise internal analysis report organized by dimension.
|
|
76
|
+
|
|
77
|
+
## Step 3: External research
|
|
78
|
+
|
|
79
|
+
This is the core differentiator of this skill. Search the internet extensively to find
|
|
80
|
+
popular, well-regarded skills and techniques that address similar problems. The goal is
|
|
81
|
+
to discover ideas, patterns, and approaches the current skill is missing.
|
|
82
|
+
|
|
83
|
+
### Where to search
|
|
84
|
+
|
|
85
|
+
Run multiple searches across these categories. Use subagents for parallel research when
|
|
86
|
+
available — this step benefits enormously from breadth.
|
|
87
|
+
|
|
88
|
+
**Similar skills in the ecosystem:**
|
|
89
|
+
- Search GitHub for skills with similar names or purposes
|
|
90
|
+
(e.g., `claude skill [topic]`, `awesome claude skills [topic]`)
|
|
91
|
+
- Check curated lists: `awesome-claude-skills`, `awesome-claude-code-toolkit`
|
|
92
|
+
- Look for skills on the Claude Skills directory if available
|
|
93
|
+
- Search for Cursor rules, Windsurf rules, or other AI coding tool configurations
|
|
94
|
+
that address the same domain
|
|
95
|
+
|
|
96
|
+
**Prompt engineering techniques:**
|
|
97
|
+
- Search for prompt engineering patterns relevant to the skill's domain
|
|
98
|
+
- Look for academic or blog posts on techniques like chain-of-thought, few-shot
|
|
99
|
+
examples, self-correction, or structured output formats
|
|
100
|
+
- Check the Anthropic documentation for relevant best practices
|
|
101
|
+
|
|
102
|
+
**Domain-specific best practices:**
|
|
103
|
+
- If the skill targets a specific domain (e.g., testing, code review, documentation),
|
|
104
|
+
search for industry best practices in that domain
|
|
105
|
+
- Look for popular tools, linters, or frameworks that encode domain expertise
|
|
106
|
+
|
|
107
|
+
**Community discussions:**
|
|
108
|
+
- Search for discussions about similar workflows on GitHub issues, forums, or blogs
|
|
109
|
+
- Look for "how I use Claude for X" posts that might reveal techniques
|
|
110
|
+
|
|
111
|
+
### What to extract from each source
|
|
112
|
+
|
|
113
|
+
For each relevant source you find, note:
|
|
114
|
+
|
|
115
|
+
- **Source**: URL and brief description
|
|
116
|
+
- **Key technique or pattern**: What does it do that's interesting?
|
|
117
|
+
- **Relevance**: How does this relate to the skill being improved?
|
|
118
|
+
- **Adoption signal**: Stars, installs, mentions — is this actually popular/proven?
|
|
119
|
+
- **Adaptability**: How easily could this be incorporated into the current skill?
|
|
120
|
+
|
|
121
|
+
### Research depth
|
|
122
|
+
|
|
123
|
+
Aim for at least 5-8 distinct sources across the categories above. Quality matters more
|
|
124
|
+
than quantity — a single well-designed skill that solves the same problem is worth more
|
|
125
|
+
than ten tangentially related blog posts.
|
|
126
|
+
|
|
127
|
+
If the skill's domain is niche and you can't find direct comparables, broaden the search
|
|
128
|
+
to adjacent domains or look for transferable patterns from popular skills in other domains.
|
|
129
|
+
|
|
130
|
+
## Step 4: Compare and synthesize
|
|
131
|
+
|
|
132
|
+
Now bring together your internal analysis and external research. For each finding from
|
|
133
|
+
the research, ask:
|
|
134
|
+
|
|
135
|
+
- Does the current skill already do this? If so, does it do it as well?
|
|
136
|
+
- Would incorporating this idea make the skill meaningfully better?
|
|
137
|
+
- Is this idea compatible with the skill's existing approach, or would it require
|
|
138
|
+
a significant restructure?
|
|
139
|
+
- Is this a proven pattern (used in multiple popular skills) or a one-off experiment?
|
|
140
|
+
|
|
141
|
+
Organize your findings into a synthesis that highlights the gaps between the current
|
|
142
|
+
skill and the best practices you found.
|
|
143
|
+
|
|
144
|
+
## Step 5: Present the improvement report
|
|
145
|
+
|
|
146
|
+
Present a structured report to the user with three sections:
|
|
147
|
+
|
|
148
|
+
### High-priority improvements
|
|
149
|
+
Changes that would significantly improve the skill's effectiveness. These are things
|
|
150
|
+
where popular, proven alternatives exist and the current skill is clearly weaker.
|
|
151
|
+
|
|
152
|
+
### Medium-priority improvements
|
|
153
|
+
Useful enhancements that would make the skill more robust or cover more cases, even though
|
|
154
|
+
the current approach isn't broken.
|
|
155
|
+
|
|
156
|
+
### Low-priority / exploratory ideas
|
|
157
|
+
Interesting techniques spotted in the research that might be worth trying but aren't
|
|
158
|
+
clearly better than the current approach. Include these so the user can decide.
|
|
159
|
+
|
|
160
|
+
For each recommendation:
|
|
161
|
+
- **What to change**: Specific, actionable description
|
|
162
|
+
- **Why**: What problem it solves or what improvement it brings
|
|
163
|
+
- **Source**: Where you found this idea (with link if from external research)
|
|
164
|
+
- **Effort**: How much work it would take (small tweak vs. significant rewrite)
|
|
165
|
+
|
|
166
|
+
After presenting the report, ask the user which improvements they'd like to implement.
|
|
167
|
+
|
|
168
|
+
## Step 6: Implement approved improvements
|
|
169
|
+
|
|
170
|
+
Apply the changes the user approved. When implementing:
|
|
171
|
+
|
|
172
|
+
- Make changes incrementally — don't rewrite the entire skill at once
|
|
173
|
+
- Preserve the skill's voice and style unless the user wants a different tone
|
|
174
|
+
- If a change requires restructuring, explain the new structure before doing it
|
|
175
|
+
- After implementing, show a summary of what changed
|
|
176
|
+
|
|
177
|
+
If the user wants to test the improvements, suggest using the skill-creator's evaluation
|
|
178
|
+
workflow to compare before and after.
|
|
179
|
+
|
|
180
|
+
## Tips for effective research
|
|
181
|
+
|
|
182
|
+
**Cast a wide net, then narrow.** Start with broad searches, scan the results for the
|
|
183
|
+
most relevant hits, then dig deep into those. A search for "claude skill code review"
|
|
184
|
+
might surface a skill that has nothing to do with code review but uses a brilliant
|
|
185
|
+
self-correction pattern you can steal.
|
|
186
|
+
|
|
187
|
+
**Look at structure, not just content.** Sometimes the most valuable thing about a
|
|
188
|
+
popular skill isn't what it says but how it's organized — how it uses progressive
|
|
189
|
+
disclosure, how it sequences instructions, how it handles edge cases.
|
|
190
|
+
|
|
191
|
+
**Pay attention to what's missing.** If every popular skill in a domain does X and the
|
|
192
|
+
target skill doesn't, that's a strong signal. If no popular skill does Y and the target
|
|
193
|
+
skill does, ask whether Y is genuinely innovative or just unnecessary.
|
|
194
|
+
|
|
195
|
+
**Check the stars and installs.** A skill with 100K+ installs that does something
|
|
196
|
+
differently is stronger evidence than a blog post with 3 likes. Popularity isn't
|
|
197
|
+
everything, but it's a useful signal for proven approaches.
|
|
198
|
+
|
|
199
|
+
## Notes
|
|
200
|
+
|
|
201
|
+
- This skill works best when you have web search capabilities. Without them, the
|
|
202
|
+
external research step is limited to what's already in your training data.
|
|
203
|
+
- For skills that are part of a larger system (like flight-rules commands), consider
|
|
204
|
+
how improvements might affect integration with other components.
|
|
205
|
+
- If the skill-creator skill is available, consider using its evaluation workflow
|
|
206
|
+
after implementing improvements to measure the impact quantitatively.
|