@flitzrrr/agent-skills 1.0.3 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.cursorrules +2 -2
- package/.github/copilot-instructions.md +59 -0
- package/.lovable +1 -1
- package/AGENTS.md +2 -2
- package/CHEATSHEET.md +84 -86
- package/CLAUDE.md +2 -2
- package/LICENSE +27 -0
- package/README.md +191 -99
- package/bin/build-catalog.js +208 -0
- package/bin/cli.js +7 -3
- package/bin/rebuild-symlinks.js +161 -0
- package/bin/sync-docs.js +147 -0
- package/bin/sync-skills.sh +17 -0
- package/bin/test-cli.js +115 -0
- package/bin/update-wiki.js +102 -0
- package/package.json +9 -2
- package/skills/dispatch-parallel-agents/skill.md +95 -0
- package/skills/execute-work-package/SKILL.md +300 -0
- package/skills/execute-work-package/scripts/start-l4l-oci.sh +75 -0
- package/skills/execute-work-package/tpl-execution-blueprint.md +39 -0
- package/skills/execute-work-package/tpl-execution-digest.md +24 -0
- package/skills/execute-work-package/tpl-implementer-execute-prompt.md +57 -0
- package/skills/execute-work-package/tpl-implementer-preflight-prompt.md +66 -0
- package/skills/product-description-seo/CROSS-SELL.md +31 -0
- package/skills/product-description-seo/KEYWORDS.md +35 -0
- package/skills/product-description-seo/SKILL.md +361 -0
- package/skills/product-description-seo/scripts/analyze_catalog.py +136 -0
- package/skills/product-description-seo/scripts/check_quality.py +204 -0
- package/skills/product-description-seo/scripts/extract_category.py +88 -0
- package/skills/product-description-seo/scripts/track_progress.py +140 -0
- package/skills/product-description-seo/scripts/update_catalog.py +80 -0
- package/skills/product-description-seo/scripts/validate_json.py +87 -0
- package/skills/systematic-debugging/skill.md +87 -0
- package/skills/tob-gh-cli/SKILL.md +71 -0
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Rebuilds all skill symlinks from vendor/ submodules.
|
|
5
|
+
*
|
|
6
|
+
* Each SKILL.md found in vendor/<source>/ gets a symlink in skills/ with the
|
|
7
|
+
* naming convention: <prefix>-<skill_name>
|
|
8
|
+
*
|
|
9
|
+
* Prefix mapping is defined explicitly for known sources, with a fallback
|
|
10
|
+
* to the first segment of the vendor directory name.
|
|
11
|
+
*
|
|
12
|
+
* Run: node bin/rebuild-symlinks.js
|
|
13
|
+
*/
|
|
14
|
+
|
|
15
|
+
const fs = require("fs");
|
|
16
|
+
const path = require("path");
|
|
17
|
+
|
|
18
|
+
const ROOT = path.resolve(__dirname, "..");
|
|
19
|
+
const SKILLS_DIR = path.join(ROOT, "skills");
|
|
20
|
+
const VENDOR_DIR = path.join(ROOT, "vendor");
|
|
21
|
+
|
|
22
|
+
// Explicit prefix mapping: vendor directory name → skill prefix
|
|
23
|
+
// Explicit prefix mapping: vendor directory name → skill prefix
const PREFIX_MAP = {
  "trailofbits-skills": "tob",
  "anthropic-skills": "anthropic",
  "anthropic-finance": "finance",
  "vercel-agent-skills": "vercel",
  "getsentry-skills": "sentry",
  "google-stitch-skills": "google",
  "cloudflare-skills": "cloudflare",
  "stripe-skills": "stripe",
  "expo-skills": "expo",
  "hashicorp-skills": "hashicorp",
  "supabase-skills": "supabase",
  "callstack-skills": "callstack",
  "scientific-skills": "scientific",
  "opencode-processing-skills": "opencode",
  "agentic-seo-skill": "seo",
  "marketingskills": null, // no prefix, use skill name directly
  "addyosmani-agent-skills": "addyosmani",
  "itsmostafa-aws-agent-skills": "aws",
  "MoizIbnYousaf-Ai-Agent-Skills": "MoizIbnYousaf",
  "JackyST0-awesome-agent-skills": "JackyST0",
};

/**
 * Resolves the skill-name prefix for a vendor directory.
 * Known vendors come from PREFIX_MAP (a null entry means "no prefix");
 * anything else falls back to the portion before the first hyphen.
 */
function getPrefix(vendorName) {
  if (!(vendorName in PREFIX_MAP)) {
    // Unknown vendor: derive a prefix from the leading name segment.
    const [firstSegment] = vendorName.split("-");
    return firstSegment;
  }
  return PREFIX_MAP[vendorName];
}
|
|
51
|
+
|
|
52
|
+
/**
 * Recursively collects directories that contain a SKILL.md (case-insensitive)
 * into `results`, descending at most `maxDepth` levels below `dir`.
 * `.git` and `node_modules` entries are never descended into; unreadable
 * directories are silently skipped.
 */
function findSkillMds(dir, maxDepth, results) {
  if (maxDepth <= 0) return;

  let dirents;
  try {
    dirents = fs.readdirSync(dir, { withFileTypes: true });
  } catch {
    // Unreadable / nonexistent directory — treat as empty.
    return;
  }

  for (const dirent of dirents) {
    const { name } = dirent;
    if (name === ".git" || name === "node_modules") continue;

    const childPath = path.join(dir, name);
    const isSkillFile = dirent.isFile() && name.toLowerCase() === "skill.md";
    if (isSkillFile) {
      // Record the directory that holds the SKILL.md, not the file itself.
      results.push(path.dirname(childPath));
    } else if (dirent.isDirectory()) {
      findSkillMds(childPath, maxDepth - 1, results);
    }
  }
}
|
|
70
|
+
|
|
71
|
+
/**
 * Rebuilds the skills/ directory from vendor/ submodules.
 *
 * Steps:
 *   1. Record manually-created skill directories (real dirs) so they are
 *      never overwritten.
 *   2. Remove every existing symlink in skills/ (creating skills/ if absent).
 *   3. For each vendor directory, link every discovered SKILL.md directory
 *      as skills/<prefix>-<name> (or plain <name> when the prefix is null).
 *
 * Prints a summary of created / skipped links and the final entry count.
 */
function main() {
  // Preserve manually-created skill directories (non-symlinks).
  const manualSkills = new Set();
  if (fs.existsSync(SKILLS_DIR)) {
    for (const entry of fs.readdirSync(SKILLS_DIR)) {
      const stat = fs.lstatSync(path.join(SKILLS_DIR, entry));
      if (stat.isDirectory() && !stat.isSymbolicLink()) {
        manualSkills.add(entry);
      }
    }
  }

  // Remove all existing symlinks so the set is rebuilt from scratch.
  if (fs.existsSync(SKILLS_DIR)) {
    for (const entry of fs.readdirSync(SKILLS_DIR)) {
      const entryPath = path.join(SKILLS_DIR, entry);
      if (fs.lstatSync(entryPath).isSymbolicLink()) {
        fs.unlinkSync(entryPath);
      }
    }
  } else {
    fs.mkdirSync(SKILLS_DIR);
  }

  let created = 0;
  let skipped = 0;
  const vendors = fs.readdirSync(VENDOR_DIR).filter((d) => {
    const p = path.join(VENDOR_DIR, d);
    return !d.startsWith(".") && fs.statSync(p).isDirectory();
  });

  for (const vendor of vendors) {
    const vendorPath = path.join(VENDOR_DIR, vendor);
    const prefix = getPrefix(vendor);
    const skillDirs = [];
    findSkillMds(vendorPath, 5, skillDirs);

    // Special case: seo skill — single top-level link to the vendor root.
    if (vendor === "agentic-seo-skill") {
      const linkName = "seo";
      const target = `../vendor/${vendor}`;
      if (!manualSkills.has(linkName)) {
        fs.symlinkSync(target, path.join(SKILLS_DIR, linkName));
        created++;
      }
      continue;
    }

    for (const skillDir of skillDirs) {
      const skillName = path.basename(skillDir);
      const linkName = prefix ? `${prefix}-${skillName}` : skillName;

      // Don't overwrite manual skills.
      if (manualSkills.has(linkName)) {
        skipped++;
        continue;
      }

      // Symlink target is relative so the repo can be moved/cloned freely.
      const target = `../vendor/${path.relative(VENDOR_DIR, skillDir)}`;
      const linkPath = path.join(SKILLS_DIR, linkName);

      // Name-collision check: lstatSync succeeds if ANY entry exists at
      // linkPath — file, dir, live symlink, or broken symlink — and throws
      // only when the path is free (which is exactly when we may link).
      try {
        fs.lstatSync(linkPath);
        skipped++;
        continue;
      } catch {
        // Path is free — proceed to create the link.
      }

      try {
        fs.symlinkSync(target, linkPath);
        created++;
      } catch (err) {
        // Name collision from a different vendor (raced or case-insensitive FS).
        skipped++;
      }
    }
  }

  console.log(`Rebuilt symlinks: ${created} created, ${skipped} skipped (manual or collision)`);
  console.log(`Manual skill dirs preserved: ${[...manualSkills].join(", ") || "(none)"}`);
  console.log(`Total skills/ entries: ${fs.readdirSync(SKILLS_DIR).length}`);
}

main();
|
package/bin/sync-docs.js
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
#!/usr/bin/env node

/**
 * Regenerates README.md badges/counts, sources table, repo structure,
 * AGENTS.md, CLAUDE.md, and .cursorrules based on current skills/ and vendor/ state.
 *
 * Run: node bin/sync-docs.js
 */

const fs = require("fs");
const path = require("path");
const { execSync } = require("child_process"); // kept for parity with other bin/ scripts

const ROOT = path.resolve(__dirname, "..");
const SKILLS_DIR = path.join(ROOT, "skills");
const VENDOR_DIR = path.join(ROOT, "vendor");

// Count skills (non-hidden entries in skills/) and vendor sources (non-hidden dirs).
const skills = fs.readdirSync(SKILLS_DIR).filter((f) => !f.startsWith("."));
const sources = fs
  .readdirSync(VENDOR_DIR)
  .filter((f) => !f.startsWith(".") && fs.statSync(path.join(VENDOR_DIR, f)).isDirectory());

const skillCount = skills.length;
const sourceCount = sources.length;

console.log(`Found ${skillCount} skills from ${sourceCount} sources`);

/**
 * Applies [pattern, replacement] pairs to a file in order and writes it back.
 * Silently skips files that do not exist; logs `Updated <label>` on success.
 */
function updateFile(filePath, replacements, label) {
  if (!fs.existsSync(filePath)) return;
  let content = fs.readFileSync(filePath, "utf8");
  for (const [pattern, replacement] of replacements) {
    content = content.replace(pattern, replacement);
  }
  fs.writeFileSync(filePath, content, "utf8");
  console.log(`Updated ${label}`);
}

// Generic "N skills" / "N sources" refresh shared by several docs.
const countReplacements = [
  [/\d+ skills/g, `${skillCount} skills`],
  [/\d+ sources/g, `${sourceCount} sources`],
];

// README.md: badge counts, intro sentence, repo-structure counts.
updateFile(
  path.join(ROOT, "README.md"),
  [
    [/skills-\d+-blue/, `skills-${skillCount}-blue`],
    [/sources-\d+-green/, `sources-${sourceCount}-green`],
    [
      /\*\*\d+ agent skills\*\* from \*\*\d+ industry-leading sources\*\*/,
      `**${skillCount} agent skills** from **${sourceCount} industry-leading sources**`,
    ],
    [/skills\/\s+\d+ symlinks/, `skills/ ${skillCount} symlinks`],
    [/vendor\/\s+\d+ Git submodules/, `vendor/ ${sourceCount} Git submodules`],
  ],
  "README.md"
);

const agentsPath = path.join(ROOT, "AGENTS.md");
const claudePath = path.join(ROOT, "CLAUDE.md");

updateFile(agentsPath, countReplacements, "AGENTS.md");
updateFile(claudePath, countReplacements, "CLAUDE.md");
updateFile(path.join(ROOT, ".cursorrules"), countReplacements, ".cursorrules");
updateFile(
  path.join(ROOT, ".lovable"),
  [
    [/\d+ curated AI agent skills/, `${skillCount} curated AI agent skills`],
    ...countReplacements,
  ],
  ".lovable"
);
updateFile(
  path.join(ROOT, ".github", "copilot-instructions.md"),
  [[/\d+ curated AI agent skills/, `${skillCount} curated AI agent skills`]],
  ".github/copilot-instructions.md"
);
updateFile(path.join(ROOT, "CHEATSHEET.md"), countReplacements, "CHEATSHEET.md");

// Regenerate the skill list between <!-- SKILL_LIST --> markers in AGENTS.md / CLAUDE.md.
const skillList = skills.sort().map((s) => `- ${s}`).join("\n");
const marker = "<!-- SKILL_LIST -->";
const endMarker = "<!-- /SKILL_LIST -->";

for (const filePath of [agentsPath, claudePath]) {
  if (!fs.existsSync(filePath)) continue;
  let content = fs.readFileSync(filePath, "utf8");
  const startIdx = content.indexOf(marker);
  const endIdx = content.indexOf(endMarker);
  // Only rewrite when both markers are present; leave the file alone otherwise.
  if (startIdx !== -1 && endIdx !== -1) {
    content =
      content.substring(0, startIdx + marker.length) +
      "\n" +
      skillList +
      "\n" +
      content.substring(endIdx);
    fs.writeFileSync(filePath, content, "utf8");
    console.log(`Updated skill list in ${path.basename(filePath)}`);
  }
}

console.log("Done.");
|
|
147
|
+
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
#!/bin/bash
# Sync DDM (DasDigitaleMomentum) skills from agent-skills repo to agy
# Run after: git pull in agent-skills repo
SKILLS_DIR="/Users/Martin/.gemini/antigravity/skills"
SOURCE="/Users/Martin/git/agent-skills/vendor/opencode-processing-skills/skills"

# execute-work-package is excluded: local copy has transport extensions (Options A/B/C)
# that are not yet upstreamed to DasDigitaleMomentum/opencode-processing-skills.
DDM_SKILLS="archive-legacy-docs author-and-verify-implementation-plan create-plan generate-docs generate-handover resume-plan update-docs update-plan"

for skill in $DDM_SKILLS; do
  # Guard: never delete the installed copy unless a replacement actually exists
  # in the source tree (e.g. submodule not initialized or skill renamed).
  if [ ! -d "$SOURCE/$skill" ]; then
    echo "Skipped (missing in source): $skill" >&2
    continue
  fi
  # ${var:?} aborts if a variable is empty, so we can never 'rm -rf /'.
  rm -rf "${SKILLS_DIR:?}/${skill:?}"
  cp -R "$SOURCE/$skill" "$SKILLS_DIR/$skill"
  echo "Synced: $skill"
done

echo "Done. Total SKILL.md: $(find "$SKILLS_DIR" -maxdepth 2 -name SKILL.md | wc -l | tr -d ' ')"
|
package/bin/test-cli.js
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Tests for bin/cli.js
|
|
5
|
+
* Run: node bin/test-cli.js
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
const { execSync } = require("child_process");
|
|
9
|
+
const fs = require("fs");
|
|
10
|
+
const path = require("path");
|
|
11
|
+
const assert = require("assert");
|
|
12
|
+
|
|
13
|
+
const CLI = path.join(__dirname, "cli.js");
|
|
14
|
+
let passed = 0;
|
|
15
|
+
let failed = 0;
|
|
16
|
+
|
|
17
|
+
/**
 * Runs a single named test case: a PASS line and passed++ when `fn` returns
 * normally, a FAIL line with the error message and failed++ when it throws.
 */
function test(name, fn) {
  let ok = true;
  let failure;
  try {
    fn();
  } catch (e) {
    ok = false;
    failure = e;
  }
  if (ok) {
    console.log(` PASS ${name}`);
    passed++;
  } else {
    console.log(` FAIL ${name}: ${failure.message}`);
    failed++;
  }
}
|
|
27
|
+
|
|
28
|
+
/**
 * Invokes the CLI under test with the given argument string and returns its
 * stdout as a UTF-8 string. Throws (with stdout/stderr attached) on non-zero
 * exit or after a 10-second timeout.
 */
function run(args) {
  const command = `node "${CLI}" ${args}`;
  const options = { encoding: "utf8", timeout: 10000 };
  return execSync(command, options);
}
|
|
34
|
+
|
|
35
|
+
console.log("CLI Tests\n");
|
|
36
|
+
|
|
37
|
+
// Test: no args shows usage
|
|
38
|
+
test("no args shows usage", () => {
|
|
39
|
+
const out = run("");
|
|
40
|
+
assert(out.includes("Usage"), "Should show usage");
|
|
41
|
+
assert(out.includes("install"), "Should mention install command");
|
|
42
|
+
assert(out.includes("update"), "Should mention update command");
|
|
43
|
+
assert(out.includes("list"), "Should mention list command");
|
|
44
|
+
});
|
|
45
|
+
|
|
46
|
+
// Test: unknown platform errors
|
|
47
|
+
test("install unknown platform exits with error", () => {
|
|
48
|
+
try {
|
|
49
|
+
run("install nonexistent-platform");
|
|
50
|
+
assert.fail("Should have thrown");
|
|
51
|
+
} catch (err) {
|
|
52
|
+
const output = (err.stdout || "") + (err.stderr || "");
|
|
53
|
+
assert(output.includes("Unknown platform"), "Should say unknown platform");
|
|
54
|
+
}
|
|
55
|
+
});
|
|
56
|
+
|
|
57
|
+
// Test: list shows skills
|
|
58
|
+
test("list command works", () => {
|
|
59
|
+
try {
|
|
60
|
+
const out = run("list");
|
|
61
|
+
assert(out.includes("Available Skills"), "Should show available skills header");
|
|
62
|
+
} catch {
|
|
63
|
+
// Skills may not be installed in test env, that's ok
|
|
64
|
+
}
|
|
65
|
+
});
|
|
66
|
+
|
|
67
|
+
// Test: cli.js is valid Node
|
|
68
|
+
test("cli.js parses without syntax errors", () => {
|
|
69
|
+
execSync(`node -c "${CLI}"`, { encoding: "utf8" });
|
|
70
|
+
});
|
|
71
|
+
|
|
72
|
+
// Test: vscode platform is registered
|
|
73
|
+
test("vscode platform is listed", () => {
|
|
74
|
+
const out = run("");
|
|
75
|
+
assert(out.includes("vscode"), "Should list vscode as available platform");
|
|
76
|
+
});
|
|
77
|
+
|
|
78
|
+
// Test: copilot-instructions.md exists
|
|
79
|
+
test("copilot-instructions.md exists", () => {
|
|
80
|
+
const copilotPath = path.join(__dirname, "..", ".github", "copilot-instructions.md");
|
|
81
|
+
assert(
|
|
82
|
+
fs.existsSync(copilotPath),
|
|
83
|
+
"Should have .github/copilot-instructions.md"
|
|
84
|
+
);
|
|
85
|
+
});
|
|
86
|
+
|
|
87
|
+
// Test: copilot-instructions.md has required content
|
|
88
|
+
test("copilot-instructions.md has skill references", () => {
|
|
89
|
+
const copilotPath = path.join(__dirname, "..", ".github", "copilot-instructions.md");
|
|
90
|
+
const content = fs.readFileSync(copilotPath, "utf8");
|
|
91
|
+
assert(content.includes("skills/"), "Should reference skills/ directory");
|
|
92
|
+
assert(content.includes("SKILL.md"), "Should mention SKILL.md");
|
|
93
|
+
assert(content.includes("CHEATSHEET.md"), "Should reference CHEATSHEET.md");
|
|
94
|
+
});
|
|
95
|
+
|
|
96
|
+
// Test: sync-docs.js is valid Node
|
|
97
|
+
test("sync-docs.js parses without syntax errors", () => {
|
|
98
|
+
const syncDocs = path.join(__dirname, "sync-docs.js");
|
|
99
|
+
execSync(`node -c "${syncDocs}"`, { encoding: "utf8" });
|
|
100
|
+
});
|
|
101
|
+
|
|
102
|
+
// Test: sync-docs.js updates counts correctly
|
|
103
|
+
test("sync-docs.js produces correct counts", () => {
|
|
104
|
+
const out = execSync(`node "${path.join(__dirname, "sync-docs.js")}"`, {
|
|
105
|
+
encoding: "utf8",
|
|
106
|
+
cwd: path.join(__dirname, ".."),
|
|
107
|
+
});
|
|
108
|
+
assert(out.includes("Found"), "Should report found skills");
|
|
109
|
+
assert(out.includes("Updated README.md"), "Should update README");
|
|
110
|
+
assert(out.includes("copilot-instructions"), "Should update copilot-instructions.md");
|
|
111
|
+
assert(out.includes("Done"), "Should finish successfully");
|
|
112
|
+
});
|
|
113
|
+
|
|
114
|
+
console.log(`\n${passed} passed, ${failed} failed\n`);
|
|
115
|
+
process.exit(failed > 0 ? 1 : 0);
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Update the GitHub Wiki with current catalog data.
|
|
5
|
+
* Clones the wiki repo, regenerates the Skills-Catalog page, and pushes.
|
|
6
|
+
*
|
|
7
|
+
* Run: node bin/update-wiki.js
|
|
8
|
+
* Requires: git, SSH access to the wiki repo
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
const fs = require("fs");
|
|
12
|
+
const path = require("path");
|
|
13
|
+
const { execSync } = require("child_process");
|
|
14
|
+
|
|
15
|
+
const ROOT = path.join(__dirname, "..");
|
|
16
|
+
const CATALOG = path.join(ROOT, "docs", "catalog.json");
|
|
17
|
+
const WIKI_DIR = "/tmp/agent-skills-wiki";
|
|
18
|
+
const WIKI_REPO = "git@github.com:flitzrrr/agent-skills.wiki.git";
|
|
19
|
+
|
|
20
|
+
/**
 * Runs a shell command synchronously and returns its trimmed stdout.
 * Extra execSync options (e.g. { cwd }) may be supplied via `opts`.
 */
function run(cmd, opts) {
  const output = execSync(cmd, { encoding: "utf8", ...opts });
  return output.trim();
}
|
|
23
|
+
|
|
24
|
+
/**
 * Groups array items into an object keyed by each item's `key` property.
 * Insertion order within each bucket follows the input order.
 */
function groupBy(arr, key) {
  const grouped = {};
  for (const element of arr) {
    const groupKey = element[key];
    if (!grouped[groupKey]) {
      grouped[groupKey] = [];
    }
    grouped[groupKey].push(element);
  }
  return grouped;
}
|
|
32
|
+
|
|
33
|
+
/**
 * Regenerates the wiki's Skills-Catalog page from docs/catalog.json,
 * refreshes the skill count on Home.md, and pushes the result to the
 * wiki repository. Exits non-zero when the catalog is missing or any
 * git operation fails; exits quietly when the wiki is already current.
 */
function main() {
  if (!fs.existsSync(CATALOG)) {
    console.error("catalog.json not found. Run build-catalog.js first.");
    process.exit(1);
  }

  const catalog = JSON.parse(fs.readFileSync(CATALOG, "utf8"));
  const grouped = groupBy(catalog.skills, "source");
  // Largest sources first.
  const sourceEntries = Object.entries(grouped).sort((a, b) => b[1].length - a[1].length);

  // "2026-01-15T…" → "2026-01-15", reused for the page header and commit message.
  const dateStamp = catalog.generated.split("T")[0];

  // Fresh clone each run — simpler and safer than reconciling an old checkout.
  try {
    // fs.rmSync replaces the previous `rm -rf` shell-out: no subprocess, portable.
    fs.rmSync(WIKI_DIR, { recursive: true, force: true });
    run(`git clone ${WIKI_REPO} ${WIKI_DIR}`);
  } catch (e) {
    console.error("Failed to clone wiki repo:", e.message);
    process.exit(1);
  }

  // Generate Skills-Catalog.md
  let md = `# Skills Catalog\n\n`;
  md += `> Last updated: ${dateStamp} | **${catalog.total} skills** across **${sourceEntries.length} sources**\n\n`;
  md += `The full searchable catalog with copy-to-clipboard is on the [GitHub Pages site](https://flitzrrr.github.io/agent-skills/#catalog).\n\n`;
  md += `## Skills by Source\n\n`;

  for (const [source, skills] of sourceEntries) {
    md += `### ${source} (${skills.length} skills)\n\n`;
    md += `| Skill | Description |\n`;
    md += `|-------|-------------|\n`;
    for (const s of skills) {
      const desc = s.description || "--";
      md += `| \`${s.name}\` | ${desc} |\n`;
    }
    md += `\n`;
  }

  fs.writeFileSync(path.join(WIKI_DIR, "Skills-Catalog.md"), md);

  // Update Home page stats (any "NN+ skills" style mention).
  const homePath = path.join(WIKI_DIR, "Home.md");
  if (fs.existsSync(homePath)) {
    let home = fs.readFileSync(homePath, "utf8");
    home = home.replace(/\b\d{2,4}\+?\s+skills\b/gi, `${catalog.total}+ skills`);
    fs.writeFileSync(homePath, home);
  }

  // Commit and push — but only when the regeneration actually changed something.
  try {
    run("git add -A", { cwd: WIKI_DIR });
    const diff = run("git diff --cached --stat", { cwd: WIKI_DIR });
    if (!diff) {
      console.log("Wiki is already up to date.");
      return;
    }

    run(`git config user.name "github-actions[bot]"`, { cwd: WIKI_DIR });
    run(`git config user.email "github-actions[bot]@users.noreply.github.com"`, { cwd: WIKI_DIR });
    run(`git commit -m "docs: update skill catalog [${dateStamp}]"`, { cwd: WIKI_DIR });
    run("git push origin master", { cwd: WIKI_DIR });
    console.log("Wiki updated and pushed.");
  } catch (e) {
    console.error("Failed to push wiki:", e.message);
    process.exit(1);
  }
}

main();
|
package/package.json
CHANGED
|
@@ -1,7 +1,10 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@flitzrrr/agent-skills",
|
|
3
|
-
"version": "1.0
|
|
4
|
-
"description": "
|
|
3
|
+
"version": "1.2.0",
|
|
4
|
+
"description": "480 AI agent skills from 20 sources — multiplatform-ready for VS Code (GitHub Copilot), Claude Code, Codex, Cursor, Antigravity, OpenCode, Lovable, and more.",
|
|
5
|
+
"scripts": {
|
|
6
|
+
"test": "node bin/test-cli.js"
|
|
7
|
+
},
|
|
5
8
|
"bin": {
|
|
6
9
|
"agent-skills": "bin/cli.js"
|
|
7
10
|
},
|
|
@@ -9,6 +12,9 @@
|
|
|
9
12
|
"ai",
|
|
10
13
|
"agent",
|
|
11
14
|
"skills",
|
|
15
|
+
"vscode",
|
|
16
|
+
"github-copilot",
|
|
17
|
+
"copilot",
|
|
12
18
|
"claude",
|
|
13
19
|
"codex",
|
|
14
20
|
"cursor",
|
|
@@ -32,6 +38,7 @@
|
|
|
32
38
|
"CHEATSHEET.md",
|
|
33
39
|
".cursorrules",
|
|
34
40
|
".lovable",
|
|
41
|
+
".github/copilot-instructions.md",
|
|
35
42
|
"README.md"
|
|
36
43
|
]
|
|
37
44
|
}
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: dispatch-parallel-agents
|
|
3
|
+
description: Dispatch independent tasks to parallel subagents when 2+ problems can be solved without shared state or sequential dependencies.
|
|
4
|
+
license: MIT
|
|
5
|
+
compatibility:
|
|
6
|
+
opencode: ">=0.1"
|
|
7
|
+
metadata:
|
|
8
|
+
category: execution
|
|
9
|
+
phase: implementation
|
|
10
|
+
---
|
|
11
|
+
|
|
12
|
+
# Skill: Dispatch Parallel Agents
|
|
13
|
+
|
|
14
|
+
This skill parallelizes independent work across multiple subagents. Each agent gets a focused scope, isolated context, and a clear deliverable.
|
|
15
|
+
|
|
16
|
+
---
|
|
17
|
+
|
|
18
|
+
## When to Use
|
|
19
|
+
|
|
20
|
+
Use when:
|
|
21
|
+
|
|
22
|
+
- 2+ independent tasks exist that do not share state
|
|
23
|
+
- Each task can be understood and completed without context from the others
|
|
24
|
+
- Tasks do not edit the same files
|
|
25
|
+
|
|
26
|
+
Do **not** use when:
|
|
27
|
+
|
|
28
|
+
- Failures are related (fixing one might fix others -- investigate together first)
|
|
29
|
+
- Tasks require understanding the full system state
|
|
30
|
+
- Agents would interfere with each other (editing the same files, using the same resources)
|
|
31
|
+
- You are in exploratory mode and do not yet know what is broken
|
|
32
|
+
|
|
33
|
+
---
|
|
34
|
+
|
|
35
|
+
## Execution Model
|
|
36
|
+
|
|
37
|
+
### Roles
|
|
38
|
+
|
|
39
|
+
- **Primary (coordinator)**
|
|
40
|
+
- Identifies independent problem domains
|
|
41
|
+
- Crafts focused prompts for each agent
|
|
42
|
+
- Reviews results and integrates changes
|
|
43
|
+
- Resolves conflicts if agents touched overlapping code
|
|
44
|
+
|
|
45
|
+
- **Subagents (workers)**
|
|
46
|
+
- Each receives a scoped task with all necessary context
|
|
47
|
+
- Works independently without knowledge of other agents
|
|
48
|
+
- Returns a summary of findings and changes
|
|
49
|
+
|
|
50
|
+
---
|
|
51
|
+
|
|
52
|
+
## Workflow
|
|
53
|
+
|
|
54
|
+
### Step 1: Identify Independent Domains
|
|
55
|
+
|
|
56
|
+
Group tasks by what they affect. Each domain must be independent -- fixing one must not affect the others.
|
|
57
|
+
|
|
58
|
+
Example:
|
|
59
|
+
- Domain A: Fix stats page header styling
|
|
60
|
+
- Domain B: Update billing page layout
|
|
61
|
+
- Domain C: Add admin role detection
|
|
62
|
+
|
|
63
|
+
These are independent because they touch different files and different logic.
|
|
64
|
+
|
|
65
|
+
### Step 2: Craft Agent Prompts
|
|
66
|
+
|
|
67
|
+
Each agent prompt must be:
|
|
68
|
+
|
|
69
|
+
- **Focused** -- one clear problem domain, not "fix everything"
|
|
70
|
+
- **Self-contained** -- all context needed to understand the problem (file paths, error messages, expected behavior)
|
|
71
|
+
- **Constrained** -- explicit boundaries on what the agent should and should not change
|
|
72
|
+
- **Output-specific** -- what the agent should return (summary, file list, verification result)
|
|
73
|
+
|
|
74
|
+
### Step 3: Dispatch
|
|
75
|
+
|
|
76
|
+
Launch all agents in a single message with multiple Agent tool calls. This ensures true parallel execution.
|
|
77
|
+
|
|
78
|
+
### Step 4: Review and Integrate
|
|
79
|
+
|
|
80
|
+
When agents return:
|
|
81
|
+
|
|
82
|
+
1. Read each agent's summary
|
|
83
|
+
2. Check for conflicts (did agents edit the same code?)
|
|
84
|
+
3. Verify changes work together (run build, run tests)
|
|
85
|
+
4. Integrate all changes
|
|
86
|
+
|
|
87
|
+
---
|
|
88
|
+
|
|
89
|
+
## Rules
|
|
90
|
+
|
|
91
|
+
1. **Independence is mandatory**: If tasks share state or files, do not parallelize. Process sequentially instead.
|
|
92
|
+
2. **Focused prompts**: Each agent gets exactly what it needs. Do not dump full session context into agent prompts.
|
|
93
|
+
3. **Verify after integration**: Always run the build and relevant tests after merging all agent outputs. Agents cannot verify cross-agent interactions.
|
|
94
|
+
4. **Do not over-parallelize**: 2-3 agents is typical. More than 5 agents indicates the tasks should be structured differently (e.g., as a plan with phases).
|
|
95
|
+
5. **Conflicts require manual resolution**: If two agents edited the same file, the primary resolves the conflict -- do not blindly apply both changes.
|