EvoScientist 0.1.0rc1-py3-none-any.whl → 0.1.0rc2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- EvoScientist/EvoScientist.py +1 -1
- EvoScientist/cli.py +450 -178
- EvoScientist/middleware.py +5 -1
- EvoScientist/skills/accelerate/SKILL.md +332 -0
- EvoScientist/skills/accelerate/references/custom-plugins.md +453 -0
- EvoScientist/skills/accelerate/references/megatron-integration.md +489 -0
- EvoScientist/skills/accelerate/references/performance.md +525 -0
- EvoScientist/skills/bitsandbytes/SKILL.md +411 -0
- EvoScientist/skills/bitsandbytes/references/memory-optimization.md +521 -0
- EvoScientist/skills/bitsandbytes/references/qlora-training.md +521 -0
- EvoScientist/skills/bitsandbytes/references/quantization-formats.md +447 -0
- EvoScientist/skills/clip/SKILL.md +253 -0
- EvoScientist/skills/clip/references/applications.md +207 -0
- EvoScientist/skills/find-skills/SKILL.md +133 -0
- EvoScientist/skills/find-skills/scripts/install_skill.py +211 -0
- EvoScientist/skills/flash-attention/SKILL.md +367 -0
- EvoScientist/skills/flash-attention/references/benchmarks.md +215 -0
- EvoScientist/skills/flash-attention/references/transformers-integration.md +293 -0
- EvoScientist/skills/langgraph-docs/SKILL.md +36 -0
- EvoScientist/skills/llama-cpp/SKILL.md +258 -0
- EvoScientist/skills/llama-cpp/references/optimization.md +89 -0
- EvoScientist/skills/llama-cpp/references/quantization.md +213 -0
- EvoScientist/skills/llama-cpp/references/server.md +125 -0
- EvoScientist/skills/lm-evaluation-harness/SKILL.md +490 -0
- EvoScientist/skills/lm-evaluation-harness/references/api-evaluation.md +490 -0
- EvoScientist/skills/lm-evaluation-harness/references/benchmark-guide.md +488 -0
- EvoScientist/skills/lm-evaluation-harness/references/custom-tasks.md +602 -0
- EvoScientist/skills/lm-evaluation-harness/references/distributed-eval.md +519 -0
- EvoScientist/skills/ml-paper-writing/SKILL.md +937 -0
- EvoScientist/skills/ml-paper-writing/references/checklists.md +361 -0
- EvoScientist/skills/ml-paper-writing/references/citation-workflow.md +562 -0
- EvoScientist/skills/ml-paper-writing/references/reviewer-guidelines.md +367 -0
- EvoScientist/skills/ml-paper-writing/references/sources.md +159 -0
- EvoScientist/skills/ml-paper-writing/references/writing-guide.md +476 -0
- EvoScientist/skills/ml-paper-writing/templates/README.md +251 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/README.md +534 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026-unified-supp.tex +144 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026-unified-template.tex +952 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.bib +111 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.bst +1493 -0
- EvoScientist/skills/ml-paper-writing/templates/aaai2026/aaai2026.sty +315 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/README.md +50 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/acl.sty +312 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/acl_latex.tex +377 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/acl_lualatex.tex +101 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/acl_natbib.bst +1940 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/anthology.bib.txt +26 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/custom.bib +70 -0
- EvoScientist/skills/ml-paper-writing/templates/acl/formatting.md +326 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/README.md +3 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.bib +11 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.bst +1440 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.sty +218 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/colm2025_conference.tex +305 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/fancyhdr.sty +485 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/math_commands.tex +508 -0
- EvoScientist/skills/ml-paper-writing/templates/colm2025/natbib.sty +1246 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/fancyhdr.sty +485 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.bib +24 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.bst +1440 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.sty +246 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/iclr2026_conference.tex +414 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/math_commands.tex +508 -0
- EvoScientist/skills/ml-paper-writing/templates/iclr2026/natbib.sty +1246 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/algorithm.sty +79 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/algorithmic.sty +201 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.bib +75 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/example_paper.tex +662 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/fancyhdr.sty +864 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/icml2026.bst +1443 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/icml2026.sty +767 -0
- EvoScientist/skills/ml-paper-writing/templates/icml2026/icml_numpapers.pdf +0 -0
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/Makefile +36 -0
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/extra_pkgs.tex +53 -0
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/main.tex +38 -0
- EvoScientist/skills/ml-paper-writing/templates/neurips2025/neurips.sty +382 -0
- EvoScientist/skills/peft/SKILL.md +431 -0
- EvoScientist/skills/peft/references/advanced-usage.md +514 -0
- EvoScientist/skills/peft/references/troubleshooting.md +480 -0
- EvoScientist/skills/ray-data/SKILL.md +326 -0
- EvoScientist/skills/ray-data/references/integration.md +82 -0
- EvoScientist/skills/ray-data/references/transformations.md +83 -0
- EvoScientist/skills/skill-creator/LICENSE.txt +202 -0
- EvoScientist/skills/skill-creator/SKILL.md +356 -0
- EvoScientist/skills/skill-creator/references/output-patterns.md +82 -0
- EvoScientist/skills/skill-creator/references/workflows.md +28 -0
- EvoScientist/skills/skill-creator/scripts/init_skill.py +303 -0
- EvoScientist/skills/skill-creator/scripts/package_skill.py +110 -0
- EvoScientist/skills/skill-creator/scripts/quick_validate.py +95 -0
- EvoScientist/skills/tensorboard/SKILL.md +629 -0
- EvoScientist/skills/tensorboard/references/integrations.md +638 -0
- EvoScientist/skills/tensorboard/references/profiling.md +545 -0
- EvoScientist/skills/tensorboard/references/visualization.md +620 -0
- EvoScientist/skills/vllm/SKILL.md +364 -0
- EvoScientist/skills/vllm/references/optimization.md +226 -0
- EvoScientist/skills/vllm/references/quantization.md +284 -0
- EvoScientist/skills/vllm/references/server-deployment.md +255 -0
- EvoScientist/skills/vllm/references/troubleshooting.md +447 -0
- {evoscientist-0.1.0rc1.dist-info → evoscientist-0.1.0rc2.dist-info}/METADATA +26 -3
- evoscientist-0.1.0rc2.dist-info/RECORD +119 -0
- evoscientist-0.1.0rc1.dist-info/RECORD +0 -21
- {evoscientist-0.1.0rc1.dist-info → evoscientist-0.1.0rc2.dist-info}/WHEEL +0 -0
- {evoscientist-0.1.0rc1.dist-info → evoscientist-0.1.0rc2.dist-info}/entry_points.txt +0 -0
- {evoscientist-0.1.0rc1.dist-info → evoscientist-0.1.0rc2.dist-info}/licenses/LICENSE +0 -0
- {evoscientist-0.1.0rc1.dist-info → evoscientist-0.1.0rc2.dist-info}/top_level.txt +0 -0
EvoScientist/skills/clip/references/applications.md
@@ -0,0 +1,207 @@

# CLIP Applications Guide

Practical applications and use cases for CLIP.

## Zero-shot image classification

```python
import torch
import clip
from PIL import Image

model, preprocess = clip.load("ViT-B/32")

# Define categories
categories = [
    "a photo of a dog",
    "a photo of a cat",
    "a photo of a bird",
    "a photo of a car",
    "a photo of a person"
]

# Prepare image
image = preprocess(Image.open("photo.jpg")).unsqueeze(0)
text = clip.tokenize(categories)

# Classify
with torch.no_grad():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)

    logits_per_image, _ = model(image, text)
    probs = logits_per_image.softmax(dim=-1).cpu().numpy()

# Print results
for category, prob in zip(categories, probs[0]):
    print(f"{category}: {prob:.2%}")
```

## Semantic image search

```python
# Index images
image_database = []
image_paths = ["img1.jpg", "img2.jpg", "img3.jpg"]

for img_path in image_paths:
    image = preprocess(Image.open(img_path)).unsqueeze(0)
    with torch.no_grad():
        features = model.encode_image(image)
        features /= features.norm(dim=-1, keepdim=True)
    image_database.append((img_path, features))

# Search with text
query = "a sunset over mountains"
text_input = clip.tokenize([query])

with torch.no_grad():
    text_features = model.encode_text(text_input)
    text_features /= text_features.norm(dim=-1, keepdim=True)

# Find matches
similarities = []
for img_path, img_features in image_database:
    similarity = (text_features @ img_features.T).item()
    similarities.append((img_path, similarity))

# Sort by similarity
similarities.sort(key=lambda x: x[1], reverse=True)
for img_path, score in similarities[:3]:
    print(f"{img_path}: {score:.3f}")
```

## Content moderation

```python
# Define safety categories
categories = [
    "safe for work content",
    "not safe for work content",
    "violent or graphic content",
    "hate speech or offensive content",
    "spam or misleading content"
]

text = clip.tokenize(categories)

# Check image
with torch.no_grad():
    logits, _ = model(image, text)
    probs = logits.softmax(dim=-1)

# Get classification
max_idx = probs.argmax().item()
confidence = probs[0, max_idx].item()

if confidence > 0.7:
    print(f"Classified as: {categories[max_idx]} ({confidence:.2%})")
else:
    print(f"Uncertain classification (confidence: {confidence:.2%})")
```

## Image-to-text retrieval

```python
# Text database
captions = [
    "A beautiful sunset over the ocean",
    "A cute dog playing in the park",
    "A modern city skyline at night",
    "A delicious pizza with toppings"
]

# Encode captions
caption_features = []
for caption in captions:
    text = clip.tokenize([caption])
    with torch.no_grad():
        features = model.encode_text(text)
        features /= features.norm(dim=-1, keepdim=True)
    caption_features.append(features)

caption_features = torch.cat(caption_features)

# Find matching captions for image
with torch.no_grad():
    image_features = model.encode_image(image)
    image_features /= image_features.norm(dim=-1, keepdim=True)

similarities = (image_features @ caption_features.T).squeeze(0)
top_k = similarities.topk(3)

for idx, score in zip(top_k.indices, top_k.values):
    print(f"{captions[idx]}: {score:.3f}")
```

## Visual question answering

```python
# Create yes/no questions
image = preprocess(Image.open("photo.jpg")).unsqueeze(0)

questions = [
    "a photo showing people",
    "a photo showing animals",
    "a photo taken indoors",
    "a photo taken outdoors",
    "a photo taken during daytime",
    "a photo taken at night"
]

text = clip.tokenize(questions)

with torch.no_grad():
    logits, _ = model(image, text)
    probs = logits.softmax(dim=-1)

# Answer questions
for question, prob in zip(questions, probs[0]):
    answer = "Yes" if prob > 0.5 else "No"
    print(f"{question}: {answer} ({prob:.2%})")
```
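One caveat: the softmax above runs over all six prompts jointly, so the probabilities compete and `prob > 0.5` rarely triggers as the prompt list grows. A common workaround, sketched below with illustrative negation wording (not part of the original guide), is to score each question against its own negation:

```python
# Sketch: score each question independently against a negated prompt,
# so the softmax is over just that pair. Negation phrasing is illustrative.
pairs = [
    ("a photo showing people", "a photo showing no people"),
    ("a photo taken indoors", "a photo taken outdoors"),
]

for positive, negative in pairs:
    pair_text = clip.tokenize([positive, negative])
    with torch.no_grad():
        pair_logits, _ = model(image, pair_text)
        pair_probs = pair_logits.softmax(dim=-1)[0]
    answer = "Yes" if pair_probs[0] > pair_probs[1] else "No"
    print(f"{positive}: {answer} ({pair_probs[0]:.2%})")
```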
## Image deduplication

```python
# Detect duplicate/similar images
def compute_similarity(img1_path, img2_path):
    img1 = preprocess(Image.open(img1_path)).unsqueeze(0)
    img2 = preprocess(Image.open(img2_path)).unsqueeze(0)

    with torch.no_grad():
        feat1 = model.encode_image(img1)
        feat2 = model.encode_image(img2)

        feat1 /= feat1.norm(dim=-1, keepdim=True)
        feat2 /= feat2.norm(dim=-1, keepdim=True)

        similarity = (feat1 @ feat2.T).item()

    return similarity

# Check for duplicates
threshold = 0.95
image_pairs = [("img1.jpg", "img2.jpg"), ("img1.jpg", "img3.jpg")]

for img1, img2 in image_pairs:
    sim = compute_similarity(img1, img2)
    if sim > threshold:
        print(f"{img1} and {img2} are duplicates (similarity: {sim:.3f})")
```

## Best practices

1. **Use descriptive labels** - "a photo of X" works better than just "X"
2. **Normalize embeddings** - Always normalize for cosine similarity
3. **Batch processing** - Process multiple images/texts together (see the sketch below)
4. **Cache embeddings** - Expensive to recompute
5. **Set appropriate thresholds** - Test on validation data
6. **Use GPU** - 10-50× faster than CPU
7. **Consider model size** - ViT-B/32 is a good default; ViT-L/14 for best quality
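A minimal sketch tying the normalization, batching, and caching points together: encode the label prompts once, keep the normalized result, and score a whole batch of images with a single matrix multiply. The glob pattern is a placeholder, `categories` is the list defined earlier, and (as in the examples above) the model and tensors are assumed to be on the same device:

```python
# Sketch: batched zero-shot classification with cached text embeddings.
# "photos/*.jpg" is a placeholder path.
import glob

image_paths = glob.glob("photos/*.jpg")

# Encode the label prompts once, normalize, and reuse for every batch
with torch.no_grad():
    text_features = model.encode_text(clip.tokenize(categories))
    text_features /= text_features.norm(dim=-1, keepdim=True)

# Stack preprocessed images into a single (N, 3, H, W) batch tensor
batch = torch.stack([preprocess(Image.open(p)) for p in image_paths])

with torch.no_grad():
    image_features = model.encode_image(batch)
    image_features /= image_features.norm(dim=-1, keepdim=True)

# One matrix multiply scores every image against every label
# (100.0 is CLIP's logit scale, as in the official README)
probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
for path, p in zip(image_paths, probs):
    print(f"{path}: {categories[p.argmax()]} ({p.max():.2%})")
```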
## Resources

- **Paper**: https://arxiv.org/abs/2103.00020
- **GitHub**: https://github.com/openai/CLIP
- **Colab**: https://colab.research.google.com/github/openai/clip/
EvoScientist/skills/find-skills/SKILL.md
@@ -0,0 +1,133 @@

---
name: find-skills
description: Helps users discover and install agent skills when they ask questions like "how do I do X", "find a skill for X", or express interest in extending capabilities. Uses a non-interactive installer script suitable for automated agents.
---

# Find Skills

This skill helps you discover and install skills from the open agent skills ecosystem.

## When to Use This Skill

Use this skill when the user:

- Asks "how do I do X" where X might be a common task with an existing skill
- Says "find a skill for X" or "is there a skill for X"
- Wants to search for tools, templates, or workflows
- Expresses interest in extending agent capabilities
- Mentions they wish they had help with a specific domain (design, testing, deployment, etc.)

## Step 1: Search for Skills

Use `npx -y skills find` with a relevant keyword to search the ecosystem:

```bash
npx -y skills find [query]
```

Examples:
- User asks "help me with React performance" → `npx -y skills find react performance`
- User asks "is there a skill for PR reviews?" → `npx -y skills find pr review`
- User asks "I need to create a changelog" → `npx -y skills find changelog`

The search results will show installable skills like:

```
vercel-labs/agent-skills@vercel-react-best-practices
└ https://skills.sh/vercel-labs/agent-skills/vercel-react-best-practices
```

Browse all available skills at: https://skills.sh/

## Step 2: Present Options

When you find relevant skills, present them to the user with:
1. The skill name and what it does
2. A link to learn more on skills.sh

Ask the user which skill(s) they want to install. All skills are installed to `./skills/` in the current working directory.

## Step 3: Install with the Script

**IMPORTANT: Do NOT use `npx -y skills add` for installation** — it requires interactive prompts.

Use the bundled installer script instead:

```bash
python /skills/find-skills/scripts/install_skill.py --url <github_url>
```

### Install Commands

**From a GitHub URL** (most common — copy the URL from search results):
```bash
python /skills/find-skills/scripts/install_skill.py \
  --url https://github.com/owner/repo/tree/main/skill-name
```

**From skills.sh shorthand** (owner/repo@skill):
```bash
python /skills/find-skills/scripts/install_skill.py \
  --url vercel-labs/agent-skills@vercel-react-best-practices
```

**From repo + path** (install specific skills from a multi-skill repo):
```bash
# Single skill
python /skills/find-skills/scripts/install_skill.py \
  --repo owner/repo --path skill-name

# Multiple skills from same repo
python /skills/find-skills/scripts/install_skill.py \
  --repo owner/repo --path skill-a --path skill-b
```

**With a specific git branch or tag**:
```bash
python /skills/find-skills/scripts/install_skill.py \
  --repo owner/repo --path skill-name --ref v2.0
```

### Installer Options

| Option | Description |
|--------|-------------|
| `--url` | GitHub URL or owner/repo@skill shorthand |
| `--repo` | GitHub repo (owner/repo format) |
| `--path` | Path to skill inside repo (repeatable) |
| `--ref` | Git branch or tag |
| `--dest` | Custom destination directory (default: `./skills`) |
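The one flag not demonstrated above is `--dest`; to install somewhere other than `./skills` (the paths below are illustrative):

```bash
python /skills/find-skills/scripts/install_skill.py \
  --repo owner/repo --path skill-name --dest ./my-skills
```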
## Step 4: Confirm Installation

After installation, verify by listing the skills directory:

```bash
ls /skills/  # all skills (system + user merged)
```

Then read the installed skill's SKILL.md to confirm it loaded correctly:

```bash
read_file /skills/<skill-name>/SKILL.md
```

## Common Skill Categories

| Category | Example Queries |
|----------|----------------|
| Web Development | react, nextjs, typescript, css, tailwind |
| Testing | testing, jest, playwright, e2e |
| DevOps | deploy, docker, kubernetes, ci-cd |
| Documentation | docs, readme, changelog, api-docs |
| Code Quality | review, lint, refactor, best-practices |
| Design | ui, ux, design-system, accessibility |
| Productivity | workflow, automation, git |

## When No Skills Are Found

If no relevant skills exist:

1. Acknowledge that no existing skill was found
2. Offer to help with the task directly using your general capabilities
3. Mention the user could create their own skill with `npx -y skills init`
EvoScientist/skills/find-skills/scripts/install_skill.py
@@ -0,0 +1,211 @@

```python
#!/usr/bin/env python3
"""Install a skill from GitHub into a local skills directory.

Self-contained installer — no external dependencies beyond git.

Usage examples:
    # Install from a GitHub URL (auto-detects repo, ref, path)
    python install_skill.py --url https://github.com/anthropics/skills/tree/main/excel

    # Install from repo + path
    python install_skill.py --repo anthropics/skills --path excel

    # Install multiple skills from the same repo
    python install_skill.py --repo anthropics/skills --path excel --path pdf

    # Install with a specific git ref
    python install_skill.py --repo org/repo --path my-skill --ref v2.0
"""

from __future__ import annotations

import argparse
import os
import re
import shutil
import subprocess
import sys
import tempfile


def parse_github_url(url: str) -> tuple[str, str | None, str | None]:
    """Parse a GitHub URL into (repo, ref, path).

    Supports formats:
        https://github.com/owner/repo
        https://github.com/owner/repo/tree/main/path/to/skill
        github.com/owner/repo/tree/branch/path
        owner/repo@skill-name (shorthand from skills.sh)

    Returns:
        (repo, ref_or_none, path_or_none)
    """
    # Shorthand: owner/repo@path
    if "@" in url and "://" not in url:
        repo, path = url.split("@", 1)
        return repo.strip(), None, path.strip()

    # Strip protocol and github.com prefix
    cleaned = re.sub(r"^https?://", "", url)
    cleaned = re.sub(r"^github\.com/", "", cleaned)
    cleaned = cleaned.rstrip("/")

    # Match: owner/repo/tree/ref/path...
    m = re.match(r"^([^/]+/[^/]+)/tree/([^/]+)(?:/(.+))?$", cleaned)
    if m:
        return m.group(1), m.group(2), m.group(3)

    # Match: owner/repo (no tree)
    m = re.match(r"^([^/]+/[^/]+)$", cleaned)
    if m:
        return m.group(1), None, None

    raise ValueError(f"Cannot parse GitHub URL: {url}")


def clone_repo(repo: str, ref: str | None, dest: str) -> None:
    """Shallow-clone a GitHub repo."""
    clone_url = f"https://github.com/{repo}.git"
    cmd = ["git", "clone", "--depth", "1"]
    if ref:
        cmd += ["--branch", ref]
    cmd += [clone_url, dest]

    result = subprocess.run(cmd, capture_output=True, text=True)
    if result.returncode != 0:
        raise RuntimeError(f"git clone failed: {result.stderr.strip()}")


def copy_skill(src: str, dest_dir: str) -> str:
    """Copy a skill directory to the destination.

    Returns:
        The skill name (directory basename).
    """
    skill_name = os.path.basename(src.rstrip("/"))
    target = os.path.join(dest_dir, skill_name)

    if os.path.exists(target):
        shutil.rmtree(target)
        print(f"  Replaced existing: {skill_name}")

    shutil.copytree(src, target)
    return skill_name


def validate_skill(path: str) -> bool:
    """Check that a directory looks like a valid skill (has SKILL.md)."""
    return os.path.isfile(os.path.join(path, "SKILL.md"))


def install(
    repo: str,
    paths: list[str],
    ref: str | None,
    dest: str,
) -> list[str]:
    """Install skill(s) from a GitHub repo.

    Returns:
        List of installed skill names.
    """
    os.makedirs(dest, exist_ok=True)
    installed: list[str] = []

    with tempfile.TemporaryDirectory(prefix="skill-install-") as tmp:
        clone_dir = os.path.join(tmp, "repo")
        print(f"Cloning {repo}" + (f" @{ref}" if ref else "") + "...")
        clone_repo(repo, ref, clone_dir)

        if not paths:
            # No path specified — treat entire repo as a single skill
            if validate_skill(clone_dir):
                name = copy_skill(clone_dir, dest)
                installed.append(name)
            else:
                # List top-level directories that look like skills
                for entry in sorted(os.listdir(clone_dir)):
                    entry_path = os.path.join(clone_dir, entry)
                    if os.path.isdir(entry_path) and validate_skill(entry_path):
                        name = copy_skill(entry_path, dest)
                        installed.append(name)

                if not installed:
                    print("No valid skills found in repository root.", file=sys.stderr)
        else:
            for p in paths:
                skill_path = os.path.join(clone_dir, p.strip("/"))
                if not os.path.isdir(skill_path):
                    print(f"  Path not found: {p}", file=sys.stderr)
                    continue
                if not validate_skill(skill_path):
                    print(f"  No SKILL.md in: {p}", file=sys.stderr)
                    continue
                name = copy_skill(skill_path, dest)
                installed.append(name)

    return installed


def main() -> int:
    parser = argparse.ArgumentParser(
        description="Install skills from GitHub into a local skills directory.",
    )
    src = parser.add_mutually_exclusive_group(required=True)
    src.add_argument(
        "--url",
        help="GitHub URL (e.g. https://github.com/owner/repo/tree/main/skill-name)",
    )
    src.add_argument(
        "--repo",
        help="GitHub repo (e.g. owner/repo)",
    )
    parser.add_argument(
        "--path",
        action="append",
        default=[],
        help="Path to skill inside repo (repeatable)",
    )
    parser.add_argument(
        "--ref",
        default=None,
        help="Git branch or tag (default: repo default branch)",
    )
    parser.add_argument(
        "--dest",
        default="./skills",
        help="Destination directory (default: ./skills)",
    )

    args = parser.parse_args()
    dest = args.dest

    # Parse source
    if args.url:
        repo, ref, path = parse_github_url(args.url)
        ref = args.ref or ref
        paths = [path] if path else args.path
    else:
        repo = args.repo
        ref = args.ref
        paths = args.path

    try:
        installed = install(repo, paths, ref, dest)
    except RuntimeError as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1

    if installed:
        print(f"\nInstalled {len(installed)} skill(s) to {dest}/:")
        for name in installed:
            print(f"  - {name}")
    else:
        print("No skills were installed.", file=sys.stderr)
        return 1

    return 0


if __name__ == "__main__":
    raise SystemExit(main())
```