opencode-skills-antigravity 1.0.40 → 1.0.41
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bundled-skills/.antigravity-install-manifest.json +7 -1
- package/bundled-skills/docs/integrations/jetski-cortex.md +3 -3
- package/bundled-skills/docs/integrations/jetski-gemini-loader/README.md +1 -1
- package/bundled-skills/docs/maintainers/repo-growth-seo.md +3 -3
- package/bundled-skills/docs/maintainers/skills-update-guide.md +1 -1
- package/bundled-skills/docs/sources/sources.md +2 -2
- package/bundled-skills/docs/users/bundles.md +1 -1
- package/bundled-skills/docs/users/claude-code-skills.md +1 -1
- package/bundled-skills/docs/users/gemini-cli-skills.md +1 -1
- package/bundled-skills/docs/users/getting-started.md +1 -1
- package/bundled-skills/docs/users/kiro-integration.md +1 -1
- package/bundled-skills/docs/users/usage.md +4 -4
- package/bundled-skills/docs/users/visual-guide.md +4 -4
- package/bundled-skills/hugging-face-cli/SKILL.md +192 -195
- package/bundled-skills/hugging-face-community-evals/SKILL.md +213 -0
- package/bundled-skills/hugging-face-community-evals/examples/.env.example +3 -0
- package/bundled-skills/hugging-face-community-evals/examples/USAGE_EXAMPLES.md +101 -0
- package/bundled-skills/hugging-face-community-evals/scripts/inspect_eval_uv.py +104 -0
- package/bundled-skills/hugging-face-community-evals/scripts/inspect_vllm_uv.py +306 -0
- package/bundled-skills/hugging-face-community-evals/scripts/lighteval_vllm_uv.py +297 -0
- package/bundled-skills/hugging-face-dataset-viewer/SKILL.md +120 -120
- package/bundled-skills/hugging-face-gradio/SKILL.md +304 -0
- package/bundled-skills/hugging-face-gradio/examples.md +613 -0
- package/bundled-skills/hugging-face-jobs/SKILL.md +25 -18
- package/bundled-skills/hugging-face-jobs/index.html +216 -0
- package/bundled-skills/hugging-face-jobs/references/hardware_guide.md +336 -0
- package/bundled-skills/hugging-face-jobs/references/hub_saving.md +352 -0
- package/bundled-skills/hugging-face-jobs/references/token_usage.md +570 -0
- package/bundled-skills/hugging-face-jobs/references/troubleshooting.md +475 -0
- package/bundled-skills/hugging-face-jobs/scripts/cot-self-instruct.py +718 -0
- package/bundled-skills/hugging-face-jobs/scripts/finepdfs-stats.py +546 -0
- package/bundled-skills/hugging-face-jobs/scripts/generate-responses.py +587 -0
- package/bundled-skills/hugging-face-model-trainer/SKILL.md +11 -12
- package/bundled-skills/hugging-face-model-trainer/references/gguf_conversion.md +296 -0
- package/bundled-skills/hugging-face-model-trainer/references/hardware_guide.md +283 -0
- package/bundled-skills/hugging-face-model-trainer/references/hub_saving.md +364 -0
- package/bundled-skills/hugging-face-model-trainer/references/local_training_macos.md +231 -0
- package/bundled-skills/hugging-face-model-trainer/references/reliability_principles.md +371 -0
- package/bundled-skills/hugging-face-model-trainer/references/trackio_guide.md +189 -0
- package/bundled-skills/hugging-face-model-trainer/references/training_methods.md +150 -0
- package/bundled-skills/hugging-face-model-trainer/references/training_patterns.md +203 -0
- package/bundled-skills/hugging-face-model-trainer/references/troubleshooting.md +282 -0
- package/bundled-skills/hugging-face-model-trainer/references/unsloth.md +313 -0
- package/bundled-skills/hugging-face-model-trainer/scripts/convert_to_gguf.py +424 -0
- package/bundled-skills/hugging-face-model-trainer/scripts/dataset_inspector.py +417 -0
- package/bundled-skills/hugging-face-model-trainer/scripts/estimate_cost.py +150 -0
- package/bundled-skills/hugging-face-model-trainer/scripts/train_dpo_example.py +106 -0
- package/bundled-skills/hugging-face-model-trainer/scripts/train_grpo_example.py +89 -0
- package/bundled-skills/hugging-face-model-trainer/scripts/train_sft_example.py +122 -0
- package/bundled-skills/hugging-face-model-trainer/scripts/unsloth_sft_example.py +512 -0
- package/bundled-skills/hugging-face-paper-publisher/SKILL.md +11 -4
- package/bundled-skills/hugging-face-paper-publisher/examples/example_usage.md +326 -0
- package/bundled-skills/hugging-face-paper-publisher/references/quick_reference.md +216 -0
- package/bundled-skills/hugging-face-paper-publisher/scripts/paper_manager.py +606 -0
- package/bundled-skills/hugging-face-paper-publisher/templates/arxiv.md +299 -0
- package/bundled-skills/hugging-face-paper-publisher/templates/ml-report.md +358 -0
- package/bundled-skills/hugging-face-paper-publisher/templates/modern.md +319 -0
- package/bundled-skills/hugging-face-paper-publisher/templates/standard.md +201 -0
- package/bundled-skills/hugging-face-papers/SKILL.md +241 -0
- package/bundled-skills/hugging-face-trackio/.claude-plugin/plugin.json +19 -0
- package/bundled-skills/hugging-face-trackio/SKILL.md +117 -0
- package/bundled-skills/hugging-face-trackio/references/alerts.md +196 -0
- package/bundled-skills/hugging-face-trackio/references/logging_metrics.md +206 -0
- package/bundled-skills/hugging-face-trackio/references/retrieving_metrics.md +251 -0
- package/bundled-skills/hugging-face-vision-trainer/SKILL.md +595 -0
- package/bundled-skills/hugging-face-vision-trainer/references/finetune_sam2_trainer.md +254 -0
- package/bundled-skills/hugging-face-vision-trainer/references/hub_saving.md +618 -0
- package/bundled-skills/hugging-face-vision-trainer/references/image_classification_training_notebook.md +279 -0
- package/bundled-skills/hugging-face-vision-trainer/references/object_detection_training_notebook.md +700 -0
- package/bundled-skills/hugging-face-vision-trainer/references/reliability_principles.md +310 -0
- package/bundled-skills/hugging-face-vision-trainer/references/timm_trainer.md +91 -0
- package/bundled-skills/hugging-face-vision-trainer/scripts/dataset_inspector.py +814 -0
- package/bundled-skills/hugging-face-vision-trainer/scripts/estimate_cost.py +217 -0
- package/bundled-skills/hugging-face-vision-trainer/scripts/image_classification_training.py +383 -0
- package/bundled-skills/hugging-face-vision-trainer/scripts/object_detection_training.py +710 -0
- package/bundled-skills/hugging-face-vision-trainer/scripts/sam_segmentation_training.py +382 -0
- package/bundled-skills/transformers-js/SKILL.md +639 -0
- package/bundled-skills/transformers-js/references/CACHE.md +339 -0
- package/bundled-skills/transformers-js/references/CONFIGURATION.md +390 -0
- package/bundled-skills/transformers-js/references/EXAMPLES.md +605 -0
- package/bundled-skills/transformers-js/references/MODEL_ARCHITECTURES.md +167 -0
- package/bundled-skills/transformers-js/references/PIPELINE_OPTIONS.md +545 -0
- package/bundled-skills/transformers-js/references/TEXT_GENERATION.md +315 -0
- package/package.json +1 -1
package/bundled-skills/hugging-face-vision-trainer/references/finetune_sam2_trainer.md
@@ -0,0 +1,254 @@

# Fine-tuning SAM2 with HF Trainer

Fine-tune SAM2.1 on a small part of the MicroMat dataset for image matting, using the Hugging Face Trainer with a custom loss function.

```python
!pip install -q transformers datasets monai trackio
```

## Load and explore the dataset

```python
from datasets import load_dataset

dataset = load_dataset("merve/MicroMat-mini", split="train")
dataset
```
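
As a quick orientation step (not part of the original notebook), the column schema can be inspected directly; the rest of the tutorial relies on each row carrying an image, a matting mask, and a JSON-encoded prompt with a bounding box.

```python
# Illustrative only: confirm the columns used below ("image", "mask", "prompt").
print(dataset.features)
```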

```python
dataset = dataset.train_test_split(test_size=0.1)
train_ds = dataset["train"]
val_ds = dataset["test"]
```

```python
import json

train_ds[0]
```

```python
json.loads(train_ds["prompt"][0])["bbox"]
```

## Visualize a sample

```python
import matplotlib.pyplot as plt
import numpy as np


def show_mask(mask, ax, bbox):
    color = np.array([0.12, 0.56, 1.0, 0.6])
    mask = np.array(mask)
    h, w = mask.shape
    mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, 4)
    ax.imshow(mask_image)
    x0, y0, x1, y1 = bbox
    ax.add_patch(
        plt.Rectangle(
            (x0, y0), x1 - x0, y1 - y0, fill=False, edgecolor="lime", linewidth=2
        )
    )


example = train_ds[0]
image = np.array(example["image"])
ground_truth_mask = np.array(example["mask"])

fig, ax = plt.subplots()
ax.imshow(image)
show_mask(ground_truth_mask, ax, json.loads(example["prompt"])["bbox"])
ax.set_title("Ground truth mask")
ax.set_axis_off()
plt.show()
```

## Build the dataset and collator

`SAMDataset` wraps each sample into the format expected by the SAM2 processor. Ground-truth masks are stored under the key `"labels"` so the Trainer automatically pops them before calling `model.forward()`.

```python
from torch.utils.data import Dataset
import torch
import torch.nn.functional as F


class SAMDataset(Dataset):
    def __init__(self, dataset, processor):
        self.dataset = dataset
        self.processor = processor

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        item = self.dataset[idx]
        image = item["image"]
        prompt = json.loads(item["prompt"])["bbox"]
        inputs = self.processor(image, input_boxes=[[prompt]], return_tensors="pt")
        inputs["labels"] = (np.array(item["mask"]) > 0).astype(np.float32)
        inputs["original_image_size"] = torch.tensor(image.size[::-1])
        return inputs


def collate_fn(batch):
    pixel_values = torch.cat([item["pixel_values"] for item in batch], dim=0)
    original_sizes = torch.stack([item["original_sizes"] for item in batch])
    input_boxes = torch.cat([item["input_boxes"] for item in batch], dim=0)
    labels = torch.cat(
        [
            F.interpolate(
                torch.as_tensor(x["labels"]).unsqueeze(0).unsqueeze(0).float(),
                size=(256, 256),
                mode="nearest",
            )
            for x in batch
        ],
        dim=0,
    ).long()

    return {
        "pixel_values": pixel_values,
        "original_sizes": original_sizes,
        "input_boxes": input_boxes,
        "labels": labels,
        "original_image_size": torch.stack(
            [item["original_image_size"] for item in batch]
        ),
        "multimask_output": False,
    }
```

```python
from transformers import Sam2Processor

processor = Sam2Processor.from_pretrained("facebook/sam2.1-hiera-small")

train_dataset = SAMDataset(dataset=train_ds, processor=processor)
val_dataset = SAMDataset(dataset=val_ds, processor=processor)
```
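
A quick sanity check, not in the original notebook, helps confirm the collator produces the shapes the Trainer will feed the model; in particular, `labels` must match the 256×256 resolution of SAM2's low-resolution mask logits.

```python
# Illustrative sanity check: batch two samples through the collator and
# print the resulting tensor shapes before training.
from torch.utils.data import DataLoader

loader = DataLoader(train_dataset, batch_size=2, collate_fn=collate_fn)
batch = next(iter(loader))
for key, value in batch.items():
    if isinstance(value, torch.Tensor):
        print(key, tuple(value.shape))
# "labels" should come out as (2, 1, 256, 256), the size the collator
# interpolates the ground-truth masks to.
```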

## Load model and freeze encoder layers

```python
from transformers import Sam2Model

model = Sam2Model.from_pretrained("facebook/sam2.1-hiera-small")

for name, param in model.named_parameters():
    if name.startswith("vision_encoder") or name.startswith("prompt_encoder"):
        param.requires_grad_(False)
```
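
As a check (again, an extra on top of the original), counting trainable parameters confirms that only the decoder side of the model will be updated while the frozen encoders account for most of the weights.

```python
# Illustrative check: report the trainable fraction after freezing.
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable: {trainable:,} / {total:,} ({100 * trainable / total:.1f}%)")
```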

## Inference before training

```python
item = val_ds[1]
img = item["image"]
bbox = json.loads(item["prompt"])["bbox"]
inputs = processor(images=img, input_boxes=[[bbox]], return_tensors="pt").to(
    model.device
)

with torch.no_grad():
    outputs = model(**inputs)

masks = processor.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"])[0]
preds = masks.squeeze(0)
mask = (preds[0] > 0).cpu().numpy()

overlay = np.asarray(img, dtype=np.uint8).copy()
overlay[mask] = 0.55 * overlay[mask] + 0.45 * np.array([0, 255, 0], dtype=np.float32)

plt.imshow(overlay)
plt.title("Before training")
plt.axis("off")
plt.show()
```
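
To make the before/after comparison quantitative rather than purely visual, a small IoU helper can be added here (a hypothetical extra, not part of the original notebook); it compares the predicted mask against the ground-truth matting mask for the same sample.

```python
# Hypothetical helper: intersection-over-union of two binary masks.
def mask_iou(pred, target):
    pred = np.asarray(pred, dtype=bool)
    target = np.asarray(target, dtype=bool)
    union = np.logical_or(pred, target).sum()
    return np.logical_and(pred, target).sum() / union if union else 1.0


iou_before = mask_iou(mask, np.array(item["mask"]) > 0)
print(f"IoU before training: {iou_before:.3f}")
```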

## Define custom loss

SAM2 does not compute a loss in its `forward()`, so we provide a `compute_loss_func` to the Trainer. The Trainer pops `"labels"` from the batch before calling `model(**inputs)`, then passes `(outputs, labels)` to this function.

```python
import monai
from transformers import Trainer, TrainingArguments
import trackio

seg_loss = monai.losses.DiceCELoss(sigmoid=True, squared_pred=True, reduction="mean")


def compute_loss(outputs, labels, num_items_in_batch=None):
    predicted_masks = outputs.pred_masks.squeeze(1)
    return seg_loss(predicted_masks, labels.float())
```
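
A shape sanity check for the loss (illustrative only; it assumes a recent monai release that handles single-channel inputs with `sigmoid=True`): with `multimask_output=False`, `pred_masks.squeeze(1)` and the collated labels are both `(B, 1, 256, 256)`.

```python
# Exercise the loss on dummy tensors with the shapes produced by the
# collator and by SAM2's mask decoder.
dummy_logits = torch.randn(2, 1, 256, 256)   # stands in for pred_masks.squeeze(1)
dummy_labels = torch.randint(0, 2, (2, 1, 256, 256))
print(seg_loss(dummy_logits, dummy_labels.float()))
```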

## Train with Trainer

Key settings:
- `remove_unused_columns=False`: the Trainer must keep `input_boxes`, `original_sizes`, etc. that are not in the model's `forward()` signature.
- `compute_loss_func`: our custom DiceCE loss.
- `report_to="trackio"`: logs the training loss to trackio.

```python
training_args = TrainingArguments(
    output_dir="sam2-finetuned",
    num_train_epochs=30,
    per_device_train_batch_size=4,
    learning_rate=1e-5,
    weight_decay=0,
    logging_steps=1,
    save_strategy="epoch",
    save_total_limit=2,
    remove_unused_columns=False,
    dataloader_pin_memory=False,
    report_to="trackio",
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    data_collator=collate_fn,
    compute_loss_func=compute_loss,
)

trainer.train()
```
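
The held-out `val_dataset` built earlier isn't wired into this Trainer. One hedged way to measure validation loss after training, reusing the same collator and loss function, is sketched below; `prediction_loss_only` avoids gathering the large mask outputs during the eval loop.

```python
# Sketch, not from the original notebook: loss on the validation split.
training_args.prediction_loss_only = True  # skip collecting pred_masks etc.
eval_trainer = Trainer(
    model=model,
    args=training_args,
    eval_dataset=val_dataset,
    data_collator=collate_fn,
    compute_loss_func=compute_loss,
)
print(eval_trainer.evaluate())
```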

## Inference after training

```python
item = val_ds[1]
img = item["image"]
bbox = json.loads(item["prompt"])["bbox"]

inputs = processor(images=img, input_boxes=[[bbox]], return_tensors="pt").to(
    model.device
)

with torch.no_grad():
    outputs = model(**inputs)

preds = processor.post_process_masks(
    outputs.pred_masks.cpu(), inputs["original_sizes"]
)[0]
preds = preds.squeeze(0)
mask = (preds[0] > 0).cpu().numpy()

overlay = np.asarray(img, dtype=np.uint8).copy()
overlay[mask] = 0.55 * overlay[mask] + 0.45 * np.array([0, 255, 0], dtype=np.float32)

plt.imshow(overlay)
plt.title("After training")
plt.axis("off")
plt.show()
```
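
As an optional follow-up (not part of the original), the fine-tuned weights and processor can be saved locally or pushed to the Hub; the paths and repo id below are placeholders.

```python
# Hypothetical output locations; pick your own path / repo id.
model.save_pretrained("sam2-finetuned/final")
processor.save_pretrained("sam2-finetuned/final")
# model.push_to_hub("your-username/sam2.1-hiera-small-micromat")
# processor.push_to_hub("your-username/sam2.1-hiera-small-micromat")
```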