opencode-skills-antigravity 1.0.39 → 1.0.41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. package/bundled-skills/.antigravity-install-manifest.json +10 -1
  2. package/bundled-skills/docs/integrations/jetski-cortex.md +3 -3
  3. package/bundled-skills/docs/integrations/jetski-gemini-loader/README.md +1 -1
  4. package/bundled-skills/docs/maintainers/repo-growth-seo.md +3 -3
  5. package/bundled-skills/docs/maintainers/security-findings-triage-2026-03-29-refresh.csv +34 -0
  6. package/bundled-skills/docs/maintainers/security-findings-triage-2026-03-29-refresh.md +2 -0
  7. package/bundled-skills/docs/maintainers/skills-update-guide.md +1 -1
  8. package/bundled-skills/docs/sources/sources.md +2 -2
  9. package/bundled-skills/docs/users/bundles.md +1 -1
  10. package/bundled-skills/docs/users/claude-code-skills.md +1 -1
  11. package/bundled-skills/docs/users/gemini-cli-skills.md +1 -1
  12. package/bundled-skills/docs/users/getting-started.md +1 -1
  13. package/bundled-skills/docs/users/kiro-integration.md +1 -1
  14. package/bundled-skills/docs/users/usage.md +4 -4
  15. package/bundled-skills/docs/users/visual-guide.md +4 -4
  16. package/bundled-skills/hugging-face-cli/SKILL.md +192 -195
  17. package/bundled-skills/hugging-face-community-evals/SKILL.md +213 -0
  18. package/bundled-skills/hugging-face-community-evals/examples/.env.example +3 -0
  19. package/bundled-skills/hugging-face-community-evals/examples/USAGE_EXAMPLES.md +101 -0
  20. package/bundled-skills/hugging-face-community-evals/scripts/inspect_eval_uv.py +104 -0
  21. package/bundled-skills/hugging-face-community-evals/scripts/inspect_vllm_uv.py +306 -0
  22. package/bundled-skills/hugging-face-community-evals/scripts/lighteval_vllm_uv.py +297 -0
  23. package/bundled-skills/hugging-face-dataset-viewer/SKILL.md +120 -120
  24. package/bundled-skills/hugging-face-gradio/SKILL.md +304 -0
  25. package/bundled-skills/hugging-face-gradio/examples.md +613 -0
  26. package/bundled-skills/hugging-face-jobs/SKILL.md +25 -18
  27. package/bundled-skills/hugging-face-jobs/index.html +216 -0
  28. package/bundled-skills/hugging-face-jobs/references/hardware_guide.md +336 -0
  29. package/bundled-skills/hugging-face-jobs/references/hub_saving.md +352 -0
  30. package/bundled-skills/hugging-face-jobs/references/token_usage.md +570 -0
  31. package/bundled-skills/hugging-face-jobs/references/troubleshooting.md +475 -0
  32. package/bundled-skills/hugging-face-jobs/scripts/cot-self-instruct.py +718 -0
  33. package/bundled-skills/hugging-face-jobs/scripts/finepdfs-stats.py +546 -0
  34. package/bundled-skills/hugging-face-jobs/scripts/generate-responses.py +587 -0
  35. package/bundled-skills/hugging-face-model-trainer/SKILL.md +11 -12
  36. package/bundled-skills/hugging-face-model-trainer/references/gguf_conversion.md +296 -0
  37. package/bundled-skills/hugging-face-model-trainer/references/hardware_guide.md +283 -0
  38. package/bundled-skills/hugging-face-model-trainer/references/hub_saving.md +364 -0
  39. package/bundled-skills/hugging-face-model-trainer/references/local_training_macos.md +231 -0
  40. package/bundled-skills/hugging-face-model-trainer/references/reliability_principles.md +371 -0
  41. package/bundled-skills/hugging-face-model-trainer/references/trackio_guide.md +189 -0
  42. package/bundled-skills/hugging-face-model-trainer/references/training_methods.md +150 -0
  43. package/bundled-skills/hugging-face-model-trainer/references/training_patterns.md +203 -0
  44. package/bundled-skills/hugging-face-model-trainer/references/troubleshooting.md +282 -0
  45. package/bundled-skills/hugging-face-model-trainer/references/unsloth.md +313 -0
  46. package/bundled-skills/hugging-face-model-trainer/scripts/convert_to_gguf.py +424 -0
  47. package/bundled-skills/hugging-face-model-trainer/scripts/dataset_inspector.py +417 -0
  48. package/bundled-skills/hugging-face-model-trainer/scripts/estimate_cost.py +150 -0
  49. package/bundled-skills/hugging-face-model-trainer/scripts/train_dpo_example.py +106 -0
  50. package/bundled-skills/hugging-face-model-trainer/scripts/train_grpo_example.py +89 -0
  51. package/bundled-skills/hugging-face-model-trainer/scripts/train_sft_example.py +122 -0
  52. package/bundled-skills/hugging-face-model-trainer/scripts/unsloth_sft_example.py +512 -0
  53. package/bundled-skills/hugging-face-paper-publisher/SKILL.md +11 -4
  54. package/bundled-skills/hugging-face-paper-publisher/examples/example_usage.md +326 -0
  55. package/bundled-skills/hugging-face-paper-publisher/references/quick_reference.md +216 -0
  56. package/bundled-skills/hugging-face-paper-publisher/scripts/paper_manager.py +606 -0
  57. package/bundled-skills/hugging-face-paper-publisher/templates/arxiv.md +299 -0
  58. package/bundled-skills/hugging-face-paper-publisher/templates/ml-report.md +358 -0
  59. package/bundled-skills/hugging-face-paper-publisher/templates/modern.md +319 -0
  60. package/bundled-skills/hugging-face-paper-publisher/templates/standard.md +201 -0
  61. package/bundled-skills/hugging-face-papers/SKILL.md +241 -0
  62. package/bundled-skills/hugging-face-trackio/.claude-plugin/plugin.json +19 -0
  63. package/bundled-skills/hugging-face-trackio/SKILL.md +117 -0
  64. package/bundled-skills/hugging-face-trackio/references/alerts.md +196 -0
  65. package/bundled-skills/hugging-face-trackio/references/logging_metrics.md +206 -0
  66. package/bundled-skills/hugging-face-trackio/references/retrieving_metrics.md +251 -0
  67. package/bundled-skills/hugging-face-vision-trainer/SKILL.md +595 -0
  68. package/bundled-skills/hugging-face-vision-trainer/references/finetune_sam2_trainer.md +254 -0
  69. package/bundled-skills/hugging-face-vision-trainer/references/hub_saving.md +618 -0
  70. package/bundled-skills/hugging-face-vision-trainer/references/image_classification_training_notebook.md +279 -0
  71. package/bundled-skills/hugging-face-vision-trainer/references/object_detection_training_notebook.md +700 -0
  72. package/bundled-skills/hugging-face-vision-trainer/references/reliability_principles.md +310 -0
  73. package/bundled-skills/hugging-face-vision-trainer/references/timm_trainer.md +91 -0
  74. package/bundled-skills/hugging-face-vision-trainer/scripts/dataset_inspector.py +814 -0
  75. package/bundled-skills/hugging-face-vision-trainer/scripts/estimate_cost.py +217 -0
  76. package/bundled-skills/hugging-face-vision-trainer/scripts/image_classification_training.py +383 -0
  77. package/bundled-skills/hugging-face-vision-trainer/scripts/object_detection_training.py +710 -0
  78. package/bundled-skills/hugging-face-vision-trainer/scripts/sam_segmentation_training.py +382 -0
  79. package/bundled-skills/jq/SKILL.md +273 -0
  80. package/bundled-skills/odoo-edi-connector/SKILL.md +32 -10
  81. package/bundled-skills/odoo-woocommerce-bridge/SKILL.md +9 -5
  82. package/bundled-skills/tmux/SKILL.md +370 -0
  83. package/bundled-skills/transformers-js/SKILL.md +639 -0
  84. package/bundled-skills/transformers-js/references/CACHE.md +339 -0
  85. package/bundled-skills/transformers-js/references/CONFIGURATION.md +390 -0
  86. package/bundled-skills/transformers-js/references/EXAMPLES.md +605 -0
  87. package/bundled-skills/transformers-js/references/MODEL_ARCHITECTURES.md +167 -0
  88. package/bundled-skills/transformers-js/references/PIPELINE_OPTIONS.md +545 -0
  89. package/bundled-skills/transformers-js/references/TEXT_GENERATION.md +315 -0
  90. package/bundled-skills/viboscope/SKILL.md +64 -0
  91. package/package.json +1 -1
package/bundled-skills/hugging-face-vision-trainer/scripts/object_detection_training.py
@@ -0,0 +1,710 @@
+ # /// script
+ # dependencies = [
+ #     "transformers>=5.2.0",
+ #     "accelerate>=1.1.0",
+ #     "albumentations>=1.4.16",
+ #     "timm",
+ #     "datasets>=4.0",
+ #     "torchmetrics",
+ #     "pycocotools",
+ #     "trackio",
+ #     "huggingface_hub",
+ # ]
+ # ///
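+ # Inline script metadata (PEP 723): runners such as `uv run` read the block
+ # above and install the listed dependencies automatically, so no separate
+ # requirements file is needed to execute this script.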
14
+
+ """Finetuning any 🤗 Transformers model supported by AutoModelForObjectDetection for object detection leveraging the Trainer API."""
+
+ import logging
+ import math
+ import os
+ import sys
+ from collections.abc import Mapping
+ from dataclasses import dataclass, field
+ from functools import partial
+ from typing import Any
+
+ import albumentations as A
+ import numpy as np
+ import torch
+ from datasets import load_dataset
+ from torchmetrics.detection.mean_ap import MeanAveragePrecision
+
+ import trackio
+
+ import transformers
+ from transformers import (
+     AutoConfig,
+     AutoImageProcessor,
+     AutoModelForObjectDetection,
+     HfArgumentParser,
+     Trainer,
+     TrainingArguments,
+ )
+ from transformers.image_processing_utils import BatchFeature
+ from transformers.image_transforms import center_to_corners_format
+ from transformers.trainer import EvalPrediction
+ from transformers.utils import check_min_version
+ from transformers.utils.versions import require_version
+
+
+ logger = logging.getLogger(__name__)
+
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risk.
+ check_min_version("4.57.0.dev0")
+
+ require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/object-detection/requirements.txt")
+
+
+ @dataclass
+ class ModelOutput:
+     logits: torch.Tensor
+     pred_boxes: torch.Tensor
+
+
+ def format_image_annotations_as_coco(
+     image_id: str, categories: list[int], areas: list[float], bboxes: list[tuple[float]]
+ ) -> dict:
+     """Format one set of image annotations to the COCO format
+
+     Args:
+         image_id (str): image id, e.g. "0001"
+         categories (list[int]): list of categories/class labels corresponding to provided bounding boxes
+         areas (list[float]): list of corresponding areas to provided bounding boxes
+         bboxes (list[tuple[float]]): list of bounding boxes provided in COCO format
+             ([x_min, y_min, width, height] in absolute coordinates)
+
+     Returns:
+         dict: {
+             "image_id": image id,
+             "annotations": list of formatted annotations
+         }
+     """
+     annotations = []
+     for category, area, bbox in zip(categories, areas, bboxes):
+         formatted_annotation = {
+             "image_id": image_id,
+             "category_id": category,
+             "iscrowd": 0,
+             "area": area,
+             "bbox": list(bbox),
+         }
+         annotations.append(formatted_annotation)
+
+     return {
+         "image_id": image_id,
+         "annotations": annotations,
+     }
+
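+ # Worked example (illustrative): one 20x20 px box at (10, 20) with class 1 gives
+ #   format_image_annotations_as_coco("0", [1], [400.0], [(10, 20, 20, 20)])
+ #   -> {"image_id": "0", "annotations": [{"image_id": "0", "category_id": 1,
+ #       "iscrowd": 0, "area": 400.0, "bbox": [10, 20, 20, 20]}]}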
98
+
+ def detect_bbox_format_from_samples(dataset, image_col="image", objects_col="objects", num_samples=50):
+     """
+     Detect whether bboxes are xyxy (Pascal VOC) or xywh (COCO) by checking
+     bbox coordinates against image dimensions. The correct format interpretation
+     should keep bboxes within image bounds.
+     """
+     exceeds_if_xywh = 0
+     exceeds_if_xyxy = 0
+     total = 0
+
+     for example in dataset.select(range(min(num_samples, len(dataset)))):
+         img_w, img_h = example[image_col].size
+         for bbox in example[objects_col]["bbox"]:
+             if len(bbox) != 4:
+                 continue
+             a, b, c, d = float(bbox[0]), float(bbox[1]), float(bbox[2]), float(bbox[3])
+             total += 1
+
+             # If 3rd < 1st or 4th < 2nd, can't be xyxy (x_max must exceed x_min)
+             if c < a or d < b:
+                 return "xywh"
+
+             # xywh: right/bottom edge = origin + size; exceeding image → wrong format
+             if a + c > img_w * 1.05:
+                 exceeds_if_xywh += 1
+             if b + d > img_h * 1.05:
+                 exceeds_if_xywh += 1
+             # xyxy: right/bottom edge = coordinate itself
+             if c > img_w * 1.05:
+                 exceeds_if_xyxy += 1
+             if d > img_h * 1.05:
+                 exceeds_if_xyxy += 1
+
+     if total == 0:
+         return "xywh"
+
+     fmt = "xyxy" if exceeds_if_xywh > exceeds_if_xyxy else "xywh"
+     logger.info(
+         f"Detected bbox format: {fmt} (checked {total} bboxes from {min(num_samples, len(dataset))} images)"
+     )
+     return fmt
+
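+ # Worked example of the voting heuristic (illustrative): on a 50x50 image, the
+ # bbox [30, 40, 45, 48] stays in bounds read as xyxy, but read as xywh its right
+ # edge would be 30 + 45 = 75 > 50 * 1.05, so such samples vote for "xyxy".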
141
+
+ def sanitize_dataset(dataset, bbox_format="xywh", image_col="image", objects_col="objects"):
+     """
+     Validate bboxes, convert xyxy→xywh if needed, clip to image bounds, and remove
+     entries with non-finite values, non-positive dimensions, or degenerate area (<1 px).
+     Drops images with no remaining valid bboxes.
+     """
+     convert_xyxy = bbox_format == "xyxy"
+
+     def _validate(example):
+         img_w, img_h = example[image_col].size
+         objects = example[objects_col]
+         bboxes = objects["bbox"]
+         n = len(bboxes)
+
+         valid_indices = []
+         converted_bboxes = []
+
+         for i, bbox in enumerate(bboxes):
+             if len(bbox) != 4:
+                 continue
+             vals = [float(v) for v in bbox]
+             if not all(math.isfinite(v) for v in vals):
+                 continue
+
+             if convert_xyxy:
+                 x_min, y_min, x_max, y_max = vals
+                 w, h = x_max - x_min, y_max - y_min
+             else:
+                 x_min, y_min, w, h = vals
+
+             if w <= 0 or h <= 0:
+                 continue
+
+             x_min, y_min = max(0.0, x_min), max(0.0, y_min)
+             if x_min >= img_w or y_min >= img_h:
+                 continue
+             w = min(w, img_w - x_min)
+             h = min(h, img_h - y_min)
+
+             if w * h < 1.0:
+                 continue
+
+             valid_indices.append(i)
+             converted_bboxes.append([x_min, y_min, w, h])
+
+         # Rebuild objects dict, filtering all list-valued fields by valid_indices
+         new_objects = {}
+         for key, value in objects.items():
+             if key == "bbox":
+                 new_objects["bbox"] = converted_bboxes
+             elif isinstance(value, list) and len(value) == n:
+                 new_objects[key] = [value[j] for j in valid_indices]
+             else:
+                 new_objects[key] = value
+
+         if "area" not in new_objects or len(new_objects.get("area", [])) != len(converted_bboxes):
+             new_objects["area"] = [b[2] * b[3] for b in converted_bboxes]
+
+         example[objects_col] = new_objects
+         return example
+
+     before = len(dataset)
+     dataset = dataset.map(_validate)
+     dataset = dataset.filter(lambda ex: len(ex[objects_col]["bbox"]) > 0)
+     after = len(dataset)
+     if before != after:
+         logger.warning(f"Dropped {before - after}/{before} images with no valid bboxes after sanitization")
+     logger.info(f"Bbox sanitization complete: {after} images with valid bboxes remain")
+     return dataset
+
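+ # Illustrative trace of _validate (assuming a 100x80 image and bbox_format="xyxy"):
+ # [10, 10, 120, 90] converts to xywh [10, 10, 110, 80], is clipped to the image
+ # bounds to [10, 10, 90, 70], and its area is recomputed as 90 * 70 = 6300.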
212
+
+ def convert_bbox_yolo_to_pascal(boxes: torch.Tensor, image_size: tuple[int, int]) -> torch.Tensor:
+     """
+     Convert bounding boxes from YOLO format (x_center, y_center, width, height) in range [0, 1]
+     to Pascal VOC format (x_min, y_min, x_max, y_max) in absolute coordinates.
+
+     Args:
+         boxes (torch.Tensor): Bounding boxes in YOLO format
+         image_size (tuple[int, int]): Image size in format (height, width)
+
+     Returns:
+         torch.Tensor: Bounding boxes in Pascal VOC format (x_min, y_min, x_max, y_max)
+     """
+     # convert center to corners format
+     boxes = center_to_corners_format(boxes)
+
+     # scale normalized coordinates up to absolute pixel coordinates
+     if isinstance(image_size, (torch.Tensor, np.ndarray)):
+         image_size = image_size.tolist()
+     height, width = image_size
+     boxes = boxes * torch.tensor([[width, height, width, height]])
+
+     return boxes
+
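+ # Quick sanity check (illustrative): for a 100x100 image, the YOLO box
+ # (0.5, 0.5, 0.2, 0.2) becomes corners (0.4, 0.4, 0.6, 0.6), then scales up:
+ #   convert_bbox_yolo_to_pascal(torch.tensor([[0.5, 0.5, 0.2, 0.2]]), (100, 100))
+ #   -> tensor([[40., 40., 60., 60.]])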
238
+
+ def augment_and_transform_batch(
+     examples: Mapping[str, Any],
+     transform: A.Compose,
+     image_processor: AutoImageProcessor,
+     return_pixel_mask: bool = False,
+ ) -> BatchFeature:
+     """Apply augmentations and format annotations in COCO format for the object detection task"""
+
+     images = []
+     annotations = []
+     image_ids = examples["image_id"] if "image_id" in examples else range(len(examples["image"]))
+     for image_id, image, objects in zip(image_ids, examples["image"], examples["objects"]):
+         image = np.array(image.convert("RGB"))
+
+         # Filter invalid bboxes before augmentation (safety net after sanitize_dataset)
+         bboxes = objects["bbox"]
+         categories = objects["category"]
+         areas = objects["area"]
+         valid = [
+             (b, c, a)
+             for b, c, a in zip(bboxes, categories, areas)
+             if len(b) == 4 and b[2] > 0 and b[3] > 0 and b[0] >= 0 and b[1] >= 0
+         ]
+         if valid:
+             bboxes, categories, areas = zip(*valid)
+         else:
+             bboxes, categories, areas = [], [], []
+
+         # apply augmentations
+         output = transform(image=image, bboxes=list(bboxes), category=list(categories))
+         images.append(output["image"])
+
+         # format annotations in COCO format (recompute areas from post-augmentation bboxes)
+         post_areas = [b[2] * b[3] for b in output["bboxes"]] if output["bboxes"] else []
+         formatted_annotations = format_image_annotations_as_coco(
+             image_id, output["category"], post_areas, output["bboxes"]
+         )
+         annotations.append(formatted_annotations)
+
+     # Apply the image processor transformations: resizing, rescaling, normalization
+     result = image_processor(images=images, annotations=annotations, return_tensors="pt")
+
+     if not return_pixel_mask:
+         result.pop("pixel_mask", None)
+
+     return result
+
+
+ def collate_fn(batch: list[BatchFeature]) -> Mapping[str, torch.Tensor | list[Any]]:
+     data = {}
+     data["pixel_values"] = torch.stack([x["pixel_values"] for x in batch])
+     data["labels"] = [x["labels"] for x in batch]
+     if "pixel_mask" in batch[0]:
+         data["pixel_mask"] = torch.stack([x["pixel_mask"] for x in batch])
+     return data
+
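+ # Shape note (illustrative, assuming the 600x600 pad_size configured in main):
+ # for a batch of 4 examples, collate_fn yields pixel_values of shape
+ # (4, 3, 600, 600) and a list of 4 per-image label dicts, which is the layout
+ # DETR-style detection models expect from the Trainer.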
295
+
+ @torch.no_grad()
+ def compute_metrics(
+     evaluation_results: EvalPrediction,
+     image_processor: AutoImageProcessor,
+     threshold: float = 0.0,
+     id2label: Mapping[int, str] | None = None,
+ ) -> Mapping[str, float]:
+     """
+     Compute mean average precision (mAP), mean average recall (mAR) and their variants for the object detection task.
+
+     Args:
+         evaluation_results (EvalPrediction): Predictions and targets from evaluation.
+         image_processor (AutoImageProcessor): Processor used to post-process model outputs.
+         threshold (float, optional): Threshold to filter predicted boxes by confidence. Defaults to 0.0.
+         id2label (Mapping[int, str], optional): Mapping from class id to class name. Defaults to None.
+
+     Returns:
+         Mapping[str, float]: Metrics in a form of dictionary {<metric_name>: <metric_value>}
+     """
+
+     predictions, targets = evaluation_results.predictions, evaluation_results.label_ids
+
+     # For metric computation we need to provide:
+     #  - targets in a form of list of dictionaries with keys "boxes", "labels"
+     #  - predictions in a form of list of dictionaries with keys "boxes", "scores", "labels"
+
+     image_sizes = []
+     post_processed_targets = []
+     post_processed_predictions = []
+
+     # Collect targets in the required format for metric computation
+     for batch in targets:
+         # collect image sizes, we will need them for predictions post-processing
+         batch_image_sizes = torch.tensor([x["orig_size"] for x in batch])
+         image_sizes.append(batch_image_sizes)
+         # collect targets in the required format for metric computation
+         # boxes were converted to YOLO format needed for model training
+         # here we will convert them to Pascal VOC format (x_min, y_min, x_max, y_max)
+         for image_target in batch:
+             boxes = torch.tensor(image_target["boxes"])
+             boxes = convert_bbox_yolo_to_pascal(boxes, image_target["orig_size"])
+             labels = torch.tensor(image_target["class_labels"])
+             post_processed_targets.append({"boxes": boxes, "labels": labels})
+
+     # Collect predictions in the required format for metric computation:
+     # the model produces boxes in YOLO format, then image_processor converts them to Pascal VOC format
+     for batch, target_sizes in zip(predictions, image_sizes):
+         batch_logits, batch_boxes = batch[1], batch[2]
+         output = ModelOutput(logits=torch.tensor(batch_logits), pred_boxes=torch.tensor(batch_boxes))
+         post_processed_output = image_processor.post_process_object_detection(
+             output, threshold=threshold, target_sizes=target_sizes
+         )
+         post_processed_predictions.extend(post_processed_output)
+
+     # Compute metrics
+     metric = MeanAveragePrecision(box_format="xyxy", class_metrics=True)
+     metric.update(post_processed_predictions, post_processed_targets)
+     metrics = metric.compute()
+
+     # Replace the list of per-class metrics with a separate metric for each class
+     classes = metrics.pop("classes")
+     map_per_class = metrics.pop("map_per_class")
+     mar_100_per_class = metrics.pop("mar_100_per_class")
+     # Single-class datasets return 0-d scalar tensors; make them iterable
+     if classes.dim() == 0:
+         classes = classes.unsqueeze(0)
+         map_per_class = map_per_class.unsqueeze(0)
+         mar_100_per_class = mar_100_per_class.unsqueeze(0)
+     for class_id, class_map, class_mar in zip(classes, map_per_class, mar_100_per_class):
+         class_name = id2label[class_id.item()] if id2label is not None else class_id.item()
+         metrics[f"map_{class_name}"] = class_map
+         metrics[f"mar_100_{class_name}"] = class_mar
+
+     metrics = {k: round(v.item(), 4) for k, v in metrics.items()}
+
+     return metrics
+
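+ # Example of the returned dict (keys come from torchmetrics plus the per-class
+ # entries added above; values and class names are illustrative only):
+ #   {"map": 0.41, "map_50": 0.62, "map_75": 0.44, "mar_100": 0.55,
+ #    "map_Coverall": 0.48, "mar_100_Coverall": 0.60, ...}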
372
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+     Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify
+     them on the command line.
+     """
+
+     dataset_name: str = field(
+         default="cppe-5",
+         metadata={
+             "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
+         },
+     )
+     dataset_config_name: str | None = field(
+         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     train_val_split: float | None = field(
+         default=0.15, metadata={"help": "Fraction of the train split to hold out for validation."}
+     )
+     image_square_size: int | None = field(
+         default=600,
+         metadata={"help": "The image's longest side is resized to this value, then the image is padded to a square."},
+     )
+     max_train_samples: int | None = field(
+         default=None,
+         metadata={
+             "help": (
+                 "For debugging purposes or quicker training, truncate the number of training examples to this "
+                 "value if set."
+             )
+         },
+     )
+     max_eval_samples: int | None = field(
+         default=None,
+         metadata={
+             "help": (
+                 "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
+                 "value if set."
+             )
+         },
+     )
+     use_fast: bool | None = field(
+         default=True,
+         metadata={"help": "Use a fast torchvision-based image processor if it is supported for a given model."},
+     )
+
420
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
+     """
+
+     model_name_or_path: str = field(
+         default="facebook/detr-resnet-50",
+         metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
+     )
+     config_name: str | None = field(
+         default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
+     )
+     cache_dir: str | None = field(
+         default=None, metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}
+     )
+     model_revision: str = field(
+         default="main",
+         metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
+     )
+     image_processor_name: str | None = field(default=None, metadata={"help": "Name or path of preprocessor config."})
+     ignore_mismatched_sizes: bool = field(
+         default=True,
+         metadata={
+             "help": "Whether to ignore size mismatches between checkpoint weights and the instantiated model's weights and load anyway (for instance, when instantiating a model with 10 labels from a checkpoint with 3 labels)."
+         },
+     )
+     token: str | None = field(
+         default=None,
+         metadata={
+             "help": (
+                 "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
+                 "generated when running `hf auth login` (stored in `~/.huggingface`)."
+             )
+         },
+     )
+     trust_remote_code: bool = field(
+         default=False,
+         metadata={
+             "help": (
+                 "Whether to trust the execution of code from datasets/models defined on the Hub."
+                 " This option should only be set to `True` for repositories you trust and in which you have read the"
+                 " code, as it will execute code present on the Hub on your local machine."
+             )
+         },
+     )
+
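+ # Illustrative invocation (all flags map to the dataclasses above or to
+ # TrainingArguments; checkpoint, dataset, and output path are just examples):
+ #   uv run object_detection_training.py \
+ #       --model_name_or_path facebook/detr-resnet-50 \
+ #       --dataset_name cppe-5 \
+ #       --output_dir detr-finetuned-cppe5 \
+ #       --do_train --do_eval \
+ #       --num_train_epochs 10 \
+ #       --per_device_train_batch_size 8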
468
+
+ def main():
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # Allow passing all arguments as a single JSON file instead of command-line flags
+         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     from huggingface_hub import login
+     hf_token = os.environ.get("HF_TOKEN") or os.environ.get("hfjob")
+     if hf_token:
+         login(token=hf_token)
+         training_args.hub_token = hf_token
+         logger.info("Logged in to Hugging Face Hub")
+     elif training_args.push_to_hub:
+         logger.warning("HF_TOKEN not found in environment. Hub push will likely fail.")
+
+     # Initialize Trackio for real-time experiment tracking
+     trackio.init(project=training_args.output_dir, name=training_args.run_name)
+
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         handlers=[logging.StreamHandler(sys.stdout)],
+     )
+
+     if training_args.should_log:
+         # The default of training_args.log_level is passive, so we set log level at info here to have that default.
+         transformers.utils.logging.set_verbosity_info()
+
+     log_level = training_args.get_process_log_level()
+     logger.setLevel(log_level)
+     transformers.utils.logging.set_verbosity(log_level)
+     transformers.utils.logging.enable_default_handler()
+     transformers.utils.logging.enable_explicit_format()
+
+     # Log on each process the small summary:
+     logger.warning(
+         f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+         + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
+     )
+     logger.info(f"Training/evaluation parameters {training_args}")
+
+     dataset = load_dataset(
+         data_args.dataset_name,
+         data_args.dataset_config_name,
+         cache_dir=model_args.cache_dir,
+         trust_remote_code=model_args.trust_remote_code,
+     )
+
+     bbox_format = detect_bbox_format_from_samples(dataset["train"])
+     if bbox_format == "xyxy":
+         logger.info("Converting bboxes from xyxy (Pascal VOC) → xywh (COCO) format across all splits")
+     for split_name in list(dataset.keys()):
+         dataset[split_name] = sanitize_dataset(dataset[split_name], bbox_format=bbox_format)
+
+     for split_name in list(dataset.keys()):
+         if "image_id" not in dataset[split_name].column_names:
+             dataset[split_name] = dataset[split_name].add_column(
+                 "image_id", list(range(len(dataset[split_name])))
+             )
+
+     dataset["train"] = dataset["train"].shuffle(seed=training_args.seed)
+
+     data_args.train_val_split = None if "validation" in dataset else data_args.train_val_split
+     if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
+         split = dataset["train"].train_test_split(data_args.train_val_split, seed=training_args.seed)
+         dataset["train"] = split["train"]
+         dataset["validation"] = split["test"]
+
+     categories = None
+     try:
+         if isinstance(dataset["train"].features["objects"], dict):
+             cat_feature = dataset["train"].features["objects"]["category"].feature
+         else:
+             cat_feature = dataset["train"].features["objects"].feature["category"]
+
+         if hasattr(cat_feature, "names"):
+             categories = cat_feature.names
+     except (AttributeError, KeyError):
+         pass
+
+     if categories is None:
+         # Category is a Value type (not ClassLabel) — scan the dataset to discover labels
+         logger.info("Category feature is not ClassLabel — scanning dataset to discover category labels...")
+         unique_cats = set()
+         for example in dataset["train"]:
+             cats = example["objects"]["category"]
+             if isinstance(cats, list):
+                 unique_cats.update(cats)
+             else:
+                 unique_cats.add(cats)
+
+         if all(isinstance(c, int) for c in unique_cats):
+             max_cat = max(unique_cats)
+             categories = [f"class_{i}" for i in range(max_cat + 1)]
+         elif all(isinstance(c, str) for c in unique_cats):
+             categories = sorted(unique_cats)
+         else:
+             categories = [str(c) for c in sorted(unique_cats, key=str)]
+         logger.info(f"Discovered {len(categories)} categories: {categories}")
+
+     id2label = dict(enumerate(categories))
+     label2id = {v: k for k, v in id2label.items()}
+
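+     # For example (illustrative), categories ["Coverall", "Mask"] yield
+     # id2label {0: "Coverall", 1: "Mask"} and label2id {"Coverall": 0, "Mask": 1};
+     # both are written into the model config below so the fine-tuned checkpoint
+     # can decode predictions back to class names.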
572
+     # Remap string categories to integer IDs if needed
+     sample_cats = dataset["train"][0]["objects"]["category"]
+     if sample_cats and isinstance(sample_cats[0], str):
+         logger.info(f"Remapping string categories to integer IDs: {label2id}")
+
+         def _remap_categories(example):
+             objects = example["objects"]
+             objects["category"] = [label2id[c] for c in objects["category"]]
+             example["objects"] = objects
+             return example
+
+         for split_name in list(dataset.keys()):
+             dataset[split_name] = dataset[split_name].map(_remap_categories)
+         logger.info("Category remapping complete")
+
+     if data_args.max_train_samples is not None:
+         max_train = min(data_args.max_train_samples, len(dataset["train"]))
+         dataset["train"] = dataset["train"].select(range(max_train))
+         logger.info(f"Truncated training set to {max_train} samples")
+     if data_args.max_eval_samples is not None and "validation" in dataset:
+         max_eval = min(data_args.max_eval_samples, len(dataset["validation"]))
+         dataset["validation"] = dataset["validation"].select(range(max_eval))
+         logger.info(f"Truncated validation set to {max_eval} samples")
+
+     common_pretrained_args = {
+         "cache_dir": model_args.cache_dir,
+         "revision": model_args.model_revision,
+         "token": model_args.token,
+         "trust_remote_code": model_args.trust_remote_code,
+     }
+     config = AutoConfig.from_pretrained(
+         model_args.config_name or model_args.model_name_or_path,
+         label2id=label2id,
+         id2label=id2label,
+         **common_pretrained_args,
+     )
+     model = AutoModelForObjectDetection.from_pretrained(
+         model_args.model_name_or_path,
+         config=config,
+         ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
+         **common_pretrained_args,
+     )
+     image_processor = AutoImageProcessor.from_pretrained(
+         model_args.image_processor_name or model_args.model_name_or_path,
+         do_resize=True,
+         size={"max_height": data_args.image_square_size, "max_width": data_args.image_square_size},
+         do_pad=True,
+         pad_size={"height": data_args.image_square_size, "width": data_args.image_square_size},
+         use_fast=data_args.use_fast,
+         **common_pretrained_args,
+     )
+
+     max_size = data_args.image_square_size
+     train_augment_and_transform = A.Compose(
+         [
+             A.Compose(
+                 [
+                     A.SmallestMaxSize(max_size=max_size, p=1.0),
+                     A.RandomSizedBBoxSafeCrop(height=max_size, width=max_size, p=1.0),
+                 ],
+                 p=0.2,
+             ),
+             A.OneOf(
+                 [
+                     A.Blur(blur_limit=7, p=0.5),
+                     A.MotionBlur(blur_limit=7, p=0.5),
+                     A.Defocus(radius=(1, 5), alias_blur=(0.1, 0.25), p=0.1),
+                 ],
+                 p=0.1,
+             ),
+             A.Perspective(p=0.1),
+             A.HorizontalFlip(p=0.5),
+             A.RandomBrightnessContrast(p=0.5),
+             A.HueSaturationValue(p=0.1),
+         ],
+         bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True, min_area=25),
+     )
+     validation_transform = A.Compose(
+         [A.NoOp()],
+         bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True),
+     )
+
+     train_transform_batch = partial(
+         augment_and_transform_batch, transform=train_augment_and_transform, image_processor=image_processor
+     )
+     validation_transform_batch = partial(
+         augment_and_transform_batch, transform=validation_transform, image_processor=image_processor
+     )
+
+     dataset["train"] = dataset["train"].with_transform(train_transform_batch)
+     dataset["validation"] = dataset["validation"].with_transform(validation_transform_batch)
+     if "test" in dataset:
+         dataset["test"] = dataset["test"].with_transform(validation_transform_batch)
+
+     eval_compute_metrics_fn = partial(
+         compute_metrics, image_processor=image_processor, id2label=id2label, threshold=0.0
+     )
+
+     trainer = Trainer(
+         model=model,
+         args=training_args,
+         train_dataset=dataset["train"] if training_args.do_train else None,
+         eval_dataset=dataset["validation"] if training_args.do_eval else None,
+         processing_class=image_processor,
+         data_collator=collate_fn,
+         compute_metrics=eval_compute_metrics_fn,
+     )
+
+     # Training
+     if training_args.do_train:
+         train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
+         trainer.save_model()
+         trainer.log_metrics("train", train_result.metrics)
+         trainer.save_metrics("train", train_result.metrics)
+         trainer.save_state()
+
+     # Final evaluation (prefer the test split when one exists)
+     if training_args.do_eval:
+         test_dataset = dataset["test"] if "test" in dataset else dataset["validation"]
+         test_prefix = "test" if "test" in dataset else "eval"
+         metrics = trainer.evaluate(eval_dataset=test_dataset, metric_key_prefix=test_prefix)
+         trainer.log_metrics(test_prefix, metrics)
+         trainer.save_metrics(test_prefix, metrics)
+
+     trackio.finish()
+
+     # Write a model card (and optionally push everything to the Hub)
+     kwargs = {
+         "finetuned_from": model_args.model_name_or_path,
+         "dataset": data_args.dataset_name,
+         "tags": ["object-detection", "vision"],
+     }
+     if training_args.push_to_hub:
+         trainer.push_to_hub(**kwargs)
+     else:
+         trainer.create_model_card(**kwargs)
+
+
+ if __name__ == "__main__":
+     main()