@synsci/cli-darwin-x64 1.1.49

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (373) hide show
  1. package/bin/skills/accelerate/SKILL.md +332 -0
  2. package/bin/skills/accelerate/references/custom-plugins.md +453 -0
  3. package/bin/skills/accelerate/references/megatron-integration.md +489 -0
  4. package/bin/skills/accelerate/references/performance.md +525 -0
  5. package/bin/skills/audiocraft/SKILL.md +564 -0
  6. package/bin/skills/audiocraft/references/advanced-usage.md +666 -0
  7. package/bin/skills/audiocraft/references/troubleshooting.md +504 -0
  8. package/bin/skills/autogpt/SKILL.md +403 -0
  9. package/bin/skills/autogpt/references/advanced-usage.md +535 -0
  10. package/bin/skills/autogpt/references/troubleshooting.md +420 -0
  11. package/bin/skills/awq/SKILL.md +310 -0
  12. package/bin/skills/awq/references/advanced-usage.md +324 -0
  13. package/bin/skills/awq/references/troubleshooting.md +344 -0
  14. package/bin/skills/axolotl/SKILL.md +158 -0
  15. package/bin/skills/axolotl/references/api.md +5548 -0
  16. package/bin/skills/axolotl/references/dataset-formats.md +1029 -0
  17. package/bin/skills/axolotl/references/index.md +15 -0
  18. package/bin/skills/axolotl/references/other.md +3563 -0
  19. package/bin/skills/bigcode-evaluation-harness/SKILL.md +405 -0
  20. package/bin/skills/bigcode-evaluation-harness/references/benchmarks.md +393 -0
  21. package/bin/skills/bigcode-evaluation-harness/references/custom-tasks.md +424 -0
  22. package/bin/skills/bigcode-evaluation-harness/references/issues.md +394 -0
  23. package/bin/skills/bitsandbytes/SKILL.md +411 -0
  24. package/bin/skills/bitsandbytes/references/memory-optimization.md +521 -0
  25. package/bin/skills/bitsandbytes/references/qlora-training.md +521 -0
  26. package/bin/skills/bitsandbytes/references/quantization-formats.md +447 -0
  27. package/bin/skills/blip-2/SKILL.md +564 -0
  28. package/bin/skills/blip-2/references/advanced-usage.md +680 -0
  29. package/bin/skills/blip-2/references/troubleshooting.md +526 -0
  30. package/bin/skills/chroma/SKILL.md +406 -0
  31. package/bin/skills/chroma/references/integration.md +38 -0
  32. package/bin/skills/clip/SKILL.md +253 -0
  33. package/bin/skills/clip/references/applications.md +207 -0
  34. package/bin/skills/constitutional-ai/SKILL.md +290 -0
  35. package/bin/skills/crewai/SKILL.md +498 -0
  36. package/bin/skills/crewai/references/flows.md +438 -0
  37. package/bin/skills/crewai/references/tools.md +429 -0
  38. package/bin/skills/crewai/references/troubleshooting.md +480 -0
  39. package/bin/skills/deepspeed/SKILL.md +141 -0
  40. package/bin/skills/deepspeed/references/08.md +17 -0
  41. package/bin/skills/deepspeed/references/09.md +173 -0
  42. package/bin/skills/deepspeed/references/2020.md +378 -0
  43. package/bin/skills/deepspeed/references/2023.md +279 -0
  44. package/bin/skills/deepspeed/references/assets.md +179 -0
  45. package/bin/skills/deepspeed/references/index.md +35 -0
  46. package/bin/skills/deepspeed/references/mii.md +118 -0
  47. package/bin/skills/deepspeed/references/other.md +1191 -0
  48. package/bin/skills/deepspeed/references/tutorials.md +6554 -0
  49. package/bin/skills/dspy/SKILL.md +590 -0
  50. package/bin/skills/dspy/references/examples.md +663 -0
  51. package/bin/skills/dspy/references/modules.md +475 -0
  52. package/bin/skills/dspy/references/optimizers.md +566 -0
  53. package/bin/skills/faiss/SKILL.md +221 -0
  54. package/bin/skills/faiss/references/index_types.md +280 -0
  55. package/bin/skills/flash-attention/SKILL.md +367 -0
  56. package/bin/skills/flash-attention/references/benchmarks.md +215 -0
  57. package/bin/skills/flash-attention/references/transformers-integration.md +293 -0
  58. package/bin/skills/gguf/SKILL.md +427 -0
  59. package/bin/skills/gguf/references/advanced-usage.md +504 -0
  60. package/bin/skills/gguf/references/troubleshooting.md +442 -0
  61. package/bin/skills/gptq/SKILL.md +450 -0
  62. package/bin/skills/gptq/references/calibration.md +337 -0
  63. package/bin/skills/gptq/references/integration.md +129 -0
  64. package/bin/skills/gptq/references/troubleshooting.md +95 -0
  65. package/bin/skills/grpo-rl-training/README.md +97 -0
  66. package/bin/skills/grpo-rl-training/SKILL.md +572 -0
  67. package/bin/skills/grpo-rl-training/examples/reward_functions_library.py +393 -0
  68. package/bin/skills/grpo-rl-training/templates/basic_grpo_training.py +228 -0
  69. package/bin/skills/guidance/SKILL.md +572 -0
  70. package/bin/skills/guidance/references/backends.md +554 -0
  71. package/bin/skills/guidance/references/constraints.md +674 -0
  72. package/bin/skills/guidance/references/examples.md +767 -0
  73. package/bin/skills/hqq/SKILL.md +445 -0
  74. package/bin/skills/hqq/references/advanced-usage.md +528 -0
  75. package/bin/skills/hqq/references/troubleshooting.md +503 -0
  76. package/bin/skills/hugging-face-cli/SKILL.md +191 -0
  77. package/bin/skills/hugging-face-cli/references/commands.md +954 -0
  78. package/bin/skills/hugging-face-cli/references/examples.md +374 -0
  79. package/bin/skills/hugging-face-datasets/SKILL.md +547 -0
  80. package/bin/skills/hugging-face-datasets/examples/diverse_training_examples.json +239 -0
  81. package/bin/skills/hugging-face-datasets/examples/system_prompt_template.txt +196 -0
  82. package/bin/skills/hugging-face-datasets/examples/training_examples.json +176 -0
  83. package/bin/skills/hugging-face-datasets/scripts/dataset_manager.py +522 -0
  84. package/bin/skills/hugging-face-datasets/scripts/sql_manager.py +844 -0
  85. package/bin/skills/hugging-face-datasets/templates/chat.json +55 -0
  86. package/bin/skills/hugging-face-datasets/templates/classification.json +62 -0
  87. package/bin/skills/hugging-face-datasets/templates/completion.json +51 -0
  88. package/bin/skills/hugging-face-datasets/templates/custom.json +75 -0
  89. package/bin/skills/hugging-face-datasets/templates/qa.json +54 -0
  90. package/bin/skills/hugging-face-datasets/templates/tabular.json +81 -0
  91. package/bin/skills/hugging-face-evaluation/SKILL.md +656 -0
  92. package/bin/skills/hugging-face-evaluation/examples/USAGE_EXAMPLES.md +382 -0
  93. package/bin/skills/hugging-face-evaluation/examples/artificial_analysis_to_hub.py +141 -0
  94. package/bin/skills/hugging-face-evaluation/examples/example_readme_tables.md +135 -0
  95. package/bin/skills/hugging-face-evaluation/examples/metric_mapping.json +50 -0
  96. package/bin/skills/hugging-face-evaluation/requirements.txt +20 -0
  97. package/bin/skills/hugging-face-evaluation/scripts/evaluation_manager.py +1374 -0
  98. package/bin/skills/hugging-face-evaluation/scripts/inspect_eval_uv.py +104 -0
  99. package/bin/skills/hugging-face-evaluation/scripts/inspect_vllm_uv.py +317 -0
  100. package/bin/skills/hugging-face-evaluation/scripts/lighteval_vllm_uv.py +303 -0
  101. package/bin/skills/hugging-face-evaluation/scripts/run_eval_job.py +98 -0
  102. package/bin/skills/hugging-face-evaluation/scripts/run_vllm_eval_job.py +331 -0
  103. package/bin/skills/hugging-face-evaluation/scripts/test_extraction.py +206 -0
  104. package/bin/skills/hugging-face-jobs/SKILL.md +1041 -0
  105. package/bin/skills/hugging-face-jobs/index.html +216 -0
  106. package/bin/skills/hugging-face-jobs/references/hardware_guide.md +336 -0
  107. package/bin/skills/hugging-face-jobs/references/hub_saving.md +352 -0
  108. package/bin/skills/hugging-face-jobs/references/token_usage.md +546 -0
  109. package/bin/skills/hugging-face-jobs/references/troubleshooting.md +475 -0
  110. package/bin/skills/hugging-face-jobs/scripts/cot-self-instruct.py +718 -0
  111. package/bin/skills/hugging-face-jobs/scripts/finepdfs-stats.py +546 -0
  112. package/bin/skills/hugging-face-jobs/scripts/generate-responses.py +587 -0
  113. package/bin/skills/hugging-face-model-trainer/SKILL.md +711 -0
  114. package/bin/skills/hugging-face-model-trainer/references/gguf_conversion.md +296 -0
  115. package/bin/skills/hugging-face-model-trainer/references/hardware_guide.md +283 -0
  116. package/bin/skills/hugging-face-model-trainer/references/hub_saving.md +364 -0
  117. package/bin/skills/hugging-face-model-trainer/references/reliability_principles.md +371 -0
  118. package/bin/skills/hugging-face-model-trainer/references/trackio_guide.md +189 -0
  119. package/bin/skills/hugging-face-model-trainer/references/training_methods.md +150 -0
  120. package/bin/skills/hugging-face-model-trainer/references/training_patterns.md +203 -0
  121. package/bin/skills/hugging-face-model-trainer/references/troubleshooting.md +282 -0
  122. package/bin/skills/hugging-face-model-trainer/scripts/convert_to_gguf.py +424 -0
  123. package/bin/skills/hugging-face-model-trainer/scripts/dataset_inspector.py +417 -0
  124. package/bin/skills/hugging-face-model-trainer/scripts/estimate_cost.py +150 -0
  125. package/bin/skills/hugging-face-model-trainer/scripts/train_dpo_example.py +106 -0
  126. package/bin/skills/hugging-face-model-trainer/scripts/train_grpo_example.py +89 -0
  127. package/bin/skills/hugging-face-model-trainer/scripts/train_sft_example.py +122 -0
  128. package/bin/skills/hugging-face-paper-publisher/SKILL.md +627 -0
  129. package/bin/skills/hugging-face-paper-publisher/examples/example_usage.md +327 -0
  130. package/bin/skills/hugging-face-paper-publisher/references/quick_reference.md +216 -0
  131. package/bin/skills/hugging-face-paper-publisher/scripts/paper_manager.py +508 -0
  132. package/bin/skills/hugging-face-paper-publisher/templates/arxiv.md +299 -0
  133. package/bin/skills/hugging-face-paper-publisher/templates/ml-report.md +358 -0
  134. package/bin/skills/hugging-face-paper-publisher/templates/modern.md +319 -0
  135. package/bin/skills/hugging-face-paper-publisher/templates/standard.md +201 -0
  136. package/bin/skills/hugging-face-tool-builder/SKILL.md +115 -0
  137. package/bin/skills/hugging-face-tool-builder/references/baseline_hf_api.py +57 -0
  138. package/bin/skills/hugging-face-tool-builder/references/baseline_hf_api.sh +40 -0
  139. package/bin/skills/hugging-face-tool-builder/references/baseline_hf_api.tsx +57 -0
  140. package/bin/skills/hugging-face-tool-builder/references/find_models_by_paper.sh +230 -0
  141. package/bin/skills/hugging-face-tool-builder/references/hf_enrich_models.sh +96 -0
  142. package/bin/skills/hugging-face-tool-builder/references/hf_model_card_frontmatter.sh +188 -0
  143. package/bin/skills/hugging-face-tool-builder/references/hf_model_papers_auth.sh +171 -0
  144. package/bin/skills/hugging-face-trackio/SKILL.md +65 -0
  145. package/bin/skills/hugging-face-trackio/references/logging_metrics.md +206 -0
  146. package/bin/skills/hugging-face-trackio/references/retrieving_metrics.md +223 -0
  147. package/bin/skills/huggingface-tokenizers/SKILL.md +516 -0
  148. package/bin/skills/huggingface-tokenizers/references/algorithms.md +653 -0
  149. package/bin/skills/huggingface-tokenizers/references/integration.md +637 -0
  150. package/bin/skills/huggingface-tokenizers/references/pipeline.md +723 -0
  151. package/bin/skills/huggingface-tokenizers/references/training.md +565 -0
  152. package/bin/skills/instructor/SKILL.md +740 -0
  153. package/bin/skills/instructor/references/examples.md +107 -0
  154. package/bin/skills/instructor/references/providers.md +70 -0
  155. package/bin/skills/instructor/references/validation.md +606 -0
  156. package/bin/skills/knowledge-distillation/SKILL.md +458 -0
  157. package/bin/skills/knowledge-distillation/references/minillm.md +334 -0
  158. package/bin/skills/lambda-labs/SKILL.md +545 -0
  159. package/bin/skills/lambda-labs/references/advanced-usage.md +611 -0
  160. package/bin/skills/lambda-labs/references/troubleshooting.md +530 -0
  161. package/bin/skills/langchain/SKILL.md +480 -0
  162. package/bin/skills/langchain/references/agents.md +499 -0
  163. package/bin/skills/langchain/references/integration.md +562 -0
  164. package/bin/skills/langchain/references/rag.md +600 -0
  165. package/bin/skills/langsmith/SKILL.md +422 -0
  166. package/bin/skills/langsmith/references/advanced-usage.md +548 -0
  167. package/bin/skills/langsmith/references/troubleshooting.md +537 -0
  168. package/bin/skills/litgpt/SKILL.md +469 -0
  169. package/bin/skills/litgpt/references/custom-models.md +568 -0
  170. package/bin/skills/litgpt/references/distributed-training.md +451 -0
  171. package/bin/skills/litgpt/references/supported-models.md +336 -0
  172. package/bin/skills/litgpt/references/training-recipes.md +619 -0
  173. package/bin/skills/llama-cpp/SKILL.md +258 -0
  174. package/bin/skills/llama-cpp/references/optimization.md +89 -0
  175. package/bin/skills/llama-cpp/references/quantization.md +213 -0
  176. package/bin/skills/llama-cpp/references/server.md +125 -0
  177. package/bin/skills/llama-factory/SKILL.md +80 -0
  178. package/bin/skills/llama-factory/references/_images.md +23 -0
  179. package/bin/skills/llama-factory/references/advanced.md +1055 -0
  180. package/bin/skills/llama-factory/references/getting_started.md +349 -0
  181. package/bin/skills/llama-factory/references/index.md +19 -0
  182. package/bin/skills/llama-factory/references/other.md +31 -0
  183. package/bin/skills/llamaguard/SKILL.md +337 -0
  184. package/bin/skills/llamaindex/SKILL.md +569 -0
  185. package/bin/skills/llamaindex/references/agents.md +83 -0
  186. package/bin/skills/llamaindex/references/data_connectors.md +108 -0
  187. package/bin/skills/llamaindex/references/query_engines.md +406 -0
  188. package/bin/skills/llava/SKILL.md +304 -0
  189. package/bin/skills/llava/references/training.md +197 -0
  190. package/bin/skills/lm-evaluation-harness/SKILL.md +490 -0
  191. package/bin/skills/lm-evaluation-harness/references/api-evaluation.md +490 -0
  192. package/bin/skills/lm-evaluation-harness/references/benchmark-guide.md +488 -0
  193. package/bin/skills/lm-evaluation-harness/references/custom-tasks.md +602 -0
  194. package/bin/skills/lm-evaluation-harness/references/distributed-eval.md +519 -0
  195. package/bin/skills/long-context/SKILL.md +536 -0
  196. package/bin/skills/long-context/references/extension_methods.md +468 -0
  197. package/bin/skills/long-context/references/fine_tuning.md +611 -0
  198. package/bin/skills/long-context/references/rope.md +402 -0
  199. package/bin/skills/mamba/SKILL.md +260 -0
  200. package/bin/skills/mamba/references/architecture-details.md +206 -0
  201. package/bin/skills/mamba/references/benchmarks.md +255 -0
  202. package/bin/skills/mamba/references/training-guide.md +388 -0
  203. package/bin/skills/megatron-core/SKILL.md +366 -0
  204. package/bin/skills/megatron-core/references/benchmarks.md +249 -0
  205. package/bin/skills/megatron-core/references/parallelism-guide.md +404 -0
  206. package/bin/skills/megatron-core/references/production-examples.md +473 -0
  207. package/bin/skills/megatron-core/references/training-recipes.md +547 -0
  208. package/bin/skills/miles/SKILL.md +315 -0
  209. package/bin/skills/miles/references/api-reference.md +141 -0
  210. package/bin/skills/miles/references/troubleshooting.md +352 -0
  211. package/bin/skills/mlflow/SKILL.md +704 -0
  212. package/bin/skills/mlflow/references/deployment.md +744 -0
  213. package/bin/skills/mlflow/references/model-registry.md +770 -0
  214. package/bin/skills/mlflow/references/tracking.md +680 -0
  215. package/bin/skills/modal/SKILL.md +341 -0
  216. package/bin/skills/modal/references/advanced-usage.md +503 -0
  217. package/bin/skills/modal/references/troubleshooting.md +494 -0
  218. package/bin/skills/model-merging/SKILL.md +539 -0
  219. package/bin/skills/model-merging/references/evaluation.md +462 -0
  220. package/bin/skills/model-merging/references/examples.md +428 -0
  221. package/bin/skills/model-merging/references/methods.md +352 -0
  222. package/bin/skills/model-pruning/SKILL.md +495 -0
  223. package/bin/skills/model-pruning/references/wanda.md +347 -0
  224. package/bin/skills/moe-training/SKILL.md +526 -0
  225. package/bin/skills/moe-training/references/architectures.md +432 -0
  226. package/bin/skills/moe-training/references/inference.md +348 -0
  227. package/bin/skills/moe-training/references/training.md +425 -0
  228. package/bin/skills/nanogpt/SKILL.md +290 -0
  229. package/bin/skills/nanogpt/references/architecture.md +382 -0
  230. package/bin/skills/nanogpt/references/data.md +476 -0
  231. package/bin/skills/nanogpt/references/training.md +564 -0
  232. package/bin/skills/nemo-curator/SKILL.md +383 -0
  233. package/bin/skills/nemo-curator/references/deduplication.md +87 -0
  234. package/bin/skills/nemo-curator/references/filtering.md +102 -0
  235. package/bin/skills/nemo-evaluator/SKILL.md +494 -0
  236. package/bin/skills/nemo-evaluator/references/adapter-system.md +340 -0
  237. package/bin/skills/nemo-evaluator/references/configuration.md +447 -0
  238. package/bin/skills/nemo-evaluator/references/custom-benchmarks.md +315 -0
  239. package/bin/skills/nemo-evaluator/references/execution-backends.md +361 -0
  240. package/bin/skills/nemo-guardrails/SKILL.md +297 -0
  241. package/bin/skills/nnsight/SKILL.md +436 -0
  242. package/bin/skills/nnsight/references/README.md +78 -0
  243. package/bin/skills/nnsight/references/api.md +344 -0
  244. package/bin/skills/nnsight/references/tutorials.md +300 -0
  245. package/bin/skills/openrlhf/SKILL.md +249 -0
  246. package/bin/skills/openrlhf/references/algorithm-comparison.md +404 -0
  247. package/bin/skills/openrlhf/references/custom-rewards.md +530 -0
  248. package/bin/skills/openrlhf/references/hybrid-engine.md +287 -0
  249. package/bin/skills/openrlhf/references/multi-node-training.md +454 -0
  250. package/bin/skills/outlines/SKILL.md +652 -0
  251. package/bin/skills/outlines/references/backends.md +615 -0
  252. package/bin/skills/outlines/references/examples.md +773 -0
  253. package/bin/skills/outlines/references/json_generation.md +652 -0
  254. package/bin/skills/peft/SKILL.md +431 -0
  255. package/bin/skills/peft/references/advanced-usage.md +514 -0
  256. package/bin/skills/peft/references/troubleshooting.md +480 -0
  257. package/bin/skills/phoenix/SKILL.md +475 -0
  258. package/bin/skills/phoenix/references/advanced-usage.md +619 -0
  259. package/bin/skills/phoenix/references/troubleshooting.md +538 -0
  260. package/bin/skills/pinecone/SKILL.md +358 -0
  261. package/bin/skills/pinecone/references/deployment.md +181 -0
  262. package/bin/skills/pytorch-fsdp/SKILL.md +126 -0
  263. package/bin/skills/pytorch-fsdp/references/index.md +7 -0
  264. package/bin/skills/pytorch-fsdp/references/other.md +4249 -0
  265. package/bin/skills/pytorch-lightning/SKILL.md +346 -0
  266. package/bin/skills/pytorch-lightning/references/callbacks.md +436 -0
  267. package/bin/skills/pytorch-lightning/references/distributed.md +490 -0
  268. package/bin/skills/pytorch-lightning/references/hyperparameter-tuning.md +556 -0
  269. package/bin/skills/pyvene/SKILL.md +473 -0
  270. package/bin/skills/pyvene/references/README.md +73 -0
  271. package/bin/skills/pyvene/references/api.md +383 -0
  272. package/bin/skills/pyvene/references/tutorials.md +376 -0
  273. package/bin/skills/qdrant/SKILL.md +493 -0
  274. package/bin/skills/qdrant/references/advanced-usage.md +648 -0
  275. package/bin/skills/qdrant/references/troubleshooting.md +631 -0
  276. package/bin/skills/ray-data/SKILL.md +326 -0
  277. package/bin/skills/ray-data/references/integration.md +82 -0
  278. package/bin/skills/ray-data/references/transformations.md +83 -0
  279. package/bin/skills/ray-train/SKILL.md +406 -0
  280. package/bin/skills/ray-train/references/multi-node.md +628 -0
  281. package/bin/skills/rwkv/SKILL.md +260 -0
  282. package/bin/skills/rwkv/references/architecture-details.md +344 -0
  283. package/bin/skills/rwkv/references/rwkv7.md +386 -0
  284. package/bin/skills/rwkv/references/state-management.md +369 -0
  285. package/bin/skills/saelens/SKILL.md +386 -0
  286. package/bin/skills/saelens/references/README.md +70 -0
  287. package/bin/skills/saelens/references/api.md +333 -0
  288. package/bin/skills/saelens/references/tutorials.md +318 -0
  289. package/bin/skills/segment-anything/SKILL.md +500 -0
  290. package/bin/skills/segment-anything/references/advanced-usage.md +589 -0
  291. package/bin/skills/segment-anything/references/troubleshooting.md +484 -0
  292. package/bin/skills/sentence-transformers/SKILL.md +255 -0
  293. package/bin/skills/sentence-transformers/references/models.md +123 -0
  294. package/bin/skills/sentencepiece/SKILL.md +235 -0
  295. package/bin/skills/sentencepiece/references/algorithms.md +200 -0
  296. package/bin/skills/sentencepiece/references/training.md +304 -0
  297. package/bin/skills/sglang/SKILL.md +442 -0
  298. package/bin/skills/sglang/references/deployment.md +490 -0
  299. package/bin/skills/sglang/references/radix-attention.md +413 -0
  300. package/bin/skills/sglang/references/structured-generation.md +541 -0
  301. package/bin/skills/simpo/SKILL.md +219 -0
  302. package/bin/skills/simpo/references/datasets.md +478 -0
  303. package/bin/skills/simpo/references/hyperparameters.md +452 -0
  304. package/bin/skills/simpo/references/loss-functions.md +350 -0
  305. package/bin/skills/skypilot/SKILL.md +509 -0
  306. package/bin/skills/skypilot/references/advanced-usage.md +491 -0
  307. package/bin/skills/skypilot/references/troubleshooting.md +570 -0
  308. package/bin/skills/slime/SKILL.md +464 -0
  309. package/bin/skills/slime/references/api-reference.md +392 -0
  310. package/bin/skills/slime/references/troubleshooting.md +386 -0
  311. package/bin/skills/speculative-decoding/SKILL.md +467 -0
  312. package/bin/skills/speculative-decoding/references/lookahead.md +309 -0
  313. package/bin/skills/speculative-decoding/references/medusa.md +350 -0
  314. package/bin/skills/stable-diffusion/SKILL.md +519 -0
  315. package/bin/skills/stable-diffusion/references/advanced-usage.md +716 -0
  316. package/bin/skills/stable-diffusion/references/troubleshooting.md +555 -0
  317. package/bin/skills/tensorboard/SKILL.md +629 -0
  318. package/bin/skills/tensorboard/references/integrations.md +638 -0
  319. package/bin/skills/tensorboard/references/profiling.md +545 -0
  320. package/bin/skills/tensorboard/references/visualization.md +620 -0
  321. package/bin/skills/tensorrt-llm/SKILL.md +187 -0
  322. package/bin/skills/tensorrt-llm/references/multi-gpu.md +298 -0
  323. package/bin/skills/tensorrt-llm/references/optimization.md +242 -0
  324. package/bin/skills/tensorrt-llm/references/serving.md +470 -0
  325. package/bin/skills/tinker/SKILL.md +362 -0
  326. package/bin/skills/tinker/references/api-reference.md +168 -0
  327. package/bin/skills/tinker/references/getting-started.md +157 -0
  328. package/bin/skills/tinker/references/loss-functions.md +163 -0
  329. package/bin/skills/tinker/references/models-and-lora.md +139 -0
  330. package/bin/skills/tinker/references/recipes.md +280 -0
  331. package/bin/skills/tinker/references/reinforcement-learning.md +212 -0
  332. package/bin/skills/tinker/references/rendering.md +243 -0
  333. package/bin/skills/tinker/references/supervised-learning.md +232 -0
  334. package/bin/skills/tinker-training-cost/SKILL.md +187 -0
  335. package/bin/skills/tinker-training-cost/scripts/calculate_cost.py +123 -0
  336. package/bin/skills/torchforge/SKILL.md +433 -0
  337. package/bin/skills/torchforge/references/api-reference.md +327 -0
  338. package/bin/skills/torchforge/references/troubleshooting.md +409 -0
  339. package/bin/skills/torchtitan/SKILL.md +358 -0
  340. package/bin/skills/torchtitan/references/checkpoint.md +181 -0
  341. package/bin/skills/torchtitan/references/custom-models.md +258 -0
  342. package/bin/skills/torchtitan/references/float8.md +133 -0
  343. package/bin/skills/torchtitan/references/fsdp.md +126 -0
  344. package/bin/skills/transformer-lens/SKILL.md +346 -0
  345. package/bin/skills/transformer-lens/references/README.md +54 -0
  346. package/bin/skills/transformer-lens/references/api.md +362 -0
  347. package/bin/skills/transformer-lens/references/tutorials.md +339 -0
  348. package/bin/skills/trl-fine-tuning/SKILL.md +455 -0
  349. package/bin/skills/trl-fine-tuning/references/dpo-variants.md +227 -0
  350. package/bin/skills/trl-fine-tuning/references/online-rl.md +82 -0
  351. package/bin/skills/trl-fine-tuning/references/reward-modeling.md +122 -0
  352. package/bin/skills/trl-fine-tuning/references/sft-training.md +168 -0
  353. package/bin/skills/unsloth/SKILL.md +80 -0
  354. package/bin/skills/unsloth/references/index.md +7 -0
  355. package/bin/skills/unsloth/references/llms-full.md +16799 -0
  356. package/bin/skills/unsloth/references/llms-txt.md +12044 -0
  357. package/bin/skills/unsloth/references/llms.md +82 -0
  358. package/bin/skills/verl/SKILL.md +391 -0
  359. package/bin/skills/verl/references/api-reference.md +301 -0
  360. package/bin/skills/verl/references/troubleshooting.md +391 -0
  361. package/bin/skills/vllm/SKILL.md +364 -0
  362. package/bin/skills/vllm/references/optimization.md +226 -0
  363. package/bin/skills/vllm/references/quantization.md +284 -0
  364. package/bin/skills/vllm/references/server-deployment.md +255 -0
  365. package/bin/skills/vllm/references/troubleshooting.md +447 -0
  366. package/bin/skills/weights-and-biases/SKILL.md +590 -0
  367. package/bin/skills/weights-and-biases/references/artifacts.md +584 -0
  368. package/bin/skills/weights-and-biases/references/integrations.md +700 -0
  369. package/bin/skills/weights-and-biases/references/sweeps.md +847 -0
  370. package/bin/skills/whisper/SKILL.md +317 -0
  371. package/bin/skills/whisper/references/languages.md +189 -0
  372. package/bin/synsc +0 -0
  373. package/package.json +10 -0
@@ -0,0 +1,589 @@
1
+ # Segment Anything Advanced Usage Guide
2
+
3
+ ## SAM 2 (Video Segmentation)
4
+
5
+ ### Overview
6
+
7
+ SAM 2 extends SAM to video segmentation with streaming memory architecture:
8
+
9
+ ```bash
10
+ pip install git+https://github.com/facebookresearch/segment-anything-2.git
11
+ ```
12
+
13
+ ### Video segmentation
14
+
15
+ ```python
16
+ from sam2.build_sam import build_sam2_video_predictor
17
+
18
+ predictor = build_sam2_video_predictor("sam2_hiera_l.yaml", "sam2_hiera_large.pt")
19
+
20
+ # Initialize with video
21
+ predictor.init_state(video_path="video.mp4")
22
+
23
+ # Add prompt on first frame
24
+ predictor.add_new_points(
25
+ frame_idx=0,
26
+ obj_id=1,
27
+ points=[[100, 200]],
28
+ labels=[1]
29
+ )
30
+
31
+ # Propagate through video
32
+ for frame_idx, masks in predictor.propagate_in_video():
33
+ # masks contains segmentation for all tracked objects
34
+ process_frame(frame_idx, masks)
35
+ ```
36
+
37
+ ### SAM 2 vs SAM comparison
38
+
39
+ | Feature | SAM | SAM 2 |
40
+ |---------|-----|-------|
41
+ | Input | Images only | Images + Videos |
42
+ | Architecture | ViT + Decoder | Hiera + Memory |
43
+ | Memory | Per-image | Streaming memory bank |
44
+ | Tracking | No | Yes, across frames |
45
+ | Models | ViT-B/L/H | Hiera-T/S/B+/L |
46
+
47
+ ## Grounded SAM (Text-Prompted Segmentation)
48
+
49
+ ### Setup
50
+
51
+ ```bash
52
+ pip install groundingdino-py
53
+ pip install git+https://github.com/facebookresearch/segment-anything.git
54
+ ```
55
+
56
+ ### Text-to-mask pipeline
57
+
58
+ ```python
59
+ from groundingdino.util.inference import load_model, predict
60
+ from segment_anything import sam_model_registry, SamPredictor
61
+ import cv2
62
+
63
+ # Load Grounding DINO
64
+ grounding_model = load_model("groundingdino_swint_ogc.pth", "GroundingDINO_SwinT_OGC.py")
65
+
66
+ # Load SAM
67
+ sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
68
+ predictor = SamPredictor(sam)
69
+
70
+ def text_to_mask(image, text_prompt, box_threshold=0.3, text_threshold=0.25):
71
+ """Generate masks from text description."""
72
+ # Get bounding boxes from text
73
+ boxes, logits, phrases = predict(
74
+ model=grounding_model,
75
+ image=image,
76
+ caption=text_prompt,
77
+ box_threshold=box_threshold,
78
+ text_threshold=text_threshold
79
+ )
80
+
81
+ # Generate masks with SAM
82
+ predictor.set_image(image)
83
+
84
+ masks = []
85
+ for box in boxes:
86
+ # Convert normalized box to pixel coordinates
87
+ h, w = image.shape[:2]
88
+ box_pixels = box * np.array([w, h, w, h])
89
+
90
+ mask, score, _ = predictor.predict(
91
+ box=box_pixels,
92
+ multimask_output=False
93
+ )
94
+ masks.append(mask[0])
95
+
96
+ return masks, boxes, phrases
97
+
98
+ # Usage
99
+ image = cv2.imread("image.jpg")
100
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
101
+
102
+ masks, boxes, phrases = text_to_mask(image, "person . dog . car")
103
+ ```
104
+
105
+ ## Batched Processing
106
+
107
+ ### Efficient multi-image processing
108
+
109
+ ```python
110
+ import torch
111
+ from segment_anything import SamPredictor, sam_model_registry
112
+
113
+ class BatchedSAM:
114
+ def __init__(self, checkpoint, model_type="vit_h", device="cuda"):
115
+ self.sam = sam_model_registry[model_type](checkpoint=checkpoint)
116
+ self.sam.to(device)
117
+ self.predictor = SamPredictor(self.sam)
118
+ self.device = device
119
+
120
+ def process_batch(self, images, prompts):
121
+ """Process multiple images with corresponding prompts."""
122
+ results = []
123
+
124
+ for image, prompt in zip(images, prompts):
125
+ self.predictor.set_image(image)
126
+
127
+ if "point" in prompt:
128
+ masks, scores, _ = self.predictor.predict(
129
+ point_coords=prompt["point"],
130
+ point_labels=prompt["label"],
131
+ multimask_output=True
132
+ )
133
+ elif "box" in prompt:
134
+ masks, scores, _ = self.predictor.predict(
135
+ box=prompt["box"],
136
+ multimask_output=False
137
+ )
138
+
139
+ results.append({
140
+ "masks": masks,
141
+ "scores": scores,
142
+ "best_mask": masks[np.argmax(scores)]
143
+ })
144
+
145
+ return results
146
+
147
+ # Usage
148
+ batch_sam = BatchedSAM("sam_vit_h_4b8939.pth")
149
+
150
+ images = [cv2.imread(f"image_{i}.jpg") for i in range(10)]
151
+ prompts = [{"point": np.array([[100, 100]]), "label": np.array([1])} for _ in range(10)]
152
+
153
+ results = batch_sam.process_batch(images, prompts)
154
+ ```
155
+
156
+ ### Parallel automatic mask generation
157
+
158
+ ```python
159
+ from concurrent.futures import ThreadPoolExecutor
160
+ from segment_anything import SamAutomaticMaskGenerator
161
+
162
+ def generate_masks_parallel(images, num_workers=4):
163
+ """Generate masks for multiple images in parallel."""
164
+ # Note: Each worker needs its own model instance
165
+ def worker_init():
166
+ sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b_01ec64.pth")
167
+ return SamAutomaticMaskGenerator(sam)
168
+
169
+ generators = [worker_init() for _ in range(num_workers)]
170
+
171
+ def process_image(args):
172
+ idx, image = args
173
+ generator = generators[idx % num_workers]
174
+ return generator.generate(image)
175
+
176
+ with ThreadPoolExecutor(max_workers=num_workers) as executor:
177
+ results = list(executor.map(process_image, enumerate(images)))
178
+
179
+ return results
180
+ ```
181
+
182
+ ## Custom Integration
183
+
184
+ ### FastAPI service
185
+
186
+ ```python
187
+ from fastapi import FastAPI, File, UploadFile
188
+ from pydantic import BaseModel
189
+ import numpy as np
190
+ import cv2
191
+ import io
192
+
193
+ app = FastAPI()
194
+
195
+ # Load model once
196
+ sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
197
+ sam.to("cuda")
198
+ predictor = SamPredictor(sam)
199
+
200
class PointPrompt(BaseModel):
    """A single point prompt for SAM: pixel coordinates plus a label."""
    # Pixel coordinates of the prompt point in the uploaded image.
    x: int
    y: int
    # SAM point label: 1 = foreground (include), 0 = background (exclude).
    label: int = 1
204
+
205
@app.post("/segment/point")
async def segment_with_point(
    file: UploadFile = File(...),
    points: list[PointPrompt] = []
):
    """Segment the uploaded image at the given point prompts.

    Returns the best mask (by predicted score), its score, and the
    scores of all candidate masks.
    """
    # SAM needs at least one point prompt; the previous version crashed
    # inside predictor.predict() on a zero-length coordinate array.
    if not points:
        return {"error": "at least one point prompt is required"}

    # Decode the uploaded bytes into an RGB image.
    contents = await file.read()
    nparr = np.frombuffer(contents, np.uint8)
    image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    if image is None:
        # imdecode returns None on unreadable data; fail clearly instead of
        # raising an opaque error from cvtColor.
        return {"error": "could not decode uploaded file as an image"}
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Compute the image embedding (the expensive step).
    predictor.set_image(image)

    # Prepare prompts as (N, 2) coords and (N,) labels.
    point_coords = np.array([[p.x, p.y] for p in points])
    point_labels = np.array([p.label for p in points])

    # Ask for multiple candidate masks and keep the highest-scoring one.
    masks, scores, _ = predictor.predict(
        point_coords=point_coords,
        point_labels=point_labels,
        multimask_output=True
    )

    best_idx = np.argmax(scores)

    return {
        "mask": masks[best_idx].tolist(),
        "score": float(scores[best_idx]),
        "all_scores": scores.tolist()
    }
237
+
238
@app.post("/segment/auto")
async def segment_automatic(file: UploadFile = File(...)):
    """Run SAM automatic mask generation on an uploaded image.

    The SamAutomaticMaskGenerator is built lazily on first use and cached
    on the function object instead of being reconstructed per request.
    """
    contents = await file.read()
    nparr = np.frombuffer(contents, np.uint8)
    image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
    if image is None:
        # Fail clearly on undecodable uploads instead of crashing in cvtColor.
        return {"error": "could not decode uploaded file as an image"}
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Reuse one generator across requests; construction is not free.
    mask_generator = getattr(segment_automatic, "_mask_generator", None)
    if mask_generator is None:
        mask_generator = SamAutomaticMaskGenerator(sam)
        segment_automatic._mask_generator = mask_generator

    masks = mask_generator.generate(image)

    return {
        "num_masks": len(masks),
        "masks": [
            {
                "bbox": m["bbox"],
                "area": m["area"],
                "predicted_iou": m["predicted_iou"],
                "stability_score": m["stability_score"]
            }
            for m in masks
        ]
    }
260
+ ```
261
+
262
+ ### Gradio interface
263
+
264
+ ```python
265
+ import gradio as gr
266
+ import numpy as np
267
+
268
+ # Load model
269
+ sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
270
+ predictor = SamPredictor(sam)
271
+
272
def segment_image(image, evt: gr.SelectData):
    """Segment the object under the clicked pixel and tint it red."""
    predictor.set_image(image)

    # The gradio click event carries (x, y) pixel coordinates.
    click_xy = np.array([[evt.index[0], evt.index[1]]])
    fg_label = np.array([1])

    masks, scores, _ = predictor.predict(
        point_coords=click_xy,
        point_labels=fg_label,
        multimask_output=True
    )

    chosen = masks[np.argmax(scores)]

    # Blend a 50% red tint over the selected region.
    highlighted = image.copy()
    red = np.array([255, 0, 0])
    highlighted[chosen] = highlighted[chosen] * 0.5 + red * 0.5

    return highlighted
292
+
293
# Two-pane demo: click a point on the left image, see the overlay on the right.
with gr.Blocks() as demo:
    gr.Markdown("# SAM Interactive Segmentation")
    gr.Markdown("Click on an object to segment it")

    with gr.Row():
        input_image = gr.Image(label="Input Image", interactive=True)
        output_image = gr.Image(label="Segmented Image")

    # A click on the input image triggers segmentation at that point.
    input_image.select(segment_image, inputs=[input_image], outputs=[output_image])

demo.launch()
304
+ ```
305
+
306
+ ## Fine-Tuning SAM
307
+
308
+ ### LoRA fine-tuning (experimental)
309
+
310
+ ```python
311
import torch  # required for torch.optim below; was missing from the snippet
from peft import LoraConfig, get_peft_model
from transformers import SamModel

# Load the Hugging Face port of SAM (ViT-B image encoder).
model = SamModel.from_pretrained("facebook/sam-vit-base")

# Configure LoRA: low-rank adapters on the attention qkv projections.
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["qkv"],  # Attention layers
    lora_dropout=0.1,
    bias="none",
)

# Wrap the model; only the LoRA adapter weights stay trainable.
model = get_peft_model(model, lora_config)

# Training loop (simplified)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

for batch in dataloader:
    outputs = model(
        pixel_values=batch["pixel_values"],
        input_points=batch["input_points"],
        input_labels=batch["input_labels"]
    )

    # Custom loss (e.g., IoU loss with ground truth)
    loss = compute_loss(outputs.pred_masks, batch["gt_masks"])
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
344
+ ```
345
+
346
+ ### MedSAM (Medical imaging)
347
+
348
+ ```python
349
# MedSAM is a fine-tuned SAM for medical images
# https://github.com/bowang-lab/MedSAM

from segment_anything import sam_model_registry, SamPredictor
import torch
import cv2          # used below; was missing from the snippet
import numpy as np  # used below; was missing from the snippet

# Load MedSAM checkpoint
medsam = sam_model_registry["vit_b"](checkpoint="medsam_vit_b.pth")
medsam.to("cuda")

predictor = SamPredictor(medsam)

# Process medical image
# Convert grayscale to RGB if needed (SAM expects 3-channel input)
medical_image = cv2.imread("ct_scan.png", cv2.IMREAD_GRAYSCALE)
rgb_image = np.stack([medical_image] * 3, axis=-1)

predictor.set_image(rgb_image)

# Segment with box prompt (common for medical imaging). Define the ROI
# explicitly — the original snippet referenced undefined x1, y1, x2, y2.
x1, y1, x2, y2 = 50, 50, 200, 200  # example region of interest in pixels
masks, scores, _ = predictor.predict(
    box=np.array([x1, y1, x2, y2]),
    multimask_output=False
)
373
+ ```
374
+
375
+ ## Advanced Mask Processing
376
+
377
+ ### Mask refinement
378
+
379
+ ```python
380
+ import cv2
381
+ from scipy import ndimage
382
+
383
def refine_mask(mask, kernel_size=5, iterations=2):
    """Smooth a binary mask with a morphological close-then-open pass.

    Closing seals small holes inside the mask; the subsequent opening
    strips isolated noise specks. Returns a boolean mask.
    """
    ellipse = cv2.getStructuringElement(
        cv2.MORPH_ELLIPSE, (kernel_size, kernel_size)
    )

    refined = mask.astype(np.uint8)
    # Close small holes
    refined = cv2.morphologyEx(refined, cv2.MORPH_CLOSE, ellipse, iterations=iterations)
    # Remove small noise
    refined = cv2.morphologyEx(refined, cv2.MORPH_OPEN, ellipse, iterations=iterations)

    return refined.astype(bool)
394
+
395
def fill_holes(mask):
    """Return the binary mask with all enclosed interior holes filled."""
    return ndimage.binary_fill_holes(mask)
399
+
400
def remove_small_regions(mask, min_area=100):
    """Remove connected components smaller than `min_area` pixels.

    Args:
        mask: Binary mask (bool or 0/1 array).
        min_area: Minimum component size, in pixels, to keep.

    Returns:
        Mask of the same shape and dtype with only the large components set.
    """
    labeled, num_features = ndimage.label(mask)
    # Pixel count of each component (labels run 1..num_features).
    sizes = ndimage.sum(mask, labeled, range(1, num_features + 1))

    # Labels of the components that survive the area threshold.
    keep = np.flatnonzero(np.asarray(sizes) >= min_area) + 1

    # Vectorized membership test replaces the original per-label loop,
    # which rescanned the full label image once per component (O(pixels
    # * components) instead of a single pass).
    mask_clean = np.zeros_like(mask)
    mask_clean[np.isin(labeled, keep)] = True

    return mask_clean
412
+ ```
413
+
414
+ ### Mask to polygon conversion
415
+
416
+ ```python
417
+ import cv2
418
+
419
def mask_to_polygons(mask, epsilon_factor=0.01):
    """Vectorize a binary mask into simplified outer-contour polygons.

    Each polygon is a list of [x, y] vertices; contours that simplify
    to fewer than three vertices are dropped.
    """
    contours, _ = cv2.findContours(
        mask.astype(np.uint8),
        cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE
    )

    polys = []
    for contour in contours:
        # Simplification tolerance scales with the contour perimeter.
        tolerance = epsilon_factor * cv2.arcLength(contour, True)
        simplified = cv2.approxPolyDP(contour, tolerance, True)
        vertices = simplified.squeeze().tolist()
        if len(vertices) >= 3:  # Valid polygon
            polys.append(vertices)

    return polys
436
+
437
def polygons_to_mask(polygons, height, width):
    """Rasterize a list of polygons into a boolean (height, width) mask."""
    canvas = np.zeros((height, width), dtype=np.uint8)
    for poly in polygons:
        cv2.fillPoly(canvas, [np.array(poly, dtype=np.int32)], 1)
    return canvas.astype(bool)
444
+ ```
445
+
446
+ ### Multi-scale segmentation
447
+
448
+ ```python
449
def multiscale_segment(image, predictor, point, scales=[0.5, 1.0, 2.0]):
    """Segment at several image scales and merge masks by majority vote."""
    h, w = image.shape[:2]
    votes = []

    for s in scales:
        # Rescale both the image and the prompt coordinate together.
        resized = cv2.resize(image, (int(w * s), int(h * s)))
        scaled_pt = (point * s).astype(int)

        predictor.set_image(resized)
        masks, scores, _ = predictor.predict(
            point_coords=scaled_pt.reshape(1, 2),
            point_labels=np.array([1]),
            multimask_output=True
        )

        # Map the best candidate mask back to the original resolution.
        winner = masks[np.argmax(scores)]
        votes.append(cv2.resize(winner.astype(np.uint8), (w, h)) > 0.5)

    # Keep a pixel when a strict majority of the scales agree on it.
    tally = np.sum(np.stack(votes, axis=0), axis=0)
    return tally >= len(scales) // 2 + 1
479
+ ```
480
+
481
+ ## Performance Optimization
482
+
483
+ ### TensorRT acceleration
484
+
485
+ ```python
486
+ import tensorrt as trt
487
+ import pycuda.driver as cuda
488
+ import pycuda.autoinit
489
+
490
def export_to_tensorrt(onnx_path, engine_path, fp16=True):
    """Convert an ONNX model to a TensorRT engine and save it to disk.

    Args:
        onnx_path: Path to the input ONNX file.
        engine_path: Path where the serialized engine is written.
        fp16: Enable FP16 kernels when supported by the hardware.

    Returns:
        The deserialized ICudaEngine, or None on parse/build failure.
    """
    logger = trt.Logger(trt.Logger.WARNING)
    builder = trt.Builder(logger)
    network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    parser = trt.OnnxParser(network, logger)

    with open(onnx_path, 'rb') as f:
        if not parser.parse(f.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))
            return None

    config = builder.create_builder_config()
    # max_workspace_size was deprecated in TRT 8.4 and removed in TRT 10;
    # the memory-pool API is the supported replacement.
    config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 30)  # 1GB

    if fp16:
        config.set_flag(trt.BuilderFlag.FP16)

    # build_engine was removed in TRT 10; build a serialized plan instead,
    # then deserialize it so this function still returns an engine object.
    plan = builder.build_serialized_network(network, config)
    if plan is None:
        return None

    with open(engine_path, 'wb') as f:
        f.write(plan)

    runtime = trt.Runtime(logger)
    return runtime.deserialize_cuda_engine(plan)
515
+ ```
516
+
517
+ ### Memory-efficient inference
518
+
519
+ ```python
520
class MemoryEfficientSAM:
    """Context manager that keeps SAM on the GPU only while in use.

    On ``__enter__`` the model moves to CUDA and a predictor is built;
    on ``__exit__`` the model returns to the CPU and the CUDA cache is
    emptied. ``segment()`` is only valid inside the ``with`` block.
    """

    def __init__(self, checkpoint, model_type="vit_b"):
        self.sam = sam_model_registry[model_type](checkpoint=checkpoint)
        self.sam.eval()
        self.predictor = None

    def __enter__(self):
        self.sam.to("cuda")
        self.predictor = SamPredictor(self.sam)
        return self

    def __exit__(self, *args):
        # Drop the predictor so a stale reference to the (now CPU-resident)
        # model cannot be used after the block exits.
        self.predictor = None
        self.sam.to("cpu")
        torch.cuda.empty_cache()

    def segment(self, image, points, labels):
        """Predict masks for point prompts; requires an active context."""
        if self.predictor is None:
            # Previously this path failed with an opaque AttributeError
            # on NoneType; raise a clear usage error instead.
            raise RuntimeError(
                "MemoryEfficientSAM.segment() must be called inside a "
                "'with' block"
            )
        self.predictor.set_image(image)
        masks, scores, _ = self.predictor.predict(
            point_coords=points,
            point_labels=labels,
            multimask_output=True
        )
        return masks, scores
543
+
544
# Usage with context manager (auto-cleanup): the model occupies GPU
# memory only inside this block.
with MemoryEfficientSAM("sam_vit_b_01ec64.pth") as sam:
    masks, scores = sam.segment(image, points, labels)
# CUDA memory freed automatically on exit
548
+ ```
549
+
550
+ ## Dataset Generation
551
+
552
+ ### Create segmentation dataset
553
+
554
+ ```python
555
+ import json
556
+
557
def generate_dataset(images_dir, output_dir, mask_generator):
    """Generate a segmentation dataset from a directory of images.

    Args:
        images_dir: Directory containing ``*.jpg`` images.
        output_dir: Directory for ``annotations.json`` (str or Path; it is
            created if missing — the original required an existing Path).
        mask_generator: A ``SamAutomaticMaskGenerator`` instance.

    Returns:
        The list of annotation dicts, also written to ``annotations.json``.

    Note:
        Relies on an external ``mask_to_rle`` helper for RLE encoding of
        the binary segmentation masks.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    annotations = []

    for img_path in Path(images_dir).glob("*.jpg"):
        image = cv2.imread(str(img_path))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        # Generate candidate masks for the whole image.
        masks = mask_generator.generate(image)

        # Keep only high-confidence masks.
        good_masks = [m for m in masks if m["predicted_iou"] > 0.9]

        # One annotation record per surviving mask.
        for i, mask_data in enumerate(good_masks):
            annotations.append({
                "image_id": img_path.stem,
                "mask_id": i,
                "bbox": mask_data["bbox"],
                "area": mask_data["area"],
                "segmentation": mask_to_rle(mask_data["segmentation"]),
                "predicted_iou": mask_data["predicted_iou"],
                "stability_score": mask_data["stability_score"]
            })

    # Save dataset
    with open(output_dir / "annotations.json", "w") as f:
        json.dump(annotations, f)

    return annotations
589
+ ```