@huggingface/tasks 0.13.1 → 0.13.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commonjs/dataset-libraries.js +65 -0
- package/dist/{src → commonjs}/default-widget-inputs.d.ts +2 -2
- package/dist/commonjs/default-widget-inputs.d.ts.map +1 -0
- package/dist/commonjs/default-widget-inputs.js +698 -0
- package/dist/commonjs/gguf.js +43 -0
- package/dist/commonjs/hardware.js +461 -0
- package/dist/{src → commonjs}/index.d.ts +19 -19
- package/dist/commonjs/index.d.ts.map +1 -0
- package/dist/commonjs/index.js +57 -0
- package/dist/{src → commonjs}/library-to-tasks.d.ts +2 -2
- package/dist/commonjs/library-to-tasks.d.ts.map +1 -0
- package/dist/commonjs/library-to-tasks.js +76 -0
- package/dist/{src → commonjs}/local-apps.d.ts +2 -2
- package/dist/commonjs/local-apps.d.ts.map +1 -0
- package/dist/commonjs/local-apps.js +334 -0
- package/dist/{src → commonjs}/model-data.d.ts +3 -3
- package/dist/commonjs/model-data.d.ts.map +1 -0
- package/dist/commonjs/model-data.js +2 -0
- package/dist/commonjs/model-libraries-downloads.js +18 -0
- package/dist/{src → commonjs}/model-libraries-snippets.d.ts +1 -1
- package/dist/commonjs/model-libraries-snippets.d.ts.map +1 -0
- package/dist/commonjs/model-libraries-snippets.js +1093 -0
- package/dist/{src → commonjs}/model-libraries.d.ts +3 -3
- package/dist/{src → commonjs}/model-libraries.d.ts.map +1 -1
- package/dist/commonjs/model-libraries.js +793 -0
- package/dist/commonjs/package.json +3 -0
- package/dist/{src → commonjs}/pipelines.d.ts +1 -1
- package/dist/{src → commonjs}/pipelines.d.ts.map +1 -1
- package/dist/commonjs/pipelines.js +645 -0
- package/dist/{src → commonjs}/snippets/common.d.ts +1 -1
- package/dist/commonjs/snippets/common.d.ts.map +1 -0
- package/dist/commonjs/snippets/common.js +23 -0
- package/dist/commonjs/snippets/curl.js +100 -0
- package/dist/commonjs/snippets/curl.spec.js +89 -0
- package/dist/commonjs/snippets/index.d.ts +7 -0
- package/dist/commonjs/snippets/index.d.ts.map +1 -0
- package/dist/commonjs/snippets/index.js +38 -0
- package/dist/commonjs/snippets/inputs.d.ts +4 -0
- package/dist/commonjs/snippets/inputs.d.ts.map +1 -0
- package/dist/commonjs/snippets/inputs.js +127 -0
- package/dist/commonjs/snippets/js.js +278 -0
- package/dist/commonjs/snippets/js.spec.js +141 -0
- package/dist/{src → commonjs}/snippets/python.d.ts +1 -1
- package/dist/{src → commonjs}/snippets/python.d.ts.map +1 -1
- package/dist/commonjs/snippets/python.js +293 -0
- package/dist/commonjs/snippets/python.spec.js +135 -0
- package/dist/{src → commonjs}/snippets/types.d.ts +1 -1
- package/dist/commonjs/snippets/types.d.ts.map +1 -0
- package/dist/commonjs/snippets/types.js +2 -0
- package/dist/commonjs/tasks/audio-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/audio-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/audio-classification/data.js +79 -0
- package/dist/commonjs/tasks/audio-classification/inference.js +2 -0
- package/dist/commonjs/tasks/audio-to-audio/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/audio-to-audio/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/audio-to-audio/data.js +66 -0
- package/dist/commonjs/tasks/automatic-speech-recognition/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/automatic-speech-recognition/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/automatic-speech-recognition/data.js +80 -0
- package/dist/commonjs/tasks/automatic-speech-recognition/inference.js +7 -0
- package/dist/commonjs/tasks/chat-completion/inference.js +7 -0
- package/dist/commonjs/tasks/depth-estimation/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/depth-estimation/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/depth-estimation/data.js +69 -0
- package/dist/commonjs/tasks/depth-estimation/inference.js +7 -0
- package/dist/commonjs/tasks/document-question-answering/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/document-question-answering/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/document-question-answering/data.js +80 -0
- package/dist/commonjs/tasks/document-question-answering/inference.js +2 -0
- package/dist/commonjs/tasks/feature-extraction/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/feature-extraction/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/feature-extraction/data.js +55 -0
- package/dist/commonjs/tasks/feature-extraction/inference.js +7 -0
- package/dist/commonjs/tasks/fill-mask/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/fill-mask/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/fill-mask/data.js +75 -0
- package/dist/commonjs/tasks/fill-mask/inference.js +2 -0
- package/dist/commonjs/tasks/image-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-classification/data.js +86 -0
- package/dist/commonjs/tasks/image-classification/inference.js +2 -0
- package/dist/commonjs/tasks/image-feature-extraction/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-feature-extraction/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-feature-extraction/data.js +57 -0
- package/dist/commonjs/tasks/image-segmentation/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-segmentation/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-segmentation/data.js +95 -0
- package/dist/commonjs/tasks/image-segmentation/inference.js +2 -0
- package/dist/commonjs/tasks/image-text-to-text/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-text-to-text/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-text-to-text/data.js +99 -0
- package/dist/commonjs/tasks/image-to-3d/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-to-3d/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-to-3d/data.js +74 -0
- package/dist/commonjs/tasks/image-to-image/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-to-image/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-to-image/data.js +95 -0
- package/dist/commonjs/tasks/image-to-image/inference.js +7 -0
- package/dist/commonjs/tasks/image-to-text/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-to-text/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-to-text/data.js +80 -0
- package/dist/commonjs/tasks/image-to-text/inference.js +7 -0
- package/dist/{src → commonjs}/tasks/index.d.ts +29 -29
- package/dist/commonjs/tasks/index.d.ts.map +1 -0
- package/dist/commonjs/tasks/index.js +183 -0
- package/dist/commonjs/tasks/keypoint-detection/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/keypoint-detection/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/keypoint-detection/data.js +49 -0
- package/dist/commonjs/tasks/mask-generation/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/mask-generation/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/mask-generation/data.js +52 -0
- package/dist/commonjs/tasks/object-detection/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/object-detection/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/object-detection/data.js +82 -0
- package/dist/commonjs/tasks/object-detection/inference.js +2 -0
- package/dist/commonjs/tasks/placeholder/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/placeholder/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/placeholder/data.js +20 -0
- package/dist/commonjs/tasks/question-answering/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/question-answering/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/question-answering/data.js +71 -0
- package/dist/commonjs/tasks/question-answering/inference.js +2 -0
- package/dist/commonjs/tasks/reinforcement-learning/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/reinforcement-learning/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/reinforcement-learning/data.js +69 -0
- package/dist/commonjs/tasks/sentence-similarity/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/sentence-similarity/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/sentence-similarity/data.js +95 -0
- package/dist/commonjs/tasks/sentence-similarity/inference.js +7 -0
- package/dist/commonjs/tasks/summarization/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/summarization/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/summarization/data.js +69 -0
- package/dist/commonjs/tasks/summarization/inference.js +7 -0
- package/dist/commonjs/tasks/table-question-answering/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/table-question-answering/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/table-question-answering/data.js +54 -0
- package/dist/commonjs/tasks/table-question-answering/inference.js +2 -0
- package/dist/commonjs/tasks/tabular-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/tabular-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/tabular-classification/data.js +67 -0
- package/dist/commonjs/tasks/tabular-regression/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/tabular-regression/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/tabular-regression/data.js +55 -0
- package/dist/commonjs/tasks/text-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/text-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-classification/data.js +100 -0
- package/dist/commonjs/tasks/text-classification/inference.js +2 -0
- package/dist/commonjs/tasks/text-generation/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/text-generation/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-generation/data.js +108 -0
- package/dist/commonjs/tasks/text-generation/inference.js +7 -0
- package/dist/commonjs/tasks/text-to-3d/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/text-to-3d/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-to-3d/data.js +55 -0
- package/dist/commonjs/tasks/text-to-audio/inference.js +7 -0
- package/dist/commonjs/tasks/text-to-image/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/text-to-image/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-to-image/data.js +95 -0
- package/dist/commonjs/tasks/text-to-image/inference.js +7 -0
- package/dist/commonjs/tasks/text-to-speech/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/text-to-speech/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-to-speech/data.js +76 -0
- package/dist/commonjs/tasks/text-to-speech/inference.js +7 -0
- package/dist/commonjs/tasks/text-to-video/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/text-to-video/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-to-video/data.js +95 -0
- package/dist/commonjs/tasks/text2text-generation/inference.js +7 -0
- package/dist/commonjs/tasks/token-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/token-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/token-classification/data.js +87 -0
- package/dist/commonjs/tasks/token-classification/inference.js +2 -0
- package/dist/commonjs/tasks/translation/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/translation/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/translation/data.js +65 -0
- package/dist/commonjs/tasks/translation/inference.js +7 -0
- package/dist/commonjs/tasks/unconditional-image-generation/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/unconditional-image-generation/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/unconditional-image-generation/data.js +65 -0
- package/dist/commonjs/tasks/video-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/video-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/video-classification/data.js +82 -0
- package/dist/commonjs/tasks/video-classification/inference.js +2 -0
- package/dist/commonjs/tasks/video-text-to-text/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/video-text-to-text/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/video-text-to-text/data.js +63 -0
- package/dist/commonjs/tasks/visual-question-answering/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/visual-question-answering/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/visual-question-answering/data.js +93 -0
- package/dist/commonjs/tasks/visual-question-answering/inference.js +2 -0
- package/dist/commonjs/tasks/zero-shot-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/zero-shot-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/zero-shot-classification/data.js +66 -0
- package/dist/commonjs/tasks/zero-shot-classification/inference.js +2 -0
- package/dist/commonjs/tasks/zero-shot-image-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/zero-shot-image-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/zero-shot-image-classification/data.js +81 -0
- package/dist/commonjs/tasks/zero-shot-image-classification/inference.js +2 -0
- package/dist/commonjs/tasks/zero-shot-object-detection/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/zero-shot-object-detection/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/zero-shot-object-detection/data.js +62 -0
- package/dist/commonjs/tasks/zero-shot-object-detection/inference.js +2 -0
- package/dist/commonjs/tokenizer-data.js +13 -0
- package/dist/{src → commonjs}/widget-example.d.ts +1 -1
- package/dist/commonjs/widget-example.d.ts.map +1 -0
- package/dist/commonjs/widget-example.js +5 -0
- package/dist/esm/dataset-libraries.d.ts +87 -0
- package/dist/esm/dataset-libraries.d.ts.map +1 -0
- package/dist/esm/dataset-libraries.js +62 -0
- package/dist/esm/default-widget-inputs.d.ts +6 -0
- package/dist/esm/default-widget-inputs.d.ts.map +1 -0
- package/dist/esm/default-widget-inputs.js +695 -0
- package/dist/esm/gguf.d.ts +35 -0
- package/dist/esm/gguf.d.ts.map +1 -0
- package/dist/esm/gguf.js +39 -0
- package/dist/esm/hardware.d.ts +478 -0
- package/dist/esm/hardware.d.ts.map +1 -0
- package/dist/esm/hardware.js +458 -0
- package/dist/esm/index.d.ts +21 -0
- package/dist/esm/index.d.ts.map +1 -0
- package/dist/esm/index.js +12 -0
- package/dist/esm/library-to-tasks.d.ts +11 -0
- package/dist/esm/library-to-tasks.d.ts.map +1 -0
- package/dist/esm/library-to-tasks.js +73 -0
- package/dist/esm/local-apps.d.ts +195 -0
- package/dist/esm/local-apps.d.ts.map +1 -0
- package/dist/esm/local-apps.js +331 -0
- package/dist/esm/model-data.d.ts +146 -0
- package/dist/esm/model-data.d.ts.map +1 -0
- package/dist/esm/model-data.js +1 -0
- package/dist/esm/model-libraries-downloads.d.ts +18 -0
- package/dist/esm/model-libraries-downloads.d.ts.map +1 -0
- package/dist/esm/model-libraries-downloads.js +17 -0
- package/dist/esm/model-libraries-snippets.d.ts +72 -0
- package/dist/esm/model-libraries-snippets.d.ts.map +1 -0
- package/dist/esm/model-libraries-snippets.js +1019 -0
- package/dist/esm/model-libraries.d.ts +804 -0
- package/dist/esm/model-libraries.d.ts.map +1 -0
- package/dist/esm/model-libraries.js +767 -0
- package/dist/esm/package.json +3 -0
- package/dist/esm/pipelines.d.ts +425 -0
- package/dist/esm/pipelines.d.ts.map +1 -0
- package/dist/esm/pipelines.js +642 -0
- package/dist/esm/snippets/common.d.ts +14 -0
- package/dist/esm/snippets/common.d.ts.map +1 -0
- package/dist/esm/snippets/common.js +19 -0
- package/dist/esm/snippets/curl.d.ts +17 -0
- package/dist/esm/snippets/curl.d.ts.map +1 -0
- package/dist/esm/snippets/curl.js +91 -0
- package/dist/esm/snippets/curl.spec.d.ts +2 -0
- package/dist/esm/snippets/curl.spec.d.ts.map +1 -0
- package/dist/esm/snippets/curl.spec.js +87 -0
- package/dist/esm/snippets/index.d.ts +7 -0
- package/dist/esm/snippets/index.d.ts.map +1 -0
- package/dist/esm/snippets/index.js +6 -0
- package/dist/esm/snippets/inputs.d.ts +4 -0
- package/dist/esm/snippets/inputs.d.ts.map +1 -0
- package/dist/esm/snippets/inputs.js +124 -0
- package/dist/esm/snippets/js.d.ts +19 -0
- package/dist/esm/snippets/js.d.ts.map +1 -0
- package/dist/esm/snippets/js.js +267 -0
- package/dist/esm/snippets/js.spec.d.ts +2 -0
- package/dist/esm/snippets/js.spec.d.ts.map +1 -0
- package/dist/esm/snippets/js.spec.js +139 -0
- package/dist/esm/snippets/python.d.ts +22 -0
- package/dist/esm/snippets/python.d.ts.map +1 -0
- package/dist/esm/snippets/python.js +279 -0
- package/dist/esm/snippets/python.spec.d.ts +2 -0
- package/dist/esm/snippets/python.spec.d.ts.map +1 -0
- package/dist/esm/snippets/python.spec.js +133 -0
- package/dist/esm/snippets/types.d.ts +12 -0
- package/dist/esm/snippets/types.d.ts.map +1 -0
- package/dist/esm/snippets/types.js +1 -0
- package/dist/esm/tasks/audio-classification/data.d.ts +4 -0
- package/dist/esm/tasks/audio-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/audio-classification/data.js +77 -0
- package/dist/esm/tasks/audio-classification/inference.d.ts +53 -0
- package/dist/esm/tasks/audio-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/audio-classification/inference.js +1 -0
- package/dist/esm/tasks/audio-to-audio/data.d.ts +4 -0
- package/dist/esm/tasks/audio-to-audio/data.d.ts.map +1 -0
- package/dist/esm/tasks/audio-to-audio/data.js +64 -0
- package/dist/esm/tasks/automatic-speech-recognition/data.d.ts +4 -0
- package/dist/esm/tasks/automatic-speech-recognition/data.d.ts.map +1 -0
- package/dist/esm/tasks/automatic-speech-recognition/data.js +78 -0
- package/dist/esm/tasks/automatic-speech-recognition/inference.d.ts +155 -0
- package/dist/esm/tasks/automatic-speech-recognition/inference.d.ts.map +1 -0
- package/dist/esm/tasks/automatic-speech-recognition/inference.js +6 -0
- package/dist/esm/tasks/chat-completion/inference.d.ts +291 -0
- package/dist/esm/tasks/chat-completion/inference.d.ts.map +1 -0
- package/dist/esm/tasks/chat-completion/inference.js +6 -0
- package/dist/esm/tasks/depth-estimation/data.d.ts +4 -0
- package/dist/esm/tasks/depth-estimation/data.d.ts.map +1 -0
- package/dist/esm/tasks/depth-estimation/data.js +67 -0
- package/dist/esm/tasks/depth-estimation/inference.d.ts +36 -0
- package/dist/esm/tasks/depth-estimation/inference.d.ts.map +1 -0
- package/dist/esm/tasks/depth-estimation/inference.js +6 -0
- package/dist/esm/tasks/document-question-answering/data.d.ts +4 -0
- package/dist/esm/tasks/document-question-answering/data.d.ts.map +1 -0
- package/dist/esm/tasks/document-question-answering/data.js +78 -0
- package/dist/esm/tasks/document-question-answering/inference.d.ts +111 -0
- package/dist/esm/tasks/document-question-answering/inference.d.ts.map +1 -0
- package/dist/esm/tasks/document-question-answering/inference.js +1 -0
- package/dist/esm/tasks/feature-extraction/data.d.ts +4 -0
- package/dist/esm/tasks/feature-extraction/data.d.ts.map +1 -0
- package/dist/esm/tasks/feature-extraction/data.js +53 -0
- package/dist/esm/tasks/feature-extraction/inference.d.ts +38 -0
- package/dist/esm/tasks/feature-extraction/inference.d.ts.map +1 -0
- package/dist/esm/tasks/feature-extraction/inference.js +6 -0
- package/dist/esm/tasks/fill-mask/data.d.ts +4 -0
- package/dist/esm/tasks/fill-mask/data.d.ts.map +1 -0
- package/dist/esm/tasks/fill-mask/data.js +73 -0
- package/dist/esm/tasks/fill-mask/inference.d.ts +63 -0
- package/dist/esm/tasks/fill-mask/inference.d.ts.map +1 -0
- package/dist/esm/tasks/fill-mask/inference.js +1 -0
- package/dist/esm/tasks/image-classification/data.d.ts +4 -0
- package/dist/esm/tasks/image-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-classification/data.js +84 -0
- package/dist/esm/tasks/image-classification/inference.d.ts +53 -0
- package/dist/esm/tasks/image-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/image-classification/inference.js +1 -0
- package/dist/esm/tasks/image-feature-extraction/data.d.ts +4 -0
- package/dist/esm/tasks/image-feature-extraction/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-feature-extraction/data.js +55 -0
- package/dist/esm/tasks/image-segmentation/data.d.ts +4 -0
- package/dist/esm/tasks/image-segmentation/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-segmentation/data.js +93 -0
- package/dist/esm/tasks/image-segmentation/inference.d.ts +70 -0
- package/dist/esm/tasks/image-segmentation/inference.d.ts.map +1 -0
- package/dist/esm/tasks/image-segmentation/inference.js +1 -0
- package/dist/esm/tasks/image-text-to-text/data.d.ts +4 -0
- package/dist/esm/tasks/image-text-to-text/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-text-to-text/data.js +97 -0
- package/dist/esm/tasks/image-to-3d/data.d.ts +4 -0
- package/dist/esm/tasks/image-to-3d/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-to-3d/data.js +72 -0
- package/dist/esm/tasks/image-to-image/data.d.ts +4 -0
- package/dist/esm/tasks/image-to-image/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-to-image/data.js +93 -0
- package/dist/esm/tasks/image-to-image/inference.d.ts +65 -0
- package/dist/esm/tasks/image-to-image/inference.d.ts.map +1 -0
- package/dist/esm/tasks/image-to-image/inference.js +6 -0
- package/dist/esm/tasks/image-to-text/data.d.ts +4 -0
- package/dist/esm/tasks/image-to-text/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-to-text/data.js +78 -0
- package/dist/esm/tasks/image-to-text/inference.d.ts +139 -0
- package/dist/esm/tasks/image-to-text/inference.d.ts.map +1 -0
- package/dist/esm/tasks/image-to-text/inference.js +6 -0
- package/dist/esm/tasks/index.d.ts +87 -0
- package/dist/esm/tasks/index.d.ts.map +1 -0
- package/dist/esm/tasks/index.js +177 -0
- package/dist/esm/tasks/keypoint-detection/data.d.ts +4 -0
- package/dist/esm/tasks/keypoint-detection/data.d.ts.map +1 -0
- package/dist/esm/tasks/keypoint-detection/data.js +47 -0
- package/dist/esm/tasks/mask-generation/data.d.ts +4 -0
- package/dist/esm/tasks/mask-generation/data.d.ts.map +1 -0
- package/dist/esm/tasks/mask-generation/data.js +50 -0
- package/dist/esm/tasks/object-detection/data.d.ts +4 -0
- package/dist/esm/tasks/object-detection/data.d.ts.map +1 -0
- package/dist/esm/tasks/object-detection/data.js +80 -0
- package/dist/esm/tasks/object-detection/inference.d.ts +76 -0
- package/dist/esm/tasks/object-detection/inference.d.ts.map +1 -0
- package/dist/esm/tasks/object-detection/inference.js +1 -0
- package/dist/esm/tasks/placeholder/data.d.ts +4 -0
- package/dist/esm/tasks/placeholder/data.d.ts.map +1 -0
- package/dist/esm/tasks/placeholder/data.js +18 -0
- package/dist/esm/tasks/question-answering/data.d.ts +4 -0
- package/dist/esm/tasks/question-answering/data.d.ts.map +1 -0
- package/dist/esm/tasks/question-answering/data.js +69 -0
- package/dist/esm/tasks/question-answering/inference.d.ts +100 -0
- package/dist/esm/tasks/question-answering/inference.d.ts.map +1 -0
- package/dist/esm/tasks/question-answering/inference.js +1 -0
- package/dist/esm/tasks/reinforcement-learning/data.d.ts +4 -0
- package/dist/esm/tasks/reinforcement-learning/data.d.ts.map +1 -0
- package/dist/esm/tasks/reinforcement-learning/data.js +67 -0
- package/dist/esm/tasks/sentence-similarity/data.d.ts +4 -0
- package/dist/esm/tasks/sentence-similarity/data.d.ts.map +1 -0
- package/dist/esm/tasks/sentence-similarity/data.js +93 -0
- package/dist/esm/tasks/sentence-similarity/inference.d.ts +32 -0
- package/dist/esm/tasks/sentence-similarity/inference.d.ts.map +1 -0
- package/dist/esm/tasks/sentence-similarity/inference.js +6 -0
- package/dist/esm/tasks/summarization/data.d.ts +4 -0
- package/dist/esm/tasks/summarization/data.d.ts.map +1 -0
- package/dist/esm/tasks/summarization/data.js +67 -0
- package/dist/esm/tasks/summarization/inference.d.ts +56 -0
- package/dist/esm/tasks/summarization/inference.d.ts.map +1 -0
- package/dist/esm/tasks/summarization/inference.js +6 -0
- package/dist/esm/tasks/table-question-answering/data.d.ts +4 -0
- package/dist/esm/tasks/table-question-answering/data.d.ts.map +1 -0
- package/dist/esm/tasks/table-question-answering/data.js +52 -0
- package/dist/esm/tasks/table-question-answering/inference.d.ts +62 -0
- package/dist/esm/tasks/table-question-answering/inference.d.ts.map +1 -0
- package/dist/esm/tasks/table-question-answering/inference.js +1 -0
- package/dist/esm/tasks/tabular-classification/data.d.ts +4 -0
- package/dist/esm/tasks/tabular-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/tabular-classification/data.js +65 -0
- package/dist/esm/tasks/tabular-regression/data.d.ts +4 -0
- package/dist/esm/tasks/tabular-regression/data.d.ts.map +1 -0
- package/dist/esm/tasks/tabular-regression/data.js +53 -0
- package/dist/esm/tasks/text-classification/data.d.ts +4 -0
- package/dist/esm/tasks/text-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/text-classification/data.js +98 -0
- package/dist/esm/tasks/text-classification/inference.d.ts +52 -0
- package/dist/esm/tasks/text-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/text-classification/inference.js +1 -0
- package/dist/esm/tasks/text-generation/data.d.ts +4 -0
- package/dist/esm/tasks/text-generation/data.d.ts.map +1 -0
- package/dist/esm/tasks/text-generation/data.js +106 -0
- package/dist/esm/tasks/text-generation/inference.d.ts +188 -0
- package/dist/esm/tasks/text-generation/inference.d.ts.map +1 -0
- package/dist/esm/tasks/text-generation/inference.js +6 -0
- package/dist/esm/tasks/text-to-3d/data.d.ts +4 -0
- package/dist/esm/tasks/text-to-3d/data.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-3d/data.js +53 -0
- package/dist/esm/tasks/text-to-audio/inference.d.ts +139 -0
- package/dist/esm/tasks/text-to-audio/inference.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-audio/inference.js +6 -0
- package/dist/esm/tasks/text-to-image/data.d.ts +4 -0
- package/dist/esm/tasks/text-to-image/data.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-image/data.js +93 -0
- package/dist/esm/tasks/text-to-image/inference.d.ts +72 -0
- package/dist/esm/tasks/text-to-image/inference.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-image/inference.js +6 -0
- package/dist/esm/tasks/text-to-speech/data.d.ts +4 -0
- package/dist/esm/tasks/text-to-speech/data.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-speech/data.js +74 -0
- package/dist/esm/tasks/text-to-speech/inference.d.ts +141 -0
- package/dist/esm/tasks/text-to-speech/inference.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-speech/inference.js +6 -0
- package/dist/esm/tasks/text-to-video/data.d.ts +4 -0
- package/dist/esm/tasks/text-to-video/data.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-video/data.js +93 -0
- package/dist/esm/tasks/text2text-generation/inference.d.ts +54 -0
- package/dist/esm/tasks/text2text-generation/inference.d.ts.map +1 -0
- package/dist/esm/tasks/text2text-generation/inference.js +6 -0
- package/dist/esm/tasks/token-classification/data.d.ts +4 -0
- package/dist/esm/tasks/token-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/token-classification/data.js +85 -0
- package/dist/esm/tasks/token-classification/inference.d.ts +86 -0
- package/dist/esm/tasks/token-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/token-classification/inference.js +1 -0
- package/dist/esm/tasks/translation/data.d.ts +4 -0
- package/dist/esm/tasks/translation/data.d.ts.map +1 -0
- package/dist/esm/tasks/translation/data.js +63 -0
- package/dist/esm/tasks/translation/inference.d.ts +66 -0
- package/dist/esm/tasks/translation/inference.d.ts.map +1 -0
- package/dist/esm/tasks/translation/inference.js +6 -0
- package/dist/esm/tasks/unconditional-image-generation/data.d.ts +4 -0
- package/dist/esm/tasks/unconditional-image-generation/data.d.ts.map +1 -0
- package/dist/esm/tasks/unconditional-image-generation/data.js +63 -0
- package/dist/esm/tasks/video-classification/data.d.ts +4 -0
- package/dist/esm/tasks/video-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/video-classification/data.js +80 -0
- package/dist/esm/tasks/video-classification/inference.d.ts +60 -0
- package/dist/esm/tasks/video-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/video-classification/inference.js +1 -0
- package/dist/esm/tasks/video-text-to-text/data.d.ts +4 -0
- package/dist/esm/tasks/video-text-to-text/data.d.ts.map +1 -0
- package/dist/esm/tasks/video-text-to-text/data.js +61 -0
- package/dist/esm/tasks/visual-question-answering/data.d.ts +4 -0
- package/dist/esm/tasks/visual-question-answering/data.d.ts.map +1 -0
- package/dist/esm/tasks/visual-question-answering/data.js +91 -0
- package/dist/esm/tasks/visual-question-answering/inference.d.ts +63 -0
- package/dist/esm/tasks/visual-question-answering/inference.d.ts.map +1 -0
- package/dist/esm/tasks/visual-question-answering/inference.js +1 -0
- package/dist/esm/tasks/zero-shot-classification/data.d.ts +4 -0
- package/dist/esm/tasks/zero-shot-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/zero-shot-classification/data.js +64 -0
- package/dist/esm/tasks/zero-shot-classification/inference.d.ts +68 -0
- package/dist/esm/tasks/zero-shot-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/zero-shot-classification/inference.js +1 -0
- package/dist/esm/tasks/zero-shot-image-classification/data.d.ts +4 -0
- package/dist/esm/tasks/zero-shot-image-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/zero-shot-image-classification/data.js +79 -0
- package/dist/esm/tasks/zero-shot-image-classification/inference.d.ts +62 -0
- package/dist/esm/tasks/zero-shot-image-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/zero-shot-image-classification/inference.js +1 -0
- package/dist/esm/tasks/zero-shot-object-detection/data.d.ts +4 -0
- package/dist/esm/tasks/zero-shot-object-detection/data.d.ts.map +1 -0
- package/dist/esm/tasks/zero-shot-object-detection/data.js +60 -0
- package/dist/esm/tasks/zero-shot-object-detection/inference.d.ts +67 -0
- package/dist/esm/tasks/zero-shot-object-detection/inference.d.ts.map +1 -0
- package/dist/esm/tasks/zero-shot-object-detection/inference.js +1 -0
- package/dist/esm/tokenizer-data.d.ts +26 -0
- package/dist/esm/tokenizer-data.d.ts.map +1 -0
- package/dist/esm/tokenizer-data.js +10 -0
- package/dist/esm/widget-example.d.ts +83 -0
- package/dist/esm/widget-example.d.ts.map +1 -0
- package/dist/esm/widget-example.js +4 -0
- package/package.json +24 -20
- package/src/default-widget-inputs.ts +2 -2
- package/src/index.ts +23 -19
- package/src/library-to-tasks.ts +2 -2
- package/src/local-apps.ts +3 -3
- package/src/model-data.ts +3 -3
- package/src/model-libraries-snippets.ts +3 -3
- package/src/model-libraries.ts +3 -3
- package/src/snippets/common.ts +1 -1
- package/src/snippets/curl.spec.ts +2 -2
- package/src/snippets/index.ts +5 -4
- package/src/snippets/inputs.ts +3 -3
- package/src/snippets/js.spec.ts +10 -10
- package/src/snippets/js.ts +8 -8
- package/src/snippets/python.spec.ts +39 -2
- package/src/snippets/python.ts +22 -5
- package/src/snippets/types.ts +1 -1
- package/src/tasks/audio-classification/data.ts +1 -1
- package/src/tasks/audio-to-audio/data.ts +1 -1
- package/src/tasks/automatic-speech-recognition/data.ts +1 -1
- package/src/tasks/depth-estimation/data.ts +1 -1
- package/src/tasks/document-question-answering/data.ts +1 -1
- package/src/tasks/feature-extraction/data.ts +1 -1
- package/src/tasks/fill-mask/data.ts +1 -1
- package/src/tasks/image-classification/data.ts +1 -1
- package/src/tasks/image-feature-extraction/data.ts +1 -1
- package/src/tasks/image-segmentation/data.ts +1 -1
- package/src/tasks/image-text-to-text/data.ts +1 -1
- package/src/tasks/image-to-3d/data.ts +1 -1
- package/src/tasks/image-to-image/data.ts +1 -1
- package/src/tasks/image-to-text/data.ts +1 -1
- package/src/tasks/index.ts +70 -70
- package/src/tasks/keypoint-detection/data.ts +1 -1
- package/src/tasks/mask-generation/data.ts +1 -1
- package/src/tasks/object-detection/data.ts +1 -1
- package/src/tasks/placeholder/data.ts +1 -1
- package/src/tasks/question-answering/data.ts +1 -1
- package/src/tasks/reinforcement-learning/data.ts +1 -1
- package/src/tasks/sentence-similarity/data.ts +1 -1
- package/src/tasks/summarization/data.ts +1 -1
- package/src/tasks/table-question-answering/data.ts +1 -1
- package/src/tasks/tabular-classification/data.ts +1 -1
- package/src/tasks/tabular-regression/data.ts +1 -1
- package/src/tasks/text-classification/data.ts +1 -1
- package/src/tasks/text-generation/data.ts +1 -1
- package/src/tasks/text-to-3d/data.ts +1 -1
- package/src/tasks/text-to-image/data.ts +1 -1
- package/src/tasks/text-to-speech/data.ts +1 -1
- package/src/tasks/text-to-video/data.ts +1 -1
- package/src/tasks/token-classification/data.ts +1 -1
- package/src/tasks/translation/data.ts +1 -1
- package/src/tasks/unconditional-image-generation/data.ts +1 -1
- package/src/tasks/video-classification/data.ts +1 -1
- package/src/tasks/video-text-to-text/data.ts +1 -1
- package/src/tasks/visual-question-answering/data.ts +1 -1
- package/src/tasks/zero-shot-classification/data.ts +1 -1
- package/src/tasks/zero-shot-image-classification/data.ts +1 -1
- package/src/tasks/zero-shot-object-detection/data.ts +1 -1
- package/src/widget-example.ts +1 -1
- package/tsconfig.json +3 -3
- package/dist/index.cjs +0 -7976
- package/dist/index.js +0 -7933
- package/dist/scripts/inference-codegen.d.ts +0 -2
- package/dist/scripts/inference-codegen.d.ts.map +0 -1
- package/dist/scripts/inference-tei-import.d.ts +0 -2
- package/dist/scripts/inference-tei-import.d.ts.map +0 -1
- package/dist/scripts/inference-tgi-import.d.ts +0 -2
- package/dist/scripts/inference-tgi-import.d.ts.map +0 -1
- package/dist/src/default-widget-inputs.d.ts.map +0 -1
- package/dist/src/index.d.ts.map +0 -1
- package/dist/src/library-to-tasks.d.ts.map +0 -1
- package/dist/src/local-apps.d.ts.map +0 -1
- package/dist/src/model-data.d.ts.map +0 -1
- package/dist/src/model-libraries-snippets.d.ts.map +0 -1
- package/dist/src/snippets/common.d.ts.map +0 -1
- package/dist/src/snippets/index.d.ts +0 -6
- package/dist/src/snippets/index.d.ts.map +0 -1
- package/dist/src/snippets/inputs.d.ts +0 -4
- package/dist/src/snippets/inputs.d.ts.map +0 -1
- package/dist/src/snippets/types.d.ts.map +0 -1
- package/dist/src/tasks/audio-classification/data.d.ts +0 -4
- package/dist/src/tasks/audio-to-audio/data.d.ts +0 -4
- package/dist/src/tasks/automatic-speech-recognition/data.d.ts +0 -4
- package/dist/src/tasks/depth-estimation/data.d.ts +0 -4
- package/dist/src/tasks/document-question-answering/data.d.ts +0 -4
- package/dist/src/tasks/feature-extraction/data.d.ts +0 -4
- package/dist/src/tasks/fill-mask/data.d.ts +0 -4
- package/dist/src/tasks/image-classification/data.d.ts +0 -4
- package/dist/src/tasks/image-feature-extraction/data.d.ts +0 -4
- package/dist/src/tasks/image-segmentation/data.d.ts +0 -4
- package/dist/src/tasks/image-text-to-text/data.d.ts +0 -4
- package/dist/src/tasks/image-to-3d/data.d.ts +0 -4
- package/dist/src/tasks/image-to-image/data.d.ts +0 -4
- package/dist/src/tasks/image-to-text/data.d.ts +0 -4
- package/dist/src/tasks/index.d.ts.map +0 -1
- package/dist/src/tasks/keypoint-detection/data.d.ts +0 -4
- package/dist/src/tasks/mask-generation/data.d.ts +0 -4
- package/dist/src/tasks/object-detection/data.d.ts +0 -4
- package/dist/src/tasks/placeholder/data.d.ts +0 -4
- package/dist/src/tasks/question-answering/data.d.ts +0 -4
- package/dist/src/tasks/reinforcement-learning/data.d.ts +0 -4
- package/dist/src/tasks/sentence-similarity/data.d.ts +0 -4
- package/dist/src/tasks/summarization/data.d.ts +0 -4
- package/dist/src/tasks/table-question-answering/data.d.ts +0 -4
- package/dist/src/tasks/tabular-classification/data.d.ts +0 -4
- package/dist/src/tasks/tabular-regression/data.d.ts +0 -4
- package/dist/src/tasks/text-classification/data.d.ts +0 -4
- package/dist/src/tasks/text-generation/data.d.ts +0 -4
- package/dist/src/tasks/text-to-3d/data.d.ts +0 -4
- package/dist/src/tasks/text-to-image/data.d.ts +0 -4
- package/dist/src/tasks/text-to-speech/data.d.ts +0 -4
- package/dist/src/tasks/text-to-video/data.d.ts +0 -4
- package/dist/src/tasks/token-classification/data.d.ts +0 -4
- package/dist/src/tasks/translation/data.d.ts +0 -4
- package/dist/src/tasks/unconditional-image-generation/data.d.ts +0 -4
- package/dist/src/tasks/video-classification/data.d.ts +0 -4
- package/dist/src/tasks/video-text-to-text/data.d.ts +0 -4
- package/dist/src/tasks/visual-question-answering/data.d.ts +0 -4
- package/dist/src/tasks/zero-shot-classification/data.d.ts +0 -4
- package/dist/src/tasks/zero-shot-image-classification/data.d.ts +0 -4
- package/dist/src/tasks/zero-shot-object-detection/data.d.ts +0 -4
- package/dist/src/widget-example.d.ts.map +0 -1
- /package/dist/{src → commonjs}/dataset-libraries.d.ts +0 -0
- /package/dist/{src → commonjs}/dataset-libraries.d.ts.map +0 -0
- /package/dist/{src → commonjs}/gguf.d.ts +0 -0
- /package/dist/{src → commonjs}/gguf.d.ts.map +0 -0
- /package/dist/{src → commonjs}/hardware.d.ts +0 -0
- /package/dist/{src → commonjs}/hardware.d.ts.map +0 -0
- /package/dist/{src → commonjs}/model-libraries-downloads.d.ts +0 -0
- /package/dist/{src → commonjs}/model-libraries-downloads.d.ts.map +0 -0
- /package/dist/{src → commonjs}/snippets/curl.d.ts +0 -0
- /package/dist/{src → commonjs}/snippets/curl.d.ts.map +0 -0
- /package/dist/{src → commonjs}/snippets/curl.spec.d.ts +0 -0
- /package/dist/{src → commonjs}/snippets/curl.spec.d.ts.map +0 -0
- /package/dist/{src → commonjs}/snippets/js.d.ts +0 -0
- /package/dist/{src → commonjs}/snippets/js.d.ts.map +0 -0
- /package/dist/{src → commonjs}/snippets/js.spec.d.ts +0 -0
- /package/dist/{src → commonjs}/snippets/js.spec.d.ts.map +0 -0
- /package/dist/{src → commonjs}/snippets/python.spec.d.ts +0 -0
- /package/dist/{src → commonjs}/snippets/python.spec.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/audio-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/audio-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/automatic-speech-recognition/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/automatic-speech-recognition/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/chat-completion/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/chat-completion/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/depth-estimation/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/depth-estimation/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/document-question-answering/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/document-question-answering/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/feature-extraction/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/feature-extraction/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/fill-mask/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/fill-mask/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/image-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/image-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/image-segmentation/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/image-segmentation/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/image-to-image/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/image-to-image/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/image-to-text/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/image-to-text/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/object-detection/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/object-detection/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/question-answering/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/question-answering/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/sentence-similarity/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/sentence-similarity/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/summarization/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/summarization/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/table-question-answering/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/table-question-answering/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/text-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/text-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/text-generation/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/text-generation/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/text-to-audio/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/text-to-audio/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/text-to-image/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/text-to-image/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/text-to-speech/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/text-to-speech/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/text2text-generation/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/text2text-generation/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/token-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/token-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/translation/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/translation/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/video-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/video-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/visual-question-answering/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/visual-question-answering/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/zero-shot-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/zero-shot-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/zero-shot-image-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/zero-shot-image-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/zero-shot-object-detection/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/zero-shot-object-detection/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tokenizer-data.d.ts +0 -0
- /package/dist/{src → commonjs}/tokenizer-data.d.ts.map +0 -0
|
@@ -0,0 +1,1019 @@
|
|
|
1
|
+
import { LIBRARY_TASK_MAPPING } from "./library-to-tasks.js";
|
|
2
|
+
// Hub tag value for models that ship custom modeling code.
// NOTE(review): no usage visible in this chunk — presumably consumed by a
// snippet generator later in the file; confirm before relying on it.
const TAG_CUSTOM_CODE = "custom_code";
|
|
3
|
+
/**
 * Strip the namespace ("owner/") prefix from a Hub model id.
 * @param {string} modelId - e.g. "org/model-name" or "model-name"
 * @returns {string} segment after the first "/", or the whole id when there is none
 */
function nameWithoutNamespace(modelId) {
	const parts = modelId.split("/");
	if (parts.length === 1) {
		return parts[0];
	}
	return parts[1];
}
|
|
7
|
+
// Escape a string for embedding between double quotes in a generated snippet;
// the surrounding quotes that JSON.stringify adds are trimmed off.
const escapeStringForJson = (str) => {
	const quoted = JSON.stringify(str);
	return quoted.substring(1, quoted.length - 1);
};
|
|
8
|
+
//#region snippets
|
|
9
|
+
/**
 * Python snippet for loading an adapter with the `adapters` library.
 * @param {object} model - uses model.id and model.config.adapter_transformers.model_name
 * @returns {string[]} a single Python snippet
 */
export const adapters = (model) => {
	const baseModel = model.config?.adapter_transformers?.model_name;
	const lines = [
		"from adapters import AutoAdapterModel",
		"",
		`model = AutoAdapterModel.from_pretrained("${baseModel}")`,
		`model.load_adapter("${model.id}", set_active=True)`,
	];
	return [lines.join("\n")];
};
|
|
15
|
+
// Generic AllenNLP snippet used when the task is not recognized.
const allennlpUnknown = (model) => {
	const lines = [
		"import allennlp_models",
		"from allennlp.predictors.predictor import Predictor",
		"",
		`predictor = Predictor.from_path("hf://${model.id}")`,
	];
	return [lines.join("\n")];
};
|
|
21
|
+
// AllenNLP snippet specialized for question-answering models.
const allennlpQuestionAnswering = (model) => {
	const lines = [
		"import allennlp_models",
		"from allennlp.predictors.predictor import Predictor",
		"",
		`predictor = Predictor.from_path("hf://${model.id}")`,
		'predictor_input = {"passage": "My name is Wolfgang and I live in Berlin", "question": "Where do I live?"}',
		"predictions = predictor.predict_json(predictor_input)",
	];
	return [lines.join("\n")];
};
|
|
29
|
+
/**
 * Dispatch to the task-specific AllenNLP snippet based on the model's tags.
 * @param {object} model - uses model.tags and model.id
 * @returns {string[]}
 */
export const allennlp = (model) =>
	model.tags.includes("question-answering") ? allennlpQuestionAnswering(model) : allennlpUnknown(model);
|
|
35
|
+
// Python snippet for loading an Asteroid source-separation model.
export const asteroid = (model) => {
	const lines = [
		"from asteroid.models import BaseModel",
		"",
		`model = BaseModel.from_pretrained("${model.id}")`,
	];
	return [lines.join("\n")];
};
|
|
40
|
+
/**
 * Python snippets for AudioSeal: one for embedding a watermark, one for
 * detecting it.
 * @param {object} model - uses model.id
 * @returns {string[]} [generator snippet, detector snippet]
 */
export const audioseal = (model) => {
	const generatorLines = [
		"# Watermark Generator",
		"from audioseal import AudioSeal",
		"",
		`model = AudioSeal.load_generator("${model.id}")`,
		"# pass a tensor (tensor_wav) of shape (batch, channels, samples) and a sample rate",
		"wav, sr = tensor_wav, 16000",
		"",
		"watermark = model.get_watermark(wav, sr)",
		"watermarked_audio = wav + watermark",
	];
	const detectorLines = [
		"# Watermark Detector",
		"from audioseal import AudioSeal",
		"",
		`detector = AudioSeal.load_detector("${model.id}")`,
		"",
		"result, message = detector.detect_watermark(watermarked_audio, sr)",
	];
	return [generatorLines.join("\n"), detectorLines.join("\n")];
};
|
|
58
|
+
/**
 * Base model id declared on the model card, or a fill-in placeholder.
 * @param {object} model - reads model.cardData.base_model
 * @returns {string}
 */
function get_base_diffusers_model(model) {
	const base = model.cardData?.base_model;
	if (base === undefined || base === null) {
		return "fill-in-base-model";
	}
	return base.toString();
}
|
|
61
|
+
/**
 * Prompt to embed in diffusers snippets: widget example text first, then the
 * card's instance_prompt; undefined when neither is present.
 * @param {object} model - reads model.widgetData and model.cardData
 * @returns {string | undefined} JSON-escaped prompt, ready for a quoted literal
 */
function get_prompt_from_diffusers_model(model) {
	const rawPrompt = model.widgetData?.[0]?.text ?? model.cardData?.instance_prompt;
	return rawPrompt ? escapeStringForJson(rawPrompt) : undefined;
}
|
|
67
|
+
// Python snippet for loading a BERTopic topic model.
export const bertopic = (model) => {
	const lines = [
		"from bertopic import BERTopic",
		"",
		`model = BERTopic.load("${model.id}")`,
	];
	return [lines.join("\n")];
};
|
|
72
|
+
// Python snippet for loading a bm25s retriever from the Hub.
export const bm25s = (model) => {
	const lines = [
		"from bm25s.hf import BM25HF",
		"",
		`retriever = BM25HF.load_from_hub("${model.id}")`,
	];
	return [lines.join("\n")];
};
|
|
77
|
+
/**
 * Python snippet for Depth-Anything-V2 checkpoints. Known official repos get
 * their published encoder/features/out_channels values; any other id gets
 * explicit placeholders for the user to fill in.
 *
 * Fixes vs. previous revision:
 *  - the "vitl" out_channels string was missing its closing "]";
 *  - the generated code used hf_hub_download without importing it;
 *  - load_state_dict() returns _IncompatibleKeys, so .eval() is now called on
 *    the model itself instead of on the load_state_dict() result.
 * @param {object} model - uses model.id
 * @returns {string[]} a single Python snippet
 */
export const depth_anything_v2 = (model) => {
	const CHECKPOINT_CONFIGS = {
		"depth-anything/Depth-Anything-V2-Small": { encoder: "vits", features: "64", out_channels: "[48, 96, 192, 384]" },
		"depth-anything/Depth-Anything-V2-Base": { encoder: "vitb", features: "128", out_channels: "[96, 192, 384, 768]" },
		"depth-anything/Depth-Anything-V2-Large": { encoder: "vitl", features: "256", out_channels: "[256, 512, 1024, 1024]" },
	};
	// Object.hasOwn guards against ids colliding with Object.prototype keys.
	const { encoder, features, out_channels } = Object.hasOwn(CHECKPOINT_CONFIGS, model.id)
		? CHECKPOINT_CONFIGS[model.id]
		: { encoder: "<ENCODER>", features: "<NUMBER_OF_FEATURES>", out_channels: "<OUT_CHANNELS>" };
	return [
		`
# Install from https://github.com/DepthAnything/Depth-Anything-V2

# Load the model and infer depth from an image
import cv2
import torch

from depth_anything_v2.dpt import DepthAnythingV2
from huggingface_hub import hf_hub_download

# instantiate the model
model = DepthAnythingV2(encoder="${encoder}", features=${features}, out_channels=${out_channels})

# load the weights
filepath = hf_hub_download(repo_id="${model.id}", filename="depth_anything_v2_${encoder}.pth", repo_type="model")
state_dict = torch.load(filepath, map_location="cpu")
model.load_state_dict(state_dict)
model.eval()

raw_img = cv2.imread("your/image/path")
depth = model.infer_image(raw_img) # HxW raw depth map in numpy
`,
	];
};
|
|
122
|
+
/**
 * Snippets for Apple's Depth Pro: a shell snippet to fetch the checkpoint and
 * a Python snippet to run inference.
 * @param {object} model - uses model.id
 * @returns {string[]} [download snippet, inference snippet]
 */
export const depth_pro = (model) => {
	const downloadLines = [
		"# Download checkpoint",
		"pip install huggingface-hub",
		`huggingface-cli download --local-dir checkpoints ${model.id}`,
	];
	const inferenceLines = [
		"import depth_pro",
		"",
		"# Load model and preprocessing transform",
		"model, transform = depth_pro.create_model_and_transforms()",
		"model.eval()",
		"",
		"# Load and preprocess an image.",
		'image, _, f_px = depth_pro.load_rgb("example.png")',
		"image = transform(image)",
		"",
		"# Run inference.",
		"prediction = model.infer(image, f_px=f_px)",
		"",
		"# Results: 1. Depth in meters",
		'depth = prediction["depth"]',
		"# Results: 2. Focal length in pixels",
		'focallength_px = prediction["focallength_px"]',
	];
	return [downloadLines.join("\n"), inferenceLines.join("\n")];
};
|
|
145
|
+
// Fallback prompt used by the diffusers snippets when neither the widget data
// nor the model card provides one.
const diffusersDefaultPrompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k";
|
|
146
|
+
// Default diffusers snippet: plain DiffusionPipeline plus a text-to-image call.
const diffusers_default = (model) => {
	const prompt = get_prompt_from_diffusers_model(model) ?? diffusersDefaultPrompt;
	const lines = [
		"from diffusers import DiffusionPipeline",
		"",
		`pipe = DiffusionPipeline.from_pretrained("${model.id}")`,
		"",
		`prompt = "${prompt}"`,
		"image = pipe(prompt).images[0]",
	];
	return [lines.join("\n")];
};
|
|
154
|
+
// diffusers snippet for ControlNet checkpoints: load the controlnet, then a
// StableDiffusionControlNetPipeline on top of the declared base model.
const diffusers_controlnet = (model) => {
	const baseModel = get_base_diffusers_model(model);
	const lines = [
		"from diffusers import ControlNetModel, StableDiffusionControlNetPipeline",
		"",
		`controlnet = ControlNetModel.from_pretrained("${model.id}")`,
		"pipe = StableDiffusionControlNetPipeline.from_pretrained(",
		`"${baseModel}", controlnet=controlnet`,
		")",
	];
	return [lines.join("\n")];
};
|
|
162
|
+
// diffusers snippet for LoRA weights: base pipeline + load_lora_weights.
const diffusers_lora = (model) => {
	const baseModel = get_base_diffusers_model(model);
	const prompt = get_prompt_from_diffusers_model(model) ?? diffusersDefaultPrompt;
	const lines = [
		"from diffusers import DiffusionPipeline",
		"",
		`pipe = DiffusionPipeline.from_pretrained("${baseModel}")`,
		`pipe.load_lora_weights("${model.id}")`,
		"",
		`prompt = "${prompt}"`,
		"image = pipe(prompt).images[0]",
	];
	return [lines.join("\n")];
};
|
|
171
|
+
// diffusers snippet for textual-inversion embeddings.
const diffusers_textual_inversion = (model) => {
	const baseModel = get_base_diffusers_model(model);
	const lines = [
		"from diffusers import DiffusionPipeline",
		"",
		`pipe = DiffusionPipeline.from_pretrained("${baseModel}")`,
		`pipe.load_textual_inversion("${model.id}")`,
	];
	return [lines.join("\n")];
};
|
|
177
|
+
/**
 * Dispatch to the right diffusers snippet builder based on model tags;
 * checked in priority order: controlnet, then lora, then textual_inversion.
 * @param {object} model - uses model.tags (plus fields read by the builders)
 * @returns {string[]}
 */
export const diffusers = (model) => {
	const byTag = [
		["controlnet", diffusers_controlnet],
		["lora", diffusers_lora],
		["textual_inversion", diffusers_textual_inversion],
	];
	for (const [tag, makeSnippets] of byTag) {
		if (model.tags.includes(tag)) {
			return makeSnippets(model);
		}
	}
	return diffusers_default(model);
};
|
|
191
|
+
/**
 * Python snippets for DiffusionKit (MLX): pipeline construction (SD3 or Flux,
 * chosen by the "flux" tag) plus an image-generation snippet.
 *
 * Fix vs. previous revision: model_version was interpolated without quotes,
 * so the generated snippet was invalid Python (a bare path expression).
 * @param {object} model - uses model.id and model.tags
 * @returns {string[]} [pipeline snippet, generation snippet]
 */
export const diffusionkit = (model) => {
	const isFlux = model.tags.includes("flux");
	const sd3Snippet = `# Pipeline for Stable Diffusion 3
from diffusionkit.mlx import DiffusionPipeline

pipeline = DiffusionPipeline(
shift=3.0,
use_t5=False,
model_version="${model.id}",
low_memory_mode=True,
a16=True,
w16=True,
)`;
	const fluxSnippet = `# Pipeline for Flux
from diffusionkit.mlx import FluxPipeline

pipeline = FluxPipeline(
shift=1.0,
model_version="${model.id}",
low_memory_mode=True,
a16=True,
w16=True,
)`;
	// Flux checkpoints use few-step, CFG-free sampling (4 steps, weight 0).
	const generateSnippet = `# Image Generation
HEIGHT = 512
WIDTH = 512
NUM_STEPS = ${isFlux ? 4 : 50}
CFG_WEIGHT = ${isFlux ? 0 : 5}

image, _ = pipeline.generate_image(
"a photo of a cat",
cfg_weight=CFG_WEIGHT,
num_steps=NUM_STEPS,
latent_size=(HEIGHT // 8, WIDTH // 8),
)`;
	return [isFlux ? fluxSnippet : sd3Snippet, generateSnippet];
};
|
|
228
|
+
/**
 * Python snippet for Cartesia Rene models via cartesia-pytorch.
 *
 * Fix vs. previous revision: the generated snippet ended with a stray ")"
 * after print(out_message), which made it invalid Python.
 * @param {object} model - uses model.id
 * @returns {string[]} a single Python snippet
 */
export const cartesia_pytorch = (model) => [
	`# pip install --no-binary :all: cartesia-pytorch
from cartesia_pytorch import ReneLMHeadModel
from transformers import AutoTokenizer

model = ReneLMHeadModel.from_pretrained("${model.id}")
tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-1B-hf")

in_message = ["Rene Descartes was"]
inputs = tokenizer(in_message, return_tensors="pt")

outputs = model.generate(inputs.input_ids, max_length=50, top_k=100, top_p=0.99)
out_message = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]

print(out_message)`,
];
|
|
245
|
+
/**
 * Python snippet for Cartesia models on Apple silicon via cartesia_mlx.
 * @param {object} model - uses model.id
 * @returns {string[]} a single Python snippet (trailing newline preserved)
 */
export const cartesia_mlx = (model) => {
	const lines = [
		"import mlx.core as mx",
		"import cartesia_mlx as cmx",
		"",
		`model = cmx.from_pretrained("${model.id}")`,
		"model.set_dtype(mx.float32)",
		"",
		'prompt = "Rene Descartes was"',
		"",
		"for text in model.generate(",
		"prompt,",
		"max_tokens=500,",
		"eval_every_n=5,",
		"verbose=True,",
		"top_p=0.99,",
		"temperature=0.85,",
		"):",
		'print(text, end="", flush=True)',
		"",
	];
	return [lines.join("\n")];
};
|
|
265
|
+
/**
 * Python snippets for EDS-NLP pipelines: direct Hub load, or install the repo
 * as a pip package and import it as a module.
 * @param {object} model - uses model.id
 * @returns {string[]} [hub-load snippet, package-install snippet]
 */
export const edsnlp = (model) => {
	const packageName = nameWithoutNamespace(model.id).replaceAll("-", "_");
	const hubSnippet = [
		"# Load it from the Hub directly",
		"import edsnlp",
		`nlp = edsnlp.load("${model.id}")`,
		"",
	].join("\n");
	const packageSnippet = [
		"# Or install it as a package",
		`!pip install git+https://huggingface.co/${model.id}`,
		"",
		"# and import it as a module",
		`import ${packageName}`,
		"",
		`nlp = ${packageName}.load() # or edsnlp.load("${packageName}")`,
		"",
	].join("\n");
	return [hubSnippet, packageSnippet];
};
|
|
282
|
+
// ESPnet snippet for text-to-speech models.
export const espnetTTS = (model) => {
	const lines = [
		"from espnet2.bin.tts_inference import Text2Speech",
		"",
		`model = Text2Speech.from_pretrained("${model.id}")`,
		"",
		'speech, *_ = model("text to generate speech from")',
	];
	return [lines.join("\n")];
};
|
|
289
|
+
// ESPnet snippet for automatic-speech-recognition models.
// NOTE(review): the generated Python uses soundfile without importing it —
// mirrors the original snippet; confirm upstream intent before changing.
export const espnetASR = (model) => {
	const lines = [
		"from espnet2.bin.asr_inference import Speech2Text",
		"",
		"model = Speech2Text.from_pretrained(",
		`"${model.id}"`,
		")",
		"",
		'speech, rate = soundfile.read("speech.wav")',
		"text, *_ = model(speech)[0]",
	];
	return [lines.join("\n")];
};
|
|
299
|
+
// Fallback when an ESPnet model has neither supported task tag.
const espnetUnknown = () => {
	return [`unknown model type (must be text-to-speech or automatic-speech-recognition)`];
};
|
|
300
|
+
/**
 * Dispatch to the task-specific ESPnet snippet based on the model's tags.
 * @param {object} model - uses model.tags and model.id
 * @returns {string[]}
 */
export const espnet = (model) => {
	const byTag = [
		["text-to-speech", espnetTTS],
		["automatic-speech-recognition", espnetASR],
	];
	for (const [tag, makeSnippets] of byTag) {
		if (model.tags.includes(tag)) {
			return makeSnippets(model);
		}
	}
	return espnetUnknown();
};
|
|
309
|
+
// Python snippet for loading a fairseq model ensemble from the Hub.
export const fairseq = (model) => {
	const lines = [
		"from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub",
		"",
		"models, cfg, task = load_model_ensemble_and_task_from_hf_hub(",
		`"${model.id}"`,
		")",
	];
	return [lines.join("\n")];
};
|
|
316
|
+
// Python snippet for loading a Flair sequence tagger.
export const flair = (model) => {
	const lines = [
		"from flair.models import SequenceTagger",
		"",
		`tagger = SequenceTagger.load("${model.id}")`,
	];
	return [lines.join("\n")];
};
|
|
321
|
+
// Python snippet for loading a GLiNER entity-recognition model.
export const gliner = (model) => {
	const lines = [
		"from gliner import GLiNER",
		"",
		`model = GLiNER.from_pretrained("${model.id}")`,
	];
	return [lines.join("\n")];
};
|
|
326
|
+
/**
 * Snippets for HTRflow handwritten-text recognition: CLI usage and a Python
 * pipeline definition.
 * @param {object} model - uses model.id
 * @returns {string[]} [CLI snippet, Python snippet]
 */
export const htrflow = (model) => {
	const cliLines = [
		"# CLI usage",
		"# see docs: https://ai-riksarkivet.github.io/htrflow/latest/getting_started/quick_start.html",
		"htrflow pipeline <path/to/pipeline.yaml> <path/to/image>",
	];
	const pythonLines = [
		"# Python usage",
		"from htrflow.pipeline.pipeline import Pipeline",
		"from htrflow.pipeline.steps import Task",
		"from htrflow.models.framework.model import ModelClass",
		"",
		"pipeline = Pipeline(",
		"[",
		"Task(",
		`ModelClass, {"model": "${model.id}"}, {}`,
		"),",
		"])",
	];
	return [cliLines.join("\n"), pythonLines.join("\n")];
};
|
|
342
|
+
// Python snippet for loading a Keras 3 model saved on the Hub.
export const keras = (model) => {
	const lines = [
		'# Available backend options are: "jax", "torch", "tensorflow".',
		"import os",
		'os.environ["KERAS_BACKEND"] = "jax"',
		"",
		"import keras",
		"",
		`model = keras.saving.load_model("hf://${model.id}")`,
		"",
	];
	return [lines.join("\n")];
};
|
|
352
|
+
// Python snippet for loading a KerasNLP tokenizer and backbone from the Hub.
export const keras_nlp = (model) => {
	const lines = [
		'# Available backend options are: "jax", "torch", "tensorflow".',
		"import os",
		'os.environ["KERAS_BACKEND"] = "jax"',
		"",
		"import keras_nlp",
		"",
		`tokenizer = keras_nlp.models.Tokenizer.from_preset("hf://${model.id}")`,
		`backbone = keras_nlp.models.Backbone.from_preset("hf://${model.id}")`,
		"",
	];
	return [lines.join("\n")];
};
|
|
363
|
+
// Python snippet for loading a task model with keras_hub.
export const keras_hub = (model) => {
	const lines = [
		'# Available backend options are: "jax", "torch", "tensorflow".',
		"import os",
		'os.environ["KERAS_BACKEND"] = "jax"',
		"",
		"import keras_hub",
		"",
		"# Load a task-specific model (*replace CausalLM with your task*)",
		`model = keras_hub.models.CausalLM.from_preset("hf://${model.id}", dtype="bfloat16")`,
		"",
		"# Possible tasks are CausalLM, TextToImage, ImageClassifier, ...",
		"# full list here: https://keras.io/api/keras_hub/models/#api-documentation",
		"",
	];
	return [lines.join("\n")];
};
|
|
377
|
+
/**
 * Python snippet for llama-cpp-python; "{{GGUF_FILE}}" is a placeholder the
 * caller substitutes with a concrete GGUF filename.
 * @param {object} model - uses model.id
 * @returns {string[]} a single Python snippet
 */
export const llama_cpp_python = (model) => {
	const lines = [
		"from llama_cpp import Llama",
		"",
		"llm = Llama.from_pretrained(",
		`repo_id="${model.id}",`,
		'filename="{{GGUF_FILE}}",',
		")",
		"",
		"llm.create_chat_completion(",
		"messages = [",
		"{",
		'"role": "user",',
		'"content": "What is the capital of France?"',
		"}",
		"]",
		")",
	];
	return [lines.join("\n")];
};
|
|
394
|
+
// Python snippet for legacy tf-keras models (Keras 2.x era).
export const tf_keras = (model) => {
	const lines = [
		"# Note: 'keras<3.x' or 'tf_keras' must be installed (legacy)",
		"# See https://github.com/keras-team/tf-keras for more details.",
		"from huggingface_hub import from_pretrained_keras",
		"",
		`model = from_pretrained_keras("${model.id}")`,
		"",
	];
	return [lines.join("\n")];
};
|
|
402
|
+
// Python snippet for loading a Mamba LM with mamba_ssm.
export const mamba_ssm = (model) => {
	const lines = [
		"from mamba_ssm import MambaLMHeadModel",
		"",
		`model = MambaLMHeadModel.from_pretrained("${model.id}")`,
	];
	return [lines.join("\n")];
};
|
|
407
|
+
// Python snippet for MARS5-TTS (install from GitHub, then load via its inference module).
export const mars5_tts = (model) => {
	const lines = [
		"# Install from https://github.com/Camb-ai/MARS5-TTS",
		"",
		"from inference import Mars5TTS",
		`mars5 = Mars5TTS.from_pretrained("${model.id}")`,
	];
	return [lines.join("\n")];
};
|
|
413
|
+
// Python snippet for MeshAnything; args construction is documented upstream.
export const mesh_anything = () => {
	const lines = [
		"# Install from https://github.com/buaacyw/MeshAnything.git",
		"",
		"from MeshAnything.models.meshanything import MeshAnything",
		"",
		"# refer to https://github.com/buaacyw/MeshAnything/blob/main/main.py#L91 on how to define args",
		"# and https://github.com/buaacyw/MeshAnything/blob/main/app.py regarding usage",
		"model = MeshAnything(args)",
	];
	return [lines.join("\n")];
};
|
|
422
|
+
// Python snippet for loading an OpenCLIP model and tokenizer from the Hub.
export const open_clip = (model) => {
	const hubRef = `hf-hub:${model.id}`;
	const lines = [
		"import open_clip",
		"",
		`model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms('${hubRef}')`,
		`tokenizer = open_clip.get_tokenizer('${hubRef}')`,
	];
	return [lines.join("\n")];
};
|
|
428
|
+
/**
 * Python snippet for PaddleNLP models: uses the declared architecture class
 * when config.architectures[0] is present, otherwise falls back to AutoModel.
 * @param {object} model - uses model.id and model.config.architectures
 * @returns {string[]} a single Python snippet
 */
export const paddlenlp = (model) => {
	const architecture = model.config?.architectures?.[0];
	const lines = architecture
		? [
				`from paddlenlp.transformers import AutoTokenizer, ${architecture}`,
				"",
				`tokenizer = AutoTokenizer.from_pretrained("${model.id}", from_hf_hub=True)`,
				`model = ${architecture}.from_pretrained("${model.id}", from_hf_hub=True)`,
		  ]
		: [
				`# ⚠️ Type of model unknown`,
				`from paddlenlp.transformers import AutoTokenizer, AutoModel`,
				"",
				`tokenizer = AutoTokenizer.from_pretrained("${model.id}", from_hf_hub=True)`,
				`model = AutoModel.from_pretrained("${model.id}", from_hf_hub=True)`,
		  ];
	return [lines.join("\n")];
};
|
|
452
|
+
// Python snippet for a pyannote.audio Pipeline: whole-file and excerpt inference.
export const pyannote_audio_pipeline = (model) => {
	const lines = [
		"from pyannote.audio import Pipeline",
		"",
		`pipeline = Pipeline.from_pretrained("${model.id}")`,
		"",
		"# inference on the whole file",
		'pipeline("file.wav")',
		"",
		"# inference on an excerpt",
		"from pyannote.core import Segment",
		"excerpt = Segment(start=2.0, end=5.0)",
		"",
		"from pyannote.audio import Audio",
		'waveform, sample_rate = Audio().crop("file.wav", excerpt)',
		'pipeline({"waveform": waveform, "sample_rate": sample_rate})',
	];
	return [lines.join("\n")];
};
|
|
468
|
+
// Python snippet for a raw pyannote.audio Model wrapped in Inference.
const pyannote_audio_model = (model) => {
	const lines = [
		"from pyannote.audio import Model, Inference",
		"",
		`model = Model.from_pretrained("${model.id}")`,
		"inference = Inference(model)",
		"",
		"# inference on the whole file",
		'inference("file.wav")',
		"",
		"# inference on an excerpt",
		"from pyannote.core import Segment",
		"excerpt = Segment(start=2.0, end=5.0)",
		'inference.crop("file.wav", excerpt)',
	];
	return [lines.join("\n")];
};
|
|
482
|
+
/**
 * Choose between the pipeline and raw-model pyannote.audio snippets based on
 * the "pyannote-audio-pipeline" tag.
 * @param {object} model - uses model.tags and model.id
 * @returns {string[]}
 */
export const pyannote_audio = (model) =>
	model.tags.includes("pyannote-audio-pipeline") ? pyannote_audio_pipeline(model) : pyannote_audio_model(model);
|
|
488
|
+
// Python snippet for loading a ReLiK retrieve-read-link model.
export const relik = (model) => {
	const lines = [
		"from relik import Relik",
		"",
		`relik = Relik.from_pretrained("${model.id}")`,
	];
	return [lines.join("\n")];
};
|
|
493
|
+
// TensorFlowTTS snippet for text-to-mel models (processor + model).
const tensorflowttsTextToMel = (model) => {
	const lines = [
		"from tensorflow_tts.inference import AutoProcessor, TFAutoModel",
		"",
		`processor = AutoProcessor.from_pretrained("${model.id}")`,
		`model = TFAutoModel.from_pretrained("${model.id}")`,
		"",
	];
	return [lines.join("\n")];
};
|
|
500
|
+
// TensorFlowTTS snippet for mel-to-wav vocoder models.
const tensorflowttsMelToWav = (model) => {
	const lines = [
		"from tensorflow_tts.inference import TFAutoModel",
		"",
		`model = TFAutoModel.from_pretrained("${model.id}")`,
		"audios = model.inference(mels)",
		"",
	];
	return [lines.join("\n")];
};
|
|
507
|
+
// Generic TensorFlowTTS snippet when the model's sub-task is unknown.
const tensorflowttsUnknown = (model) => {
	const lines = [
		"from tensorflow_tts.inference import TFAutoModel",
		"",
		`model = TFAutoModel.from_pretrained("${model.id}")`,
		"",
	];
	return [lines.join("\n")];
};
|
|
513
|
+
/**
 * Dispatch to the sub-task-specific TensorFlowTTS snippet based on tags.
 * @param {object} model - uses model.tags and model.id
 * @returns {string[]}
 */
export const tensorflowtts = (model) => {
	const byTag = [
		["text-to-mel", tensorflowttsTextToMel],
		["mel-to-wav", tensorflowttsMelToWav],
	];
	for (const [tag, makeSnippets] of byTag) {
		if (model.tags.includes(tag)) {
			return makeSnippets(model);
		}
	}
	return tensorflowttsUnknown(model);
};
|
|
522
|
+
// Python snippet for creating a pretrained timm model from the Hub.
export const timm = (model) => {
	const lines = [
		"import timm",
		"",
		`model = timm.create_model("hf_hub:${model.id}", pretrained=True)`,
	];
	return [lines.join("\n")];
};
|
|
527
|
+
// Python snippet for SAELens; release/sae ids are placeholders the user fills
// in (the model object is intentionally unused, matching the upstream signature).
export const saelens = ( /* model: ModelData */) => {
	const lines = [
		"# pip install sae-lens",
		"from sae_lens import SAE",
		"",
		"sae, cfg_dict, sparsity = SAE.from_pretrained(",
		'release = "RELEASE_ID", # e.g., "gpt2-small-res-jb". See other options in https://github.com/jbloomAus/SAELens/blob/main/sae_lens/pretrained_saes.yaml',
		'sae_id = "SAE_ID", # e.g., "blocks.8.hook_resid_pre". Won\'t always be a hook point',
		")",
	];
	return [lines.join("\n")];
};
|
|
536
|
+
// SEED-Story snippet: instantiates the base Llama2 LLM and the SEED-Story
// agent from their OmegaConf configs via hydra. The model id is not
// interpolated; the config paths point at the upstream repository.
export const seed_story = () => [
    `# seed_story_cfg_path refers to 'https://github.com/TencentARC/SEED-Story/blob/master/configs/clm_models/agent_7b_sft.yaml'
# llm_cfg_path refers to 'https://github.com/TencentARC/SEED-Story/blob/master/configs/clm_models/llama2chat7b_lora.yaml'
from omegaconf import OmegaConf
import hydra

# load Llama2
llm_cfg = OmegaConf.load(llm_cfg_path)
llm = hydra.utils.instantiate(llm_cfg, torch_dtype="fp16")

# initialize seed_story
seed_story_cfg = OmegaConf.load(seed_story_cfg_path)
seed_story = hydra.utils.instantiate(seed_story_cfg, llm=llm) `,
];
|
|
550
|
+
// skops snippet for models persisted as raw pickle files: download the whole
// repo, then joblib-load the configured model file. Pickle loading is flagged
// as trust-sensitive in the generated comments.
const skopsPickle = (model, modelFile) => {
    return [
        `import joblib
from skops.hub_utils import download
download("${model.id}", "path_to_folder")
model = joblib.load(
"${modelFile}"
)
# only load pickle files from sources you trust
# read more about it here https://skops.readthedocs.io/en/stable/persistence.html`,
    ];
};
|
|
562
|
+
/** skops snippet for models persisted in the secure skops format. */
const skopsFormat = (model, modelFile) => {
    const snippet = `from skops.hub_utils import download
from skops.io import load
download("${model.id}", "path_to_folder")
# make sure model file is in skops format
# if model is a pickle file, make sure it's from a source you trust
model = load("path_to_folder/${modelFile}")`;
    return [snippet];
};
|
|
572
|
+
// Legacy scikit-learn layout: a joblib file at the conventional
// "sklearn_model.joblib" path inside the repo.
const skopsJobLib = (model) => {
    return [
        `from huggingface_hub import hf_hub_download
import joblib
model = joblib.load(
hf_hub_download("${model.id}", "sklearn_model.joblib")
)
# only load pickle files from sources you trust
# read more about it here https://skops.readthedocs.io/en/stable/persistence.html`,
    ];
};
|
|
583
|
+
/**
 * Build a scikit-learn loading snippet. skops-tagged repos dispatch on the
 * persistence format declared in config.json; everything else falls back to
 * the legacy joblib layout.
 */
export const sklearn = (model) => {
    if (!model.tags.includes("skops")) {
        return skopsJobLib(model);
    }
    const modelFile = model.config?.sklearn?.model?.file;
    const saveFormat = model.config?.sklearn?.model_format;
    if (!modelFile) {
        return [`# ⚠️ Model filename not specified in config.json`];
    }
    return saveFormat === "pickle" ? skopsPickle(model, modelFile) : skopsFormat(model, modelFile);
};
|
|
601
|
+
// stable-audio-tools snippet: downloads the pretrained diffusion model,
// generates text-conditioned stereo audio, then saves a peak-normalized,
// int16-quantized WAV to disk.
export const stable_audio_tools = (model) => [
    `import torch
import torchaudio
from einops import rearrange
from stable_audio_tools import get_pretrained_model
from stable_audio_tools.inference.generation import generate_diffusion_cond

device = "cuda" if torch.cuda.is_available() else "cpu"

# Download model
model, model_config = get_pretrained_model("${model.id}")
sample_rate = model_config["sample_rate"]
sample_size = model_config["sample_size"]

model = model.to(device)

# Set up text and timing conditioning
conditioning = [{
"prompt": "128 BPM tech house drum loop",
}]

# Generate stereo audio
output = generate_diffusion_cond(
model,
conditioning=conditioning,
sample_size=sample_size,
device=device
)

# Rearrange audio batch to a single sequence
output = rearrange(output, "b d n -> d (b n)")

# Peak normalize, clip, convert to int16, and save to file
output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
torchaudio.save("output.wav", output, sample_rate)`,
];
|
|
637
|
+
/** Snippet for loading a fastai Learner from the Hub. */
export const fastai = (model) => {
    const snippet = `from huggingface_hub import from_pretrained_fastai

learn = from_pretrained_fastai("${model.id}")`;
    return [snippet];
};
|
|
642
|
+
/**
 * Build SAM2 usage snippets (image predictor and video predictor).
 * Fixes in the generated Python: the model id is now quoted in the
 * from_pretrained() calls (it was interpolated bare, producing invalid
 * Python), the stray trailing colon after add_new_points(...) is removed,
 * and statements inside the `with` blocks are indented as Python requires.
 */
export const sam2 = (model) => {
    const image_predictor = `# Use SAM2 with images
import torch
from sam2.sam2_image_predictor import SAM2ImagePredictor

predictor = SAM2ImagePredictor.from_pretrained("${model.id}")

with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
    predictor.set_image(<your_image>)
    masks, _, _ = predictor.predict(<input_prompts>)`;
    const video_predictor = `# Use SAM2 with videos
import torch
from sam2.sam2_video_predictor import SAM2VideoPredictor

predictor = SAM2VideoPredictor.from_pretrained("${model.id}")

with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
    state = predictor.init_state(<your_video>)

    # add new prompts and instantly get the output on the same frame
    frame_idx, object_ids, masks = predictor.add_new_points(state, <your_prompts>)

    # propagate the prompts to get masklets throughout the video
    for frame_idx, object_ids, masks in predictor.propagate_in_video(state):
        ...`;
    return [image_predictor, video_predictor];
};
|
|
669
|
+
/** CLI snippet for downloading a Sample Factory checkpoint from the Hub. */
export const sampleFactory = (model) => {
    const cmd = `python -m sample_factory.huggingface.load_from_hub -r ${model.id} -d ./train_dir`;
    return [cmd];
};
|
|
672
|
+
/**
 * Pull example sentences from the first widget example of a
 * sentence-similarity model, or undefined when no widget data exists.
 */
function get_widget_examples_from_st_model(model) {
    const example = model.widgetData?.[0];
    if (!example) {
        return undefined;
    }
    return [example.source_sentence, ...example.sentences];
}
|
|
678
|
+
/**
 * sentence-transformers usage snippet. Example sentences come from the
 * model's widget data when available, with a generic fallback set; the
 * printed similarity-matrix shape mirrors the number of sentences.
 */
export const sentenceTransformers = (model) => {
    const remote_code_snippet = model.tags.includes(TAG_CUSTOM_CODE) ? ", trust_remote_code=True" : "";
    const fallbackSentences = [
        "The weather is lovely today.",
        "It's so sunny outside!",
        "He drove to the stadium.",
    ];
    const sentences = get_widget_examples_from_st_model(model) ?? fallbackSentences;
    const n = sentences.length;
    return [
        `from sentence_transformers import SentenceTransformer

model = SentenceTransformer("${model.id}"${remote_code_snippet})

sentences = ${JSON.stringify(sentences, null, 4)}
embeddings = model.encode(sentences)

similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [${n}, ${n}]`,
    ];
};
|
|
698
|
+
/** Snippet for loading a SetFit model from the Hub. */
export const setfit = (model) => {
    const snippet = `from setfit import SetFitModel

model = SetFitModel.from_pretrained("${model.id}")`;
    return [snippet];
};
|
|
703
|
+
/** pip-install + load snippets for a spaCy pipeline hosted on the Hub. */
export const spacy = (model) => {
    // The wheel and the importable package share the repo name without namespace.
    const pkg = nameWithoutNamespace(model.id);
    return [
        `!pip install https://huggingface.co/${model.id}/resolve/main/${pkg}-any-py3-none-any.whl

# Using spacy.load().
import spacy
nlp = spacy.load("${pkg}")

# Importing as module.
import ${pkg}
nlp = ${pkg}.load()`,
    ];
};
|
|
714
|
+
/** Snippet for loading a SpanMarker NER model from the Hub. */
export const span_marker = (model) => {
    const snippet = `from span_marker import SpanMarkerModel

model = SpanMarkerModel.from_pretrained("${model.id}")`;
    return [snippet];
};
|
|
719
|
+
/** Snippet for downloading and running a Stanza pipeline. */
export const stanza = (model) => {
    // Repo names follow the "stanza-<lang>" convention; strip the prefix.
    const lang = nameWithoutNamespace(model.id).replace("stanza-", "");
    return [
        `import stanza

stanza.download("${lang}")
nlp = stanza.Pipeline("${lang}")`,
    ];
};
|
|
725
|
+
// Maps a SpeechBrain interface class name to the inference method it exposes;
// returns undefined for unknown interfaces.
const speechBrainMethod = (speechbrainInterface) => {
    const methods = new Map([
        ["EncoderClassifier", "classify_file"],
        ["EncoderDecoderASR", "transcribe_file"],
        ["EncoderASR", "transcribe_file"],
        ["SpectralMaskEnhancement", "enhance_file"],
        ["SepformerSeparation", "separate_file"],
    ]);
    return methods.get(speechbrainInterface);
};
|
|
740
|
+
/**
 * SpeechBrain snippet: resolves the interface class declared in config.json
 * and the matching inference method via speechBrainMethod; returns commented
 * placeholder snippets when either is missing or invalid.
 */
export const speechbrain = (model) => {
    const speechbrainInterface = model.config?.speechbrain?.speechbrain_interface;
    if (speechbrainInterface === undefined) {
        return [`# interface not specified in config.json`];
    }
    const speechbrainMethod = speechBrainMethod(speechbrainInterface);
    if (speechbrainMethod === undefined) {
        return [`# interface in config.json invalid`];
    }
    return [
        `from speechbrain.pretrained import ${speechbrainInterface}
model = ${speechbrainInterface}.from_hparams(
"${model.id}"
)
model.${speechbrainMethod}("file.wav")`,
    ];
};
|
|
757
|
+
/**
 * Build transformers usage snippets for a model.
 * Produces a direct AutoModel/processor load snippet, and additionally a
 * pipeline() snippet (listed first) when the model's pipeline_tag is
 * supported by transformers per LIBRARY_TASK_MAPPING. Custom-code repos get
 * trust_remote_code=True appended to every from_pretrained/pipeline call.
 */
export const transformers = (model) => {
    const info = model.transformersInfo;
    if (!info) {
        return [`# ⚠️ Type of model unknown`];
    }
    const remote_code_snippet = model.tags.includes(TAG_CUSTOM_CODE) ? ", trust_remote_code=True" : "";
    let autoSnippet;
    if (info.processor) {
        // Pick a conventional Python variable name for the preprocessor class.
        const varName = info.processor === "AutoTokenizer"
            ? "tokenizer"
            : info.processor === "AutoFeatureExtractor"
                ? "extractor"
                : "processor";
        autoSnippet = [
            "# Load model directly",
            `from transformers import ${info.processor}, ${info.auto_model}`,
            "",
            `${varName} = ${info.processor}.from_pretrained("${model.id}"` + remote_code_snippet + ")",
            `model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")",
        ].join("\n");
    }
    else {
        // No processor class: only the model load line.
        autoSnippet = [
            "# Load model directly",
            `from transformers import ${info.auto_model}`,
            `model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")",
        ].join("\n");
    }
    if (model.pipeline_tag && LIBRARY_TASK_MAPPING.transformers?.includes(model.pipeline_tag)) {
        const pipelineSnippet = ["# Use a pipeline as a high-level helper", "from transformers import pipeline", ""];
        // Chat-capable models (conversational tag + chat template) get an
        // example message list wrapped around the pipeline call.
        if (model.tags.includes("conversational") && model.config?.tokenizer_config?.chat_template) {
            pipelineSnippet.push("messages = [", ' {"role": "user", "content": "Who are you?"},', "]");
        }
        pipelineSnippet.push(`pipe = pipeline("${model.pipeline_tag}", model="${model.id}"` + remote_code_snippet + ")");
        if (model.tags.includes("conversational") && model.config?.tokenizer_config?.chat_template) {
            pipelineSnippet.push("pipe(messages)");
        }
        return [pipelineSnippet.join("\n"), autoSnippet];
    }
    return [autoSnippet];
};
|
|
798
|
+
/** transformers.js (browser/Node) pipeline snippet; needs a pipeline_tag. */
export const transformersJS = (model) => {
    if (!model.pipeline_tag) {
        return [`// ⚠️ Unknown pipeline tag`];
    }
    const libName = "@huggingface/transformers";
    const snippet = [
        `// npm i ${libName}`,
        `import { pipeline } from '${libName}';`,
        "",
        "// Allocate pipeline",
        `const pipe = await pipeline('${model.pipeline_tag}', '${model.id}');`,
    ].join("\n");
    return [snippet];
};
|
|
811
|
+
// Maps a PEFT task_type from adapter config to the transformers AutoModel
// class suffix; returns undefined for unknown (or missing) task types.
const peftTask = (peftTaskType) => {
    const suffixes = new Map([
        ["CAUSAL_LM", "CausalLM"],
        ["SEQ_2_SEQ_LM", "Seq2SeqLM"],
        ["TOKEN_CLS", "TokenClassification"],
        ["SEQ_CLS", "SequenceClassification"],
    ]);
    return suffixes.get(peftTaskType);
};
|
|
825
|
+
/**
 * PEFT adapter loading snippet. Requires both a recognized task_type and a
 * base_model_name_or_path in the adapter's config; otherwise returns a
 * placeholder message (task type is checked first, matching prior behavior).
 */
export const peft = (model) => {
    const peftConfig = model.config?.peft ?? {};
    const pefttask = peftTask(peftConfig.task_type);
    if (!pefttask) {
        return [`Task type is invalid.`];
    }
    const peftBaseModel = peftConfig.base_model_name_or_path;
    if (!peftBaseModel) {
        return [`Base model is not found.`];
    }
    return [
        `from peft import PeftModel, PeftConfig
from transformers import AutoModelFor${pefttask}

config = PeftConfig.from_pretrained("${model.id}")
base_model = AutoModelFor${pefttask}.from_pretrained("${peftBaseModel}")
model = PeftModel.from_pretrained(base_model, "${model.id}")`,
    ];
};
|
|
843
|
+
/** Snippet for loading a fastText binary model downloaded from the Hub. */
export const fasttext = (model) => {
    const snippet = `from huggingface_hub import hf_hub_download
import fasttext

model = fasttext.load_model(hf_hub_download("${model.id}", "model.bin"))`;
    return [snippet];
};
|
|
849
|
+
// Stable-Baselines3 snippet: download a checkpoint zip from the Hub.
// The "{MODEL FILENAME}" placeholder must be replaced by the user.
export const stableBaselines3 = (model) => [
    `from huggingface_sb3 import load_from_hub
checkpoint = load_from_hub(
repo_id="${model.id}",
filename="{MODEL FILENAME}.zip",
)`,
];
|
|
856
|
+
/**
 * Return the NeMo snippet list for a given domain, or undefined when the
 * domain has no template (currently only "ASR" is supported).
 */
const nemoDomainResolver = (domain, model) => {
    if (domain === "ASR") {
        return [
            `import nemo.collections.asr as nemo_asr
asr_model = nemo_asr.models.ASRModel.from_pretrained("${model.id}")

transcriptions = asr_model.transcribe(["file.wav"])`,
        ];
    }
    return undefined;
};
|
|
869
|
+
/**
 * CLI snippet for downloading an ML-Agents model from the Hub.
 * Fixes a corrupted local-dir path ("./download: string[]s" → "./downloads")
 * where a TypeScript annotation had leaked into the generated command.
 */
export const mlAgents = (model) => [
    `mlagents-load-from-hf --repo-id="${model.id}" --local-dir="./downloads"`,
];
|
|
872
|
+
// Unity Sentis snippet (C#): load a .sentis model from StreamingAssets and
// create a GPU-compute worker. The model id is intentionally not interpolated.
export const sentis = ( /* model: ModelData */) => [
    `string modelName = "[Your model name here].sentis";
Model model = ModelLoader.Load(Application.streamingAssetsPath + "/" + modelName);
IWorker engine = WorkerFactory.CreateWorker(BackendType.GPUCompute, model);
// Please see provided C# file for more details
`,
];
|
|
879
|
+
/** Snippet for loading a VFIMamba frame-interpolation model. */
export const vfimamba = (model) => {
    const snippet = `from Trainer_finetune import Model

model = Model.from_pretrained("${model.id}")`;
    return [snippet];
};
|
|
884
|
+
/** Snippet for loading a VoiceCraft model from the Hub. */
export const voicecraft = (model) => {
    const snippet = `from voicecraft import VoiceCraft

model = VoiceCraft.from_pretrained("${model.id}")`;
    return [snippet];
};
|
|
889
|
+
/**
 * ChatTTS inference snippet.
 * Fix: the generated Python used torch.from_numpy() without importing torch,
 * so it raised NameError at the save step; `import torch` is now included.
 */
export const chattts = () => [
    `import ChatTTS
import torch
import torchaudio

chat = ChatTTS.Chat()
chat.load_models(compile=False) # Set to True for better performance

texts = ["PUT YOUR TEXT HERE",]

wavs = chat.infer(texts, )

torchaudio.save("output1.wav", torch.from_numpy(wavs[0]), 24000)`,
];
|
|
902
|
+
/** Snippet for running a YOLOv10 detection model via ultralytics. */
export const yolov10 = (model) => {
    const snippet = `from ultralytics import YOLOv10

model = YOLOv10.from_pretrained("${model.id}")
source = 'http://images.cocodataset.org/val2017/000000039769.jpg'
model.predict(source=source, save=True)
`;
    return [snippet];
};
|
|
910
|
+
/** Two BiRefNet loading options: via transformers, or via the upstream repo. */
export const birefnet = (model) => {
    const viaTransformers = `# Option 1: use with transformers

from transformers import AutoModelForImageSegmentation
birefnet = AutoModelForImageSegmentation.from_pretrained("${model.id}", trust_remote_code=True)
`;
    const viaBiRefNet = `# Option 2: use with BiRefNet

# Install from https://github.com/ZhengPeng7/BiRefNet

from models.birefnet import BiRefNet
model = BiRefNet.from_pretrained("${model.id}")`;
    return [viaTransformers, viaBiRefNet];
};
|
|
923
|
+
/**
 * CLI snippet for downloading an MLX model with hf_transfer enabled.
 * Fixes a corrupted environment variable ("HF_HUB_ENABLE_HF_TRANS: string[]FER")
 * where a TypeScript annotation had leaked into the generated shell snippet;
 * the correct name is HF_HUB_ENABLE_HF_TRANSFER.
 */
export const mlx = (model) => [
    `pip install huggingface_hub hf_transfer

export HF_HUB_ENABLE_HF_TRANSFER=1
huggingface-cli download --local-dir ${nameWithoutNamespace(model.id)} ${model.id}`,
];
|
|
929
|
+
/**
 * Snippet for creating an mlx-image model.
 * Fix: the model id is now quoted in the generated create_model() call —
 * it was interpolated bare, producing invalid Python.
 */
export const mlxim = (model) => [
    `from mlxim.model import create_model

model = create_model("${model.id}")`,
];
|
|
934
|
+
/** Snippet for loading a Model2Vec static embedding model. */
export const model2vec = (model) => {
    const snippet = `from model2vec import StaticModel

model = StaticModel.from_pretrained("${model.id}")`;
    return [snippet];
};
|
|
939
|
+
/** Resolve a NeMo snippet from the model's tags (ASR only for now). */
export const nemo = (model) => {
    const isASR = model.tags.includes("automatic-speech-recognition");
    const command = isASR ? nemoDomainResolver("ASR", model) : undefined;
    return command ?? [`# tag did not correspond to a valid NeMo domain.`];
};
|
|
947
|
+
/** Snippet for loading a pxia model from the Hub. */
export const pxia = (model) => {
    const snippet = `from pxia import AutoModel

model = AutoModel.from_pretrained("${model.id}")`;
    return [snippet];
};
|
|
952
|
+
/** Snippet for loading a pythae autoencoder from the Hub. */
export const pythae = (model) => {
    const snippet = `from pythae.models import AutoModel

model = AutoModel.load_from_hf_hub("${model.id}")`;
    return [snippet];
};
|
|
957
|
+
/** Audiocraft MusicGen text-to-music snippet. */
const musicgen = (model) => {
    const snippet = `from audiocraft.models import MusicGen

model = MusicGen.get_pretrained("${model.id}")

descriptions = ['happy rock', 'energetic EDM', 'sad jazz']
wav = model.generate(descriptions) # generates 3 samples.`;
    return [snippet];
};
|
|
965
|
+
/** Audiocraft MAGNeT text-to-audio snippet. */
const magnet = (model) => {
    const snippet = `from audiocraft.models import MAGNeT

model = MAGNeT.get_pretrained("${model.id}")

descriptions = ['disco beat', 'energetic EDM', 'funky groove']
wav = model.generate(descriptions) # generates 3 samples.`;
    return [snippet];
};
|
|
973
|
+
/** Audiocraft AudioGen text-to-sound snippet. */
const audiogen = (model) => {
    const snippet = `from audiocraft.models import AudioGen

model = AudioGen.get_pretrained("${model.id}")
model.set_generation_params(duration=5) # generate 5 seconds.
descriptions = ['dog barking', 'sirene of an emergency vehicle', 'footsteps in a corridor']
wav = model.generate(descriptions) # generates 3 samples.`;
    return [snippet];
};
|
|
981
|
+
/** Dispatch an Audiocraft snippet by model-family tag (musicgen/audiogen/magnet). */
export const audiocraft = (model) => {
    if (model.tags.includes("musicgen")) {
        return musicgen(model);
    }
    if (model.tags.includes("audiogen")) {
        return audiogen(model);
    }
    if (model.tags.includes("magnet")) {
        return magnet(model);
    }
    return [`# Type of model unknown.`];
};
|
|
995
|
+
// WhisperKit CLI snippet (macOS): install via Homebrew, then transcribe.
// The model id is not interpolated; variants are chosen via CLI flags.
export const whisperkit = () => [
    `# Install CLI with Homebrew on macOS device
brew install whisperkit-cli

# View all available inference options
whisperkit-cli transcribe --help

# Download and run inference using whisper base model
whisperkit-cli transcribe --audio-path /path/to/audio.mp3

# Or use your preferred model variant
whisperkit-cli transcribe --model "large-v3" --model-prefix "distil" --audio-path /path/to/audio.mp3 --verbose`,
];
|
|
1008
|
+
/** Snippet for loading and running a 3DTopia-XL model. */
export const threedtopia_xl = (model) => {
    const snippet = `from threedtopia_xl.models import threedtopia_xl

model = threedtopia_xl.from_pretrained("${model.id}")
model.generate(cond="path/to/image.png")`;
    return [snippet];
};
|
|
1014
|
+
/** Snippet for loading a Hezar model from the Hub. */
export const hezar = (model) => {
    const snippet = `from hezar import Model

model = Model.load("${model.id}")`;
    return [snippet];
};
|
|
1019
|
+
//#endregion
|