@huggingface/tasks 0.13.1 → 0.13.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commonjs/dataset-libraries.js +65 -0
- package/dist/{src → commonjs}/default-widget-inputs.d.ts +2 -2
- package/dist/commonjs/default-widget-inputs.d.ts.map +1 -0
- package/dist/commonjs/default-widget-inputs.js +698 -0
- package/dist/commonjs/gguf.js +43 -0
- package/dist/commonjs/hardware.js +461 -0
- package/dist/{src → commonjs}/index.d.ts +19 -19
- package/dist/commonjs/index.d.ts.map +1 -0
- package/dist/commonjs/index.js +57 -0
- package/dist/{src → commonjs}/library-to-tasks.d.ts +2 -2
- package/dist/commonjs/library-to-tasks.d.ts.map +1 -0
- package/dist/commonjs/library-to-tasks.js +76 -0
- package/dist/{src → commonjs}/local-apps.d.ts +2 -2
- package/dist/commonjs/local-apps.d.ts.map +1 -0
- package/dist/commonjs/local-apps.js +334 -0
- package/dist/{src → commonjs}/model-data.d.ts +3 -3
- package/dist/commonjs/model-data.d.ts.map +1 -0
- package/dist/commonjs/model-data.js +2 -0
- package/dist/commonjs/model-libraries-downloads.js +18 -0
- package/dist/{src → commonjs}/model-libraries-snippets.d.ts +1 -1
- package/dist/commonjs/model-libraries-snippets.d.ts.map +1 -0
- package/dist/commonjs/model-libraries-snippets.js +1093 -0
- package/dist/{src → commonjs}/model-libraries.d.ts +3 -3
- package/dist/{src → commonjs}/model-libraries.d.ts.map +1 -1
- package/dist/commonjs/model-libraries.js +793 -0
- package/dist/commonjs/package.json +3 -0
- package/dist/{src → commonjs}/pipelines.d.ts +1 -1
- package/dist/{src → commonjs}/pipelines.d.ts.map +1 -1
- package/dist/commonjs/pipelines.js +645 -0
- package/dist/{src → commonjs}/snippets/common.d.ts +1 -1
- package/dist/commonjs/snippets/common.d.ts.map +1 -0
- package/dist/commonjs/snippets/common.js +23 -0
- package/dist/commonjs/snippets/curl.js +100 -0
- package/dist/commonjs/snippets/curl.spec.js +89 -0
- package/dist/commonjs/snippets/index.d.ts +7 -0
- package/dist/commonjs/snippets/index.d.ts.map +1 -0
- package/dist/commonjs/snippets/index.js +38 -0
- package/dist/commonjs/snippets/inputs.d.ts +4 -0
- package/dist/commonjs/snippets/inputs.d.ts.map +1 -0
- package/dist/commonjs/snippets/inputs.js +127 -0
- package/dist/commonjs/snippets/js.js +278 -0
- package/dist/commonjs/snippets/js.spec.js +141 -0
- package/dist/{src → commonjs}/snippets/python.d.ts +1 -1
- package/dist/{src → commonjs}/snippets/python.d.ts.map +1 -1
- package/dist/commonjs/snippets/python.js +293 -0
- package/dist/commonjs/snippets/python.spec.js +135 -0
- package/dist/{src → commonjs}/snippets/types.d.ts +1 -1
- package/dist/commonjs/snippets/types.d.ts.map +1 -0
- package/dist/commonjs/snippets/types.js +2 -0
- package/dist/commonjs/tasks/audio-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/audio-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/audio-classification/data.js +79 -0
- package/dist/commonjs/tasks/audio-classification/inference.js +2 -0
- package/dist/commonjs/tasks/audio-to-audio/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/audio-to-audio/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/audio-to-audio/data.js +66 -0
- package/dist/commonjs/tasks/automatic-speech-recognition/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/automatic-speech-recognition/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/automatic-speech-recognition/data.js +80 -0
- package/dist/commonjs/tasks/automatic-speech-recognition/inference.js +7 -0
- package/dist/commonjs/tasks/chat-completion/inference.js +7 -0
- package/dist/commonjs/tasks/depth-estimation/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/depth-estimation/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/depth-estimation/data.js +69 -0
- package/dist/commonjs/tasks/depth-estimation/inference.js +7 -0
- package/dist/commonjs/tasks/document-question-answering/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/document-question-answering/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/document-question-answering/data.js +80 -0
- package/dist/commonjs/tasks/document-question-answering/inference.js +2 -0
- package/dist/commonjs/tasks/feature-extraction/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/feature-extraction/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/feature-extraction/data.js +55 -0
- package/dist/commonjs/tasks/feature-extraction/inference.js +7 -0
- package/dist/commonjs/tasks/fill-mask/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/fill-mask/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/fill-mask/data.js +75 -0
- package/dist/commonjs/tasks/fill-mask/inference.js +2 -0
- package/dist/commonjs/tasks/image-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-classification/data.js +86 -0
- package/dist/commonjs/tasks/image-classification/inference.js +2 -0
- package/dist/commonjs/tasks/image-feature-extraction/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-feature-extraction/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-feature-extraction/data.js +57 -0
- package/dist/commonjs/tasks/image-segmentation/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-segmentation/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-segmentation/data.js +95 -0
- package/dist/commonjs/tasks/image-segmentation/inference.js +2 -0
- package/dist/commonjs/tasks/image-text-to-text/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-text-to-text/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-text-to-text/data.js +99 -0
- package/dist/commonjs/tasks/image-to-3d/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-to-3d/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-to-3d/data.js +74 -0
- package/dist/commonjs/tasks/image-to-image/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-to-image/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-to-image/data.js +95 -0
- package/dist/commonjs/tasks/image-to-image/inference.js +7 -0
- package/dist/commonjs/tasks/image-to-text/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/image-to-text/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/image-to-text/data.js +80 -0
- package/dist/commonjs/tasks/image-to-text/inference.js +7 -0
- package/dist/{src → commonjs}/tasks/index.d.ts +29 -29
- package/dist/commonjs/tasks/index.d.ts.map +1 -0
- package/dist/commonjs/tasks/index.js +183 -0
- package/dist/commonjs/tasks/keypoint-detection/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/keypoint-detection/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/keypoint-detection/data.js +49 -0
- package/dist/commonjs/tasks/mask-generation/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/mask-generation/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/mask-generation/data.js +52 -0
- package/dist/commonjs/tasks/object-detection/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/object-detection/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/object-detection/data.js +82 -0
- package/dist/commonjs/tasks/object-detection/inference.js +2 -0
- package/dist/commonjs/tasks/placeholder/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/placeholder/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/placeholder/data.js +20 -0
- package/dist/commonjs/tasks/question-answering/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/question-answering/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/question-answering/data.js +71 -0
- package/dist/commonjs/tasks/question-answering/inference.js +2 -0
- package/dist/commonjs/tasks/reinforcement-learning/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/reinforcement-learning/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/reinforcement-learning/data.js +69 -0
- package/dist/commonjs/tasks/sentence-similarity/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/sentence-similarity/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/sentence-similarity/data.js +95 -0
- package/dist/commonjs/tasks/sentence-similarity/inference.js +7 -0
- package/dist/commonjs/tasks/summarization/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/summarization/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/summarization/data.js +69 -0
- package/dist/commonjs/tasks/summarization/inference.js +7 -0
- package/dist/commonjs/tasks/table-question-answering/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/table-question-answering/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/table-question-answering/data.js +54 -0
- package/dist/commonjs/tasks/table-question-answering/inference.js +2 -0
- package/dist/commonjs/tasks/tabular-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/tabular-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/tabular-classification/data.js +67 -0
- package/dist/commonjs/tasks/tabular-regression/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/tabular-regression/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/tabular-regression/data.js +55 -0
- package/dist/commonjs/tasks/text-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/text-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-classification/data.js +100 -0
- package/dist/commonjs/tasks/text-classification/inference.js +2 -0
- package/dist/commonjs/tasks/text-generation/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/text-generation/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-generation/data.js +108 -0
- package/dist/commonjs/tasks/text-generation/inference.js +7 -0
- package/dist/commonjs/tasks/text-to-3d/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/text-to-3d/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-to-3d/data.js +55 -0
- package/dist/commonjs/tasks/text-to-audio/inference.js +7 -0
- package/dist/commonjs/tasks/text-to-image/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/text-to-image/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-to-image/data.js +95 -0
- package/dist/commonjs/tasks/text-to-image/inference.js +7 -0
- package/dist/commonjs/tasks/text-to-speech/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/text-to-speech/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-to-speech/data.js +76 -0
- package/dist/commonjs/tasks/text-to-speech/inference.js +7 -0
- package/dist/commonjs/tasks/text-to-video/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/text-to-video/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/text-to-video/data.js +95 -0
- package/dist/commonjs/tasks/text2text-generation/inference.js +7 -0
- package/dist/commonjs/tasks/token-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/token-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/token-classification/data.js +87 -0
- package/dist/commonjs/tasks/token-classification/inference.js +2 -0
- package/dist/commonjs/tasks/translation/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/translation/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/translation/data.js +65 -0
- package/dist/commonjs/tasks/translation/inference.js +7 -0
- package/dist/commonjs/tasks/unconditional-image-generation/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/unconditional-image-generation/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/unconditional-image-generation/data.js +65 -0
- package/dist/commonjs/tasks/video-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/video-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/video-classification/data.js +82 -0
- package/dist/commonjs/tasks/video-classification/inference.js +2 -0
- package/dist/commonjs/tasks/video-text-to-text/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/video-text-to-text/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/video-text-to-text/data.js +63 -0
- package/dist/commonjs/tasks/visual-question-answering/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/visual-question-answering/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/visual-question-answering/data.js +93 -0
- package/dist/commonjs/tasks/visual-question-answering/inference.js +2 -0
- package/dist/commonjs/tasks/zero-shot-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/zero-shot-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/zero-shot-classification/data.js +66 -0
- package/dist/commonjs/tasks/zero-shot-classification/inference.js +2 -0
- package/dist/commonjs/tasks/zero-shot-image-classification/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/zero-shot-image-classification/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/zero-shot-image-classification/data.js +81 -0
- package/dist/commonjs/tasks/zero-shot-image-classification/inference.js +2 -0
- package/dist/commonjs/tasks/zero-shot-object-detection/data.d.ts +4 -0
- package/dist/{src → commonjs}/tasks/zero-shot-object-detection/data.d.ts.map +1 -1
- package/dist/commonjs/tasks/zero-shot-object-detection/data.js +62 -0
- package/dist/commonjs/tasks/zero-shot-object-detection/inference.js +2 -0
- package/dist/commonjs/tokenizer-data.js +13 -0
- package/dist/{src → commonjs}/widget-example.d.ts +1 -1
- package/dist/commonjs/widget-example.d.ts.map +1 -0
- package/dist/commonjs/widget-example.js +5 -0
- package/dist/esm/dataset-libraries.d.ts +87 -0
- package/dist/esm/dataset-libraries.d.ts.map +1 -0
- package/dist/esm/dataset-libraries.js +62 -0
- package/dist/esm/default-widget-inputs.d.ts +6 -0
- package/dist/esm/default-widget-inputs.d.ts.map +1 -0
- package/dist/esm/default-widget-inputs.js +695 -0
- package/dist/esm/gguf.d.ts +35 -0
- package/dist/esm/gguf.d.ts.map +1 -0
- package/dist/esm/gguf.js +39 -0
- package/dist/esm/hardware.d.ts +478 -0
- package/dist/esm/hardware.d.ts.map +1 -0
- package/dist/esm/hardware.js +458 -0
- package/dist/esm/index.d.ts +21 -0
- package/dist/esm/index.d.ts.map +1 -0
- package/dist/esm/index.js +12 -0
- package/dist/esm/library-to-tasks.d.ts +11 -0
- package/dist/esm/library-to-tasks.d.ts.map +1 -0
- package/dist/esm/library-to-tasks.js +73 -0
- package/dist/esm/local-apps.d.ts +195 -0
- package/dist/esm/local-apps.d.ts.map +1 -0
- package/dist/esm/local-apps.js +331 -0
- package/dist/esm/model-data.d.ts +146 -0
- package/dist/esm/model-data.d.ts.map +1 -0
- package/dist/esm/model-data.js +1 -0
- package/dist/esm/model-libraries-downloads.d.ts +18 -0
- package/dist/esm/model-libraries-downloads.d.ts.map +1 -0
- package/dist/esm/model-libraries-downloads.js +17 -0
- package/dist/esm/model-libraries-snippets.d.ts +72 -0
- package/dist/esm/model-libraries-snippets.d.ts.map +1 -0
- package/dist/esm/model-libraries-snippets.js +1019 -0
- package/dist/esm/model-libraries.d.ts +804 -0
- package/dist/esm/model-libraries.d.ts.map +1 -0
- package/dist/esm/model-libraries.js +767 -0
- package/dist/esm/package.json +3 -0
- package/dist/esm/pipelines.d.ts +425 -0
- package/dist/esm/pipelines.d.ts.map +1 -0
- package/dist/esm/pipelines.js +642 -0
- package/dist/esm/snippets/common.d.ts +14 -0
- package/dist/esm/snippets/common.d.ts.map +1 -0
- package/dist/esm/snippets/common.js +19 -0
- package/dist/esm/snippets/curl.d.ts +17 -0
- package/dist/esm/snippets/curl.d.ts.map +1 -0
- package/dist/esm/snippets/curl.js +91 -0
- package/dist/esm/snippets/curl.spec.d.ts +2 -0
- package/dist/esm/snippets/curl.spec.d.ts.map +1 -0
- package/dist/esm/snippets/curl.spec.js +87 -0
- package/dist/esm/snippets/index.d.ts +7 -0
- package/dist/esm/snippets/index.d.ts.map +1 -0
- package/dist/esm/snippets/index.js +6 -0
- package/dist/esm/snippets/inputs.d.ts +4 -0
- package/dist/esm/snippets/inputs.d.ts.map +1 -0
- package/dist/esm/snippets/inputs.js +124 -0
- package/dist/esm/snippets/js.d.ts +19 -0
- package/dist/esm/snippets/js.d.ts.map +1 -0
- package/dist/esm/snippets/js.js +267 -0
- package/dist/esm/snippets/js.spec.d.ts +2 -0
- package/dist/esm/snippets/js.spec.d.ts.map +1 -0
- package/dist/esm/snippets/js.spec.js +139 -0
- package/dist/esm/snippets/python.d.ts +22 -0
- package/dist/esm/snippets/python.d.ts.map +1 -0
- package/dist/esm/snippets/python.js +279 -0
- package/dist/esm/snippets/python.spec.d.ts +2 -0
- package/dist/esm/snippets/python.spec.d.ts.map +1 -0
- package/dist/esm/snippets/python.spec.js +133 -0
- package/dist/esm/snippets/types.d.ts +12 -0
- package/dist/esm/snippets/types.d.ts.map +1 -0
- package/dist/esm/snippets/types.js +1 -0
- package/dist/esm/tasks/audio-classification/data.d.ts +4 -0
- package/dist/esm/tasks/audio-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/audio-classification/data.js +77 -0
- package/dist/esm/tasks/audio-classification/inference.d.ts +53 -0
- package/dist/esm/tasks/audio-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/audio-classification/inference.js +1 -0
- package/dist/esm/tasks/audio-to-audio/data.d.ts +4 -0
- package/dist/esm/tasks/audio-to-audio/data.d.ts.map +1 -0
- package/dist/esm/tasks/audio-to-audio/data.js +64 -0
- package/dist/esm/tasks/automatic-speech-recognition/data.d.ts +4 -0
- package/dist/esm/tasks/automatic-speech-recognition/data.d.ts.map +1 -0
- package/dist/esm/tasks/automatic-speech-recognition/data.js +78 -0
- package/dist/esm/tasks/automatic-speech-recognition/inference.d.ts +155 -0
- package/dist/esm/tasks/automatic-speech-recognition/inference.d.ts.map +1 -0
- package/dist/esm/tasks/automatic-speech-recognition/inference.js +6 -0
- package/dist/esm/tasks/chat-completion/inference.d.ts +291 -0
- package/dist/esm/tasks/chat-completion/inference.d.ts.map +1 -0
- package/dist/esm/tasks/chat-completion/inference.js +6 -0
- package/dist/esm/tasks/depth-estimation/data.d.ts +4 -0
- package/dist/esm/tasks/depth-estimation/data.d.ts.map +1 -0
- package/dist/esm/tasks/depth-estimation/data.js +67 -0
- package/dist/esm/tasks/depth-estimation/inference.d.ts +36 -0
- package/dist/esm/tasks/depth-estimation/inference.d.ts.map +1 -0
- package/dist/esm/tasks/depth-estimation/inference.js +6 -0
- package/dist/esm/tasks/document-question-answering/data.d.ts +4 -0
- package/dist/esm/tasks/document-question-answering/data.d.ts.map +1 -0
- package/dist/esm/tasks/document-question-answering/data.js +78 -0
- package/dist/esm/tasks/document-question-answering/inference.d.ts +111 -0
- package/dist/esm/tasks/document-question-answering/inference.d.ts.map +1 -0
- package/dist/esm/tasks/document-question-answering/inference.js +1 -0
- package/dist/esm/tasks/feature-extraction/data.d.ts +4 -0
- package/dist/esm/tasks/feature-extraction/data.d.ts.map +1 -0
- package/dist/esm/tasks/feature-extraction/data.js +53 -0
- package/dist/esm/tasks/feature-extraction/inference.d.ts +38 -0
- package/dist/esm/tasks/feature-extraction/inference.d.ts.map +1 -0
- package/dist/esm/tasks/feature-extraction/inference.js +6 -0
- package/dist/esm/tasks/fill-mask/data.d.ts +4 -0
- package/dist/esm/tasks/fill-mask/data.d.ts.map +1 -0
- package/dist/esm/tasks/fill-mask/data.js +73 -0
- package/dist/esm/tasks/fill-mask/inference.d.ts +63 -0
- package/dist/esm/tasks/fill-mask/inference.d.ts.map +1 -0
- package/dist/esm/tasks/fill-mask/inference.js +1 -0
- package/dist/esm/tasks/image-classification/data.d.ts +4 -0
- package/dist/esm/tasks/image-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-classification/data.js +84 -0
- package/dist/esm/tasks/image-classification/inference.d.ts +53 -0
- package/dist/esm/tasks/image-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/image-classification/inference.js +1 -0
- package/dist/esm/tasks/image-feature-extraction/data.d.ts +4 -0
- package/dist/esm/tasks/image-feature-extraction/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-feature-extraction/data.js +55 -0
- package/dist/esm/tasks/image-segmentation/data.d.ts +4 -0
- package/dist/esm/tasks/image-segmentation/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-segmentation/data.js +93 -0
- package/dist/esm/tasks/image-segmentation/inference.d.ts +70 -0
- package/dist/esm/tasks/image-segmentation/inference.d.ts.map +1 -0
- package/dist/esm/tasks/image-segmentation/inference.js +1 -0
- package/dist/esm/tasks/image-text-to-text/data.d.ts +4 -0
- package/dist/esm/tasks/image-text-to-text/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-text-to-text/data.js +97 -0
- package/dist/esm/tasks/image-to-3d/data.d.ts +4 -0
- package/dist/esm/tasks/image-to-3d/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-to-3d/data.js +72 -0
- package/dist/esm/tasks/image-to-image/data.d.ts +4 -0
- package/dist/esm/tasks/image-to-image/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-to-image/data.js +93 -0
- package/dist/esm/tasks/image-to-image/inference.d.ts +65 -0
- package/dist/esm/tasks/image-to-image/inference.d.ts.map +1 -0
- package/dist/esm/tasks/image-to-image/inference.js +6 -0
- package/dist/esm/tasks/image-to-text/data.d.ts +4 -0
- package/dist/esm/tasks/image-to-text/data.d.ts.map +1 -0
- package/dist/esm/tasks/image-to-text/data.js +78 -0
- package/dist/esm/tasks/image-to-text/inference.d.ts +139 -0
- package/dist/esm/tasks/image-to-text/inference.d.ts.map +1 -0
- package/dist/esm/tasks/image-to-text/inference.js +6 -0
- package/dist/esm/tasks/index.d.ts +87 -0
- package/dist/esm/tasks/index.d.ts.map +1 -0
- package/dist/esm/tasks/index.js +177 -0
- package/dist/esm/tasks/keypoint-detection/data.d.ts +4 -0
- package/dist/esm/tasks/keypoint-detection/data.d.ts.map +1 -0
- package/dist/esm/tasks/keypoint-detection/data.js +47 -0
- package/dist/esm/tasks/mask-generation/data.d.ts +4 -0
- package/dist/esm/tasks/mask-generation/data.d.ts.map +1 -0
- package/dist/esm/tasks/mask-generation/data.js +50 -0
- package/dist/esm/tasks/object-detection/data.d.ts +4 -0
- package/dist/esm/tasks/object-detection/data.d.ts.map +1 -0
- package/dist/esm/tasks/object-detection/data.js +80 -0
- package/dist/esm/tasks/object-detection/inference.d.ts +76 -0
- package/dist/esm/tasks/object-detection/inference.d.ts.map +1 -0
- package/dist/esm/tasks/object-detection/inference.js +1 -0
- package/dist/esm/tasks/placeholder/data.d.ts +4 -0
- package/dist/esm/tasks/placeholder/data.d.ts.map +1 -0
- package/dist/esm/tasks/placeholder/data.js +18 -0
- package/dist/esm/tasks/question-answering/data.d.ts +4 -0
- package/dist/esm/tasks/question-answering/data.d.ts.map +1 -0
- package/dist/esm/tasks/question-answering/data.js +69 -0
- package/dist/esm/tasks/question-answering/inference.d.ts +100 -0
- package/dist/esm/tasks/question-answering/inference.d.ts.map +1 -0
- package/dist/esm/tasks/question-answering/inference.js +1 -0
- package/dist/esm/tasks/reinforcement-learning/data.d.ts +4 -0
- package/dist/esm/tasks/reinforcement-learning/data.d.ts.map +1 -0
- package/dist/esm/tasks/reinforcement-learning/data.js +67 -0
- package/dist/esm/tasks/sentence-similarity/data.d.ts +4 -0
- package/dist/esm/tasks/sentence-similarity/data.d.ts.map +1 -0
- package/dist/esm/tasks/sentence-similarity/data.js +93 -0
- package/dist/esm/tasks/sentence-similarity/inference.d.ts +32 -0
- package/dist/esm/tasks/sentence-similarity/inference.d.ts.map +1 -0
- package/dist/esm/tasks/sentence-similarity/inference.js +6 -0
- package/dist/esm/tasks/summarization/data.d.ts +4 -0
- package/dist/esm/tasks/summarization/data.d.ts.map +1 -0
- package/dist/esm/tasks/summarization/data.js +67 -0
- package/dist/esm/tasks/summarization/inference.d.ts +56 -0
- package/dist/esm/tasks/summarization/inference.d.ts.map +1 -0
- package/dist/esm/tasks/summarization/inference.js +6 -0
- package/dist/esm/tasks/table-question-answering/data.d.ts +4 -0
- package/dist/esm/tasks/table-question-answering/data.d.ts.map +1 -0
- package/dist/esm/tasks/table-question-answering/data.js +52 -0
- package/dist/esm/tasks/table-question-answering/inference.d.ts +62 -0
- package/dist/esm/tasks/table-question-answering/inference.d.ts.map +1 -0
- package/dist/esm/tasks/table-question-answering/inference.js +1 -0
- package/dist/esm/tasks/tabular-classification/data.d.ts +4 -0
- package/dist/esm/tasks/tabular-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/tabular-classification/data.js +65 -0
- package/dist/esm/tasks/tabular-regression/data.d.ts +4 -0
- package/dist/esm/tasks/tabular-regression/data.d.ts.map +1 -0
- package/dist/esm/tasks/tabular-regression/data.js +53 -0
- package/dist/esm/tasks/text-classification/data.d.ts +4 -0
- package/dist/esm/tasks/text-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/text-classification/data.js +98 -0
- package/dist/esm/tasks/text-classification/inference.d.ts +52 -0
- package/dist/esm/tasks/text-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/text-classification/inference.js +1 -0
- package/dist/esm/tasks/text-generation/data.d.ts +4 -0
- package/dist/esm/tasks/text-generation/data.d.ts.map +1 -0
- package/dist/esm/tasks/text-generation/data.js +106 -0
- package/dist/esm/tasks/text-generation/inference.d.ts +188 -0
- package/dist/esm/tasks/text-generation/inference.d.ts.map +1 -0
- package/dist/esm/tasks/text-generation/inference.js +6 -0
- package/dist/esm/tasks/text-to-3d/data.d.ts +4 -0
- package/dist/esm/tasks/text-to-3d/data.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-3d/data.js +53 -0
- package/dist/esm/tasks/text-to-audio/inference.d.ts +139 -0
- package/dist/esm/tasks/text-to-audio/inference.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-audio/inference.js +6 -0
- package/dist/esm/tasks/text-to-image/data.d.ts +4 -0
- package/dist/esm/tasks/text-to-image/data.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-image/data.js +93 -0
- package/dist/esm/tasks/text-to-image/inference.d.ts +72 -0
- package/dist/esm/tasks/text-to-image/inference.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-image/inference.js +6 -0
- package/dist/esm/tasks/text-to-speech/data.d.ts +4 -0
- package/dist/esm/tasks/text-to-speech/data.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-speech/data.js +74 -0
- package/dist/esm/tasks/text-to-speech/inference.d.ts +141 -0
- package/dist/esm/tasks/text-to-speech/inference.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-speech/inference.js +6 -0
- package/dist/esm/tasks/text-to-video/data.d.ts +4 -0
- package/dist/esm/tasks/text-to-video/data.d.ts.map +1 -0
- package/dist/esm/tasks/text-to-video/data.js +93 -0
- package/dist/esm/tasks/text2text-generation/inference.d.ts +54 -0
- package/dist/esm/tasks/text2text-generation/inference.d.ts.map +1 -0
- package/dist/esm/tasks/text2text-generation/inference.js +6 -0
- package/dist/esm/tasks/token-classification/data.d.ts +4 -0
- package/dist/esm/tasks/token-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/token-classification/data.js +85 -0
- package/dist/esm/tasks/token-classification/inference.d.ts +86 -0
- package/dist/esm/tasks/token-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/token-classification/inference.js +1 -0
- package/dist/esm/tasks/translation/data.d.ts +4 -0
- package/dist/esm/tasks/translation/data.d.ts.map +1 -0
- package/dist/esm/tasks/translation/data.js +63 -0
- package/dist/esm/tasks/translation/inference.d.ts +66 -0
- package/dist/esm/tasks/translation/inference.d.ts.map +1 -0
- package/dist/esm/tasks/translation/inference.js +6 -0
- package/dist/esm/tasks/unconditional-image-generation/data.d.ts +4 -0
- package/dist/esm/tasks/unconditional-image-generation/data.d.ts.map +1 -0
- package/dist/esm/tasks/unconditional-image-generation/data.js +63 -0
- package/dist/esm/tasks/video-classification/data.d.ts +4 -0
- package/dist/esm/tasks/video-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/video-classification/data.js +80 -0
- package/dist/esm/tasks/video-classification/inference.d.ts +60 -0
- package/dist/esm/tasks/video-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/video-classification/inference.js +1 -0
- package/dist/esm/tasks/video-text-to-text/data.d.ts +4 -0
- package/dist/esm/tasks/video-text-to-text/data.d.ts.map +1 -0
- package/dist/esm/tasks/video-text-to-text/data.js +61 -0
- package/dist/esm/tasks/visual-question-answering/data.d.ts +4 -0
- package/dist/esm/tasks/visual-question-answering/data.d.ts.map +1 -0
- package/dist/esm/tasks/visual-question-answering/data.js +91 -0
- package/dist/esm/tasks/visual-question-answering/inference.d.ts +63 -0
- package/dist/esm/tasks/visual-question-answering/inference.d.ts.map +1 -0
- package/dist/esm/tasks/visual-question-answering/inference.js +1 -0
- package/dist/esm/tasks/zero-shot-classification/data.d.ts +4 -0
- package/dist/esm/tasks/zero-shot-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/zero-shot-classification/data.js +64 -0
- package/dist/esm/tasks/zero-shot-classification/inference.d.ts +68 -0
- package/dist/esm/tasks/zero-shot-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/zero-shot-classification/inference.js +1 -0
- package/dist/esm/tasks/zero-shot-image-classification/data.d.ts +4 -0
- package/dist/esm/tasks/zero-shot-image-classification/data.d.ts.map +1 -0
- package/dist/esm/tasks/zero-shot-image-classification/data.js +79 -0
- package/dist/esm/tasks/zero-shot-image-classification/inference.d.ts +62 -0
- package/dist/esm/tasks/zero-shot-image-classification/inference.d.ts.map +1 -0
- package/dist/esm/tasks/zero-shot-image-classification/inference.js +1 -0
- package/dist/esm/tasks/zero-shot-object-detection/data.d.ts +4 -0
- package/dist/esm/tasks/zero-shot-object-detection/data.d.ts.map +1 -0
- package/dist/esm/tasks/zero-shot-object-detection/data.js +60 -0
- package/dist/esm/tasks/zero-shot-object-detection/inference.d.ts +67 -0
- package/dist/esm/tasks/zero-shot-object-detection/inference.d.ts.map +1 -0
- package/dist/esm/tasks/zero-shot-object-detection/inference.js +1 -0
- package/dist/esm/tokenizer-data.d.ts +26 -0
- package/dist/esm/tokenizer-data.d.ts.map +1 -0
- package/dist/esm/tokenizer-data.js +10 -0
- package/dist/esm/widget-example.d.ts +83 -0
- package/dist/esm/widget-example.d.ts.map +1 -0
- package/dist/esm/widget-example.js +4 -0
- package/package.json +24 -20
- package/src/default-widget-inputs.ts +2 -2
- package/src/index.ts +23 -19
- package/src/library-to-tasks.ts +2 -2
- package/src/local-apps.ts +3 -3
- package/src/model-data.ts +3 -3
- package/src/model-libraries-snippets.ts +3 -3
- package/src/model-libraries.ts +3 -3
- package/src/snippets/common.ts +1 -1
- package/src/snippets/curl.spec.ts +2 -2
- package/src/snippets/index.ts +5 -4
- package/src/snippets/inputs.ts +3 -3
- package/src/snippets/js.spec.ts +10 -10
- package/src/snippets/js.ts +8 -8
- package/src/snippets/python.spec.ts +39 -2
- package/src/snippets/python.ts +22 -5
- package/src/snippets/types.ts +1 -1
- package/src/tasks/audio-classification/data.ts +1 -1
- package/src/tasks/audio-to-audio/data.ts +1 -1
- package/src/tasks/automatic-speech-recognition/data.ts +1 -1
- package/src/tasks/depth-estimation/data.ts +1 -1
- package/src/tasks/document-question-answering/data.ts +1 -1
- package/src/tasks/feature-extraction/data.ts +1 -1
- package/src/tasks/fill-mask/data.ts +1 -1
- package/src/tasks/image-classification/data.ts +1 -1
- package/src/tasks/image-feature-extraction/data.ts +1 -1
- package/src/tasks/image-segmentation/data.ts +1 -1
- package/src/tasks/image-text-to-text/data.ts +1 -1
- package/src/tasks/image-to-3d/data.ts +1 -1
- package/src/tasks/image-to-image/data.ts +1 -1
- package/src/tasks/image-to-text/data.ts +1 -1
- package/src/tasks/index.ts +70 -70
- package/src/tasks/keypoint-detection/data.ts +1 -1
- package/src/tasks/mask-generation/data.ts +1 -1
- package/src/tasks/object-detection/data.ts +1 -1
- package/src/tasks/placeholder/data.ts +1 -1
- package/src/tasks/question-answering/data.ts +1 -1
- package/src/tasks/reinforcement-learning/data.ts +1 -1
- package/src/tasks/sentence-similarity/data.ts +1 -1
- package/src/tasks/summarization/data.ts +1 -1
- package/src/tasks/table-question-answering/data.ts +1 -1
- package/src/tasks/tabular-classification/data.ts +1 -1
- package/src/tasks/tabular-regression/data.ts +1 -1
- package/src/tasks/text-classification/data.ts +1 -1
- package/src/tasks/text-generation/data.ts +1 -1
- package/src/tasks/text-to-3d/data.ts +1 -1
- package/src/tasks/text-to-image/data.ts +1 -1
- package/src/tasks/text-to-speech/data.ts +1 -1
- package/src/tasks/text-to-video/data.ts +1 -1
- package/src/tasks/token-classification/data.ts +1 -1
- package/src/tasks/translation/data.ts +1 -1
- package/src/tasks/unconditional-image-generation/data.ts +1 -1
- package/src/tasks/video-classification/data.ts +1 -1
- package/src/tasks/video-text-to-text/data.ts +1 -1
- package/src/tasks/visual-question-answering/data.ts +1 -1
- package/src/tasks/zero-shot-classification/data.ts +1 -1
- package/src/tasks/zero-shot-image-classification/data.ts +1 -1
- package/src/tasks/zero-shot-object-detection/data.ts +1 -1
- package/src/widget-example.ts +1 -1
- package/tsconfig.json +3 -3
- package/dist/index.cjs +0 -7976
- package/dist/index.js +0 -7933
- package/dist/scripts/inference-codegen.d.ts +0 -2
- package/dist/scripts/inference-codegen.d.ts.map +0 -1
- package/dist/scripts/inference-tei-import.d.ts +0 -2
- package/dist/scripts/inference-tei-import.d.ts.map +0 -1
- package/dist/scripts/inference-tgi-import.d.ts +0 -2
- package/dist/scripts/inference-tgi-import.d.ts.map +0 -1
- package/dist/src/default-widget-inputs.d.ts.map +0 -1
- package/dist/src/index.d.ts.map +0 -1
- package/dist/src/library-to-tasks.d.ts.map +0 -1
- package/dist/src/local-apps.d.ts.map +0 -1
- package/dist/src/model-data.d.ts.map +0 -1
- package/dist/src/model-libraries-snippets.d.ts.map +0 -1
- package/dist/src/snippets/common.d.ts.map +0 -1
- package/dist/src/snippets/index.d.ts +0 -6
- package/dist/src/snippets/index.d.ts.map +0 -1
- package/dist/src/snippets/inputs.d.ts +0 -4
- package/dist/src/snippets/inputs.d.ts.map +0 -1
- package/dist/src/snippets/types.d.ts.map +0 -1
- package/dist/src/tasks/audio-classification/data.d.ts +0 -4
- package/dist/src/tasks/audio-to-audio/data.d.ts +0 -4
- package/dist/src/tasks/automatic-speech-recognition/data.d.ts +0 -4
- package/dist/src/tasks/depth-estimation/data.d.ts +0 -4
- package/dist/src/tasks/document-question-answering/data.d.ts +0 -4
- package/dist/src/tasks/feature-extraction/data.d.ts +0 -4
- package/dist/src/tasks/fill-mask/data.d.ts +0 -4
- package/dist/src/tasks/image-classification/data.d.ts +0 -4
- package/dist/src/tasks/image-feature-extraction/data.d.ts +0 -4
- package/dist/src/tasks/image-segmentation/data.d.ts +0 -4
- package/dist/src/tasks/image-text-to-text/data.d.ts +0 -4
- package/dist/src/tasks/image-to-3d/data.d.ts +0 -4
- package/dist/src/tasks/image-to-image/data.d.ts +0 -4
- package/dist/src/tasks/image-to-text/data.d.ts +0 -4
- package/dist/src/tasks/index.d.ts.map +0 -1
- package/dist/src/tasks/keypoint-detection/data.d.ts +0 -4
- package/dist/src/tasks/mask-generation/data.d.ts +0 -4
- package/dist/src/tasks/object-detection/data.d.ts +0 -4
- package/dist/src/tasks/placeholder/data.d.ts +0 -4
- package/dist/src/tasks/question-answering/data.d.ts +0 -4
- package/dist/src/tasks/reinforcement-learning/data.d.ts +0 -4
- package/dist/src/tasks/sentence-similarity/data.d.ts +0 -4
- package/dist/src/tasks/summarization/data.d.ts +0 -4
- package/dist/src/tasks/table-question-answering/data.d.ts +0 -4
- package/dist/src/tasks/tabular-classification/data.d.ts +0 -4
- package/dist/src/tasks/tabular-regression/data.d.ts +0 -4
- package/dist/src/tasks/text-classification/data.d.ts +0 -4
- package/dist/src/tasks/text-generation/data.d.ts +0 -4
- package/dist/src/tasks/text-to-3d/data.d.ts +0 -4
- package/dist/src/tasks/text-to-image/data.d.ts +0 -4
- package/dist/src/tasks/text-to-speech/data.d.ts +0 -4
- package/dist/src/tasks/text-to-video/data.d.ts +0 -4
- package/dist/src/tasks/token-classification/data.d.ts +0 -4
- package/dist/src/tasks/translation/data.d.ts +0 -4
- package/dist/src/tasks/unconditional-image-generation/data.d.ts +0 -4
- package/dist/src/tasks/video-classification/data.d.ts +0 -4
- package/dist/src/tasks/video-text-to-text/data.d.ts +0 -4
- package/dist/src/tasks/visual-question-answering/data.d.ts +0 -4
- package/dist/src/tasks/zero-shot-classification/data.d.ts +0 -4
- package/dist/src/tasks/zero-shot-image-classification/data.d.ts +0 -4
- package/dist/src/tasks/zero-shot-object-detection/data.d.ts +0 -4
- package/dist/src/widget-example.d.ts.map +0 -1
- /package/dist/{src → commonjs}/dataset-libraries.d.ts +0 -0
- /package/dist/{src → commonjs}/dataset-libraries.d.ts.map +0 -0
- /package/dist/{src → commonjs}/gguf.d.ts +0 -0
- /package/dist/{src → commonjs}/gguf.d.ts.map +0 -0
- /package/dist/{src → commonjs}/hardware.d.ts +0 -0
- /package/dist/{src → commonjs}/hardware.d.ts.map +0 -0
- /package/dist/{src → commonjs}/model-libraries-downloads.d.ts +0 -0
- /package/dist/{src → commonjs}/model-libraries-downloads.d.ts.map +0 -0
- /package/dist/{src → commonjs}/snippets/curl.d.ts +0 -0
- /package/dist/{src → commonjs}/snippets/curl.d.ts.map +0 -0
- /package/dist/{src → commonjs}/snippets/curl.spec.d.ts +0 -0
- /package/dist/{src → commonjs}/snippets/curl.spec.d.ts.map +0 -0
- /package/dist/{src → commonjs}/snippets/js.d.ts +0 -0
- /package/dist/{src → commonjs}/snippets/js.d.ts.map +0 -0
- /package/dist/{src → commonjs}/snippets/js.spec.d.ts +0 -0
- /package/dist/{src → commonjs}/snippets/js.spec.d.ts.map +0 -0
- /package/dist/{src → commonjs}/snippets/python.spec.d.ts +0 -0
- /package/dist/{src → commonjs}/snippets/python.spec.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/audio-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/audio-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/automatic-speech-recognition/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/automatic-speech-recognition/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/chat-completion/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/chat-completion/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/depth-estimation/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/depth-estimation/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/document-question-answering/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/document-question-answering/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/feature-extraction/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/feature-extraction/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/fill-mask/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/fill-mask/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/image-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/image-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/image-segmentation/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/image-segmentation/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/image-to-image/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/image-to-image/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/image-to-text/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/image-to-text/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/object-detection/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/object-detection/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/question-answering/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/question-answering/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/sentence-similarity/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/sentence-similarity/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/summarization/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/summarization/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/table-question-answering/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/table-question-answering/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/text-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/text-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/text-generation/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/text-generation/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/text-to-audio/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/text-to-audio/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/text-to-image/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/text-to-image/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/text-to-speech/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/text-to-speech/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/text2text-generation/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/text2text-generation/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/token-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/token-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/translation/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/translation/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/video-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/video-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/visual-question-answering/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/visual-question-answering/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/zero-shot-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/zero-shot-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/zero-shot-image-classification/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/zero-shot-image-classification/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tasks/zero-shot-object-detection/inference.d.ts +0 -0
- /package/dist/{src → commonjs}/tasks/zero-shot-object-detection/inference.d.ts.map +0 -0
- /package/dist/{src → commonjs}/tokenizer-data.d.ts +0 -0
- /package/dist/{src → commonjs}/tokenizer-data.d.ts.map +0 -0
|
@@ -0,0 +1,1093 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.transformersJS = exports.transformers = exports.speechbrain = exports.stanza = exports.span_marker = exports.spacy = exports.setfit = exports.sentenceTransformers = exports.sampleFactory = exports.sam2 = exports.fastai = exports.stable_audio_tools = exports.sklearn = exports.seed_story = exports.saelens = exports.timm = exports.tensorflowtts = exports.relik = exports.pyannote_audio = exports.pyannote_audio_pipeline = exports.paddlenlp = exports.open_clip = exports.mesh_anything = exports.mars5_tts = exports.mamba_ssm = exports.tf_keras = exports.llama_cpp_python = exports.keras_hub = exports.keras_nlp = exports.keras = exports.htrflow = exports.gliner = exports.flair = exports.fairseq = exports.espnet = exports.espnetASR = exports.espnetTTS = exports.edsnlp = exports.cartesia_mlx = exports.cartesia_pytorch = exports.diffusionkit = exports.diffusers = exports.depth_pro = exports.depth_anything_v2 = exports.bm25s = exports.bertopic = exports.audioseal = exports.asteroid = exports.allennlp = exports.adapters = void 0;
|
|
4
|
+
exports.hezar = exports.threedtopia_xl = exports.whisperkit = exports.audiocraft = exports.pythae = exports.pxia = exports.nemo = exports.model2vec = exports.mlxim = exports.mlx = exports.birefnet = exports.yolov10 = exports.chattts = exports.voicecraft = exports.vfimamba = exports.sentis = exports.mlAgents = exports.stableBaselines3 = exports.fasttext = exports.peft = void 0;
|
|
5
|
+
const library_to_tasks_js_1 = require("./library-to-tasks.js");
|
|
6
|
+
const TAG_CUSTOM_CODE = "custom_code";
|
|
7
|
+
/**
 * Strip the namespace from a model id: "org/model" -> "model".
 * Ids without a "/" are returned unchanged; for ids with several
 * segments only the second segment is returned (matches historical behavior).
 */
function nameWithoutNamespace(modelId) {
    const parts = modelId.split("/");
    if (parts.length === 1) {
        return parts[0];
    }
    return parts[1];
}
|
|
11
|
+
// Escape a string so it can be embedded inside a double-quoted literal:
// JSON.stringify does the escaping, then we drop its surrounding quotes.
const escapeStringForJson = (str) => {
    const quoted = JSON.stringify(str);
    return quoted.slice(1, -1);
};
|
|
12
|
+
//#region snippets
|
|
13
|
+
/** Python snippet for loading an adapter with the `adapters` library. */
const adapters = (model) => {
    const snippet = `from adapters import AutoAdapterModel

model = AutoAdapterModel.from_pretrained("${model.config?.adapter_transformers?.model_name}")
model.load_adapter("${model.id}", set_active=True)`;
    return [snippet];
};
exports.adapters = adapters;
|
|
20
|
+
/** Generic AllenNLP predictor-loading snippet. */
const allennlpUnknown = (model) => {
    const snippet = `import allennlp_models
from allennlp.predictors.predictor import Predictor

predictor = Predictor.from_path("hf://${model.id}")`;
    return [snippet];
};
/** AllenNLP snippet specialized for question-answering models. */
const allennlpQuestionAnswering = (model) => {
    const snippet = `import allennlp_models
from allennlp.predictors.predictor import Predictor

predictor = Predictor.from_path("hf://${model.id}")
predictor_input = {"passage": "My name is Wolfgang and I live in Berlin", "question": "Where do I live?"}
predictions = predictor.predict_json(predictor_input)`;
    return [snippet];
};
/** Dispatch on tags: QA models get the richer example, everything else the generic one. */
const allennlp = (model) => {
    return model.tags.includes("question-answering")
        ? allennlpQuestionAnswering(model)
        : allennlpUnknown(model);
};
exports.allennlp = allennlp;
|
|
41
|
+
/** Python snippet for loading an Asteroid audio model. */
const asteroid = (model) => {
    const snippet = `from asteroid.models import BaseModel

model = BaseModel.from_pretrained("${model.id}")`;
    return [snippet];
};
exports.asteroid = asteroid;
|
|
47
|
+
/**
 * AudioSeal snippets: one for the watermark generator, one for the detector.
 * Returned as two separate snippets so the UI can render them as distinct examples.
 */
const audioseal = (model) => {
    return [
        `# Watermark Generator
from audioseal import AudioSeal

model = AudioSeal.load_generator("${model.id}")
# pass a tensor (tensor_wav) of shape (batch, channels, samples) and a sample rate
wav, sr = tensor_wav, 16000

watermark = model.get_watermark(wav, sr)
watermarked_audio = wav + watermark`,
        `# Watermark Detector
from audioseal import AudioSeal

detector = AudioSeal.load_detector("${model.id}")

result, message = detector.detect_watermark(watermarked_audio, sr)`,
    ];
};
exports.audioseal = audioseal;
|
|
66
|
+
/**
 * Resolve the base model id for a derived diffusers checkpoint (LoRA,
 * ControlNet, ...). Falls back to a placeholder when the card lacks one.
 */
function get_base_diffusers_model(model) {
    const base = model.cardData?.base_model;
    if (base == null) {
        return "fill-in-base-model";
    }
    return base.toString();
}
|
|
69
|
+
/**
 * Pick an example prompt for a diffusers model: prefer the first widget
 * example's text, then the card's `instance_prompt`. Returns undefined
 * when neither exists; the result is JSON-escaped for safe embedding.
 */
function get_prompt_from_diffusers_model(model) {
    const prompt = model.widgetData?.[0]?.text ?? model.cardData?.instance_prompt;
    return prompt ? escapeStringForJson(prompt) : undefined;
}
|
|
75
|
+
/** Python snippet for loading a BERTopic topic model. */
const bertopic = (model) => {
    const snippet = `from bertopic import BERTopic

model = BERTopic.load("${model.id}")`;
    return [snippet];
};
exports.bertopic = bertopic;
|
|
81
|
+
/** Python snippet for loading a BM25S retriever from the Hub. */
const bm25s = (model) => {
    const snippet = `from bm25s.hf import BM25HF

retriever = BM25HF.load_from_hub("${model.id}")`;
    return [snippet];
};
exports.bm25s = bm25s;
|
|
87
|
+
/**
 * Python snippet for Depth-Anything-V2.
 *
 * Fixes vs previous version:
 * - Large config was missing the closing "]" in out_channels, producing invalid Python.
 * - Snippet used `hf_hub_download` without importing it.
 * - `load_state_dict(...)` returns a NamedTuple, not the module, so chaining
 *   `.eval()` on it raises AttributeError; call `model.eval()` separately.
 *
 * Known checkpoints get their real hyperparameters; anything else gets
 * fill-in placeholders, matching the previous behavior.
 */
const depth_anything_v2 = (model) => {
    // Per-checkpoint hyperparameters (see the Depth-Anything-V2 README).
    const KNOWN_CONFIGS = {
        "depth-anything/Depth-Anything-V2-Small": {
            encoder: "vits",
            features: "64",
            out_channels: "[48, 96, 192, 384]",
        },
        "depth-anything/Depth-Anything-V2-Base": {
            encoder: "vitb",
            features: "128",
            out_channels: "[96, 192, 384, 768]",
        },
        "depth-anything/Depth-Anything-V2-Large": {
            encoder: "vitl",
            features: "256",
            out_channels: "[256, 512, 1024, 1024]",
        },
    };
    const fallback = {
        encoder: "<ENCODER>",
        features: "<NUMBER_OF_FEATURES>",
        out_channels: "<OUT_CHANNELS>",
    };
    const { encoder, features, out_channels } = KNOWN_CONFIGS[model.id] ?? fallback;
    return [
        `
# Install from https://github.com/DepthAnything/Depth-Anything-V2

# Load the model and infer depth from an image
import cv2
import torch
from huggingface_hub import hf_hub_download

from depth_anything_v2.dpt import DepthAnythingV2

# instantiate the model
model = DepthAnythingV2(encoder="${encoder}", features=${features}, out_channels=${out_channels})

# load the weights
filepath = hf_hub_download(repo_id="${model.id}", filename="depth_anything_v2_${encoder}.pth", repo_type="model")
state_dict = torch.load(filepath, map_location="cpu")
model.load_state_dict(state_dict)
model.eval()

raw_img = cv2.imread("your/image/path")
depth = model.infer_image(raw_img) # HxW raw depth map in numpy
`,
    ];
};
|
|
132
|
+
exports.depth_anything_v2 = depth_anything_v2;
|
|
133
|
+
/**
 * Depth Pro snippets: checkpoint download (shell) followed by Python inference.
 */
const depth_pro = (model) => {
    return [
        `# Download checkpoint
pip install huggingface-hub
huggingface-cli download --local-dir checkpoints ${model.id}`,
        `import depth_pro

# Load model and preprocessing transform
model, transform = depth_pro.create_model_and_transforms()
model.eval()

# Load and preprocess an image.
image, _, f_px = depth_pro.load_rgb("example.png")
image = transform(image)

# Run inference.
prediction = model.infer(image, f_px=f_px)

# Results: 1. Depth in meters
depth = prediction["depth"]
# Results: 2. Focal length in pixels
focallength_px = prediction["focallength_px"]`,
    ];
};
exports.depth_pro = depth_pro;
|
|
157
|
+
// Fallback prompt used when a model card/widget provides none.
const diffusersDefaultPrompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k";
/** Plain text-to-image pipeline snippet. */
const diffusers_default = (model) => {
    const prompt = get_prompt_from_diffusers_model(model) ?? diffusersDefaultPrompt;
    return [
        `from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("${model.id}")

prompt = "${prompt}"
image = pipe(prompt).images[0]`,
    ];
};
/** ControlNet snippet: loads the controlnet on top of its base pipeline. */
const diffusers_controlnet = (model) => {
    const base = get_base_diffusers_model(model);
    return [
        `from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained("${model.id}")
pipe = StableDiffusionControlNetPipeline.from_pretrained(
	"${base}", controlnet=controlnet
)`,
    ];
};
/** LoRA snippet: loads LoRA weights into the base pipeline. */
const diffusers_lora = (model) => {
    const prompt = get_prompt_from_diffusers_model(model) ?? diffusersDefaultPrompt;
    return [
        `from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
pipe.load_lora_weights("${model.id}")

prompt = "${prompt}"
image = pipe(prompt).images[0]`,
    ];
};
/** Textual-inversion snippet: loads an embedding into the base pipeline. */
const diffusers_textual_inversion = (model) => {
    return [
        `from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}")
pipe.load_textual_inversion("${model.id}")`,
    ];
};
/** Dispatch on model tags; order matters (controlnet > lora > textual_inversion). */
const diffusers = (model) => {
    if (model.tags.includes("controlnet")) {
        return diffusers_controlnet(model);
    }
    if (model.tags.includes("lora")) {
        return diffusers_lora(model);
    }
    if (model.tags.includes("textual_inversion")) {
        return diffusers_textual_inversion(model);
    }
    return diffusers_default(model);
};
exports.diffusers = diffusers;
|
|
204
|
+
/**
 * DiffusionKit (MLX) snippets: a pipeline-construction snippet (SD3 or Flux,
 * chosen by the "flux" tag) plus a generation snippet.
 *
 * Fix: `model_version` was interpolated without quotes, producing invalid
 * Python (e.g. `model_version=org/model`); it must be a string literal.
 */
const diffusionkit = (model) => {
    const isFlux = model.tags.includes("flux");
    const sd3Snippet = `# Pipeline for Stable Diffusion 3
from diffusionkit.mlx import DiffusionPipeline

pipeline = DiffusionPipeline(
	shift=3.0,
	use_t5=False,
	model_version="${model.id}",
	low_memory_mode=True,
	a16=True,
	w16=True,
)`;
    const fluxSnippet = `# Pipeline for Flux
from diffusionkit.mlx import FluxPipeline

pipeline = FluxPipeline(
	shift=1.0,
	model_version="${model.id}",
	low_memory_mode=True,
	a16=True,
	w16=True,
)`;
    // Flux is distilled: fewer steps and no CFG.
    const generateSnippet = `# Image Generation
HEIGHT = 512
WIDTH = 512
NUM_STEPS = ${isFlux ? 4 : 50}
CFG_WEIGHT = ${isFlux ? 0 : 5}

image, _ = pipeline.generate_image(
	"a photo of a cat",
	cfg_weight=CFG_WEIGHT,
	num_steps=NUM_STEPS,
	latent_size=(HEIGHT // 8, WIDTH // 8),
)`;
    return [isFlux ? fluxSnippet : sd3Snippet, generateSnippet];
};
|
|
241
|
+
exports.diffusionkit = diffusionkit;
|
|
242
|
+
/**
 * Python snippet for Cartesia's Rene model (cartesia-pytorch).
 *
 * Fix: the snippet previously ended with a stray `)` after
 * `print(out_message)`, which made the generated Python a syntax error.
 */
const cartesia_pytorch = (model) => [
    `# pip install --no-binary :all: cartesia-pytorch
from cartesia_pytorch import ReneLMHeadModel
from transformers import AutoTokenizer

model = ReneLMHeadModel.from_pretrained("${model.id}")
tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-1B-hf")

in_message = ["Rene Descartes was"]
inputs = tokenizer(in_message, return_tensors="pt")

outputs = model.generate(inputs.input_ids, max_length=50, top_k=100, top_p=0.99)
out_message = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]

print(out_message)`,
];
|
|
259
|
+
exports.cartesia_pytorch = cartesia_pytorch;
|
|
260
|
+
/** Python snippet for Cartesia models on MLX (streaming generation loop). */
const cartesia_mlx = (model) => {
    const snippet = `import mlx.core as mx
import cartesia_mlx as cmx

model = cmx.from_pretrained("${model.id}")
model.set_dtype(mx.float32)

prompt = "Rene Descartes was"

for text in model.generate(
	prompt,
	max_tokens=500,
	eval_every_n=5,
	verbose=True,
	top_p=0.99,
	temperature=0.85,
):
	print(text, end="", flush=True)
`;
    return [snippet];
};
exports.cartesia_mlx = cartesia_mlx;
|
|
281
|
+
/**
 * EDS-NLP snippets: load directly from the Hub, or install the repo as a
 * pip package (package name = repo name with dashes replaced by underscores).
 */
const edsnlp = (model) => {
    const packageName = nameWithoutNamespace(model.id).replaceAll("-", "_");
    const fromHub = `# Load it from the Hub directly
import edsnlp
nlp = edsnlp.load("${model.id}")
`;
    const asPackage = `# Or install it as a package
!pip install git+https://huggingface.co/${model.id}

# and import it as a module
import ${packageName}

nlp = ${packageName}.load() # or edsnlp.load("${packageName}")
`;
    return [fromHub, asPackage];
};
exports.edsnlp = edsnlp;
|
|
299
|
+
/** ESPnet text-to-speech snippet. */
const espnetTTS = (model) => {
    const snippet = `from espnet2.bin.tts_inference import Text2Speech

model = Text2Speech.from_pretrained("${model.id}")

speech, *_ = model("text to generate speech from")`;
    return [snippet];
};
exports.espnetTTS = espnetTTS;
/** ESPnet automatic-speech-recognition snippet. */
const espnetASR = (model) => {
    const snippet = `from espnet2.bin.asr_inference import Speech2Text

model = Speech2Text.from_pretrained(
	"${model.id}"
)

speech, rate = soundfile.read("speech.wav")
text, *_ = model(speech)[0]`;
    return [snippet];
};
exports.espnetASR = espnetASR;
// Shown when the model is tagged neither for TTS nor ASR.
const espnetUnknown = () => [`unknown model type (must be text-to-speech or automatic-speech-recognition)`];
/** Dispatch on tags to the matching ESPnet snippet. */
const espnet = (model) => {
    if (model.tags.includes("text-to-speech")) {
        return (0, exports.espnetTTS)(model);
    }
    if (model.tags.includes("automatic-speech-recognition")) {
        return (0, exports.espnetASR)(model);
    }
    return espnetUnknown();
};
exports.espnet = espnet;
|
|
329
|
+
/** Python snippet for loading a fairseq model ensemble from the Hub. */
const fairseq = (model) => {
    const snippet = `from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub

models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
	"${model.id}"
)`;
    return [snippet];
};
exports.fairseq = fairseq;
|
|
337
|
+
/** Python snippet for loading a Flair sequence tagger. */
const flair = (model) => {
    const snippet = `from flair.models import SequenceTagger

tagger = SequenceTagger.load("${model.id}")`;
    return [snippet];
};
exports.flair = flair;
|
|
343
|
+
/** Python snippet for loading a GLiNER entity-recognition model. */
const gliner = (model) => {
    const snippet = `from gliner import GLiNER

model = GLiNER.from_pretrained("${model.id}")`;
    return [snippet];
};
exports.gliner = gliner;
|
|
349
|
+
/** HTRflow snippets: CLI pipeline invocation plus a Python Pipeline example. */
const htrflow = (model) => {
    const cliSnippet = `# CLI usage
# see docs: https://ai-riksarkivet.github.io/htrflow/latest/getting_started/quick_start.html
htrflow pipeline <path/to/pipeline.yaml> <path/to/image>`;
    const pythonSnippet = `# Python usage
from htrflow.pipeline.pipeline import Pipeline
from htrflow.pipeline.steps import Task
from htrflow.models.framework.model import ModelClass

pipeline = Pipeline(
	[
		Task(
			ModelClass, {"model": "${model.id}"}, {}
		),
])`;
    return [cliSnippet, pythonSnippet];
};
exports.htrflow = htrflow;
|
|
366
|
+
/** Python snippet for loading a Keras 3 model saved on the Hub. */
const keras = (model) => {
    const snippet = `# Available backend options are: "jax", "torch", "tensorflow".
import os
os.environ["KERAS_BACKEND"] = "jax"

import keras

model = keras.saving.load_model("hf://${model.id}")
`;
    return [snippet];
};
exports.keras = keras;
|
|
377
|
+
/** Python snippet for KerasNLP presets (tokenizer + backbone). */
const keras_nlp = (model) => {
    const snippet = `# Available backend options are: "jax", "torch", "tensorflow".
import os
os.environ["KERAS_BACKEND"] = "jax"

import keras_nlp

tokenizer = keras_nlp.models.Tokenizer.from_preset("hf://${model.id}")
backbone = keras_nlp.models.Backbone.from_preset("hf://${model.id}")
`;
    return [snippet];
};
exports.keras_nlp = keras_nlp;
|
|
389
|
+
/** Python snippet for KerasHub task models loaded from a Hub preset. */
const keras_hub = (model) => {
    const snippet = `# Available backend options are: "jax", "torch", "tensorflow".
import os
os.environ["KERAS_BACKEND"] = "jax"

import keras_hub

# Load a task-specific model (*replace CausalLM with your task*)
model = keras_hub.models.CausalLM.from_preset("hf://${model.id}", dtype="bfloat16")

# Possible tasks are CausalLM, TextToImage, ImageClassifier, ...
# full list here: https://keras.io/api/keras_hub/models/#api-documentation
`;
    return [snippet];
};
exports.keras_hub = keras_hub;
|
|
404
|
+
/** Python snippet for llama-cpp-python chat completion with a Hub GGUF file. */
const llama_cpp_python = (model) => {
    const snippet = `from llama_cpp import Llama

llm = Llama.from_pretrained(
	repo_id="${model.id}",
	filename="{{GGUF_FILE}}",
)

llm.create_chat_completion(
	messages = [
		{
			"role": "user",
			"content": "What is the capital of France?"
		}
	]
)`;
    return [snippet];
};
exports.llama_cpp_python = llama_cpp_python;
|
|
422
|
+
/** Python snippet for legacy Keras 2 (tf_keras) models on the Hub. */
const tf_keras = (model) => {
    const snippet = `# Note: 'keras<3.x' or 'tf_keras' must be installed (legacy)
# See https://github.com/keras-team/tf-keras for more details.
from huggingface_hub import from_pretrained_keras

model = from_pretrained_keras("${model.id}")
`;
    return [snippet];
};
exports.tf_keras = tf_keras;
|
|
431
|
+
/** Python snippet for loading a Mamba language model (mamba-ssm). */
const mamba_ssm = (model) => {
    const snippet = `from mamba_ssm import MambaLMHeadModel

model = MambaLMHeadModel.from_pretrained("${model.id}")`;
    return [snippet];
};
exports.mamba_ssm = mamba_ssm;
|
|
437
|
+
/** Python snippet for MARS5-TTS (installed from its GitHub repo). */
const mars5_tts = (model) => {
    const snippet = `# Install from https://github.com/Camb-ai/MARS5-TTS

from inference import Mars5TTS
mars5 = Mars5TTS.from_pretrained("${model.id}")`;
    return [snippet];
};
exports.mars5_tts = mars5_tts;
|
|
444
|
+
/** Python snippet for MeshAnything; model-independent, so no model argument. */
const mesh_anything = () => {
    const snippet = `# Install from https://github.com/buaacyw/MeshAnything.git

from MeshAnything.models.meshanything import MeshAnything

# refer to https://github.com/buaacyw/MeshAnything/blob/main/main.py#L91 on how to define args
# and https://github.com/buaacyw/MeshAnything/blob/main/app.py regarding usage
model = MeshAnything(args)`;
    return [snippet];
};
exports.mesh_anything = mesh_anything;
|
|
454
|
+
/** Python snippet for OpenCLIP models and tokenizer via the hf-hub scheme. */
const open_clip = (model) => {
    const snippet = `import open_clip

model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms('hf-hub:${model.id}')
tokenizer = open_clip.get_tokenizer('hf-hub:${model.id}')`;
    return [snippet];
};
exports.open_clip = open_clip;
|
|
461
|
+
/**
 * PaddleNLP snippet. When the config declares an architecture, use it as the
 * concrete model class; otherwise fall back to AutoModel with a warning line.
 */
const paddlenlp = (model) => {
    const architecture = model.config?.architectures?.[0];
    if (architecture) {
        const lines = [
            `from paddlenlp.transformers import AutoTokenizer, ${architecture}`,
            "",
            `tokenizer = AutoTokenizer.from_pretrained("${model.id}", from_hf_hub=True)`,
            `model = ${architecture}.from_pretrained("${model.id}", from_hf_hub=True)`,
        ];
        return [lines.join("\n")];
    }
    const fallbackLines = [
        `# ⚠️ Type of model unknown`,
        `from paddlenlp.transformers import AutoTokenizer, AutoModel`,
        "",
        `tokenizer = AutoTokenizer.from_pretrained("${model.id}", from_hf_hub=True)`,
        `model = AutoModel.from_pretrained("${model.id}", from_hf_hub=True)`,
    ];
    return [fallbackLines.join("\n")];
};
exports.paddlenlp = paddlenlp;
|
|
486
|
+
/** Python snippet for running a pyannote.audio Pipeline on a file or excerpt. */
const pyannote_audio_pipeline = (model) => {
    const snippet = `from pyannote.audio import Pipeline

pipeline = Pipeline.from_pretrained("${model.id}")

# inference on the whole file
pipeline("file.wav")

# inference on an excerpt
from pyannote.core import Segment
excerpt = Segment(start=2.0, end=5.0)

from pyannote.audio import Audio
waveform, sample_rate = Audio().crop("file.wav", excerpt)
pipeline({"waveform": waveform, "sample_rate": sample_rate})`;
    return [snippet];
};
|
|
502
|
+
exports.pyannote_audio_pipeline = pyannote_audio_pipeline;
|
|
503
|
+
/** Python snippet for running a raw pyannote.audio Model through Inference. */
const pyannote_audio_model = (model) => {
    const snippet = `from pyannote.audio import Model, Inference

model = Model.from_pretrained("${model.id}")
inference = Inference(model)

# inference on the whole file
inference("file.wav")

# inference on an excerpt
from pyannote.core import Segment
excerpt = Segment(start=2.0, end=5.0)
inference.crop("file.wav", excerpt)`;
    return [snippet];
};
|
|
517
|
+
/** Dispatch: pipeline-tagged repos get the Pipeline snippet, others the Model snippet. */
const pyannote_audio = (model) => {
    return model.tags.includes("pyannote-audio-pipeline")
        ? (0, exports.pyannote_audio_pipeline)(model)
        : pyannote_audio_model(model);
};
|
|
523
|
+
exports.pyannote_audio = pyannote_audio;
|
|
524
|
+
/** Python snippet for loading a ReLiK model from the Hub. */
const relik = (model) => {
    const snippet = `from relik import Relik

relik = Relik.from_pretrained("${model.id}")`;
    return [snippet];
};
|
|
529
|
+
exports.relik = relik;
|
|
530
|
+
/** Python snippet for a TensorFlowTTS text-to-mel model (processor + model). */
const tensorflowttsTextToMel = (model) => {
    const snippet = `from tensorflow_tts.inference import AutoProcessor, TFAutoModel

processor = AutoProcessor.from_pretrained("${model.id}")
model = TFAutoModel.from_pretrained("${model.id}")
`;
    return [snippet];
};
|
|
537
|
+
/** Python snippet for a TensorFlowTTS mel-to-wav (vocoder) model. */
const tensorflowttsMelToWav = (model) => {
    const snippet = `from tensorflow_tts.inference import TFAutoModel

model = TFAutoModel.from_pretrained("${model.id}")
audios = model.inference(mels)
`;
    return [snippet];
};
|
|
544
|
+
/** Generic TensorFlowTTS snippet used when the model sub-type cannot be determined. */
const tensorflowttsUnknown = (model) => {
    const snippet = `from tensorflow_tts.inference import TFAutoModel

model = TFAutoModel.from_pretrained("${model.id}")
`;
    return [snippet];
};
|
|
550
|
+
/** Dispatch a TensorFlowTTS repo to the snippet matching its tag. */
const tensorflowtts = (model) => {
    const handlers = [
        ["text-to-mel", tensorflowttsTextToMel],
        ["mel-to-wav", tensorflowttsMelToWav],
    ];
    for (const [tag, makeSnippet] of handlers) {
        if (model.tags.includes(tag)) {
            return makeSnippet(model);
        }
    }
    return tensorflowttsUnknown(model);
};
|
|
559
|
+
exports.tensorflowtts = tensorflowtts;
|
|
560
|
+
/** Python snippet for creating a pretrained timm model from the Hub. */
const timm = (model) => {
    const snippet = `import timm

model = timm.create_model("hf_hub:${model.id}", pretrained=True)`;
    return [snippet];
};
|
|
565
|
+
exports.timm = timm;
|
|
566
|
+
/** Generic SAELens snippet; the model id is not used (release/sae_id are placeholders). */
const saelens = ( /* model: ModelData */) => {
    const snippet = `# pip install sae-lens
from sae_lens import SAE

sae, cfg_dict, sparsity = SAE.from_pretrained(
    release = "RELEASE_ID", # e.g., "gpt2-small-res-jb". See other options in https://github.com/jbloomAus/SAELens/blob/main/sae_lens/pretrained_saes.yaml
    sae_id = "SAE_ID", # e.g., "blocks.8.hook_resid_pre". Won't always be a hook point
)`;
    return [snippet];
};
|
|
575
|
+
exports.saelens = saelens;
|
|
576
|
+
/** Python snippet for instantiating SEED-Story via hydra/omegaconf configs. */
const seed_story = () => {
    const snippet = `# seed_story_cfg_path refers to 'https://github.com/TencentARC/SEED-Story/blob/master/configs/clm_models/agent_7b_sft.yaml'
# llm_cfg_path refers to 'https://github.com/TencentARC/SEED-Story/blob/master/configs/clm_models/llama2chat7b_lora.yaml'
from omegaconf import OmegaConf
import hydra

# load Llama2
llm_cfg = OmegaConf.load(llm_cfg_path)
llm = hydra.utils.instantiate(llm_cfg, torch_dtype="fp16")

# initialize seed_story
seed_story_cfg = OmegaConf.load(seed_story_cfg_path)
seed_story = hydra.utils.instantiate(seed_story_cfg, llm=llm) `;
    return [snippet];
};
|
|
590
|
+
exports.seed_story = seed_story;
|
|
591
|
+
/**
 * Python snippet for loading a pickle-format skops model with joblib.
 * NOTE(review): joblib.load receives the bare model filename, not
 * "path_to_folder/<file>" as in skopsFormat — presumably intentional upstream;
 * verify if the generated snippet is expected to run as-is.
 */
const skopsPickle = (model, modelFile) => {
    const snippet = `import joblib
from skops.hub_utils import download
download("${model.id}", "path_to_folder")
model = joblib.load(
	"${modelFile}"
)
# only load pickle files from sources you trust
# read more about it here https://skops.readthedocs.io/en/stable/persistence.html`;
    return [snippet];
};
|
|
603
|
+
/** Python snippet for loading a skops-format model file with skops.io.load. */
const skopsFormat = (model, modelFile) => {
    const snippet = `from skops.hub_utils import download
from skops.io import load
download("${model.id}", "path_to_folder")
# make sure model file is in skops format
# if model is a pickle file, make sure it's from a source you trust
model = load("path_to_folder/${modelFile}")`;
    return [snippet];
};
|
|
613
|
+
/** Python snippet for loading a plain sklearn joblib file downloaded from the Hub. */
const skopsJobLib = (model) => {
    const snippet = `from huggingface_hub import hf_hub_download
import joblib
model = joblib.load(
	hf_hub_download("${model.id}", "sklearn_model.joblib")
)
# only load pickle files from sources you trust
# read more about it here https://skops.readthedocs.io/en/stable/persistence.html`;
    return [snippet];
};
|
|
624
|
+
/**
 * Dispatch a sklearn repo to the right snippet: skops-tagged repos use the
 * file/format declared in config.json; plain repos use the joblib fallback.
 */
const sklearn = (model) => {
    if (!model.tags.includes("skops")) {
        return skopsJobLib(model);
    }
    const skopsmodelFile = model.config?.sklearn?.model?.file;
    if (!skopsmodelFile) {
        return [`# ⚠️ Model filename not specified in config.json`];
    }
    const skopssaveFormat = model.config?.sklearn?.model_format;
    return skopssaveFormat === "pickle"
        ? skopsPickle(model, skopsmodelFile)
        : skopsFormat(model, skopsmodelFile);
};
|
|
642
|
+
exports.sklearn = sklearn;
|
|
643
|
+
/** End-to-end Python snippet for text-conditioned audio generation with stable-audio-tools. */
const stable_audio_tools = (model) => {
    const snippet = `import torch
import torchaudio
from einops import rearrange
from stable_audio_tools import get_pretrained_model
from stable_audio_tools.inference.generation import generate_diffusion_cond

device = "cuda" if torch.cuda.is_available() else "cpu"

# Download model
model, model_config = get_pretrained_model("${model.id}")
sample_rate = model_config["sample_rate"]
sample_size = model_config["sample_size"]

model = model.to(device)

# Set up text and timing conditioning
conditioning = [{
    "prompt": "128 BPM tech house drum loop",
}]

# Generate stereo audio
output = generate_diffusion_cond(
    model,
    conditioning=conditioning,
    sample_size=sample_size,
    device=device
)

# Rearrange audio batch to a single sequence
output = rearrange(output, "b d n -> d (b n)")

# Peak normalize, clip, convert to int16, and save to file
output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
torchaudio.save("output.wav", output, sample_rate)`;
    return [snippet];
};
|
|
679
|
+
exports.stable_audio_tools = stable_audio_tools;
|
|
680
|
+
/** Python snippet for loading a fastai Learner from the Hub. */
const fastai = (model) => {
    const snippet = `from huggingface_hub import from_pretrained_fastai

learn = from_pretrained_fastai("${model.id}")`;
    return [snippet];
};
|
|
685
|
+
exports.fastai = fastai;
|
|
686
|
+
/**
 * Python snippets for SAM2: one for image prediction, one for video prediction.
 * Fixes: the model id is now quoted in `from_pretrained(...)` (the previous
 * snippet emitted invalid Python such as `from_pretrained(facebook/sam2)`),
 * and the stray trailing colon after `add_new_points(...)` is removed.
 */
const sam2 = (model) => {
    const image_predictor = `# Use SAM2 with images
import torch
from sam2.sam2_image_predictor import SAM2ImagePredictor

predictor = SAM2ImagePredictor.from_pretrained("${model.id}")

with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
    predictor.set_image(<your_image>)
    masks, _, _ = predictor.predict(<input_prompts>)`;
    const video_predictor = `# Use SAM2 with videos
import torch
from sam2.sam2_video_predictor import SAM2VideoPredictor

predictor = SAM2VideoPredictor.from_pretrained("${model.id}")

with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
    state = predictor.init_state(<your_video>)

    # add new prompts and instantly get the output on the same frame
    frame_idx, object_ids, masks = predictor.add_new_points(state, <your_prompts>)

    # propagate the prompts to get masklets throughout the video
    for frame_idx, object_ids, masks in predictor.propagate_in_video(state):
        ...`;
    return [image_predictor, video_predictor];
};
|
|
713
|
+
exports.sam2 = sam2;
|
|
714
|
+
/** Shell snippet for downloading a Sample Factory checkpoint from the Hub. */
const sampleFactory = (model) => {
    const command = `python -m sample_factory.huggingface.load_from_hub -r ${model.id} -d ./train_dir`;
    return [command];
};
|
|
717
|
+
exports.sampleFactory = sampleFactory;
|
|
718
|
+
/**
 * Pull example sentences out of the first widget example, if any.
 * Returns [source_sentence, ...sentences] or undefined when no widget data exists.
 */
function get_widget_examples_from_st_model(model) {
    const first = model.widgetData?.[0];
    if (!first) {
        return undefined;
    }
    return [first.source_sentence, ...first.sentences];
}
|
|
724
|
+
/**
 * Python snippet for sentence-transformers: encode example sentences (from the
 * widget when available) and compute pairwise similarities.
 */
const sentenceTransformers = (model) => {
    const remote_code_snippet = model.tags.includes(TAG_CUSTOM_CODE) ? ", trust_remote_code=True" : "";
    const fallbackSentences = [
        "The weather is lovely today.",
        "It's so sunny outside!",
        "He drove to the stadium.",
    ];
    const exampleSentences = get_widget_examples_from_st_model(model) ?? fallbackSentences;
    const count = exampleSentences.length;
    const snippet = `from sentence_transformers import SentenceTransformer

model = SentenceTransformer("${model.id}"${remote_code_snippet})

sentences = ${JSON.stringify(exampleSentences, null, 4)}
embeddings = model.encode(sentences)

similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [${count}, ${count}]`;
    return [snippet];
};
|
|
744
|
+
exports.sentenceTransformers = sentenceTransformers;
|
|
745
|
+
/** Python snippet for loading a SetFit model from the Hub. */
const setfit = (model) => {
    const snippet = `from setfit import SetFitModel

model = SetFitModel.from_pretrained("${model.id}")`;
    return [snippet];
};
|
|
750
|
+
exports.setfit = setfit;
|
|
751
|
+
/** Python snippet for installing and loading a spaCy pipeline wheel from the Hub. */
const spacy = (model) => {
    const moduleName = nameWithoutNamespace(model.id);
    const snippet = `!pip install https://huggingface.co/${model.id}/resolve/main/${moduleName}-any-py3-none-any.whl

# Using spacy.load().
import spacy
nlp = spacy.load("${moduleName}")

# Importing as module.
import ${moduleName}
nlp = ${moduleName}.load()`;
    return [snippet];
};
|
|
762
|
+
exports.spacy = spacy;
|
|
763
|
+
/** Python snippet for loading a SpanMarker NER model from the Hub. */
const span_marker = (model) => {
    const snippet = `from span_marker import SpanMarkerModel

model = SpanMarkerModel.from_pretrained("${model.id}")`;
    return [snippet];
};
|
|
768
|
+
exports.span_marker = span_marker;
|
|
769
|
+
/** Python snippet for downloading and running a Stanza pipeline (repo name minus "stanza-" prefix). */
const stanza = (model) => {
    const lang = nameWithoutNamespace(model.id).replace("stanza-", "");
    const snippet = `import stanza

stanza.download("${lang}")
nlp = stanza.Pipeline("${lang}")`;
    return [snippet];
};
|
|
775
|
+
exports.stanza = stanza;
|
|
776
|
+
/**
 * Map a SpeechBrain interface class name to its inference method name.
 * Returns undefined for unknown interfaces.
 */
const speechBrainMethod = (speechbrainInterface) => {
    const methodByInterface = new Map([
        ["EncoderClassifier", "classify_file"],
        ["EncoderDecoderASR", "transcribe_file"],
        ["EncoderASR", "transcribe_file"],
        ["SpectralMaskEnhancement", "enhance_file"],
        ["SepformerSeparation", "separate_file"],
    ]);
    // Map.get yields undefined for missing keys, matching the old switch default.
    return methodByInterface.get(speechbrainInterface);
};
|
|
791
|
+
/**
 * Python snippet for a SpeechBrain model, driven by the interface class
 * declared in config.json; emits a warning snippet when missing/invalid.
 */
const speechbrain = (model) => {
    const speechbrainInterface = model.config?.speechbrain?.speechbrain_interface;
    if (speechbrainInterface === undefined) {
        return [`# interface not specified in config.json`];
    }
    const method = speechBrainMethod(speechbrainInterface);
    if (method === undefined) {
        return [`# interface in config.json invalid`];
    }
    const snippet = `from speechbrain.pretrained import ${speechbrainInterface}
model = ${speechbrainInterface}.from_hparams(
  "${model.id}"
)
model.${method}("file.wav")`;
    return [snippet];
};
|
|
808
|
+
exports.speechbrain = speechbrain;
|
|
809
|
+
/**
 * Python snippet(s) for a transformers model: a high-level `pipeline(...)`
 * snippet when the pipeline tag is supported by transformers, plus a direct
 * AutoModel/AutoProcessor snippet built from `model.transformersInfo`.
 * Fix: the "conversational + chat_template" condition was evaluated twice;
 * it is now hoisted into a single `isConversational` constant (DRY, identical behavior).
 */
const transformers = (model) => {
    const info = model.transformersInfo;
    if (!info) {
        return [`# ⚠️ Type of model unknown`];
    }
    const remote_code_snippet = model.tags.includes(TAG_CUSTOM_CODE) ? ", trust_remote_code=True" : "";
    // Chat-capable models get a `messages` example wrapped around the pipeline call.
    const isConversational = model.tags.includes("conversational") && model.config?.tokenizer_config?.chat_template;
    let autoSnippet;
    if (info.processor) {
        // Pick an idiomatic variable name for the preprocessing class.
        const varName = info.processor === "AutoTokenizer"
            ? "tokenizer"
            : info.processor === "AutoFeatureExtractor"
                ? "extractor"
                : "processor";
        autoSnippet = [
            "# Load model directly",
            `from transformers import ${info.processor}, ${info.auto_model}`,
            "",
            `${varName} = ${info.processor}.from_pretrained("${model.id}"` + remote_code_snippet + ")",
            `model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")",
        ].join("\n");
    }
    else {
        autoSnippet = [
            "# Load model directly",
            `from transformers import ${info.auto_model}`,
            `model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")",
        ].join("\n");
    }
    if (model.pipeline_tag && library_to_tasks_js_1.LIBRARY_TASK_MAPPING.transformers?.includes(model.pipeline_tag)) {
        const pipelineSnippet = ["# Use a pipeline as a high-level helper", "from transformers import pipeline", ""];
        if (isConversational) {
            pipelineSnippet.push("messages = [", '    {"role": "user", "content": "Who are you?"},', "]");
        }
        pipelineSnippet.push(`pipe = pipeline("${model.pipeline_tag}", model="${model.id}"` + remote_code_snippet + ")");
        if (isConversational) {
            pipelineSnippet.push("pipe(messages)");
        }
        return [pipelineSnippet.join("\n"), autoSnippet];
    }
    return [autoSnippet];
};
|
|
850
|
+
exports.transformers = transformers;
|
|
851
|
+
/** JavaScript snippet for running a model with @huggingface/transformers via pipeline(). */
const transformersJS = (model) => {
    if (!model.pipeline_tag) {
        return [`// ⚠️ Unknown pipeline tag`];
    }
    const libName = "@huggingface/transformers";
    const snippet = `// npm i ${libName}
import { pipeline } from '${libName}';

// Allocate pipeline
const pipe = await pipeline('${model.pipeline_tag}', '${model.id}');`;
    return [snippet];
};
|
|
864
|
+
exports.transformersJS = transformersJS;
|
|
865
|
+
/**
 * Map a PEFT task_type from adapter config to its transformers AutoModel suffix.
 * Returns undefined for unknown task types.
 */
const peftTask = (peftTaskType) => {
    const suffixByTaskType = new Map([
        ["CAUSAL_LM", "CausalLM"],
        ["SEQ_2_SEQ_LM", "Seq2SeqLM"],
        ["TOKEN_CLS", "TokenClassification"],
        ["SEQ_CLS", "SequenceClassification"],
    ]);
    // Map.get yields undefined for missing keys, matching the old switch default.
    return suffixByTaskType.get(peftTaskType);
};
|
|
879
|
+
/**
 * Python snippet for loading a PEFT adapter on top of its base model, using
 * base_model_name_or_path and task_type from the adapter config.
 */
const peft = (model) => {
    const { base_model_name_or_path: peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {};
    const pefttask = peftTask(peftTaskType);
    // Validation order preserved: task type first, then base model.
    if (!pefttask) {
        return [`Task type is invalid.`];
    }
    if (!peftBaseModel) {
        return [`Base model is not found.`];
    }
    const snippet = `from peft import PeftModel, PeftConfig
from transformers import AutoModelFor${pefttask}

config = PeftConfig.from_pretrained("${model.id}")
base_model = AutoModelFor${pefttask}.from_pretrained("${peftBaseModel}")
model = PeftModel.from_pretrained(base_model, "${model.id}")`;
    return [snippet];
};
|
|
897
|
+
exports.peft = peft;
|
|
898
|
+
/** Python snippet for loading a fastText binary model downloaded from the Hub. */
const fasttext = (model) => {
    const snippet = `from huggingface_hub import hf_hub_download
import fasttext

model = fasttext.load_model(hf_hub_download("${model.id}", "model.bin"))`;
    return [snippet];
};
|
|
904
|
+
exports.fasttext = fasttext;
|
|
905
|
+
/** Python snippet for downloading a Stable-Baselines3 checkpoint via huggingface_sb3. */
const stableBaselines3 = (model) => {
    const snippet = `from huggingface_sb3 import load_from_hub
checkpoint = load_from_hub(
    repo_id="${model.id}",
    filename="{MODEL FILENAME}.zip",
)`;
    return [snippet];
};
|
|
912
|
+
exports.stableBaselines3 = stableBaselines3;
|
|
913
|
+
/**
 * Resolve a NeMo domain to its snippet; only "ASR" is supported, anything
 * else yields undefined.
 */
const nemoDomainResolver = (domain, model) => {
    if (domain !== "ASR") {
        return undefined;
    }
    const snippet = `import nemo.collections.asr as nemo_asr
asr_model = nemo_asr.models.ASRModel.from_pretrained("${model.id}")

transcriptions = asr_model.transcribe(["file.wav"])`;
    return [snippet];
};
|
|
926
|
+
/**
 * Shell snippet for downloading an ML-Agents model from the Hub.
 * Fix: the emitted command contained a leaked TypeScript annotation
 * (`--local-dir="./download: string[]s"`); restored to `./downloads`.
 */
const mlAgents = (model) => [
    `mlagents-load-from-hf --repo-id="${model.id}" --local-dir="./downloads"`,
];
|
|
929
|
+
exports.mlAgents = mlAgents;
|
|
930
|
+
/** C# snippet for loading a Unity Sentis model; the model id is not used. */
const sentis = ( /* model: ModelData */) => {
    const snippet = `string modelName = "[Your model name here].sentis";
Model model = ModelLoader.Load(Application.streamingAssetsPath + "/" + modelName);
IWorker engine = WorkerFactory.CreateWorker(BackendType.GPUCompute, model);
// Please see provided C# file for more details
`;
    return [snippet];
};
|
|
937
|
+
exports.sentis = sentis;
|
|
938
|
+
/** Python snippet for loading a VFIMamba frame-interpolation model. */
const vfimamba = (model) => {
    const snippet = `from Trainer_finetune import Model

model = Model.from_pretrained("${model.id}")`;
    return [snippet];
};
|
|
943
|
+
exports.vfimamba = vfimamba;
|
|
944
|
+
/** Python snippet for loading a VoiceCraft model from the Hub. */
const voicecraft = (model) => {
    const snippet = `from voicecraft import VoiceCraft

model = VoiceCraft.from_pretrained("${model.id}")`;
    return [snippet];
};
|
|
949
|
+
exports.voicecraft = voicecraft;
|
|
950
|
+
/**
 * Python snippet for ChatTTS text-to-speech.
 * Fix: the generated snippet calls `torch.from_numpy(...)` but never imported
 * torch, so it failed with NameError; `import torch` is now included.
 */
const chattts = () => [
    `import ChatTTS
import torchaudio
import torch

chat = ChatTTS.Chat()
chat.load_models(compile=False) # Set to True for better performance

texts = ["PUT YOUR TEXT HERE",]

wavs = chat.infer(texts, )

torchaudio.save("output1.wav", torch.from_numpy(wavs[0]), 24000)`,
];
|
|
963
|
+
exports.chattts = chattts;
|
|
964
|
+
/** Python snippet for running YOLOv10 prediction on a sample COCO image. */
const yolov10 = (model) => {
    const snippet = `from ultralytics import YOLOv10

model = YOLOv10.from_pretrained("${model.id}")
source = 'http://images.cocodataset.org/val2017/000000039769.jpg'
model.predict(source=source, save=True)
`;
    return [snippet];
};
|
|
972
|
+
exports.yolov10 = yolov10;
|
|
973
|
+
/** Two Python snippets for BiRefNet: via transformers AutoModel, or via the upstream repo. */
const birefnet = (model) => {
    const viaTransformers = `# Option 1: use with transformers

from transformers import AutoModelForImageSegmentation
birefnet = AutoModelForImageSegmentation.from_pretrained("${model.id}", trust_remote_code=True)
`;
    const viaBiRefNet = `# Option 2: use with BiRefNet

# Install from https://github.com/ZhengPeng7/BiRefNet

from models.birefnet import BiRefNet
model = BiRefNet.from_pretrained("${model.id}")`;
    return [viaTransformers, viaBiRefNet];
};
|
|
986
|
+
exports.birefnet = birefnet;
|
|
987
|
+
/**
 * Shell snippet for downloading an MLX model with hf_transfer enabled.
 * Fix: the emitted env line contained a leaked TypeScript annotation
 * (`HF_HUB_ENABLE_HF_TRANS: string[]FER=1`); restored to
 * `HF_HUB_ENABLE_HF_TRANSFER=1`.
 */
const mlx = (model) => [
    `pip install huggingface_hub hf_transfer

export HF_HUB_ENABLE_HF_TRANSFER=1
huggingface-cli download --local-dir ${nameWithoutNamespace(model.id)} ${model.id}`,
];
|
|
993
|
+
exports.mlx = mlx;
|
|
994
|
+
/**
 * Python snippet for loading an mlx-image model.
 * Fix: the model id is now quoted — the previous snippet emitted invalid
 * Python such as `create_model(org/repo)`.
 */
const mlxim = (model) => [
    `from mlxim.model import create_model

model = create_model("${model.id}")`,
];
|
|
999
|
+
exports.mlxim = mlxim;
|
|
1000
|
+
/** Python snippet for loading a Model2Vec static embedding model. */
const model2vec = (model) => {
    const snippet = `from model2vec import StaticModel

model = StaticModel.from_pretrained("${model.id}")`;
    return [snippet];
};
|
|
1005
|
+
exports.model2vec = model2vec;
|
|
1006
|
+
/**
 * Dispatch a NeMo repo to a domain snippet by its tags; currently only the
 * ASR domain is recognized.
 */
const nemo = (model) => {
    let command;
    if (model.tags.includes("automatic-speech-recognition")) {
        command = nemoDomainResolver("ASR", model);
    }
    return command ?? [`# tag did not correspond to a valid NeMo domain.`];
};
|
|
1014
|
+
exports.nemo = nemo;
|
|
1015
|
+
/** Python snippet for loading a pxia model from the Hub. */
const pxia = (model) => {
    const snippet = `from pxia import AutoModel

model = AutoModel.from_pretrained("${model.id}")`;
    return [snippet];
};
|
|
1020
|
+
exports.pxia = pxia;
|
|
1021
|
+
/** Python snippet for loading a pythae autoencoder from the Hub. */
const pythae = (model) => {
    const snippet = `from pythae.models import AutoModel

model = AutoModel.load_from_hf_hub("${model.id}")`;
    return [snippet];
};
|
|
1026
|
+
exports.pythae = pythae;
|
|
1027
|
+
/** Python snippet for generating music with Audiocraft MusicGen. */
const musicgen = (model) => {
    const snippet = `from audiocraft.models import MusicGen

model = MusicGen.get_pretrained("${model.id}")

descriptions = ['happy rock', 'energetic EDM', 'sad jazz']
wav = model.generate(descriptions) # generates 3 samples.`;
    return [snippet];
};
|
|
1035
|
+
/** Python snippet for generating audio with Audiocraft MAGNeT. */
const magnet = (model) => {
    const snippet = `from audiocraft.models import MAGNeT

model = MAGNeT.get_pretrained("${model.id}")

descriptions = ['disco beat', 'energetic EDM', 'funky groove']
wav = model.generate(descriptions) # generates 3 samples.`;
    return [snippet];
};
|
|
1043
|
+
/** Python snippet for generating sound effects with Audiocraft AudioGen. */
const audiogen = (model) => {
    const snippet = `from audiocraft.models import AudioGen

model = AudioGen.get_pretrained("${model.id}")
model.set_generation_params(duration=5) # generate 5 seconds.
descriptions = ['dog barking', 'sirene of an emergency vehicle', 'footsteps in a corridor']
wav = model.generate(descriptions) # generates 3 samples.`;
    return [snippet];
};
|
|
1051
|
+
/** Dispatch an Audiocraft repo to the snippet matching its tag (musicgen/audiogen/magnet). */
const audiocraft = (model) => {
    if (model.tags.includes("musicgen")) {
        return musicgen(model);
    }
    if (model.tags.includes("audiogen")) {
        return audiogen(model);
    }
    if (model.tags.includes("magnet")) {
        return magnet(model);
    }
    return [`# Type of model unknown.`];
};
|
|
1065
|
+
exports.audiocraft = audiocraft;
|
|
1066
|
+
/** Shell snippet for transcribing audio with the WhisperKit CLI on macOS. */
const whisperkit = () => {
    const snippet = `# Install CLI with Homebrew on macOS device
brew install whisperkit-cli

# View all available inference options
whisperkit-cli transcribe --help

# Download and run inference using whisper base model
whisperkit-cli transcribe --audio-path /path/to/audio.mp3

# Or use your preferred model variant
whisperkit-cli transcribe --model "large-v3" --model-prefix "distil" --audio-path /path/to/audio.mp3 --verbose`;
    return [snippet];
};
|
|
1079
|
+
exports.whisperkit = whisperkit;
|
|
1080
|
+
/** Python snippet for loading 3DTopia-XL and generating from a conditioning image. */
const threedtopia_xl = (model) => {
    const snippet = `from threedtopia_xl.models import threedtopia_xl

model = threedtopia_xl.from_pretrained("${model.id}")
model.generate(cond="path/to/image.png")`;
    return [snippet];
};
|
|
1086
|
+
exports.threedtopia_xl = threedtopia_xl;
|
|
1087
|
+
/** Python snippet for loading a Hezar model from the Hub. */
const hezar = (model) => {
    const snippet = `from hezar import Model

model = Model.load("${model.id}")`;
    return [snippet];
};
|
|
1092
|
+
exports.hezar = hezar;
|
|
1093
|
+
//#endregion
|