@huggingface/inference 3.13.2 → 3.14.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{src → commonjs}/InferenceClient.d.ts +2 -2
- package/dist/commonjs/InferenceClient.d.ts.map +1 -0
- package/dist/commonjs/InferenceClient.js +83 -0
- package/dist/commonjs/config.js +6 -0
- package/dist/commonjs/index.d.ts +7 -0
- package/dist/commonjs/index.d.ts.map +1 -0
- package/dist/commonjs/index.js +49 -0
- package/dist/commonjs/lib/InferenceOutputError.js +10 -0
- package/dist/commonjs/lib/getDefaultTask.js +48 -0
- package/dist/{src → commonjs}/lib/getInferenceProviderMapping.d.ts +1 -1
- package/dist/{src → commonjs}/lib/getInferenceProviderMapping.d.ts.map +1 -1
- package/dist/commonjs/lib/getInferenceProviderMapping.js +81 -0
- package/dist/{src → commonjs}/lib/getProviderHelper.d.ts +2 -2
- package/dist/{src → commonjs}/lib/getProviderHelper.d.ts.map +1 -1
- package/dist/commonjs/lib/getProviderHelper.js +168 -0
- package/dist/commonjs/lib/isUrl.js +6 -0
- package/dist/{src → commonjs}/lib/makeRequestOptions.d.ts +3 -3
- package/dist/{src → commonjs}/lib/makeRequestOptions.d.ts.map +1 -1
- package/dist/commonjs/lib/makeRequestOptions.js +161 -0
- package/dist/commonjs/package.d.ts +3 -0
- package/dist/commonjs/package.d.ts.map +1 -0
- package/dist/commonjs/package.js +6 -0
- package/dist/commonjs/package.json +3 -0
- package/dist/{src → commonjs}/providers/black-forest-labs.d.ts +2 -2
- package/dist/{src → commonjs}/providers/black-forest-labs.d.ts.map +1 -1
- package/dist/commonjs/providers/black-forest-labs.js +82 -0
- package/dist/{src → commonjs}/providers/cerebras.d.ts +1 -1
- package/dist/{src → commonjs}/providers/cerebras.d.ts.map +1 -1
- package/dist/commonjs/providers/cerebras.js +26 -0
- package/dist/{src → commonjs}/providers/cohere.d.ts +1 -1
- package/dist/{src → commonjs}/providers/cohere.d.ts.map +1 -1
- package/dist/commonjs/providers/cohere.js +29 -0
- package/dist/{src → commonjs}/providers/consts.d.ts +3 -3
- package/dist/commonjs/providers/consts.d.ts.map +1 -0
- package/dist/commonjs/providers/consts.js +35 -0
- package/dist/{src → commonjs}/providers/fal-ai.d.ts +3 -3
- package/dist/{src → commonjs}/providers/fal-ai.d.ts.map +1 -1
- package/dist/commonjs/providers/fal-ai.js +216 -0
- package/dist/{src → commonjs}/providers/featherless-ai.d.ts +2 -2
- package/dist/{src → commonjs}/providers/featherless-ai.d.ts.map +1 -1
- package/dist/commonjs/providers/featherless-ai.js +38 -0
- package/dist/{src → commonjs}/providers/fireworks-ai.d.ts +1 -1
- package/dist/commonjs/providers/fireworks-ai.d.ts.map +1 -0
- package/dist/commonjs/providers/fireworks-ai.js +29 -0
- package/dist/{src → commonjs}/providers/groq.d.ts +1 -1
- package/dist/{src → commonjs}/providers/groq.d.ts.map +1 -1
- package/dist/commonjs/providers/groq.js +39 -0
- package/dist/{src → commonjs}/providers/hf-inference.d.ts +6 -6
- package/dist/{src → commonjs}/providers/hf-inference.d.ts.map +1 -1
- package/dist/commonjs/providers/hf-inference.js +432 -0
- package/dist/{src → commonjs}/providers/hyperbolic.d.ts +2 -2
- package/dist/{src → commonjs}/providers/hyperbolic.d.ts.map +1 -1
- package/dist/commonjs/providers/hyperbolic.js +78 -0
- package/dist/{src → commonjs}/providers/nebius.d.ts +2 -2
- package/dist/{src → commonjs}/providers/nebius.d.ts.map +1 -1
- package/dist/commonjs/providers/nebius.js +70 -0
- package/dist/{src → commonjs}/providers/novita.d.ts +2 -2
- package/dist/{src → commonjs}/providers/novita.d.ts.map +1 -1
- package/dist/commonjs/providers/novita.js +73 -0
- package/dist/{src → commonjs}/providers/nscale.d.ts +2 -2
- package/dist/{src → commonjs}/providers/nscale.d.ts.map +1 -1
- package/dist/commonjs/providers/nscale.js +46 -0
- package/dist/{src → commonjs}/providers/openai.d.ts +1 -1
- package/dist/{src → commonjs}/providers/openai.d.ts.map +1 -1
- package/dist/commonjs/providers/openai.js +15 -0
- package/dist/{src → commonjs}/providers/ovhcloud.d.ts +2 -2
- package/dist/{src → commonjs}/providers/ovhcloud.d.ts.map +1 -1
- package/dist/commonjs/providers/ovhcloud.js +60 -0
- package/dist/{src → commonjs}/providers/providerHelper.d.ts +4 -4
- package/dist/{src → commonjs}/providers/providerHelper.d.ts.map +1 -1
- package/dist/commonjs/providers/providerHelper.js +108 -0
- package/dist/{src → commonjs}/providers/replicate.d.ts +2 -2
- package/dist/{src → commonjs}/providers/replicate.d.ts.map +1 -1
- package/dist/commonjs/providers/replicate.js +135 -0
- package/dist/{src → commonjs}/providers/sambanova.d.ts +3 -3
- package/dist/{src → commonjs}/providers/sambanova.d.ts.map +1 -1
- package/dist/commonjs/providers/sambanova.js +49 -0
- package/dist/{src → commonjs}/providers/together.d.ts +2 -2
- package/dist/{src → commonjs}/providers/together.d.ts.map +1 -1
- package/dist/commonjs/providers/together.js +71 -0
- package/dist/{src → commonjs}/snippets/getInferenceSnippets.d.ts +2 -2
- package/dist/{src → commonjs}/snippets/getInferenceSnippets.d.ts.map +1 -1
- package/dist/commonjs/snippets/getInferenceSnippets.js +312 -0
- package/dist/commonjs/snippets/index.js +5 -0
- package/dist/commonjs/snippets/templates.exported.js +81 -0
- package/dist/{src → commonjs}/tasks/audio/audioClassification.d.ts +2 -2
- package/dist/{src → commonjs}/tasks/audio/audioClassification.d.ts.map +1 -1
- package/dist/commonjs/tasks/audio/audioClassification.js +21 -0
- package/dist/{src → commonjs}/tasks/audio/audioToAudio.d.ts +2 -2
- package/dist/commonjs/tasks/audio/audioToAudio.d.ts.map +1 -0
- package/dist/commonjs/tasks/audio/audioToAudio.js +22 -0
- package/dist/{src → commonjs}/tasks/audio/automaticSpeechRecognition.d.ts +2 -2
- package/dist/commonjs/tasks/audio/automaticSpeechRecognition.d.ts.map +1 -0
- package/dist/commonjs/tasks/audio/automaticSpeechRecognition.js +25 -0
- package/dist/{src → commonjs}/tasks/audio/textToSpeech.d.ts +1 -1
- package/dist/commonjs/tasks/audio/textToSpeech.d.ts.map +1 -0
- package/dist/commonjs/tasks/audio/textToSpeech.js +19 -0
- package/dist/{src → commonjs}/tasks/audio/utils.d.ts +1 -1
- package/dist/commonjs/tasks/audio/utils.d.ts.map +1 -0
- package/dist/commonjs/tasks/audio/utils.js +12 -0
- package/dist/{src → commonjs}/tasks/custom/request.d.ts +1 -1
- package/dist/commonjs/tasks/custom/request.d.ts.map +1 -0
- package/dist/commonjs/tasks/custom/request.js +17 -0
- package/dist/{src → commonjs}/tasks/custom/streamingRequest.d.ts +1 -1
- package/dist/commonjs/tasks/custom/streamingRequest.d.ts.map +1 -0
- package/dist/commonjs/tasks/custom/streamingRequest.js +16 -0
- package/dist/{src → commonjs}/tasks/cv/imageClassification.d.ts +2 -2
- package/dist/commonjs/tasks/cv/imageClassification.d.ts.map +1 -0
- package/dist/commonjs/tasks/cv/imageClassification.js +21 -0
- package/dist/{src → commonjs}/tasks/cv/imageSegmentation.d.ts +2 -2
- package/dist/commonjs/tasks/cv/imageSegmentation.d.ts.map +1 -0
- package/dist/commonjs/tasks/cv/imageSegmentation.js +21 -0
- package/dist/{src → commonjs}/tasks/cv/imageToImage.d.ts +1 -1
- package/dist/commonjs/tasks/cv/imageToImage.d.ts.map +1 -0
- package/dist/commonjs/tasks/cv/imageToImage.js +20 -0
- package/dist/{src → commonjs}/tasks/cv/imageToText.d.ts +2 -2
- package/dist/commonjs/tasks/cv/imageToText.d.ts.map +1 -0
- package/dist/commonjs/tasks/cv/imageToText.js +20 -0
- package/dist/{src → commonjs}/tasks/cv/objectDetection.d.ts +2 -2
- package/dist/commonjs/tasks/cv/objectDetection.d.ts.map +1 -0
- package/dist/commonjs/tasks/cv/objectDetection.js +21 -0
- package/dist/{src → commonjs}/tasks/cv/textToImage.d.ts +1 -1
- package/dist/commonjs/tasks/cv/textToImage.d.ts.map +1 -0
- package/dist/commonjs/tasks/cv/textToImage.js +17 -0
- package/dist/{src → commonjs}/tasks/cv/textToVideo.d.ts +1 -1
- package/dist/commonjs/tasks/cv/textToVideo.d.ts.map +1 -0
- package/dist/commonjs/tasks/cv/textToVideo.js +17 -0
- package/dist/{src → commonjs}/tasks/cv/utils.d.ts +1 -1
- package/dist/commonjs/tasks/cv/utils.d.ts.map +1 -0
- package/dist/commonjs/tasks/cv/utils.js +7 -0
- package/dist/{src → commonjs}/tasks/cv/zeroShotImageClassification.d.ts +1 -1
- package/dist/commonjs/tasks/cv/zeroShotImageClassification.d.ts.map +1 -0
- package/dist/commonjs/tasks/cv/zeroShotImageClassification.js +39 -0
- package/dist/commonjs/tasks/index.d.ts +33 -0
- package/dist/commonjs/tasks/index.d.ts.map +1 -0
- package/dist/commonjs/tasks/index.js +54 -0
- package/dist/{src → commonjs}/tasks/multimodal/documentQuestionAnswering.d.ts +1 -1
- package/dist/commonjs/tasks/multimodal/documentQuestionAnswering.d.ts.map +1 -0
- package/dist/commonjs/tasks/multimodal/documentQuestionAnswering.js +27 -0
- package/dist/{src → commonjs}/tasks/multimodal/visualQuestionAnswering.d.ts +1 -1
- package/dist/commonjs/tasks/multimodal/visualQuestionAnswering.d.ts.map +1 -0
- package/dist/commonjs/tasks/multimodal/visualQuestionAnswering.js +27 -0
- package/dist/{src → commonjs}/tasks/nlp/chatCompletion.d.ts +1 -1
- package/dist/{src → commonjs}/tasks/nlp/chatCompletion.d.ts.map +1 -1
- package/dist/commonjs/tasks/nlp/chatCompletion.js +18 -0
- package/dist/{src → commonjs}/tasks/nlp/chatCompletionStream.d.ts +1 -1
- package/dist/{src → commonjs}/tasks/nlp/chatCompletionStream.d.ts.map +1 -1
- package/dist/commonjs/tasks/nlp/chatCompletionStream.js +17 -0
- package/dist/{src → commonjs}/tasks/nlp/featureExtraction.d.ts +1 -1
- package/dist/commonjs/tasks/nlp/featureExtraction.d.ts.map +1 -0
- package/dist/commonjs/tasks/nlp/featureExtraction.js +18 -0
- package/dist/{src → commonjs}/tasks/nlp/fillMask.d.ts +1 -1
- package/dist/commonjs/tasks/nlp/fillMask.d.ts.map +1 -0
- package/dist/commonjs/tasks/nlp/fillMask.js +18 -0
- package/dist/{src → commonjs}/tasks/nlp/questionAnswering.d.ts +1 -1
- package/dist/commonjs/tasks/nlp/questionAnswering.d.ts.map +1 -0
- package/dist/commonjs/tasks/nlp/questionAnswering.js +18 -0
- package/dist/{src → commonjs}/tasks/nlp/sentenceSimilarity.d.ts +1 -1
- package/dist/commonjs/tasks/nlp/sentenceSimilarity.d.ts.map +1 -0
- package/dist/commonjs/tasks/nlp/sentenceSimilarity.js +18 -0
- package/dist/{src → commonjs}/tasks/nlp/summarization.d.ts +1 -1
- package/dist/commonjs/tasks/nlp/summarization.d.ts.map +1 -0
- package/dist/commonjs/tasks/nlp/summarization.js +18 -0
- package/dist/{src → commonjs}/tasks/nlp/tableQuestionAnswering.d.ts +1 -1
- package/dist/{src → commonjs}/tasks/nlp/tableQuestionAnswering.d.ts.map +1 -1
- package/dist/commonjs/tasks/nlp/tableQuestionAnswering.js +18 -0
- package/dist/{src → commonjs}/tasks/nlp/textClassification.d.ts +1 -1
- package/dist/commonjs/tasks/nlp/textClassification.d.ts.map +1 -0
- package/dist/commonjs/tasks/nlp/textClassification.js +18 -0
- package/dist/{src → commonjs}/tasks/nlp/textGeneration.d.ts +1 -1
- package/dist/commonjs/tasks/nlp/textGeneration.d.ts.map +1 -0
- package/dist/commonjs/tasks/nlp/textGeneration.js +18 -0
- package/dist/{src → commonjs}/tasks/nlp/textGenerationStream.d.ts +1 -1
- package/dist/commonjs/tasks/nlp/textGenerationStream.d.ts.map +1 -0
- package/dist/commonjs/tasks/nlp/textGenerationStream.js +17 -0
- package/dist/{src → commonjs}/tasks/nlp/tokenClassification.d.ts +1 -1
- package/dist/{src → commonjs}/tasks/nlp/tokenClassification.d.ts.map +1 -1
- package/dist/commonjs/tasks/nlp/tokenClassification.js +18 -0
- package/dist/{src → commonjs}/tasks/nlp/translation.d.ts +1 -1
- package/dist/commonjs/tasks/nlp/translation.d.ts.map +1 -0
- package/dist/commonjs/tasks/nlp/translation.js +18 -0
- package/dist/{src → commonjs}/tasks/nlp/zeroShotClassification.d.ts +1 -1
- package/dist/{src → commonjs}/tasks/nlp/zeroShotClassification.d.ts.map +1 -1
- package/dist/commonjs/tasks/nlp/zeroShotClassification.js +18 -0
- package/dist/{src → commonjs}/tasks/tabular/tabularClassification.d.ts +1 -1
- package/dist/commonjs/tasks/tabular/tabularClassification.d.ts.map +1 -0
- package/dist/commonjs/tasks/tabular/tabularClassification.js +20 -0
- package/dist/{src → commonjs}/tasks/tabular/tabularRegression.d.ts +1 -1
- package/dist/commonjs/tasks/tabular/tabularRegression.d.ts.map +1 -0
- package/dist/commonjs/tasks/tabular/tabularRegression.js +20 -0
- package/dist/{src → commonjs}/types.d.ts +1 -1
- package/dist/{src → commonjs}/types.d.ts.map +1 -1
- package/dist/commonjs/types.js +23 -0
- package/dist/commonjs/utils/base64FromBytes.js +15 -0
- package/dist/commonjs/utils/delay.js +8 -0
- package/dist/commonjs/utils/distributive-omit.js +7 -0
- package/dist/commonjs/utils/isBackend.js +6 -0
- package/dist/commonjs/utils/isFrontend.js +5 -0
- package/dist/commonjs/utils/omit.js +13 -0
- package/dist/commonjs/utils/pick.js +13 -0
- package/dist/{src → commonjs}/utils/request.d.ts +2 -2
- package/dist/{src → commonjs}/utils/request.d.ts.map +1 -1
- package/dist/commonjs/utils/request.js +116 -0
- package/dist/commonjs/utils/toArray.js +9 -0
- package/dist/commonjs/utils/typedEntries.js +6 -0
- package/dist/commonjs/utils/typedInclude.js +6 -0
- package/dist/commonjs/vendor/fetch-event-source/parse.d.ts.map +1 -0
- package/dist/commonjs/vendor/fetch-event-source/parse.js +185 -0
- package/dist/commonjs/vendor/fetch-event-source/parse.spec.js +370 -0
- package/dist/esm/InferenceClient.d.ts +32 -0
- package/dist/esm/InferenceClient.d.ts.map +1 -0
- package/dist/esm/InferenceClient.js +44 -0
- package/dist/esm/config.d.ts +4 -0
- package/dist/esm/config.d.ts.map +1 -0
- package/dist/esm/config.js +3 -0
- package/dist/esm/index.d.ts +7 -0
- package/dist/esm/index.d.ts.map +1 -0
- package/dist/{src/index.d.ts → esm/index.js} +4 -5
- package/dist/esm/lib/InferenceOutputError.d.ts +4 -0
- package/dist/esm/lib/InferenceOutputError.d.ts.map +1 -0
- package/dist/esm/lib/InferenceOutputError.js +6 -0
- package/dist/esm/lib/getDefaultTask.d.ts +11 -0
- package/dist/esm/lib/getDefaultTask.d.ts.map +1 -0
- package/dist/esm/lib/getDefaultTask.js +45 -0
- package/dist/esm/lib/getInferenceProviderMapping.d.ts +25 -0
- package/dist/esm/lib/getInferenceProviderMapping.d.ts.map +1 -0
- package/dist/esm/lib/getInferenceProviderMapping.js +75 -0
- package/dist/esm/lib/getProviderHelper.d.ts +37 -0
- package/dist/esm/lib/getProviderHelper.d.ts.map +1 -0
- package/dist/esm/lib/getProviderHelper.js +131 -0
- package/dist/esm/lib/isUrl.d.ts +2 -0
- package/dist/esm/lib/isUrl.d.ts.map +1 -0
- package/dist/esm/lib/isUrl.js +3 -0
- package/dist/esm/lib/makeRequestOptions.d.ts +31 -0
- package/dist/esm/lib/makeRequestOptions.d.ts.map +1 -0
- package/dist/esm/lib/makeRequestOptions.js +157 -0
- package/dist/esm/package.d.ts +3 -0
- package/dist/esm/package.d.ts.map +1 -0
- package/dist/esm/package.js +3 -0
- package/dist/esm/package.json +3 -0
- package/dist/esm/providers/black-forest-labs.d.ts +15 -0
- package/dist/esm/providers/black-forest-labs.d.ts.map +1 -0
- package/dist/esm/providers/black-forest-labs.js +78 -0
- package/dist/esm/providers/cerebras.d.ts +21 -0
- package/dist/esm/providers/cerebras.d.ts.map +1 -0
- package/dist/esm/providers/cerebras.js +22 -0
- package/dist/esm/providers/cohere.d.ts +22 -0
- package/dist/esm/providers/cohere.d.ts.map +1 -0
- package/dist/esm/providers/cohere.js +25 -0
- package/dist/esm/providers/consts.d.ts +12 -0
- package/dist/esm/providers/consts.d.ts.map +1 -0
- package/dist/esm/providers/consts.js +32 -0
- package/dist/esm/providers/fal-ai.d.ts +42 -0
- package/dist/esm/providers/fal-ai.d.ts.map +1 -0
- package/dist/esm/providers/fal-ai.js +209 -0
- package/dist/esm/providers/featherless-ai.d.ts +22 -0
- package/dist/esm/providers/featherless-ai.d.ts.map +1 -0
- package/dist/esm/providers/featherless-ai.js +33 -0
- package/dist/esm/providers/fireworks-ai.d.ts +22 -0
- package/dist/esm/providers/fireworks-ai.d.ts.map +1 -0
- package/dist/esm/providers/fireworks-ai.js +25 -0
- package/dist/esm/providers/groq.d.ts +10 -0
- package/dist/esm/providers/groq.d.ts.map +1 -0
- package/dist/esm/providers/groq.js +34 -0
- package/dist/esm/providers/hf-inference.d.ts +131 -0
- package/dist/esm/providers/hf-inference.d.ts.map +1 -0
- package/dist/esm/providers/hf-inference.js +400 -0
- package/dist/esm/providers/hyperbolic.d.ts +48 -0
- package/dist/esm/providers/hyperbolic.d.ts.map +1 -0
- package/dist/esm/providers/hyperbolic.js +72 -0
- package/dist/esm/providers/nebius.d.ts +49 -0
- package/dist/esm/providers/nebius.d.ts.map +1 -0
- package/dist/esm/providers/nebius.js +63 -0
- package/dist/esm/providers/novita.d.ts +22 -0
- package/dist/esm/providers/novita.d.ts.map +1 -0
- package/dist/esm/providers/novita.js +67 -0
- package/dist/esm/providers/nscale.d.ts +35 -0
- package/dist/esm/providers/nscale.d.ts.map +1 -0
- package/dist/esm/providers/nscale.js +41 -0
- package/dist/esm/providers/openai.d.ts +8 -0
- package/dist/esm/providers/openai.d.ts.map +1 -0
- package/dist/esm/providers/openai.js +11 -0
- package/dist/esm/providers/ovhcloud.d.ts +38 -0
- package/dist/esm/providers/ovhcloud.d.ts.map +1 -0
- package/dist/esm/providers/ovhcloud.js +55 -0
- package/dist/esm/providers/providerHelper.d.ts +186 -0
- package/dist/esm/providers/providerHelper.d.ts.map +1 -0
- package/dist/esm/providers/providerHelper.js +102 -0
- package/dist/esm/providers/replicate.d.ts +25 -0
- package/dist/esm/providers/replicate.d.ts.map +1 -0
- package/dist/esm/providers/replicate.js +129 -0
- package/dist/esm/providers/sambanova.d.ts +14 -0
- package/dist/esm/providers/sambanova.d.ts.map +1 -0
- package/dist/esm/providers/sambanova.js +44 -0
- package/dist/esm/providers/together.d.ts +49 -0
- package/dist/esm/providers/together.d.ts.map +1 -0
- package/dist/esm/providers/together.js +65 -0
- package/dist/esm/snippets/getInferenceSnippets.d.ts +9 -0
- package/dist/esm/snippets/getInferenceSnippets.d.ts.map +1 -0
- package/dist/esm/snippets/getInferenceSnippets.js +309 -0
- package/dist/esm/snippets/index.d.ts +2 -0
- package/dist/esm/snippets/index.d.ts.map +1 -0
- package/dist/esm/snippets/index.js +1 -0
- package/dist/esm/snippets/templates.exported.d.ts +2 -0
- package/dist/esm/snippets/templates.exported.d.ts.map +1 -0
- package/dist/esm/snippets/templates.exported.js +78 -0
- package/dist/esm/tasks/audio/audioClassification.d.ts +10 -0
- package/dist/esm/tasks/audio/audioClassification.d.ts.map +1 -0
- package/dist/esm/tasks/audio/audioClassification.js +18 -0
- package/dist/esm/tasks/audio/audioToAudio.d.ts +29 -0
- package/dist/esm/tasks/audio/audioToAudio.d.ts.map +1 -0
- package/dist/esm/tasks/audio/audioToAudio.js +19 -0
- package/dist/esm/tasks/audio/automaticSpeechRecognition.d.ts +10 -0
- package/dist/esm/tasks/audio/automaticSpeechRecognition.d.ts.map +1 -0
- package/dist/esm/tasks/audio/automaticSpeechRecognition.js +22 -0
- package/dist/esm/tasks/audio/textToSpeech.d.ts +10 -0
- package/dist/esm/tasks/audio/textToSpeech.d.ts.map +1 -0
- package/dist/esm/tasks/audio/textToSpeech.js +16 -0
- package/dist/esm/tasks/audio/utils.d.ts +12 -0
- package/dist/esm/tasks/audio/utils.d.ts.map +1 -0
- package/dist/esm/tasks/audio/utils.js +9 -0
- package/dist/esm/tasks/custom/request.d.ts +10 -0
- package/dist/esm/tasks/custom/request.d.ts.map +1 -0
- package/dist/esm/tasks/custom/request.js +14 -0
- package/dist/esm/tasks/custom/streamingRequest.d.ts +10 -0
- package/dist/esm/tasks/custom/streamingRequest.d.ts.map +1 -0
- package/dist/esm/tasks/custom/streamingRequest.js +13 -0
- package/dist/esm/tasks/cv/imageClassification.d.ts +10 -0
- package/dist/esm/tasks/cv/imageClassification.d.ts.map +1 -0
- package/dist/esm/tasks/cv/imageClassification.js +18 -0
- package/dist/esm/tasks/cv/imageSegmentation.d.ts +10 -0
- package/dist/esm/tasks/cv/imageSegmentation.d.ts.map +1 -0
- package/dist/esm/tasks/cv/imageSegmentation.js +18 -0
- package/dist/esm/tasks/cv/imageToImage.d.ts +9 -0
- package/dist/esm/tasks/cv/imageToImage.d.ts.map +1 -0
- package/dist/esm/tasks/cv/imageToImage.js +17 -0
- package/dist/esm/tasks/cv/imageToText.d.ts +9 -0
- package/dist/esm/tasks/cv/imageToText.d.ts.map +1 -0
- package/dist/esm/tasks/cv/imageToText.js +17 -0
- package/dist/esm/tasks/cv/objectDetection.d.ts +10 -0
- package/dist/esm/tasks/cv/objectDetection.d.ts.map +1 -0
- package/dist/esm/tasks/cv/objectDetection.js +18 -0
- package/dist/esm/tasks/cv/textToImage.d.ts +18 -0
- package/dist/esm/tasks/cv/textToImage.d.ts.map +1 -0
- package/dist/esm/tasks/cv/textToImage.js +14 -0
- package/dist/esm/tasks/cv/textToVideo.d.ts +6 -0
- package/dist/esm/tasks/cv/textToVideo.d.ts.map +1 -0
- package/dist/esm/tasks/cv/textToVideo.js +14 -0
- package/dist/esm/tasks/cv/utils.d.ts +11 -0
- package/dist/esm/tasks/cv/utils.d.ts.map +1 -0
- package/dist/esm/tasks/cv/utils.js +4 -0
- package/dist/esm/tasks/cv/zeroShotImageClassification.d.ts +18 -0
- package/dist/esm/tasks/cv/zeroShotImageClassification.d.ts.map +1 -0
- package/dist/esm/tasks/cv/zeroShotImageClassification.js +36 -0
- package/dist/esm/tasks/index.d.ts +33 -0
- package/dist/esm/tasks/index.d.ts.map +1 -0
- package/dist/esm/tasks/index.js +38 -0
- package/dist/esm/tasks/multimodal/documentQuestionAnswering.d.ts +12 -0
- package/dist/esm/tasks/multimodal/documentQuestionAnswering.d.ts.map +1 -0
- package/dist/esm/tasks/multimodal/documentQuestionAnswering.js +24 -0
- package/dist/esm/tasks/multimodal/visualQuestionAnswering.d.ts +12 -0
- package/dist/esm/tasks/multimodal/visualQuestionAnswering.d.ts.map +1 -0
- package/dist/esm/tasks/multimodal/visualQuestionAnswering.js +24 -0
- package/dist/esm/tasks/nlp/chatCompletion.d.ts +7 -0
- package/dist/esm/tasks/nlp/chatCompletion.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/chatCompletion.js +15 -0
- package/dist/esm/tasks/nlp/chatCompletionStream.d.ts +7 -0
- package/dist/esm/tasks/nlp/chatCompletionStream.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/chatCompletionStream.js +14 -0
- package/dist/esm/tasks/nlp/featureExtraction.d.ts +17 -0
- package/dist/esm/tasks/nlp/featureExtraction.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/featureExtraction.js +15 -0
- package/dist/esm/tasks/nlp/fillMask.d.ts +8 -0
- package/dist/esm/tasks/nlp/fillMask.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/fillMask.js +15 -0
- package/dist/esm/tasks/nlp/questionAnswering.d.ts +8 -0
- package/dist/esm/tasks/nlp/questionAnswering.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/questionAnswering.js +15 -0
- package/dist/esm/tasks/nlp/sentenceSimilarity.d.ts +8 -0
- package/dist/esm/tasks/nlp/sentenceSimilarity.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/sentenceSimilarity.js +15 -0
- package/dist/esm/tasks/nlp/summarization.d.ts +8 -0
- package/dist/esm/tasks/nlp/summarization.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/summarization.js +15 -0
- package/dist/esm/tasks/nlp/tableQuestionAnswering.d.ts +8 -0
- package/dist/esm/tasks/nlp/tableQuestionAnswering.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/tableQuestionAnswering.js +15 -0
- package/dist/esm/tasks/nlp/textClassification.d.ts +8 -0
- package/dist/esm/tasks/nlp/textClassification.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/textClassification.js +15 -0
- package/dist/esm/tasks/nlp/textGeneration.d.ts +8 -0
- package/dist/esm/tasks/nlp/textGeneration.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/textGeneration.js +15 -0
- package/dist/esm/tasks/nlp/textGenerationStream.d.ts +81 -0
- package/dist/esm/tasks/nlp/textGenerationStream.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/textGenerationStream.js +14 -0
- package/dist/esm/tasks/nlp/tokenClassification.d.ts +8 -0
- package/dist/esm/tasks/nlp/tokenClassification.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/tokenClassification.js +15 -0
- package/dist/esm/tasks/nlp/translation.d.ts +8 -0
- package/dist/esm/tasks/nlp/translation.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/translation.js +15 -0
- package/dist/esm/tasks/nlp/zeroShotClassification.d.ts +8 -0
- package/dist/esm/tasks/nlp/zeroShotClassification.d.ts.map +1 -0
- package/dist/esm/tasks/nlp/zeroShotClassification.js +15 -0
- package/dist/esm/tasks/tabular/tabularClassification.d.ts +20 -0
- package/dist/esm/tasks/tabular/tabularClassification.d.ts.map +1 -0
- package/dist/esm/tasks/tabular/tabularClassification.js +17 -0
- package/dist/esm/tasks/tabular/tabularRegression.d.ts +20 -0
- package/dist/esm/tasks/tabular/tabularRegression.d.ts.map +1 -0
- package/dist/esm/tasks/tabular/tabularRegression.js +17 -0
- package/dist/esm/types.d.ts +97 -0
- package/dist/esm/types.d.ts.map +1 -0
- package/dist/esm/types.js +20 -0
- package/dist/esm/utils/base64FromBytes.d.ts +2 -0
- package/dist/esm/utils/base64FromBytes.d.ts.map +1 -0
- package/dist/esm/utils/base64FromBytes.js +12 -0
- package/dist/esm/utils/delay.d.ts +2 -0
- package/dist/esm/utils/delay.d.ts.map +1 -0
- package/dist/esm/utils/delay.js +5 -0
- package/dist/esm/utils/distributive-omit.d.ts +9 -0
- package/dist/esm/utils/distributive-omit.d.ts.map +1 -0
- package/dist/esm/utils/distributive-omit.js +6 -0
- package/dist/esm/utils/isBackend.d.ts +2 -0
- package/dist/esm/utils/isBackend.d.ts.map +1 -0
- package/dist/esm/utils/isBackend.js +3 -0
- package/dist/esm/utils/isFrontend.d.ts +2 -0
- package/dist/esm/utils/isFrontend.d.ts.map +1 -0
- package/dist/esm/utils/isFrontend.js +2 -0
- package/dist/esm/utils/omit.d.ts +5 -0
- package/dist/esm/utils/omit.d.ts.map +1 -0
- package/dist/esm/utils/omit.js +10 -0
- package/dist/esm/utils/pick.d.ts +5 -0
- package/dist/esm/utils/pick.d.ts.map +1 -0
- package/dist/esm/utils/pick.js +10 -0
- package/dist/esm/utils/request.d.ts +28 -0
- package/dist/esm/utils/request.d.ts.map +1 -0
- package/dist/esm/utils/request.js +112 -0
- package/dist/esm/utils/toArray.d.ts +2 -0
- package/dist/esm/utils/toArray.d.ts.map +1 -0
- package/dist/esm/utils/toArray.js +6 -0
- package/dist/esm/utils/typedEntries.d.ts +4 -0
- package/dist/esm/utils/typedEntries.d.ts.map +1 -0
- package/dist/esm/utils/typedEntries.js +3 -0
- package/dist/esm/utils/typedInclude.d.ts +2 -0
- package/dist/esm/utils/typedInclude.d.ts.map +1 -0
- package/dist/esm/utils/typedInclude.js +3 -0
- package/dist/esm/vendor/fetch-event-source/parse.d.ts +69 -0
- package/dist/esm/vendor/fetch-event-source/parse.d.ts.map +1 -0
- package/dist/esm/vendor/fetch-event-source/parse.js +180 -0
- package/dist/esm/vendor/fetch-event-source/parse.spec.d.ts +2 -0
- package/dist/esm/vendor/fetch-event-source/parse.spec.d.ts.map +1 -0
- package/dist/esm/vendor/fetch-event-source/parse.spec.js +335 -0
- package/package.json +26 -12
- package/src/InferenceClient.ts +4 -4
- package/src/index.ts +4 -4
- package/src/lib/getDefaultTask.ts +2 -2
- package/src/lib/getInferenceProviderMapping.ts +5 -5
- package/src/lib/getProviderHelper.ts +19 -19
- package/src/lib/makeRequestOptions.ts +8 -8
- package/src/package.ts +3 -0
- package/src/providers/black-forest-labs.ts +5 -5
- package/src/providers/cerebras.ts +1 -1
- package/src/providers/cohere.ts +1 -1
- package/src/providers/consts.ts +3 -3
- package/src/providers/fal-ai.ts +9 -9
- package/src/providers/featherless-ai.ts +3 -3
- package/src/providers/fireworks-ai.ts +1 -1
- package/src/providers/groq.ts +1 -1
- package/src/providers/hf-inference.ts +11 -11
- package/src/providers/hyperbolic.ts +4 -4
- package/src/providers/nebius.ts +4 -4
- package/src/providers/novita.ts +5 -5
- package/src/providers/nscale.ts +4 -4
- package/src/providers/openai.ts +1 -1
- package/src/providers/ovhcloud.ts +4 -4
- package/src/providers/providerHelper.ts +7 -7
- package/src/providers/replicate.ts +5 -5
- package/src/providers/sambanova.ts +4 -4
- package/src/providers/together.ts +4 -4
- package/src/snippets/getInferenceSnippets.ts +7 -7
- package/src/tasks/audio/audioClassification.ts +6 -6
- package/src/tasks/audio/audioToAudio.ts +6 -6
- package/src/tasks/audio/automaticSpeechRecognition.ts +6 -6
- package/src/tasks/audio/textToSpeech.ts +4 -4
- package/src/tasks/audio/utils.ts +2 -2
- package/src/tasks/custom/request.ts +4 -4
- package/src/tasks/custom/streamingRequest.ts +4 -4
- package/src/tasks/cv/imageClassification.ts +5 -5
- package/src/tasks/cv/imageSegmentation.ts +5 -5
- package/src/tasks/cv/imageToImage.ts +4 -4
- package/src/tasks/cv/imageToText.ts +6 -6
- package/src/tasks/cv/objectDetection.ts +5 -5
- package/src/tasks/cv/textToImage.ts +5 -5
- package/src/tasks/cv/textToVideo.ts +8 -8
- package/src/tasks/cv/utils.ts +2 -2
- package/src/tasks/cv/zeroShotImageClassification.ts +5 -5
- package/src/tasks/index.ts +32 -32
- package/src/tasks/multimodal/documentQuestionAnswering.ts +5 -5
- package/src/tasks/multimodal/visualQuestionAnswering.ts +5 -5
- package/src/tasks/nlp/chatCompletion.ts +4 -4
- package/src/tasks/nlp/chatCompletionStream.ts +4 -4
- package/src/tasks/nlp/featureExtraction.ts +4 -4
- package/src/tasks/nlp/fillMask.ts +4 -4
- package/src/tasks/nlp/questionAnswering.ts +4 -4
- package/src/tasks/nlp/sentenceSimilarity.ts +4 -4
- package/src/tasks/nlp/summarization.ts +4 -4
- package/src/tasks/nlp/tableQuestionAnswering.ts +4 -4
- package/src/tasks/nlp/textClassification.ts +4 -4
- package/src/tasks/nlp/textGeneration.ts +5 -5
- package/src/tasks/nlp/textGenerationStream.ts +4 -4
- package/src/tasks/nlp/tokenClassification.ts +4 -4
- package/src/tasks/nlp/translation.ts +4 -4
- package/src/tasks/nlp/zeroShotClassification.ts +4 -4
- package/src/tasks/tabular/tabularClassification.ts +4 -4
- package/src/tasks/tabular/tabularRegression.ts +4 -4
- package/src/types.ts +1 -1
- package/src/utils/isFrontend.ts +1 -1
- package/src/utils/omit.ts +2 -2
- package/src/utils/request.ts +5 -5
- package/src/vendor/fetch-event-source/parse.spec.ts +1 -1
- package/src/vendor/fetch-event-source/parse.ts +6 -5
- package/dist/index.cjs +0 -2868
- package/dist/index.js +0 -2812
- package/dist/src/InferenceClient.d.ts.map +0 -1
- package/dist/src/index.d.ts.map +0 -1
- package/dist/src/providers/consts.d.ts.map +0 -1
- package/dist/src/providers/fireworks-ai.d.ts.map +0 -1
- package/dist/src/tasks/audio/audioToAudio.d.ts.map +0 -1
- package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map +0 -1
- package/dist/src/tasks/audio/textToSpeech.d.ts.map +0 -1
- package/dist/src/tasks/audio/utils.d.ts.map +0 -1
- package/dist/src/tasks/custom/request.d.ts.map +0 -1
- package/dist/src/tasks/custom/streamingRequest.d.ts.map +0 -1
- package/dist/src/tasks/cv/imageClassification.d.ts.map +0 -1
- package/dist/src/tasks/cv/imageSegmentation.d.ts.map +0 -1
- package/dist/src/tasks/cv/imageToImage.d.ts.map +0 -1
- package/dist/src/tasks/cv/imageToText.d.ts.map +0 -1
- package/dist/src/tasks/cv/objectDetection.d.ts.map +0 -1
- package/dist/src/tasks/cv/textToImage.d.ts.map +0 -1
- package/dist/src/tasks/cv/textToVideo.d.ts.map +0 -1
- package/dist/src/tasks/cv/utils.d.ts.map +0 -1
- package/dist/src/tasks/cv/zeroShotImageClassification.d.ts.map +0 -1
- package/dist/src/tasks/index.d.ts +0 -33
- package/dist/src/tasks/index.d.ts.map +0 -1
- package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts.map +0 -1
- package/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts.map +0 -1
- package/dist/src/tasks/nlp/featureExtraction.d.ts.map +0 -1
- package/dist/src/tasks/nlp/fillMask.d.ts.map +0 -1
- package/dist/src/tasks/nlp/questionAnswering.d.ts.map +0 -1
- package/dist/src/tasks/nlp/sentenceSimilarity.d.ts.map +0 -1
- package/dist/src/tasks/nlp/summarization.d.ts.map +0 -1
- package/dist/src/tasks/nlp/textClassification.d.ts.map +0 -1
- package/dist/src/tasks/nlp/textGeneration.d.ts.map +0 -1
- package/dist/src/tasks/nlp/textGenerationStream.d.ts.map +0 -1
- package/dist/src/tasks/nlp/translation.d.ts.map +0 -1
- package/dist/src/tasks/tabular/tabularClassification.d.ts.map +0 -1
- package/dist/src/tasks/tabular/tabularRegression.d.ts.map +0 -1
- package/dist/src/vendor/fetch-event-source/parse.d.ts.map +0 -1
- package/dist/test/InferenceClient.spec.d.ts +0 -2
- package/dist/test/InferenceClient.spec.d.ts.map +0 -1
- package/dist/test/expect-closeto.d.ts +0 -2
- package/dist/test/expect-closeto.d.ts.map +0 -1
- package/dist/test/test-files.d.ts +0 -2
- package/dist/test/test-files.d.ts.map +0 -1
- /package/dist/{src → commonjs}/config.d.ts +0 -0
- /package/dist/{src → commonjs}/config.d.ts.map +0 -0
- /package/dist/{src → commonjs}/lib/InferenceOutputError.d.ts +0 -0
- /package/dist/{src → commonjs}/lib/InferenceOutputError.d.ts.map +0 -0
- /package/dist/{src → commonjs}/lib/getDefaultTask.d.ts +0 -0
- /package/dist/{src → commonjs}/lib/getDefaultTask.d.ts.map +0 -0
- /package/dist/{src → commonjs}/lib/isUrl.d.ts +0 -0
- /package/dist/{src → commonjs}/lib/isUrl.d.ts.map +0 -0
- /package/dist/{src → commonjs}/snippets/index.d.ts +0 -0
- /package/dist/{src → commonjs}/snippets/index.d.ts.map +0 -0
- /package/dist/{src → commonjs}/snippets/templates.exported.d.ts +0 -0
- /package/dist/{src → commonjs}/snippets/templates.exported.d.ts.map +0 -0
- /package/dist/{src → commonjs}/utils/base64FromBytes.d.ts +0 -0
- /package/dist/{src → commonjs}/utils/base64FromBytes.d.ts.map +0 -0
- /package/dist/{src → commonjs}/utils/delay.d.ts +0 -0
- /package/dist/{src → commonjs}/utils/delay.d.ts.map +0 -0
- /package/dist/{src → commonjs}/utils/distributive-omit.d.ts +0 -0
- /package/dist/{src → commonjs}/utils/distributive-omit.d.ts.map +0 -0
- /package/dist/{src → commonjs}/utils/isBackend.d.ts +0 -0
- /package/dist/{src → commonjs}/utils/isBackend.d.ts.map +0 -0
- /package/dist/{src → commonjs}/utils/isFrontend.d.ts +0 -0
- /package/dist/{src → commonjs}/utils/isFrontend.d.ts.map +0 -0
- /package/dist/{src → commonjs}/utils/omit.d.ts +0 -0
- /package/dist/{src → commonjs}/utils/omit.d.ts.map +0 -0
- /package/dist/{src → commonjs}/utils/pick.d.ts +0 -0
- /package/dist/{src → commonjs}/utils/pick.d.ts.map +0 -0
- /package/dist/{src → commonjs}/utils/toArray.d.ts +0 -0
- /package/dist/{src → commonjs}/utils/toArray.d.ts.map +0 -0
- /package/dist/{src → commonjs}/utils/typedEntries.d.ts +0 -0
- /package/dist/{src → commonjs}/utils/typedEntries.d.ts.map +0 -0
- /package/dist/{src → commonjs}/utils/typedInclude.d.ts +0 -0
- /package/dist/{src → commonjs}/utils/typedInclude.d.ts.map +0 -0
- /package/dist/{src → commonjs}/vendor/fetch-event-source/parse.d.ts +0 -0
- /package/dist/{src → commonjs}/vendor/fetch-event-source/parse.spec.d.ts +0 -0
- /package/dist/{src → commonjs}/vendor/fetch-event-source/parse.spec.d.ts.map +0 -0
|
@@ -0,0 +1,309 @@
|
|
|
1
|
+
import { Template } from "@huggingface/jinja";
|
|
2
|
+
import { getModelInputSnippet, inferenceSnippetLanguages, } from "@huggingface/tasks";
|
|
3
|
+
import { getProviderHelper } from "../lib/getProviderHelper.js";
|
|
4
|
+
import { makeRequestOptionsFromResolvedModel } from "../lib/makeRequestOptions.js";
|
|
5
|
+
import { templates } from "./templates.exported.js";
|
|
6
|
+
// Snippet clients offered per language: Python has the broadest set,
// JS offers fetch / huggingface.js / openai, shell only curl.
const PYTHON_CLIENTS = ["huggingface_hub", "fal_client", "requests", "openai"];
const JS_CLIENTS = ["fetch", "huggingface.js", "openai"];
const SH_CLIENTS = ["curl"];
// Default client list used for snippet generation, keyed by snippet language.
const CLIENTS = {
    js: [...JS_CLIENTS],
    python: [...PYTHON_CLIENTS],
    sh: [...SH_CLIENTS],
};
// Restricted client list applied when provider === "auto" (see snippetGenerator):
// only the Hugging Face first-party clients are listed in that mode.
const CLIENTS_AUTO_POLICY = {
    js: ["huggingface.js"],
    python: ["huggingface_hub"],
};
|
|
18
|
+
// Helpers to find + load templates
// True when a template body exists for the given language/client/template triple.
const hasTemplate = (language, client, templateName) => {
    const found = templates[language]?.[client]?.[templateName];
    return found !== undefined;
};
+
// Looks up the raw template string and wraps it into a render function.
// Throws when the language/client/template triple is unknown.
const loadTemplate = (language, client, templateName) => {
    const found = templates[language]?.[client]?.[templateName];
    if (!found) {
        throw new Error(`Template not found: ${language}/${client}/${templateName}`);
    }
    // Return a renderer: a fresh Template is instantiated on every call.
    return (data) => {
        return new Template(found).render({ ...data });
    };
};
|
|
27
|
+
// Pre-compiled renderers for the Python import sections; these are rendered
// separately and prepended to the generated snippet (see snippetGenerator).
const snippetImportPythonInferenceClient = loadTemplate("python", "huggingface_hub", "importInferenceClient");
const snippetImportRequests = loadTemplate("python", "requests", "importRequests");
|
|
29
|
+
// Needed for huggingface_hub basic snippets
// Maps a pipeline tag to the corresponding `huggingface_hub` InferenceClient
// method name; tags missing from this table produce no "basic" Python snippet.
const HF_PYTHON_METHODS = {
    "audio-classification": "audio_classification",
    "audio-to-audio": "audio_to_audio",
    "automatic-speech-recognition": "automatic_speech_recognition",
    "document-question-answering": "document_question_answering",
    "feature-extraction": "feature_extraction",
    "fill-mask": "fill_mask",
    "image-classification": "image_classification",
    "image-segmentation": "image_segmentation",
    "image-to-image": "image_to_image",
    "image-to-text": "image_to_text",
    "object-detection": "object_detection",
    "question-answering": "question_answering",
    "sentence-similarity": "sentence_similarity",
    summarization: "summarization",
    "table-question-answering": "table_question_answering",
    "tabular-classification": "tabular_classification",
    "tabular-regression": "tabular_regression",
    "text-classification": "text_classification",
    "text-generation": "text_generation",
    "text-to-image": "text_to_image",
    "text-to-speech": "text_to_speech",
    "text-to-video": "text_to_video",
    "token-classification": "token_classification",
    translation: "translation",
    "visual-question-answering": "visual_question_answering",
    "zero-shot-classification": "zero_shot_classification",
    "zero-shot-image-classification": "zero_shot_image_classification",
};
|
|
59
|
+
// Needed for huggingface.js basic snippets
// Maps a pipeline tag to the corresponding huggingface.js client method name;
// tags missing from this table produce no "basic" JS snippet. Note that
// "text2text-generation" deliberately reuses textGeneration.
const HF_JS_METHODS = {
    "automatic-speech-recognition": "automaticSpeechRecognition",
    "feature-extraction": "featureExtraction",
    "fill-mask": "fillMask",
    "image-classification": "imageClassification",
    "question-answering": "questionAnswering",
    "sentence-similarity": "sentenceSimilarity",
    summarization: "summarization",
    "table-question-answering": "tableQuestionAnswering",
    "text-classification": "textClassification",
    "text-generation": "textGeneration",
    "text2text-generation": "textGeneration",
    "token-classification": "tokenClassification",
    "text-to-speech": "textToSpeech",
    translation: "translation",
};
|
|
76
|
+
// Snippet generators
/**
 * Builds a snippet generator for a given template family.
 *
 * The returned function renders, for every snippet language and every client
 * of that language, the template named `templateName`, injecting the request
 * URL / headers / body computed by makeRequestOptionsFromResolvedModel.
 * Returns a flat array of { language, client, content } entries; clients
 * without a matching template (or without a mapped method name for "basic"
 * templates) are skipped.
 *
 * `inputPreparationFn` turns (model, opts) into the request payload; when
 * absent, the payload defaults to { inputs: getModelInputSnippet(model) }.
 */
const snippetGenerator = (templateName, inputPreparationFn) => {
    return (model, accessToken, provider, inferenceProviderMapping, opts) => {
        // Fall back to the hub model id when no provider-specific id is mapped.
        const providerModelId = inferenceProviderMapping?.providerId ?? model.id;
        /// Hacky: hard-code conversational templates here
        // Conversational text-generation / image-text-to-text models override
        // the template, the input preparation, and the task.
        let task = model.pipeline_tag;
        if (model.pipeline_tag &&
            ["text-generation", "image-text-to-text"].includes(model.pipeline_tag) &&
            model.tags.includes("conversational")) {
            templateName = opts?.streaming ? "conversationalStream" : "conversational";
            inputPreparationFn = prepareConversationalInput;
            task = "conversational";
        }
        let providerHelper;
        try {
            providerHelper = getProviderHelper(provider, task);
        }
        catch (e) {
            // No helper for this provider/task combination => no snippets.
            console.error(`Failed to get provider helper for ${provider} (${task})`, e);
            return [];
        }
        /// Prepare inputs + make request
        const inputs = inputPreparationFn ? inputPreparationFn(model, opts) : { inputs: getModelInputSnippet(model) };
        const request = makeRequestOptionsFromResolvedModel(providerModelId, providerHelper, {
            accessToken,
            provider,
            ...inputs,
        }, inferenceProviderMapping, {
            task,
            billTo: opts?.billTo,
        });
        /// Parse request.info.body if not a binary.
        /// This is the body sent to the provider. Important for snippets with raw payload (e.g. curl, requests, etc.)
        let providerInputs = inputs;
        const bodyAsObj = request.info.body;
        if (typeof bodyAsObj === "string") {
            try {
                providerInputs = JSON.parse(bodyAsObj);
            }
            catch (e) {
                // Best-effort: keep the user-facing inputs when parsing fails.
                console.error("Failed to parse body as JSON", e);
            }
        }
        /// Prepare template injection data
        // Both the user-facing inputs and the provider-level payload are exposed
        // to templates in several pre-serialized forms (curl/json/python/ts).
        const params = {
            accessToken,
            authorizationHeader: request.info.headers?.Authorization,
            baseUrl: removeSuffix(request.url, "/chat/completions"),
            fullUrl: request.url,
            inputs: {
                asObj: inputs,
                asCurlString: formatBody(inputs, "curl"),
                asJsonString: formatBody(inputs, "json"),
                asPythonString: formatBody(inputs, "python"),
                asTsString: formatBody(inputs, "ts"),
            },
            providerInputs: {
                asObj: providerInputs,
                asCurlString: formatBody(providerInputs, "curl"),
                asJsonString: formatBody(providerInputs, "json"),
                asPythonString: formatBody(providerInputs, "python"),
                asTsString: formatBody(providerInputs, "ts"),
            },
            model,
            provider,
            providerModelId: providerModelId ?? model.id,
            billTo: opts?.billTo,
        };
        /// Iterate over clients => check if a snippet exists => generate
        const clients = provider === "auto" ? CLIENTS_AUTO_POLICY : CLIENTS;
        return inferenceSnippetLanguages
            .map((language) => {
            const langClients = clients[language] ?? [];
            return langClients
                .map((client) => {
                if (!hasTemplate(language, client, templateName)) {
                    return;
                }
                const template = loadTemplate(language, client, templateName);
                // "basic" templates call a task-specific client method: skip the
                // client when the pipeline tag has no mapped method name.
                if (client === "huggingface_hub" && templateName.includes("basic")) {
                    if (!(model.pipeline_tag && model.pipeline_tag in HF_PYTHON_METHODS)) {
                        return;
                    }
                    params["methodName"] = HF_PYTHON_METHODS[model.pipeline_tag];
                }
                if (client === "huggingface.js" && templateName.includes("basic")) {
                    if (!(model.pipeline_tag && model.pipeline_tag in HF_JS_METHODS)) {
                        return;
                    }
                    params["methodName"] = HF_JS_METHODS[model.pipeline_tag];
                }
                /// Generate snippet
                let snippet = template(params).trim();
                if (!snippet) {
                    return;
                }
                /// Add import section separately
                if (client === "huggingface_hub") {
                    const importSection = snippetImportPythonInferenceClient({ ...params });
                    snippet = `${importSection}\n\n${snippet}`;
                }
                else if (client === "requests") {
                    // Only import base64/json in the snippet when it actually uses them.
                    const importSection = snippetImportRequests({
                        ...params,
                        importBase64: snippet.includes("base64"),
                        importJson: snippet.includes("json."),
                    });
                    snippet = `${importSection}\n\n${snippet}`;
                }
                /// Snippet is ready!
                return { language, client: client, content: snippet };
            })
                .filter((snippet) => snippet !== undefined);
        })
            .flat();
    };
};
|
|
193
|
+
// The document-QA sample input is stored as a JSON string; decode it so the
// templates receive an object (image + question).
const prepareDocumentQuestionAnsweringInput = (model) => JSON.parse(getModelInputSnippet(model));
|
|
196
|
+
// Splits the image-to-image sample input into the wire format:
// the image becomes `inputs`, the prompt moves under `parameters`.
const prepareImageToImageInput = (model) => {
    const sample = JSON.parse(getModelInputSnippet(model));
    return {
        inputs: sample.image,
        parameters: { prompt: sample.prompt },
    };
};
|
|
200
|
+
// Builds the chat-completion payload: caller-supplied messages (falling back
// to the model's sample input) plus any truthy sampling options.
// Falsy option values (e.g. 0) are intentionally omitted, as before.
const prepareConversationalInput = (model, opts) => {
    const payload = {
        messages: opts?.messages ?? getModelInputSnippet(model),
    };
    if (opts?.temperature) {
        payload.temperature = opts.temperature;
    }
    if (opts?.max_tokens) {
        payload.max_tokens = opts.max_tokens;
    }
    if (opts?.top_p) {
        payload.top_p = opts.top_p;
    }
    return payload;
};
|
|
208
|
+
// Decodes the QA sample input and keeps only the fields the templates use.
const prepareQuestionAnsweringInput = (model) => {
    const sample = JSON.parse(getModelInputSnippet(model));
    return {
        question: sample.question,
        context: sample.context,
    };
};
|
|
212
|
+
// Decodes the table-QA sample input; the table is re-serialized to a JSON
// string because the templates inline it as text.
const prepareTableQuestionAnsweringInput = (model) => {
    const sample = JSON.parse(getModelInputSnippet(model));
    return {
        query: sample.query,
        table: JSON.stringify(sample.table),
    };
};
|
|
216
|
+
// Maps each supported pipeline tag to its snippet generator. Pipeline tags
// absent from this table yield no snippets (see getInferenceSnippets).
const snippets = {
    "audio-classification": snippetGenerator("basicAudio"),
    "audio-to-audio": snippetGenerator("basicAudio"),
    "automatic-speech-recognition": snippetGenerator("basicAudio"),
    "document-question-answering": snippetGenerator("documentQuestionAnswering", prepareDocumentQuestionAnsweringInput),
    "feature-extraction": snippetGenerator("basic"),
    "fill-mask": snippetGenerator("basic"),
    "image-classification": snippetGenerator("basicImage"),
    "image-segmentation": snippetGenerator("basicImage"),
    "image-text-to-text": snippetGenerator("conversational"),
    "image-to-image": snippetGenerator("imageToImage", prepareImageToImageInput),
    "image-to-text": snippetGenerator("basicImage"),
    "object-detection": snippetGenerator("basicImage"),
    "question-answering": snippetGenerator("questionAnswering", prepareQuestionAnsweringInput),
    "sentence-similarity": snippetGenerator("basic"),
    summarization: snippetGenerator("basic"),
    "tabular-classification": snippetGenerator("tabular"),
    "tabular-regression": snippetGenerator("tabular"),
    "table-question-answering": snippetGenerator("tableQuestionAnswering", prepareTableQuestionAnsweringInput),
    "text-classification": snippetGenerator("basic"),
    "text-generation": snippetGenerator("basic"),
    "text-to-audio": snippetGenerator("textToAudio"),
    "text-to-image": snippetGenerator("textToImage"),
    "text-to-speech": snippetGenerator("textToSpeech"),
    "text-to-video": snippetGenerator("textToVideo"),
    "text2text-generation": snippetGenerator("basic"),
    "token-classification": snippetGenerator("basic"),
    translation: snippetGenerator("basic"),
    "zero-shot-classification": snippetGenerator("zeroShotClassification"),
    "zero-shot-image-classification": snippetGenerator("zeroShotImageClassification"),
};
|
|
247
|
+
/**
 * Public entry point: returns the inference snippets for a model, or an
 * empty array when its pipeline tag has no registered generator.
 */
export function getInferenceSnippets(model, accessToken, provider, inferenceProviderMapping, opts) {
    const task = model.pipeline_tag;
    if (!task || !(task in snippets)) {
        return [];
    }
    return snippets[task]?.(model, accessToken, provider, inferenceProviderMapping, opts) ?? [];
}
|
|
252
|
+
// String manipulation helpers
/**
 * Serializes a payload object into one of the textual forms injected into
 * the templates: "curl" (indented JSON), "json" (JSON without the outer
 * braces), "python" (kwargs), or "ts" (object literal without outer braces).
 * Throws for any other format.
 */
function formatBody(obj, format) {
    if (format === "curl") {
        return indentString(formatBody(obj, "json"));
    }
    if (format === "json") {
        /// Hacky: remove outer brackets to make it extendable in templates
        return JSON.stringify(obj, null, 4).split("\n").slice(1, -1).join("\n");
    }
    if (format === "python") {
        // One `key=value,` kwarg line per entry, then indent the whole block.
        // NOTE(review): the replace below is a no-op ('"' -> '"'); kept as-is
        // to preserve the published behavior.
        const kwargLines = Object.entries(obj).map(([key, value]) => {
            const formattedValue = JSON.stringify(value, null, 4).replace(/"/g, '"');
            return `${key}=${formattedValue},`;
        });
        return indentString(kwargLines.join("\n"));
    }
    if (format === "ts") {
        /// Hacky: remove outer brackets to make it extendable in templates
        return formatTsObject(obj).split("\n").slice(1, -1).join("\n");
    }
    throw new Error(`Unsupported format: ${format}`);
}
|
|
274
|
+
/**
 * Pretty-prints a value as a TS/JS object literal with 4-space indentation
 * and trailing commas. Primitives and null are emitted via JSON.stringify;
 * arrays and plain objects recurse with an increased depth.
 */
function formatTsObject(obj, depth) {
    const level = depth ?? 0;
    const closePad = " ".repeat(4 * level);
    const itemPad = " ".repeat(4 * (level + 1));
    /// Case int, boolean, string, etc.
    if (typeof obj !== "object" || obj === null) {
        return JSON.stringify(obj);
    }
    /// Case array
    if (Array.isArray(obj)) {
        const body = obj.map((item) => `${itemPad}${formatTsObject(item, level + 1)},`).join("\n");
        return `[\n${body}\n${closePad}]`;
    }
    /// Case mapping — quote keys that are not valid identifiers.
    const body = Object.entries(obj)
        .map(([key, value]) => {
            const keyStr = /^[a-zA-Z_$][a-zA-Z0-9_$]*$/.test(key) ? key : `"${key}"`;
            return `${itemPad}${keyStr}: ${formatTsObject(value, level + 1)},`;
        })
        .join("\n");
    return `{\n${body}\n${closePad}}`;
}
|
|
301
|
+
// Prefixes every line of `str` (including empty ones) with 4 spaces.
function indentString(str) {
    const lines = str.split("\n");
    return lines.map((line) => `    ${line}`).join("\n");
}
|
|
307
|
+
/**
 * Returns `str` with `suffix` removed from its end, or `str` unchanged when
 * it does not end with `suffix`.
 *
 * Bug fix: the previous implementation returned "" for an empty suffix,
 * because `str.endsWith("")` is true and `str.slice(0, -0)` is
 * `str.slice(0, 0)` — the negative-length trick breaks at zero.
 */
function removeSuffix(str, suffix) {
    if (suffix.length === 0 || !str.endsWith(suffix)) {
        return str;
    }
    return str.slice(0, -suffix.length);
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/snippets/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,oBAAoB,EAAE,KAAK,uBAAuB,EAAE,MAAM,2BAA2B,CAAC"}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export { getInferenceSnippets } from "./getInferenceSnippets.js";
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"templates.exported.d.ts","sourceRoot":"","sources":["../../../src/snippets/templates.exported.ts"],"names":[],"mappings":"AACA,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC,CA4EnE,CAAC"}
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
// Generated file - do not edit directly
|
|
2
|
+
export const templates = {
|
|
3
|
+
"js": {
|
|
4
|
+
"fetch": {
|
|
5
|
+
"basic": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
|
|
6
|
+
"basicAudio": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"audio/flac\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
|
|
7
|
+
"basicImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.json();\n\treturn result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});",
|
|
8
|
+
"textToAudio": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
|
|
9
|
+
"textToImage": "async function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n\treturn result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});",
|
|
10
|
+
"textToSpeech": "{% if model.library_name == \"transformers\" %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n{% if billTo %}\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %}\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n\tconst result = await response.blob();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n\tconst response = await fetch(\n\t\t\"{{ fullUrl }}\",\n\t\t{\n\t\t\theaders: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t\tmethod: \"POST\",\n\t\t\tbody: JSON.stringify(data),\n\t\t}\n\t);\n const result = await response.json();\n return result;\n}\n\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ",
|
|
11
|
+
"zeroShotClassification": "async function query(data) {\n const response = await fetch(\n\t\t\"{{ fullUrl }}\",\n {\n headers: {\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\n \"Content-Type\": \"application/json\",\n{% if billTo %}\n \"X-HF-Bill-To\": \"{{ billTo }}\",\n{% endif %} },\n method: \"POST\",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: [\"refund\", \"legal\", \"faq\"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});"
|
|
12
|
+
},
|
|
13
|
+
"huggingface.js": {
|
|
14
|
+
"basic": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst output = await client.{{ methodName }}({\n\tmodel: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tprovider: \"{{ provider }}\",\n}{% if billTo %}, {\n\tbillTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(output);",
|
|
15
|
+
"basicAudio": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n\tdata,\n\tmodel: \"{{ model.id }}\",\n\tprovider: \"{{ provider }}\",\n}{% if billTo %}, {\n\tbillTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(output);",
|
|
16
|
+
"basicImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\n\nconst output = await client.{{ methodName }}({\n\tdata,\n\tmodel: \"{{ model.id }}\",\n\tprovider: \"{{ provider }}\",\n}{% if billTo %}, {\n\tbillTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(output);",
|
|
17
|
+
"conversational": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst chatCompletion = await client.chatCompletion({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nconsole.log(chatCompletion.choices[0].message);",
|
|
18
|
+
"conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nlet out = \"\";\n\nconst stream = client.chatCompletionStream({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n{{ inputs.asTsString }}\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n\nfor await (const chunk of stream) {\n\tif (chunk.choices && chunk.choices.length > 0) {\n\t\tconst newContent = chunk.choices[0].delta.content;\n\t\tout += newContent;\n\t\tconsole.log(newContent);\n\t}\n}",
|
|
19
|
+
"textToImage": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst image = await client.textToImage({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n\tparameters: { num_inference_steps: 5 },\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n/// Use the generated image (it's a Blob)",
|
|
20
|
+
"textToSpeech": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst audio = await client.textToSpeech({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated audio (it's a Blob)",
|
|
21
|
+
"textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\n\nconst client = new InferenceClient(\"{{ accessToken }}\");\n\nconst video = await client.textToVideo({\n provider: \"{{ provider }}\",\n model: \"{{ model.id }}\",\n\tinputs: {{ inputs.asObj.inputs }},\n}{% if billTo %}, {\n billTo: \"{{ billTo }}\",\n}{% endif %});\n// Use the generated video (it's a Blob)"
|
|
22
|
+
},
|
|
23
|
+
"openai": {
|
|
24
|
+
"conversational": "import { OpenAI } from \"openai\";\n\nconst client = new OpenAI({\n\tbaseURL: \"{{ baseUrl }}\",\n\tapiKey: \"{{ accessToken }}\",\n{% if billTo %}\n\tdefaultHeaders: {\n\t\t\"X-HF-Bill-To\": \"{{ billTo }}\" \n\t}\n{% endif %}\n});\n\nconst chatCompletion = await client.chat.completions.create({\n\tmodel: \"{{ providerModelId }}\",\n{{ inputs.asTsString }}\n});\n\nconsole.log(chatCompletion.choices[0].message);",
|
|
25
|
+
"conversationalStream": "import { OpenAI } from \"openai\";\n\nconst client = new OpenAI({\n\tbaseURL: \"{{ baseUrl }}\",\n\tapiKey: \"{{ accessToken }}\",\n{% if billTo %}\n defaultHeaders: {\n\t\t\"X-HF-Bill-To\": \"{{ billTo }}\" \n\t}\n{% endif %}\n});\n\nconst stream = await client.chat.completions.create({\n model: \"{{ providerModelId }}\",\n{{ inputs.asTsString }}\n stream: true,\n});\n\nfor await (const chunk of stream) {\n process.stdout.write(chunk.choices[0]?.delta?.content || \"\");\n}"
|
|
26
|
+
}
|
|
27
|
+
},
|
|
28
|
+
"python": {
|
|
29
|
+
"fal_client": {
|
|
30
|
+
"textToImage": "{% if provider == \"fal-ai\" %}\nimport fal_client\n\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n \"loras\":{{ providerInputs.asObj.loras | tojson }},\n },\n)\n{% else %}\nresult = fal_client.subscribe(\n \"{{ providerModelId }}\",\n arguments={\n \"prompt\": {{ inputs.asObj.inputs }},\n },\n)\n{% endif %} \nprint(result)\n{% endif %} "
|
|
31
|
+
},
|
|
32
|
+
"huggingface_hub": {
|
|
33
|
+
"basic": "result = client.{{ methodName }}(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n)",
|
|
34
|
+
"basicAudio": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
|
|
35
|
+
"basicImage": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
|
|
36
|
+
"conversational": "completion = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
|
|
37
|
+
"conversationalStream": "stream = client.chat.completions.create(\n model=\"{{ model.id }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\") ",
|
|
38
|
+
"documentQuestionAnswering": "output = client.document_question_answering(\n \"{{ inputs.asObj.image }}\",\n question=\"{{ inputs.asObj.question }}\",\n model=\"{{ model.id }}\",\n) ",
|
|
39
|
+
"imageToImage": "# output is a PIL.Image object\nimage = client.image_to_image(\n \"{{ inputs.asObj.inputs }}\",\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\n model=\"{{ model.id }}\",\n) ",
|
|
40
|
+
"importInferenceClient": "from huggingface_hub import InferenceClient\n\nclient = InferenceClient(\n provider=\"{{ provider }}\",\n api_key=\"{{ accessToken }}\",\n{% if billTo %}\n bill_to=\"{{ billTo }}\",\n{% endif %}\n)",
|
|
41
|
+
"questionAnswering": "answer = client.question_answering(\n question=\"{{ inputs.asObj.question }}\",\n context=\"{{ inputs.asObj.context }}\",\n model=\"{{ model.id }}\",\n) ",
|
|
42
|
+
"tableQuestionAnswering": "answer = client.question_answering(\n query=\"{{ inputs.asObj.query }}\",\n table={{ inputs.asObj.table }},\n model=\"{{ model.id }}\",\n) ",
|
|
43
|
+
"textToImage": "# output is a PIL.Image object\nimage = client.text_to_image(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) ",
|
|
44
|
+
"textToSpeech": "# audio is returned as bytes\naudio = client.text_to_speech(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) \n",
|
|
45
|
+
"textToVideo": "video = client.text_to_video(\n {{ inputs.asObj.inputs }},\n model=\"{{ model.id }}\",\n) "
|
|
46
|
+
},
|
|
47
|
+
"openai": {
|
|
48
|
+
"conversational": "from openai import OpenAI\n\nclient = OpenAI(\n base_url=\"{{ baseUrl }}\",\n api_key=\"{{ accessToken }}\",\n{% if billTo %}\n default_headers={\n \"X-HF-Bill-To\": \"{{ billTo }}\"\n }\n{% endif %}\n)\n\ncompletion = client.chat.completions.create(\n model=\"{{ providerModelId }}\",\n{{ inputs.asPythonString }}\n)\n\nprint(completion.choices[0].message) ",
|
|
49
|
+
"conversationalStream": "from openai import OpenAI\n\nclient = OpenAI(\n base_url=\"{{ baseUrl }}\",\n api_key=\"{{ accessToken }}\",\n{% if billTo %}\n default_headers={\n \"X-HF-Bill-To\": \"{{ billTo }}\"\n }\n{% endif %}\n)\n\nstream = client.chat.completions.create(\n model=\"{{ providerModelId }}\",\n{{ inputs.asPythonString }}\n stream=True,\n)\n\nfor chunk in stream:\n print(chunk.choices[0].delta.content, end=\"\")"
|
|
50
|
+
},
|
|
51
|
+
"requests": {
|
|
52
|
+
"basic": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n}) ",
|
|
53
|
+
"basicAudio": "def query(filename):\n with open(filename, \"rb\") as f:\n data = f.read()\n response = requests.post(API_URL, headers={\"Content-Type\": \"audio/flac\", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})",
|
|
54
|
+
"basicImage": "def query(filename):\n with open(filename, \"rb\") as f:\n data = f.read()\n response = requests.post(API_URL, headers={\"Content-Type\": \"image/jpeg\", **headers}, data=data)\n return response.json()\n\noutput = query({{ providerInputs.asObj.inputs }})",
|
|
55
|
+
"conversational": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\nresponse = query({\n{{ providerInputs.asJsonString }}\n})\n\nprint(response[\"choices\"][0][\"message\"])",
|
|
56
|
+
"conversationalStream": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\n for line in response.iter_lines():\n if not line.startswith(b\"data:\"):\n continue\n if line.strip() == b\"data: [DONE]\":\n return\n yield json.loads(line.decode(\"utf-8\").lstrip(\"data:\").rstrip(\"/n\"))\n\nchunks = query({\n{{ providerInputs.asJsonString }},\n \"stream\": True,\n})\n\nfor chunk in chunks:\n print(chunk[\"choices\"][0][\"delta\"][\"content\"], end=\"\")",
|
|
57
|
+
"documentQuestionAnswering": "def query(payload):\n with open(payload[\"image\"], \"rb\") as f:\n img = f.read()\n payload[\"image\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {\n \"image\": \"{{ inputs.asObj.image }}\",\n \"question\": \"{{ inputs.asObj.question }}\",\n },\n}) ",
|
|
58
|
+
"imageToImage": "def query(payload):\n with open(payload[\"inputs\"], \"rb\") as f:\n img = f.read()\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n{{ providerInputs.asJsonString }}\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes)) ",
|
|
59
|
+
"importRequests": "{% if importBase64 %}\nimport base64\n{% endif %}\n{% if importJson %}\nimport json\n{% endif %}\nimport requests\n\nAPI_URL = \"{{ fullUrl }}\"\nheaders = {\n \"Authorization\": \"{{ authorizationHeader }}\",\n{% if billTo %}\n \"X-HF-Bill-To\": \"{{ billTo }}\"\n{% endif %}\n}",
|
|
60
|
+
"tabular": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nresponse = query({\n \"inputs\": {\n \"data\": {{ providerInputs.asObj.inputs }}\n },\n}) ",
|
|
61
|
+
"textToAudio": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"inputs\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
|
|
62
|
+
"textToImage": "{% if provider == \"hf-inference\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\nimage_bytes = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n})\n\n# You can access the image with PIL.Image for example\nimport io\nfrom PIL import Image\nimage = Image.open(io.BytesIO(image_bytes))\n{% endif %}",
|
|
63
|
+
"textToSpeech": "{% if model.library_name == \"transformers\" %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.content\n\naudio_bytes = query({\n \"text\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio_bytes)\n{% else %}\ndef query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\naudio, sampling_rate = query({\n \"text\": {{ inputs.asObj.inputs }},\n})\n# You can access the audio with IPython.display for example\nfrom IPython.display import Audio\nAudio(audio, rate=sampling_rate)\n{% endif %} ",
|
|
64
|
+
"zeroShotClassification": "def query(payload):\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"inputs\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]},\n}) ",
|
|
65
|
+
"zeroShotImageClassification": "def query(data):\n with open(data[\"image_path\"], \"rb\") as f:\n img = f.read()\n payload={\n \"parameters\": data[\"parameters\"],\n \"inputs\": base64.b64encode(img).decode(\"utf-8\")\n }\n response = requests.post(API_URL, headers=headers, json=payload)\n return response.json()\n\noutput = query({\n \"image_path\": {{ providerInputs.asObj.inputs }},\n \"parameters\": {\"candidate_labels\": [\"cat\", \"dog\", \"llama\"]},\n}) "
|
|
66
|
+
}
|
|
67
|
+
},
|
|
68
|
+
"sh": {
|
|
69
|
+
"curl": {
|
|
70
|
+
"basic": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n{% if billTo %}\n -H 'X-HF-Bill-To: {{ billTo }}' \\\n{% endif %}\n -d '{\n{{ providerInputs.asCurlString }}\n }'",
|
|
71
|
+
"basicAudio": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: audio/flac' \\\n{% if billTo %}\n -H 'X-HF-Bill-To: {{ billTo }}' \\\n{% endif %}\n --data-binary @{{ providerInputs.asObj.inputs }}",
|
|
72
|
+
"basicImage": "curl {{ fullUrl }} \\\n -X POST \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: image/jpeg' \\\n{% if billTo %}\n -H 'X-HF-Bill-To: {{ billTo }}' \\\n{% endif %}\n --data-binary @{{ providerInputs.asObj.inputs }}",
|
|
73
|
+
"conversational": "curl {{ fullUrl }} \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n{% if billTo %}\n -H 'X-HF-Bill-To: {{ billTo }}' \\\n{% endif %}\n -d '{\n{{ providerInputs.asCurlString }},\n \"stream\": false\n }'",
|
|
74
|
+
"conversationalStream": "curl {{ fullUrl }} \\\n -H 'Authorization: {{ authorizationHeader }}' \\\n -H 'Content-Type: application/json' \\\n{% if billTo %}\n -H 'X-HF-Bill-To: {{ billTo }}' \\\n{% endif %}\n -d '{\n{{ providerInputs.asCurlString }},\n \"stream\": true\n }'",
|
|
75
|
+
"zeroShotClassification": "curl {{ fullUrl }} \\\n -X POST \\\n -d '{\"inputs\": {{ providerInputs.asObj.inputs }}, \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]}}' \\\n -H 'Content-Type: application/json' \\\n -H 'Authorization: {{ authorizationHeader }}'\n{% if billTo %} \\\n -H 'X-HF-Bill-To: {{ billTo }}'\n{% endif %}"
|
|
76
|
+
}
|
|
77
|
+
}
|
|
78
|
+
};
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import type { AudioClassificationInput, AudioClassificationOutput } from "@huggingface/tasks";
|
|
2
|
+
import type { BaseArgs, Options } from "../../types.js";
|
|
3
|
+
import type { LegacyAudioInput } from "./utils.js";
|
|
4
|
+
export type AudioClassificationArgs = BaseArgs & (AudioClassificationInput | LegacyAudioInput);
|
|
5
|
+
/**
|
|
6
|
+
* This task reads some audio input and outputs the likelihood of classes.
|
|
7
|
+
* Recommended model: superb/hubert-large-superb-er
|
|
8
|
+
*/
|
|
9
|
+
export declare function audioClassification(args: AudioClassificationArgs, options?: Options): Promise<AudioClassificationOutput>;
|
|
10
|
+
//# sourceMappingURL=audioClassification.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"audioClassification.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/audioClassification.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,wBAAwB,EAAE,yBAAyB,EAAE,MAAM,oBAAoB,CAAC;AAG9F,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,gBAAgB,CAAC;AAExD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAGnD,MAAM,MAAM,uBAAuB,GAAG,QAAQ,GAAG,CAAC,wBAAwB,GAAG,gBAAgB,CAAC,CAAC;AAE/F;;;GAGG;AACH,wBAAsB,mBAAmB,CACxC,IAAI,EAAE,uBAAuB,EAC7B,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,yBAAyB,CAAC,CAUpC"}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
import { resolveProvider } from "../../lib/getInferenceProviderMapping.js";
|
|
2
|
+
import { getProviderHelper } from "../../lib/getProviderHelper.js";
|
|
3
|
+
import { innerRequest } from "../../utils/request.js";
|
|
4
|
+
import { preparePayload } from "./utils.js";
|
|
5
|
+
/**
|
|
6
|
+
* This task reads some audio input and outputs the likelihood of classes.
|
|
7
|
+
* Recommended model: superb/hubert-large-superb-er
|
|
8
|
+
*/
|
|
9
|
+
export async function audioClassification(args, options) {
|
|
10
|
+
const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);
|
|
11
|
+
const providerHelper = getProviderHelper(provider, "audio-classification");
|
|
12
|
+
const payload = preparePayload(args);
|
|
13
|
+
const { data: res } = await innerRequest(payload, providerHelper, {
|
|
14
|
+
...options,
|
|
15
|
+
task: "audio-classification",
|
|
16
|
+
});
|
|
17
|
+
return providerHelper.getResponse(res);
|
|
18
|
+
}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
import type { BaseArgs, Options } from "../../types.js";
|
|
2
|
+
import type { LegacyAudioInput } from "./utils.js";
|
|
3
|
+
export type AudioToAudioArgs = (BaseArgs & {
|
|
4
|
+
/**
|
|
5
|
+
* Binary audio data
|
|
6
|
+
*/
|
|
7
|
+
inputs: Blob;
|
|
8
|
+
}) | LegacyAudioInput;
|
|
9
|
+
export interface AudioToAudioOutputElem {
|
|
10
|
+
/**
|
|
11
|
+
* The label for the audio output (model specific)
|
|
12
|
+
*/
|
|
13
|
+
label: string;
|
|
14
|
+
/**
|
|
15
|
+
* Base64 encoded audio output.
|
|
16
|
+
*/
|
|
17
|
+
audio: Blob;
|
|
18
|
+
}
|
|
19
|
+
export interface AudioToAudioOutput {
|
|
20
|
+
blob: string;
|
|
21
|
+
"content-type": string;
|
|
22
|
+
label: string;
|
|
23
|
+
}
|
|
24
|
+
/**
|
|
25
|
+
* This task reads some audio input and outputs one or multiple audio files.
|
|
26
|
+
* Example model: speechbrain/sepformer-wham does audio source separation.
|
|
27
|
+
*/
|
|
28
|
+
export declare function audioToAudio(args: AudioToAudioArgs, options?: Options): Promise<AudioToAudioOutput[]>;
|
|
29
|
+
//# sourceMappingURL=audioToAudio.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"audioToAudio.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/audioToAudio.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,gBAAgB,CAAC;AAExD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAGnD,MAAM,MAAM,gBAAgB,GACzB,CAAC,QAAQ,GAAG;IACZ;;OAEG;IACH,MAAM,EAAE,IAAI,CAAC;CACZ,CAAC,GACF,gBAAgB,CAAC;AAEpB,MAAM,WAAW,sBAAsB;IACtC;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IAEd;;OAEG;IACH,KAAK,EAAE,IAAI,CAAC;CACZ;AAED,MAAM,WAAW,kBAAkB;IAClC,IAAI,EAAE,MAAM,CAAC;IACb,cAAc,EAAE,MAAM,CAAC;IACvB,KAAK,EAAE,MAAM,CAAC;CACd;AAED;;;GAGG;AACH,wBAAsB,YAAY,CAAC,IAAI,EAAE,gBAAgB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,kBAAkB,EAAE,CAAC,CAU3G"}
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
import { resolveProvider } from "../../lib/getInferenceProviderMapping.js";
|
|
2
|
+
import { getProviderHelper } from "../../lib/getProviderHelper.js";
|
|
3
|
+
import { innerRequest } from "../../utils/request.js";
|
|
4
|
+
import { preparePayload } from "./utils.js";
|
|
5
|
+
/**
|
|
6
|
+
* This task reads some audio input and outputs one or multiple audio files.
|
|
7
|
+
* Example model: speechbrain/sepformer-wham does audio source separation.
|
|
8
|
+
*/
|
|
9
|
+
export async function audioToAudio(args, options) {
|
|
10
|
+
const model = "inputs" in args ? args.model : undefined;
|
|
11
|
+
const provider = await resolveProvider(args.provider, model);
|
|
12
|
+
const providerHelper = getProviderHelper(provider, "audio-to-audio");
|
|
13
|
+
const payload = preparePayload(args);
|
|
14
|
+
const { data: res } = await innerRequest(payload, providerHelper, {
|
|
15
|
+
...options,
|
|
16
|
+
task: "audio-to-audio",
|
|
17
|
+
});
|
|
18
|
+
return providerHelper.getResponse(res);
|
|
19
|
+
}
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import type { AutomaticSpeechRecognitionInput, AutomaticSpeechRecognitionOutput } from "@huggingface/tasks";
|
|
2
|
+
import type { BaseArgs, Options } from "../../types.js";
|
|
3
|
+
import type { LegacyAudioInput } from "./utils.js";
|
|
4
|
+
export type AutomaticSpeechRecognitionArgs = BaseArgs & (AutomaticSpeechRecognitionInput | LegacyAudioInput);
|
|
5
|
+
/**
|
|
6
|
+
* This task reads some audio input and outputs the said words within the audio files.
|
|
7
|
+
* Recommended model (english language): facebook/wav2vec2-large-960h-lv60-self
|
|
8
|
+
*/
|
|
9
|
+
export declare function automaticSpeechRecognition(args: AutomaticSpeechRecognitionArgs, options?: Options): Promise<AutomaticSpeechRecognitionOutput>;
|
|
10
|
+
//# sourceMappingURL=automaticSpeechRecognition.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"automaticSpeechRecognition.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/automaticSpeechRecognition.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,+BAA+B,EAAE,gCAAgC,EAAE,MAAM,oBAAoB,CAAC;AAI5G,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,gBAAgB,CAAC;AAExD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,MAAM,MAAM,8BAA8B,GAAG,QAAQ,GAAG,CAAC,+BAA+B,GAAG,gBAAgB,CAAC,CAAC;AAC7G;;;GAGG;AACH,wBAAsB,0BAA0B,CAC/C,IAAI,EAAE,8BAA8B,EACpC,OAAO,CAAC,EAAE,OAAO,GACf,OAAO,CAAC,gCAAgC,CAAC,CAa3C"}
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
import { resolveProvider } from "../../lib/getInferenceProviderMapping.js";
|
|
2
|
+
import { getProviderHelper } from "../../lib/getProviderHelper.js";
|
|
3
|
+
import { InferenceOutputError } from "../../lib/InferenceOutputError.js";
|
|
4
|
+
import { innerRequest } from "../../utils/request.js";
|
|
5
|
+
/**
|
|
6
|
+
* This task reads some audio input and outputs the said words within the audio files.
|
|
7
|
+
* Recommended model (english language): facebook/wav2vec2-large-960h-lv60-self
|
|
8
|
+
*/
|
|
9
|
+
export async function automaticSpeechRecognition(args, options) {
|
|
10
|
+
const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);
|
|
11
|
+
const providerHelper = getProviderHelper(provider, "automatic-speech-recognition");
|
|
12
|
+
const payload = await providerHelper.preparePayloadAsync(args);
|
|
13
|
+
const { data: res } = await innerRequest(payload, providerHelper, {
|
|
14
|
+
...options,
|
|
15
|
+
task: "automatic-speech-recognition",
|
|
16
|
+
});
|
|
17
|
+
const isValidOutput = typeof res?.text === "string";
|
|
18
|
+
if (!isValidOutput) {
|
|
19
|
+
throw new InferenceOutputError("Expected {text: string}");
|
|
20
|
+
}
|
|
21
|
+
return providerHelper.getResponse(res);
|
|
22
|
+
}
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import type { TextToSpeechInput } from "@huggingface/tasks";
|
|
2
|
+
import type { BaseArgs, Options } from "../../types.js";
|
|
3
|
+
type TextToSpeechArgs = BaseArgs & TextToSpeechInput;
|
|
4
|
+
/**
|
|
5
|
+
* This task synthesize an audio of a voice pronouncing a given text.
|
|
6
|
+
* Recommended model: espnet/kan-bayashi_ljspeech_vits
|
|
7
|
+
*/
|
|
8
|
+
export declare function textToSpeech(args: TextToSpeechArgs, options?: Options): Promise<Blob>;
|
|
9
|
+
export {};
|
|
10
|
+
//# sourceMappingURL=textToSpeech.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"textToSpeech.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/textToSpeech.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,iBAAiB,EAAE,MAAM,oBAAoB,CAAC;AAG5D,OAAO,KAAK,EAAE,QAAQ,EAAE,OAAO,EAAE,MAAM,gBAAgB,CAAC;AAExD,KAAK,gBAAgB,GAAG,QAAQ,GAAG,iBAAiB,CAAC;AAKrD;;;GAGG;AACH,wBAAsB,YAAY,CAAC,IAAI,EAAE,gBAAgB,EAAE,OAAO,CAAC,EAAE,OAAO,GAAG,OAAO,CAAC,IAAI,CAAC,CAQ3F"}
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
import { resolveProvider } from "../../lib/getInferenceProviderMapping.js";
|
|
2
|
+
import { getProviderHelper } from "../../lib/getProviderHelper.js";
|
|
3
|
+
import { innerRequest } from "../../utils/request.js";
|
|
4
|
+
/**
|
|
5
|
+
* This task synthesize an audio of a voice pronouncing a given text.
|
|
6
|
+
* Recommended model: espnet/kan-bayashi_ljspeech_vits
|
|
7
|
+
*/
|
|
8
|
+
export async function textToSpeech(args, options) {
|
|
9
|
+
const provider = await resolveProvider(args.provider, args.model, args.endpointUrl);
|
|
10
|
+
const providerHelper = getProviderHelper(provider, "text-to-speech");
|
|
11
|
+
const { data: res } = await innerRequest(args, providerHelper, {
|
|
12
|
+
...options,
|
|
13
|
+
task: "text-to-speech",
|
|
14
|
+
});
|
|
15
|
+
return providerHelper.getResponse(res);
|
|
16
|
+
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import type { BaseArgs, InferenceProvider, RequestArgs } from "../../types.js";
|
|
2
|
+
/**
|
|
3
|
+
* @deprecated
|
|
4
|
+
*/
|
|
5
|
+
export interface LegacyAudioInput {
|
|
6
|
+
data: Blob | ArrayBuffer;
|
|
7
|
+
provider?: InferenceProvider;
|
|
8
|
+
}
|
|
9
|
+
export declare function preparePayload(args: BaseArgs & ({
|
|
10
|
+
inputs: Blob;
|
|
11
|
+
} | LegacyAudioInput)): RequestArgs;
|
|
12
|
+
//# sourceMappingURL=utils.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"utils.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio/utils.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,QAAQ,EAAE,iBAAiB,EAAE,WAAW,EAAE,MAAM,gBAAgB,CAAC;AAG/E;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC,IAAI,EAAE,IAAI,GAAG,WAAW,CAAC;IACzB,QAAQ,CAAC,EAAE,iBAAiB,CAAC;CAC7B;AAED,wBAAgB,cAAc,CAAC,IAAI,EAAE,QAAQ,GAAG,CAAC;IAAE,MAAM,EAAE,IAAI,CAAA;CAAE,GAAG,gBAAgB,CAAC,GAAG,WAAW,CAOlG"}
|