huggingface-hub 0.32.6__tar.gz → 0.33.0__tar.gz

This diff compares the contents of two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of huggingface-hub might be problematic.

Files changed (156)
  1. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/PKG-INFO +1 -1
  2. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/__init__.py +1 -1
  3. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/_snapshot_download.py +2 -2
  4. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/file_download.py +1 -1
  5. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/hf_api.py +74 -46
  6. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_client.py +4 -5
  7. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/_async_client.py +4 -5
  8. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_mcp/mcp_client.py +19 -5
  9. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/__init__.py +15 -1
  10. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/_common.py +15 -3
  11. huggingface_hub-0.33.0/src/huggingface_hub/inference/_providers/featherless_ai.py +38 -0
  12. huggingface_hub-0.33.0/src/huggingface_hub/inference/_providers/groq.py +9 -0
  13. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/hf_inference.py +6 -2
  14. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/openai.py +3 -1
  15. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub.egg-info/PKG-INFO +1 -1
  16. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub.egg-info/SOURCES.txt +2 -0
  17. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/LICENSE +0 -0
  18. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/MANIFEST.in +0 -0
  19. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/README.md +0 -0
  20. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/pyproject.toml +0 -0
  21. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/setup.cfg +0 -0
  22. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/setup.py +0 -0
  23. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/_commit_api.py +0 -0
  24. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/_commit_scheduler.py +0 -0
  25. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/_inference_endpoints.py +0 -0
  26. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/_local_folder.py +0 -0
  27. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/_login.py +0 -0
  28. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/_oauth.py +0 -0
  29. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/_space_api.py +0 -0
  30. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/_tensorboard_logger.py +0 -0
  31. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/_upload_large_folder.py +0 -0
  32. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/_webhooks_payload.py +0 -0
  33. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/_webhooks_server.py +0 -0
  34. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/__init__.py +0 -0
  35. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/_cli_utils.py +0 -0
  36. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/delete_cache.py +0 -0
  37. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/download.py +0 -0
  38. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/env.py +0 -0
  39. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/huggingface_cli.py +0 -0
  40. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/lfs.py +0 -0
  41. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/repo.py +0 -0
  42. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/repo_files.py +0 -0
  43. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/scan_cache.py +0 -0
  44. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/tag.py +0 -0
  45. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/upload.py +0 -0
  46. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/upload_large_folder.py +0 -0
  47. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/user.py +0 -0
  48. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/commands/version.py +0 -0
  49. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/community.py +0 -0
  50. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/constants.py +0 -0
  51. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/dataclasses.py +0 -0
  52. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/errors.py +0 -0
  53. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/fastai_utils.py +0 -0
  54. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/hf_file_system.py +0 -0
  55. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/hub_mixin.py +0 -0
  56. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/__init__.py +0 -0
  57. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_common.py +0 -0
  58. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/__init__.py +0 -0
  59. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/__init__.py +0 -0
  60. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/audio_classification.py +0 -0
  61. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/audio_to_audio.py +0 -0
  62. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +0 -0
  63. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/base.py +0 -0
  64. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/chat_completion.py +0 -0
  65. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/depth_estimation.py +0 -0
  66. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/document_question_answering.py +0 -0
  67. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/feature_extraction.py +0 -0
  68. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/fill_mask.py +0 -0
  69. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/image_classification.py +0 -0
  70. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/image_segmentation.py +0 -0
  71. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/image_to_image.py +0 -0
  72. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/image_to_text.py +0 -0
  73. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/object_detection.py +0 -0
  74. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/question_answering.py +0 -0
  75. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/sentence_similarity.py +0 -0
  76. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/summarization.py +0 -0
  77. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/table_question_answering.py +0 -0
  78. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/text2text_generation.py +0 -0
  79. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/text_classification.py +0 -0
  80. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/text_generation.py +0 -0
  81. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/text_to_audio.py +0 -0
  82. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/text_to_image.py +0 -0
  83. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/text_to_speech.py +0 -0
  84. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/text_to_video.py +0 -0
  85. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/token_classification.py +0 -0
  86. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/translation.py +0 -0
  87. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/video_classification.py +0 -0
  88. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/visual_question_answering.py +0 -0
  89. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/zero_shot_classification.py +0 -0
  90. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +0 -0
  91. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +0 -0
  92. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_mcp/__init__.py +0 -0
  93. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_mcp/_cli_hacks.py +0 -0
  94. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_mcp/agent.py +0 -0
  95. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_mcp/cli.py +0 -0
  96. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_mcp/constants.py +0 -0
  97. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_mcp/types.py +0 -0
  98. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_mcp/utils.py +0 -0
  99. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/black_forest_labs.py +0 -0
  100. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/cerebras.py +0 -0
  101. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/cohere.py +0 -0
  102. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/fal_ai.py +0 -0
  103. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/fireworks_ai.py +0 -0
  104. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/hyperbolic.py +0 -0
  105. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/nebius.py +0 -0
  106. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/novita.py +0 -0
  107. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/nscale.py +0 -0
  108. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/replicate.py +0 -0
  109. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/sambanova.py +0 -0
  110. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/together.py +0 -0
  111. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference_api.py +0 -0
  112. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/keras_mixin.py +0 -0
  113. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/lfs.py +0 -0
  114. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/py.typed +0 -0
  115. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/repocard.py +0 -0
  116. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/repocard_data.py +0 -0
  117. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/repository.py +0 -0
  118. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/serialization/__init__.py +0 -0
  119. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/serialization/_base.py +0 -0
  120. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/serialization/_dduf.py +0 -0
  121. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/serialization/_tensorflow.py +0 -0
  122. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/serialization/_torch.py +0 -0
  123. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/templates/datasetcard_template.md +0 -0
  124. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/templates/modelcard_template.md +0 -0
  125. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/__init__.py +0 -0
  126. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_auth.py +0 -0
  127. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_cache_assets.py +0 -0
  128. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_cache_manager.py +0 -0
  129. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_chunk_utils.py +0 -0
  130. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_datetime.py +0 -0
  131. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_deprecation.py +0 -0
  132. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_experimental.py +0 -0
  133. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_fixes.py +0 -0
  134. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_git_credential.py +0 -0
  135. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_headers.py +0 -0
  136. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_hf_folder.py +0 -0
  137. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_http.py +0 -0
  138. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_lfs.py +0 -0
  139. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_pagination.py +0 -0
  140. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_paths.py +0 -0
  141. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_runtime.py +0 -0
  142. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_safetensors.py +0 -0
  143. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_subprocess.py +0 -0
  144. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_telemetry.py +0 -0
  145. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_typing.py +0 -0
  146. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_validators.py +0 -0
  147. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/_xet.py +0 -0
  148. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/endpoint_helpers.py +0 -0
  149. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/insecure_hashlib.py +0 -0
  150. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/logging.py +0 -0
  151. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/sha.py +0 -0
  152. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/utils/tqdm.py +0 -0
  153. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub.egg-info/dependency_links.txt +0 -0
  154. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub.egg-info/entry_points.txt +0 -0
  155. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub.egg-info/requires.txt +0 -0
  156. {huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub.egg-info/top_level.txt +0 -0

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: huggingface_hub
-Version: 0.32.6
+Version: 0.33.0
 Summary: Client library to download and publish models, datasets and other repos on the huggingface.co hub
 Home-page: https://github.com/huggingface/huggingface_hub
 Author: Hugging Face, Inc.

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/__init__.py

@@ -46,7 +46,7 @@ import sys
 from typing import TYPE_CHECKING


-__version__ = "0.32.6"
+__version__ = "0.33.0"

 # Alphabetical order of definitions is ensured in tests
 # WARNING: any comment added in this dictionary definition will be lost when

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/_snapshot_download.py

@@ -1,6 +1,6 @@
 import os
 from pathlib import Path
-from typing import Dict, Iterable, List, Literal, Optional, Union
+from typing import Dict, Iterable, List, Literal, Optional, Type, Union

 import requests
 from tqdm.auto import tqdm as base_tqdm
@@ -44,7 +44,7 @@ def snapshot_download(
     allow_patterns: Optional[Union[List[str], str]] = None,
     ignore_patterns: Optional[Union[List[str], str]] = None,
     max_workers: int = 8,
-    tqdm_class: Optional[base_tqdm] = None,
+    tqdm_class: Optional[Type[base_tqdm]] = None,
     headers: Optional[Dict[str, str]] = None,
     endpoint: Optional[str] = None,
     # Deprecated args
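
The `tqdm_class` fix above is purely a type-annotation correction: callers pass a tqdm class, not an instance. A minimal sketch of what the corrected `Optional[Type[base_tqdm]]` signature accepts (repo id is just an example):

```python
from huggingface_hub import snapshot_download
from tqdm.auto import tqdm


class QuietTqdm(tqdm):
    """tqdm subclass that refreshes at most every 2 seconds."""

    def __init__(self, *args, **kwargs):
        kwargs.setdefault("mininterval", 2.0)
        super().__init__(*args, **kwargs)


# Pass the class itself, not an instance; huggingface_hub instantiates it internally.
snapshot_download("gpt2", tqdm_class=QuietTqdm)
```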

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/file_download.py

@@ -1706,7 +1706,7 @@ def _download_to_tmp_and_move(
         _check_disk_space(expected_size, destination_path.parent)

     if xet_file_data is not None and is_xet_available():
-        logger.info("Xet Storage is enabled for this repo. Downloading file from Xet Storage..")
+        logger.debug("Xet Storage is enabled for this repo. Downloading file from Xet Storage..")
         xet_get(
             incomplete_path=incomplete_path,
             xet_file_data=xet_file_data,

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/hf_api.py

@@ -28,6 +28,7 @@ from functools import wraps
 from itertools import islice
 from pathlib import Path
 from typing import (
+    TYPE_CHECKING,
     Any,
     BinaryIO,
     Callable,
@@ -38,6 +39,7 @@ from typing import (
     Literal,
     Optional,
     Tuple,
+    Type,
     TypeVar,
     Union,
     overload,
@@ -134,8 +136,11 @@ from .utils._typing import CallableT
 from .utils.endpoint_helpers import _is_emission_within_threshold


+if TYPE_CHECKING:
+    from .inference._providers import PROVIDER_T
+
 R = TypeVar("R")  # Return type
-CollectionItemType_T = Literal["model", "dataset", "space", "paper"]
+CollectionItemType_T = Literal["model", "dataset", "space", "paper", "collection"]

 ExpandModelProperty_T = Literal[
     "author",
@@ -708,21 +713,26 @@ class RepoFolder:

 @dataclass
 class InferenceProviderMapping:
-    hf_model_id: str
-    status: Literal["live", "staging"]
-    provider_id: str
+    provider: "PROVIDER_T"  # Provider name
+    hf_model_id: str  # ID of the model on the Hugging Face Hub
+    provider_id: str  # ID of the model on the provider's side
+    status: Literal["error", "live", "staging"]
     task: str

     adapter: Optional[str] = None
     adapter_weights_path: Optional[str] = None
+    type: Optional[Literal["single-model", "tag-filter"]] = None

     def __init__(self, **kwargs):
+        self.provider = kwargs.pop("provider")
         self.hf_model_id = kwargs.pop("hf_model_id")
-        self.status = kwargs.pop("status")
         self.provider_id = kwargs.pop("providerId")
+        self.status = kwargs.pop("status")
         self.task = kwargs.pop("task")
+
         self.adapter = kwargs.pop("adapter", None)
         self.adapter_weights_path = kwargs.pop("adapterWeightsPath", None)
+        self.type = kwargs.pop("type", None)
         self.__dict__.update(**kwargs)

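For illustration, a sketch of constructing the reworked dataclass by hand. Note the custom `__init__` consumes the Hub's camelCase keys (`providerId`, `adapterWeightsPath`) and that `provider` is now required; the provider-side model id below is hypothetical:

```python
from huggingface_hub.hf_api import InferenceProviderMapping

mapping = InferenceProviderMapping(
    provider="groq",
    hf_model_id="meta-llama/Llama-3.3-70B-Instruct",
    providerId="llama-3.3-70b",  # hypothetical provider-side id
    status="live",
    task="conversational",
)
print(mapping.provider, mapping.provider_id, mapping.status)
```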
@@ -764,12 +774,10 @@ class ModelInfo:
             If so, whether there is manual or automatic approval.
         gguf (`Dict`, *optional*):
             GGUF information of the model.
-        inference (`Literal["cold", "frozen", "warm"]`, *optional*):
-            Status of the model on the inference API.
-            Warm models are available for immediate use. Cold models will be loaded on first inference call.
-            Frozen models are not available in Inference API.
-        inference_provider_mapping (`Dict`, *optional*):
-            Model's inference provider mapping.
+        inference (`Literal["warm"]`, *optional*):
+            Status of the model on Inference Providers. Warm if the model is served by at least one provider.
+        inference_provider_mapping (`List[InferenceProviderMapping]`, *optional*):
+            A list of [`InferenceProviderMapping`] ordered after the user's provider order.
         likes (`int`):
             Number of likes of the model.
         library_name (`str`, *optional*):
@@ -814,8 +822,8 @@ class ModelInfo:
     downloads_all_time: Optional[int]
     gated: Optional[Literal["auto", "manual", False]]
     gguf: Optional[Dict]
-    inference: Optional[Literal["warm", "cold", "frozen"]]
-    inference_provider_mapping: Optional[Dict[str, InferenceProviderMapping]]
+    inference: Optional[Literal["warm"]]
+    inference_provider_mapping: Optional[List[InferenceProviderMapping]]
     likes: Optional[int]
    library_name: Optional[str]
     tags: Optional[List[str]]
@@ -851,14 +859,25 @@ class ModelInfo:
         self.gguf = kwargs.pop("gguf", None)

         self.inference = kwargs.pop("inference", None)
-        self.inference_provider_mapping = kwargs.pop("inferenceProviderMapping", None)
-        if self.inference_provider_mapping:
-            self.inference_provider_mapping = {
-                provider: InferenceProviderMapping(
-                    **{**value, "hf_model_id": self.id}
-                )  # little hack to simplify Inference Providers logic
-                for provider, value in self.inference_provider_mapping.items()
-            }
+
+        # little hack to simplify Inference Providers logic and make it backward and forward compatible
+        # right now, API returns a dict on model_info and a list on list_models. Let's harmonize to list.
+        mapping = kwargs.pop("inferenceProviderMapping", None)
+        if isinstance(mapping, list):
+            self.inference_provider_mapping = [
+                InferenceProviderMapping(**{**value, "hf_model_id": self.id}) for value in mapping
+            ]
+        elif isinstance(mapping, dict):
+            self.inference_provider_mapping = [
+                InferenceProviderMapping(**{**value, "hf_model_id": self.id, "provider": provider})
+                for provider, value in mapping.items()
+            ]
+        elif mapping is None:
+            self.inference_provider_mapping = None
+        else:
+            raise ValueError(
+                f"Unexpected type for `inferenceProviderMapping`. Expecting `dict` or `list`. Got {mapping}."
+            )

         self.tags = kwargs.pop("tags", None)
         self.pipeline_tag = kwargs.pop("pipeline_tag", None)
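
The net effect for callers: `inference_provider_mapping` is now always a list of `InferenceProviderMapping` objects, whichever shape the API returned. A hedged usage sketch (model id is an example, and assumes the `inferenceProviderMapping` expand property):

```python
from huggingface_hub import HfApi

info = HfApi().model_info("deepseek-ai/DeepSeek-R1", expand=["inferenceProviderMapping"])
for m in info.inference_provider_mapping or []:
    # 0.32.x code iterated a dict: `for provider, m in mapping.items()`
    print(m.provider, m.provider_id, m.status, m.task)
```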
@@ -1169,16 +1188,16 @@ class SpaceInfo:
 @dataclass
 class CollectionItem:
     """
-    Contains information about an item of a Collection (model, dataset, Space or paper).
+    Contains information about an item of a Collection (model, dataset, Space, paper or collection).

     Attributes:
         item_object_id (`str`):
             Unique ID of the item in the collection.
         item_id (`str`):
-            ID of the underlying object on the Hub. Can be either a repo_id or a paper id
-            e.g. `"jbilcke-hf/ai-comic-factory"`, `"2307.09288"`.
+            ID of the underlying object on the Hub. Can be either a repo_id, a paper id or a collection slug.
+            e.g. `"jbilcke-hf/ai-comic-factory"`, `"2307.09288"`, `"celinah/cerebras-function-calling-682607169c35fbfa98b30b9a"`.
         item_type (`str`):
-            Type of the underlying object. Can be one of `"model"`, `"dataset"`, `"space"` or `"paper"`.
+            Type of the underlying object. Can be one of `"model"`, `"dataset"`, `"space"`, `"paper"` or `"collection"`.
         position (`int`):
             Position of the item in the collection.
         note (`str`, *optional*):
@@ -1192,10 +1211,20 @@ class CollectionItem:
     note: Optional[str] = None

     def __init__(
-        self, _id: str, id: str, type: CollectionItemType_T, position: int, note: Optional[Dict] = None, **kwargs
+        self,
+        _id: str,
+        id: str,
+        type: CollectionItemType_T,
+        position: int,
+        note: Optional[Dict] = None,
+        **kwargs,
     ) -> None:
         self.item_object_id: str = _id  # id in database
         self.item_id: str = id  # repo_id or paper id
+        # if the item is a collection, override item_id with the slug
+        slug = kwargs.get("slug")
+        if slug is not None:
+            self.item_id = slug  # collection slug
         self.item_type: CollectionItemType_T = type
         self.position: int = position
         self.note: str = note["text"] if note is not None else None
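
Since collections can now contain other collections, `item_id` holds the nested collection's slug for such items. A small sketch, reusing the slug quoted in the docstring above:

```python
from huggingface_hub import get_collection

collection = get_collection("celinah/cerebras-function-calling-682607169c35fbfa98b30b9a")
for item in collection.items:
    # item_type may now be "collection", with item_id set to the nested slug
    print(item.item_type, item.item_id)
```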
@@ -1825,7 +1854,8 @@ class HfApi:
         filter: Union[str, Iterable[str], None] = None,
         author: Optional[str] = None,
         gated: Optional[bool] = None,
-        inference: Optional[Literal["cold", "frozen", "warm"]] = None,
+        inference: Optional[Literal["warm"]] = None,
+        inference_provider: Optional[Union[Literal["all"], "PROVIDER_T", List["PROVIDER_T"]]] = None,
         library: Optional[Union[str, List[str]]] = None,
         language: Optional[Union[str, List[str]]] = None,
         model_name: Optional[str] = None,
@@ -1859,10 +1889,11 @@ class HfApi:
                 A boolean to filter models on the Hub that are gated or not. By default, all models are returned.
                 If `gated=True` is passed, only gated models are returned.
                 If `gated=False` is passed, only non-gated models are returned.
-            inference (`Literal["cold", "frozen", "warm"]`, *optional*):
-                A string to filter models on the Hub by their state on the Inference API.
-                Warm models are available for immediate use. Cold models will be loaded on first inference call.
-                Frozen models are not available in Inference API.
+            inference (`Literal["warm"]`, *optional*):
+                If "warm", filter models on the Hub currently served by at least one provider.
+            inference_provider (`Literal["all"]` or `str`, *optional*):
+                A string to filter models on the Hub that are served by a specific provider.
+                Pass `"all"` to get all models served by at least one provider.
             library (`str` or `List`, *optional*):
                 A string or list of strings of foundational libraries models were
                 originally trained from, such as pytorch, tensorflow, or allennlp.
@@ -1922,7 +1953,7 @@ class HfApi:
         Returns:
             `Iterable[ModelInfo]`: an iterable of [`huggingface_hub.hf_api.ModelInfo`] objects.

-        Example usage with the `filter` argument:
+        Example:

         ```python
         >>> from huggingface_hub import HfApi
@@ -1932,24 +1963,19 @@ class HfApi:
         # List all models
         >>> api.list_models()

-        # List only the text classification models
+        # List text classification models
         >>> api.list_models(filter="text-classification")

-        # List only models from the AllenNLP library
-        >>> api.list_models(filter="allennlp")
-        ```
-
-        Example usage with the `search` argument:
+        # List models from the KerasHub library
+        >>> api.list_models(filter="keras-hub")

-        ```python
-        >>> from huggingface_hub import HfApi
-
-        >>> api = HfApi()
+        # List models served by Cohere
+        >>> api.list_models(inference_provider="cohere")

-        # List all models with "bert" in their name
+        # List models with "bert" in their name
         >>> api.list_models(search="bert")

-        # List all models with "bert" in their name made by google
+        # List models with "bert" in their name and pushed by google
         >>> api.list_models(search="bert", author="google")
         ```
         """
@@ -1992,6 +2018,8 @@ class HfApi:
             params["gated"] = gated
         if inference is not None:
             params["inference"] = inference
+        if inference_provider is not None:
+            params["inference_provider"] = inference_provider
         if pipeline_tag:
             params["pipeline_tag"] = pipeline_tag
         search_list = []
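
As the wiring above shows, `inference_provider` is forwarded as a plain query parameter. A short sketch combining it with existing filters (values are examples):

```python
from huggingface_hub import HfApi

api = HfApi()
# Text-generation models currently served by Groq, capped at 5 results.
for model in api.list_models(inference_provider="groq", pipeline_tag="text-generation", limit=5):
    print(model.id)
```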
@@ -4482,7 +4510,7 @@ class HfApi:
             isinstance(addition.path_or_fileobj, io.BufferedIOBase) for addition in new_lfs_additions_to_upload
         )
         if xet_enabled and not has_buffered_io_data and is_xet_available():
-            logger.info("Uploading files using Xet Storage..")
+            logger.debug("Uploading files using Xet Storage..")
             _upload_xet_files(**upload_kwargs, create_pr=create_pr)  # type: ignore [arg-type]
         else:
             if xet_enabled and is_xet_available():
@@ -5523,7 +5551,7 @@ class HfApi:
         allow_patterns: Optional[Union[List[str], str]] = None,
         ignore_patterns: Optional[Union[List[str], str]] = None,
         max_workers: int = 8,
-        tqdm_class: Optional[base_tqdm] = None,
+        tqdm_class: Optional[Type[base_tqdm]] = None,
         # Deprecated args
         local_dir_use_symlinks: Union[bool, Literal["auto"]] = "auto",
         resume_download: Optional[bool] = None,

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_client.py

@@ -134,7 +134,7 @@ class InferenceClient:
             path will be appended to the base URL (see the [TGI Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api)
             documentation for details). When passing a URL as `model`, the client will not append any suffix path to it.
         provider (`str`, *optional*):
-            Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"fireworks-ai"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, `"sambanova"` or `"together"`.
+            Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, `"sambanova"` or `"together"`.
             Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
             If model is a URL or `base_url` is passed, then `provider` is not used.
         token (`str`, *optional*):
@@ -1685,9 +1685,8 @@ class InferenceClient:
         model_id = model or self.model
         provider_helper = get_provider_helper(self.provider, task="table-question-answering", model=model_id)
         request_parameters = provider_helper.prepare_request(
-            inputs=None,
+            inputs={"query": query, "table": table},
             parameters={"model": model, "padding": padding, "sequential": sequential, "truncation": truncation},
-            extra_payload={"query": query, "table": table},
             headers=self.headers,
             model=model_id,
             api_key=self.token,
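
The change above moves `query` and `table` from `extra_payload` into the request `inputs`; the public API is unchanged. A typical call, assuming the usual TAPAS checkpoint is available on the chosen provider:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="hf-inference")
answer = client.table_question_answering(
    table={"Repository": ["transformers", "datasets"], "Stars": ["130000", "19000"]},
    query="Which repository has the most stars?",
    model="google/tapas-base-finetuned-wtq",
)
print(answer.answer)
```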
@@ -3196,7 +3195,7 @@ class InferenceClient:
         return ZeroShotImageClassificationOutputElement.parse_obj_as_list(response)

     @_deprecate_method(
-        version="0.33.0",
+        version="0.35.0",
         message=(
             "HF Inference API is getting revamped and will only support warm models in the future (no cold start allowed)."
             " Use `HfApi.list_models(..., inference_provider='...')` to list warm models per provider."
@@ -3386,7 +3385,7 @@ class InferenceClient:
         return response.status_code == 200

     @_deprecate_method(
-        version="0.33.0",
+        version="0.35.0",
         message=(
             "HF Inference API is getting revamped and will only support warm models in the future (no cold start allowed)."
             " Use `HfApi.model_info` to get the model status both with HF Inference API and external providers."

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_generated/_async_client.py

@@ -122,7 +122,7 @@ class AsyncInferenceClient:
             path will be appended to the base URL (see the [TGI Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api)
             documentation for details). When passing a URL as `model`, the client will not append any suffix path to it.
         provider (`str`, *optional*):
-            Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"fireworks-ai"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, `"sambanova"` or `"together"`.
+            Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, `"sambanova"` or `"together"`.
             Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
             If model is a URL or `base_url` is passed, then `provider` is not used.
         token (`str`, *optional*):
@@ -1737,9 +1737,8 @@ class AsyncInferenceClient:
         model_id = model or self.model
         provider_helper = get_provider_helper(self.provider, task="table-question-answering", model=model_id)
         request_parameters = provider_helper.prepare_request(
-            inputs=None,
+            inputs={"query": query, "table": table},
             parameters={"model": model, "padding": padding, "sequential": sequential, "truncation": truncation},
-            extra_payload={"query": query, "table": table},
             headers=self.headers,
             model=model_id,
             api_key=self.token,
@@ -3260,7 +3259,7 @@ class AsyncInferenceClient:
         return ZeroShotImageClassificationOutputElement.parse_obj_as_list(response)

     @_deprecate_method(
-        version="0.33.0",
+        version="0.35.0",
         message=(
             "HF Inference API is getting revamped and will only support warm models in the future (no cold start allowed)."
             " Use `HfApi.list_models(..., inference_provider='...')` to list warm models per provider."
@@ -3496,7 +3495,7 @@ class AsyncInferenceClient:
         return response.status == 200

     @_deprecate_method(
-        version="0.33.0",
+        version="0.35.0",
         message=(
             "HF Inference API is getting revamped and will only support warm models in the future (no cold start allowed)."
             " Use `HfApi.model_info` to get the model status both with HF Inference API and external providers."

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_mcp/mcp_client.py

@@ -310,7 +310,19 @@ class MCPClient:
         # Process tool calls one by one
         for tool_call in final_tool_calls.values():
             function_name = tool_call.function.name
-            function_args = json.loads(tool_call.function.arguments or "{}")
+            try:
+                function_args = json.loads(tool_call.function.arguments or "{}")
+            except json.JSONDecodeError as err:
+                tool_message = {
+                    "role": "tool",
+                    "tool_call_id": tool_call.id,
+                    "name": function_name,
+                    "content": f"Invalid JSON generated by the model: {err}",
+                }
+                tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
+                messages.append(tool_message_as_obj)
+                yield tool_message_as_obj
+                continue  # move to next tool call

             tool_message = {"role": "tool", "tool_call_id": tool_call.id, "content": "", "name": function_name}

@@ -324,11 +336,13 @@ class MCPClient:
             # Execute tool call with the appropriate session
             session = self.sessions.get(function_name)
             if session is not None:
-                result = await session.call_tool(function_name, function_args)
-                tool_message["content"] = format_result(result)
+                try:
+                    result = await session.call_tool(function_name, function_args)
+                    tool_message["content"] = format_result(result)
+                except Exception as err:
+                    tool_message["content"] = f"Error: MCP tool call failed with error message: {err}"
             else:
-                error_msg = f"Error: No session found for tool: {function_name}"
-                tool_message["content"] = error_msg
+                tool_message["content"] = f"Error: No session found for tool: {function_name}"

             # Yield tool message
             tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
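
The new failure path, isolated into a runnable sketch (tool name and call id are made up): instead of raising on malformed tool-call arguments, the client now reports the parse error back to the model as a `tool` message and moves on.

```python
import json

arguments = '{"city": "Paris"'  # truncated JSON, as a model might emit
try:
    function_args = json.loads(arguments or "{}")
except json.JSONDecodeError as err:
    tool_message = {
        "role": "tool",
        "tool_call_id": "call_0",  # hypothetical id
        "name": "get_weather",  # hypothetical tool name
        "content": f"Invalid JSON generated by the model: {err}",
    }
    print(tool_message["content"])
```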

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/__init__.py

@@ -1,5 +1,9 @@
 from typing import Dict, Literal, Optional, Union

+from huggingface_hub.inference._providers.featherless_ai import (
+    FeatherlessConversationalTask,
+    FeatherlessTextGenerationTask,
+)
 from huggingface_hub.utils import logging

 from ._common import TaskProviderHelper, _fetch_inference_provider_mapping
@@ -13,6 +17,7 @@ from .fal_ai import (
     FalAITextToVideoTask,
 )
 from .fireworks_ai import FireworksAIConversationalTask
+from .groq import GroqConversationalTask
 from .hf_inference import (
     HFInferenceBinaryInputTask,
     HFInferenceConversational,
@@ -42,7 +47,9 @@ PROVIDER_T = Literal[
     "cerebras",
     "cohere",
     "fal-ai",
+    "featherless-ai",
     "fireworks-ai",
+    "groq",
     "hf-inference",
     "hyperbolic",
     "nebius",
@@ -72,9 +79,16 @@ PROVIDERS: Dict[PROVIDER_T, Dict[str, TaskProviderHelper]] = {
         "text-to-speech": FalAITextToSpeechTask(),
         "text-to-video": FalAITextToVideoTask(),
     },
+    "featherless-ai": {
+        "conversational": FeatherlessConversationalTask(),
+        "text-generation": FeatherlessTextGenerationTask(),
+    },
     "fireworks-ai": {
         "conversational": FireworksAIConversationalTask(),
     },
+    "groq": {
+        "conversational": GroqConversationalTask(),
+    },
     "hf-inference": {
         "text-to-image": HFInferenceTask("text-to-image"),
         "conversational": HFInferenceConversational(),
@@ -174,7 +188,7 @@ def get_provider_helper(
         if model is None:
             raise ValueError("Specifying a model is required when provider is 'auto'")
         provider_mapping = _fetch_inference_provider_mapping(model)
-        provider = next(iter(provider_mapping))
+        provider = next(iter(provider_mapping)).provider

     provider_tasks = PROVIDERS.get(provider)  # type: ignore
     if provider_tasks is None:
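
With the mapping now a list ordered by the user's provider preferences, resolving provider="auto" reduces to taking the first entry's `.provider`. In client terms (model id is an example):

```python
from huggingface_hub import InferenceClient

client = InferenceClient()  # provider defaults to "auto": first entry of the user-ordered mapping
resp = client.chat_completion(
    model="deepseek-ai/DeepSeek-R1",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(resp.choices[0].message.content)
```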

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/_common.py

@@ -1,5 +1,5 @@
 from functools import lru_cache
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, List, Optional, Union

 from huggingface_hub import constants
 from huggingface_hub.hf_api import InferenceProviderMapping
@@ -9,6 +9,7 @@ from huggingface_hub.utils import build_hf_headers, get_token, logging

 logger = logging.get_logger(__name__)

+
 # Dev purposes only.
 # If you want to try to run inference for a new model locally before it's registered on huggingface.co
 # for a given Inference Provider, you can add it to the following dictionary.
@@ -24,6 +25,7 @@ HARDCODED_MODEL_INFERENCE_MAPPING: Dict[str, Dict[str, InferenceProviderMapping]
     "cohere": {},
     "fal-ai": {},
     "fireworks-ai": {},
+    "groq": {},
     "hf-inference": {},
     "hyperbolic": {},
     "nebius": {},
@@ -124,7 +126,12 @@ class TaskProviderHelper:
         if HARDCODED_MODEL_INFERENCE_MAPPING.get(self.provider, {}).get(model):
             return HARDCODED_MODEL_INFERENCE_MAPPING[self.provider][model]

-        provider_mapping = _fetch_inference_provider_mapping(model).get(self.provider)
+        provider_mapping = None
+        for mapping in _fetch_inference_provider_mapping(model):
+            if mapping.provider == self.provider:
+                provider_mapping = mapping
+                break
+
         if provider_mapping is None:
             raise ValueError(f"Model {model} is not supported by provider {self.provider}.")

@@ -138,6 +145,11 @@ class TaskProviderHelper:
             logger.warning(
                 f"Model {model} is in staging mode for provider {self.provider}. Meant for test purposes only."
             )
+        if provider_mapping.status == "error":
+            logger.warning(
+                f"Our latest automated health check on model '{model}' for provider '{self.provider}' did not complete successfully. "
+                "Inference call might fail."
+            )
         return provider_mapping

     def _prepare_headers(self, headers: Dict, api_key: str) -> Dict:
@@ -236,7 +248,7 @@ class BaseTextGenerationTask(TaskProviderHelper):


 @lru_cache(maxsize=None)
-def _fetch_inference_provider_mapping(model: str) -> Dict:
+def _fetch_inference_provider_mapping(model: str) -> List["InferenceProviderMapping"]:
     """
     Fetch provider mappings for a model from the Hub.
     """

huggingface_hub-0.33.0/src/huggingface_hub/inference/_providers/featherless_ai.py

@@ -0,0 +1,38 @@
+from typing import Any, Dict, Optional, Union
+
+from huggingface_hub.hf_api import InferenceProviderMapping
+from huggingface_hub.inference._common import RequestParameters, _as_dict
+
+from ._common import BaseConversationalTask, BaseTextGenerationTask, filter_none
+
+
+_PROVIDER = "featherless-ai"
+_BASE_URL = "https://api.featherless.ai"
+
+
+class FeatherlessTextGenerationTask(BaseTextGenerationTask):
+    def __init__(self):
+        super().__init__(provider=_PROVIDER, base_url=_BASE_URL)
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[Dict]:
+        params = filter_none(parameters.copy())
+        params["max_tokens"] = params.pop("max_new_tokens", None)
+
+        return {"prompt": inputs, **params, "model": provider_mapping_info.provider_id}
+
+    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+        output = _as_dict(response)["choices"][0]
+        return {
+            "generated_text": output["text"],
+            "details": {
+                "finish_reason": output.get("finish_reason"),
+                "seed": output.get("seed"),
+            },
+        }
+
+
+class FeatherlessConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider=_PROVIDER, base_url=_BASE_URL)
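
A hedged usage sketch for the new provider; note in `_prepare_payload_as_dict` above that `max_new_tokens` is renamed to Featherless's `max_tokens`. The model id is illustrative:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="featherless-ai")
print(
    client.text_generation(
        "Once upon a time,",
        model="meta-llama/Llama-3.1-8B-Instruct",  # any Featherless-served model
        max_new_tokens=20,
    )
)
```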

huggingface_hub-0.33.0/src/huggingface_hub/inference/_providers/groq.py

@@ -0,0 +1,9 @@
+from ._common import BaseConversationalTask
+
+
+class GroqConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="groq", base_url="https://api.groq.com")
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/openai/v1/chat/completions"
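
Groq exposes an OpenAI-compatible API, hence the hardcoded `/openai/v1/chat/completions` route; everything else is inherited from `BaseConversationalTask`. A usage sketch (model id is an example of a Groq-served model):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="groq")
resp = client.chat_completion(
    model="meta-llama/Llama-3.3-70B-Instruct",
    messages=[{"role": "user", "content": "Say hello in one word."}],
)
print(resp.choices[0].message.content)
```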

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/hf_inference.py

@@ -26,7 +26,9 @@ class HFInferenceTask(TaskProviderHelper):

     def _prepare_mapping_info(self, model: Optional[str]) -> InferenceProviderMapping:
         if model is not None and model.startswith(("http://", "https://")):
-            return InferenceProviderMapping(providerId=model, hf_model_id=model, task=self.task, status="live")
+            return InferenceProviderMapping(
+                provider="hf-inference", providerId=model, hf_model_id=model, task=self.task, status="live"
+            )
         model_id = model if model is not None else _fetch_recommended_models().get(self.task)
         if model_id is None:
             raise ValueError(
@@ -34,7 +36,9 @@ class HFInferenceTask(TaskProviderHelper):
                 " explicitly. Visit https://huggingface.co/tasks for more info."
             )
         _check_supported_task(model_id, self.task)
-        return InferenceProviderMapping(providerId=model_id, hf_model_id=model_id, task=self.task, status="live")
+        return InferenceProviderMapping(
+            provider="hf-inference", providerId=model_id, hf_model_id=model_id, task=self.task, status="live"
+        )

     def _prepare_url(self, api_key: str, mapped_model: str) -> str:
         # hf-inference provider can handle URLs (e.g. Inference Endpoints or TGI deployment)

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub/inference/_providers/openai.py

@@ -20,4 +20,6 @@ class OpenAIConversationalTask(BaseConversationalTask):
     def _prepare_mapping_info(self, model: Optional[str]) -> InferenceProviderMapping:
         if model is None:
             raise ValueError("Please provide an OpenAI model ID, e.g. `gpt-4o` or `o1`.")
-        return InferenceProviderMapping(providerId=model, task="conversational", status="live", hf_model_id=model)
+        return InferenceProviderMapping(
+            provider="openai", providerId=model, task="conversational", status="live", hf_model_id=model
+        )

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: huggingface-hub
-Version: 0.32.6
+Version: 0.33.0
 Summary: Client library to download and publish models, datasets and other repos on the huggingface.co hub
 Home-page: https://github.com/huggingface/huggingface_hub
 Author: Hugging Face, Inc.

{huggingface_hub-0.32.6 → huggingface_hub-0.33.0}/src/huggingface_hub.egg-info/SOURCES.txt

@@ -105,7 +105,9 @@ src/huggingface_hub/inference/_providers/black_forest_labs.py
 src/huggingface_hub/inference/_providers/cerebras.py
 src/huggingface_hub/inference/_providers/cohere.py
 src/huggingface_hub/inference/_providers/fal_ai.py
+src/huggingface_hub/inference/_providers/featherless_ai.py
 src/huggingface_hub/inference/_providers/fireworks_ai.py
+src/huggingface_hub/inference/_providers/groq.py
 src/huggingface_hub/inference/_providers/hf_inference.py
 src/huggingface_hub/inference/_providers/hyperbolic.py
 src/huggingface_hub/inference/_providers/nebius.py