huggingface-hub 0.35.1 (py3-none-any.whl) → 1.0.0rc1 (py3-none-any.whl)

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.


Files changed (127)
  1. huggingface_hub/__init__.py +28 -45
  2. huggingface_hub/_commit_api.py +28 -28
  3. huggingface_hub/_commit_scheduler.py +11 -8
  4. huggingface_hub/_inference_endpoints.py +8 -8
  5. huggingface_hub/_jobs_api.py +20 -20
  6. huggingface_hub/_login.py +13 -39
  7. huggingface_hub/_oauth.py +8 -8
  8. huggingface_hub/_snapshot_download.py +14 -28
  9. huggingface_hub/_space_api.py +4 -4
  10. huggingface_hub/_tensorboard_logger.py +5 -5
  11. huggingface_hub/_upload_large_folder.py +15 -15
  12. huggingface_hub/_webhooks_payload.py +3 -3
  13. huggingface_hub/_webhooks_server.py +2 -2
  14. huggingface_hub/cli/__init__.py +0 -14
  15. huggingface_hub/cli/_cli_utils.py +80 -3
  16. huggingface_hub/cli/auth.py +104 -150
  17. huggingface_hub/cli/cache.py +102 -126
  18. huggingface_hub/cli/download.py +93 -110
  19. huggingface_hub/cli/hf.py +37 -41
  20. huggingface_hub/cli/jobs.py +689 -1017
  21. huggingface_hub/cli/lfs.py +120 -143
  22. huggingface_hub/cli/repo.py +158 -216
  23. huggingface_hub/cli/repo_files.py +50 -84
  24. huggingface_hub/cli/system.py +6 -25
  25. huggingface_hub/cli/upload.py +198 -212
  26. huggingface_hub/cli/upload_large_folder.py +90 -105
  27. huggingface_hub/commands/_cli_utils.py +2 -2
  28. huggingface_hub/commands/delete_cache.py +11 -11
  29. huggingface_hub/commands/download.py +4 -13
  30. huggingface_hub/commands/lfs.py +4 -4
  31. huggingface_hub/commands/repo_files.py +2 -2
  32. huggingface_hub/commands/tag.py +1 -3
  33. huggingface_hub/commands/upload.py +4 -4
  34. huggingface_hub/commands/upload_large_folder.py +3 -3
  35. huggingface_hub/commands/user.py +4 -5
  36. huggingface_hub/community.py +5 -5
  37. huggingface_hub/constants.py +3 -41
  38. huggingface_hub/dataclasses.py +16 -22
  39. huggingface_hub/errors.py +43 -30
  40. huggingface_hub/fastai_utils.py +8 -9
  41. huggingface_hub/file_download.py +154 -253
  42. huggingface_hub/hf_api.py +329 -558
  43. huggingface_hub/hf_file_system.py +104 -62
  44. huggingface_hub/hub_mixin.py +32 -54
  45. huggingface_hub/inference/_client.py +178 -163
  46. huggingface_hub/inference/_common.py +38 -54
  47. huggingface_hub/inference/_generated/_async_client.py +219 -259
  48. huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +3 -3
  49. huggingface_hub/inference/_generated/types/base.py +10 -7
  50. huggingface_hub/inference/_generated/types/chat_completion.py +16 -16
  51. huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
  52. huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
  53. huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
  54. huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
  55. huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
  56. huggingface_hub/inference/_generated/types/summarization.py +2 -2
  57. huggingface_hub/inference/_generated/types/table_question_answering.py +4 -4
  58. huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
  59. huggingface_hub/inference/_generated/types/text_generation.py +10 -10
  60. huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
  61. huggingface_hub/inference/_generated/types/token_classification.py +2 -2
  62. huggingface_hub/inference/_generated/types/translation.py +2 -2
  63. huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
  64. huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
  65. huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
  66. huggingface_hub/inference/_mcp/agent.py +3 -3
  67. huggingface_hub/inference/_mcp/constants.py +1 -2
  68. huggingface_hub/inference/_mcp/mcp_client.py +33 -22
  69. huggingface_hub/inference/_mcp/types.py +10 -10
  70. huggingface_hub/inference/_mcp/utils.py +4 -4
  71. huggingface_hub/inference/_providers/__init__.py +2 -13
  72. huggingface_hub/inference/_providers/_common.py +24 -25
  73. huggingface_hub/inference/_providers/black_forest_labs.py +6 -6
  74. huggingface_hub/inference/_providers/cohere.py +3 -3
  75. huggingface_hub/inference/_providers/fal_ai.py +25 -25
  76. huggingface_hub/inference/_providers/featherless_ai.py +4 -4
  77. huggingface_hub/inference/_providers/fireworks_ai.py +3 -3
  78. huggingface_hub/inference/_providers/hf_inference.py +13 -13
  79. huggingface_hub/inference/_providers/hyperbolic.py +4 -4
  80. huggingface_hub/inference/_providers/nebius.py +10 -10
  81. huggingface_hub/inference/_providers/novita.py +5 -5
  82. huggingface_hub/inference/_providers/nscale.py +4 -4
  83. huggingface_hub/inference/_providers/replicate.py +15 -15
  84. huggingface_hub/inference/_providers/sambanova.py +6 -6
  85. huggingface_hub/inference/_providers/together.py +7 -7
  86. huggingface_hub/lfs.py +24 -33
  87. huggingface_hub/repocard.py +16 -17
  88. huggingface_hub/repocard_data.py +56 -56
  89. huggingface_hub/serialization/__init__.py +0 -1
  90. huggingface_hub/serialization/_base.py +9 -9
  91. huggingface_hub/serialization/_dduf.py +7 -7
  92. huggingface_hub/serialization/_torch.py +28 -28
  93. huggingface_hub/utils/__init__.py +10 -4
  94. huggingface_hub/utils/_auth.py +5 -5
  95. huggingface_hub/utils/_cache_manager.py +31 -31
  96. huggingface_hub/utils/_deprecation.py +1 -1
  97. huggingface_hub/utils/_dotenv.py +3 -3
  98. huggingface_hub/utils/_fixes.py +0 -10
  99. huggingface_hub/utils/_git_credential.py +3 -3
  100. huggingface_hub/utils/_headers.py +7 -29
  101. huggingface_hub/utils/_http.py +369 -209
  102. huggingface_hub/utils/_pagination.py +4 -4
  103. huggingface_hub/utils/_paths.py +5 -5
  104. huggingface_hub/utils/_runtime.py +15 -13
  105. huggingface_hub/utils/_safetensors.py +21 -21
  106. huggingface_hub/utils/_subprocess.py +9 -9
  107. huggingface_hub/utils/_telemetry.py +3 -3
  108. huggingface_hub/utils/_typing.py +3 -3
  109. huggingface_hub/utils/_validators.py +53 -72
  110. huggingface_hub/utils/_xet.py +16 -16
  111. huggingface_hub/utils/_xet_progress_reporting.py +1 -1
  112. huggingface_hub/utils/insecure_hashlib.py +3 -9
  113. huggingface_hub/utils/tqdm.py +3 -3
  114. {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/METADATA +17 -26
  115. huggingface_hub-1.0.0rc1.dist-info/RECORD +161 -0
  116. huggingface_hub/inference/_providers/publicai.py +0 -6
  117. huggingface_hub/inference/_providers/scaleway.py +0 -28
  118. huggingface_hub/inference_api.py +0 -217
  119. huggingface_hub/keras_mixin.py +0 -500
  120. huggingface_hub/repository.py +0 -1477
  121. huggingface_hub/serialization/_tensorflow.py +0 -95
  122. huggingface_hub/utils/_hf_folder.py +0 -68
  123. huggingface_hub-0.35.1.dist-info/RECORD +0 -168
  124. {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/LICENSE +0 -0
  125. {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/WHEEL +0 -0
  126. {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/entry_points.txt +0 -0
  127. {huggingface_hub-0.35.1.dist-info → huggingface_hub-1.0.0rc1.dist-info}/top_level.txt +0 -0
huggingface_hub/inference/_providers/fireworks_ai.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional
+from typing import Any, Optional
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 
@@ -13,8 +13,8 @@ class FireworksAIConversationalTask(BaseConversationalTask):
         return "/inference/v1/chat/completions"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)
         response_format = parameters.get("response_format")
         if isinstance(response_format, dict) and response_format.get("type") == "json_schema":
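The same typing cleanup repeats across every provider module below: `Dict`, `List`, and `Tuple` imports from `typing` are replaced by the builtin generics standardized in PEP 585, which require Python 3.9 or newer. A minimal before/after sketch (the function is hypothetical):

    from typing import Optional

    # Before (0.35.x style):
    #     from typing import Dict, List
    #     def prepare(params: Dict[str, str]) -> Optional[List[str]]: ...
    # After (1.0 style): builtin generics, only Optional still imported.
    def prepare(params: dict[str, str]) -> Optional[list[str]]:
        return sorted(params.values()) or None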
huggingface_hub/inference/_providers/hf_inference.py CHANGED
@@ -1,7 +1,7 @@
 import json
 from functools import lru_cache
 from pathlib import Path
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 from urllib.parse import urlparse, urlunparse
 
 from huggingface_hub import constants
@@ -60,8 +60,8 @@ class HFInferenceTask(TaskProviderHelper):
         )
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         if isinstance(inputs, bytes):
             raise ValueError(f"Unexpected binary input for task {self.task}.")
         if isinstance(inputs, Path):
@@ -71,16 +71,16 @@ class HFInferenceTask(TaskProviderHelper):
 
 class HFInferenceBinaryInputTask(HFInferenceTask):
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         return None
 
     def _prepare_payload_as_bytes(
         self,
         inputs: Any,
-        parameters: Dict,
+        parameters: dict,
         provider_mapping_info: InferenceProviderMapping,
-        extra_payload: Optional[Dict],
+        extra_payload: Optional[dict],
     ) -> Optional[MimeBytes]:
         parameters = filter_none(parameters)
         extra_payload = extra_payload or {}
@@ -106,8 +106,8 @@ class HFInferenceConversational(HFInferenceTask):
         super().__init__("conversational")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         payload = filter_none(parameters)
         mapped_model = provider_mapping_info.provider_id
         payload_model = parameters.get("model") or mapped_model
@@ -156,7 +156,7 @@ def _build_chat_completion_url(model_url: str) -> str:
 
 
 @lru_cache(maxsize=1)
-def _fetch_recommended_models() -> Dict[str, Optional[str]]:
+def _fetch_recommended_models() -> dict[str, Optional[str]]:
     response = get_session().get(f"{constants.ENDPOINT}/api/tasks", headers=build_hf_headers())
     hf_raise_for_status(response)
     return {task: next(iter(details["widgetModels"]), None) for task, details in response.json().items()}
@@ -211,8 +211,8 @@ class HFInferenceFeatureExtractionTask(HFInferenceTask):
         super().__init__("feature-extraction")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         if isinstance(inputs, bytes):
             raise ValueError(f"Unexpected binary input for task {self.task}.")
         if isinstance(inputs, Path):
@@ -222,7 +222,7 @@ class HFInferenceFeatureExtractionTask(HFInferenceTask):
         # See specs: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/tasks/feature-extraction/spec/input.json
         return {"inputs": inputs, **filter_none(parameters)}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         if isinstance(response, bytes):
             return _bytes_to_dict(response)
         return response
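`_fetch_recommended_models` keeps its `@lru_cache(maxsize=1)` decorator, so the `/api/tasks` endpoint is queried at most once per process and later calls are answered from memory. A standalone sketch of that caching pattern (the function and payload are stand-ins, not the real request):

    from functools import lru_cache

    @lru_cache(maxsize=1)
    def fetch_task_defaults() -> dict:
        print("fetching...")  # runs only on the first call
        return {"feature-extraction": "example/model"}

    fetch_task_defaults()  # prints "fetching..."
    fetch_task_defaults()  # cache hit: no second fetch, no print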
huggingface_hub/inference/_providers/hyperbolic.py CHANGED
@@ -1,5 +1,5 @@
 import base64
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -14,8 +14,8 @@ class HyperbolicTextToImageTask(TaskProviderHelper):
         return "/v1/images/generations"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         mapped_model = provider_mapping_info.provider_id
         parameters = filter_none(parameters)
         if "num_inference_steps" in parameters:
@@ -29,7 +29,7 @@ class HyperbolicTextToImageTask(TaskProviderHelper):
             parameters["height"] = 512
         return {"prompt": inputs, "model_name": mapped_model, **parameters}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         return base64.b64decode(response_dict["images"][0]["image"])
 
huggingface_hub/inference/_providers/nebius.py CHANGED
@@ -1,5 +1,5 @@
 import base64
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -15,7 +15,7 @@ class NebiusTextGenerationTask(BaseTextGenerationTask):
     def __init__(self):
         super().__init__(provider="nebius", base_url="https://api.studio.nebius.ai")
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         output = _as_dict(response)["choices"][0]
         return {
             "generated_text": output["text"],
@@ -31,8 +31,8 @@ class NebiusConversationalTask(BaseConversationalTask):
         super().__init__(provider="nebius", base_url="https://api.studio.nebius.ai")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)
         response_format = parameters.get("response_format")
         if isinstance(response_format, dict) and response_format.get("type") == "json_schema":
@@ -50,8 +50,8 @@ class NebiusTextToImageTask(TaskProviderHelper):
         return "/v1/images/generations"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         mapped_model = provider_mapping_info.provider_id
         parameters = filter_none(parameters)
         if "guidance_scale" in parameters:
@@ -61,7 +61,7 @@ class NebiusTextToImageTask(TaskProviderHelper):
 
         return {"prompt": inputs, **parameters, "model": mapped_model}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         return base64.b64decode(response_dict["data"][0]["b64_json"])
 
@@ -74,10 +74,10 @@ class NebiusFeatureExtractionTask(TaskProviderHelper):
         return "/v1/embeddings"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         return {"input": inputs, "model": provider_mapping_info.provider_id}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         embeddings = _as_dict(response)["data"]
         return [embedding["embedding"] for embedding in embeddings]
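Nebius feature extraction (like Sambanova's further down) posts to an OpenAI-compatible `/v1/embeddings` route and unwraps `data[*].embedding` from the response. With a fabricated payload, the unwrapping reduces to:

    # Fabricated /v1/embeddings-style response:
    response = {"data": [{"embedding": [0.1, 0.2]}, {"embedding": [0.3, 0.4]}]}
    embeddings = [item["embedding"] for item in response["data"]]
    assert embeddings == [[0.1, 0.2], [0.3, 0.4]]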
huggingface_hub/inference/_providers/novita.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -23,7 +23,7 @@ class NovitaTextGenerationTask(BaseTextGenerationTask):
         # there is no v1/ route for novita
         return "/v3/openai/completions"
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         output = _as_dict(response)["choices"][0]
         return {
             "generated_text": output["text"],
@@ -51,11 +51,11 @@ class NovitaTextToVideoTask(TaskProviderHelper):
         return f"/v3/hf/{mapped_model}"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         return {"prompt": inputs, **filter_none(parameters)}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         if not (
             isinstance(response_dict, dict)
huggingface_hub/inference/_providers/nscale.py CHANGED
@@ -1,5 +1,5 @@
 import base64
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -20,8 +20,8 @@ class NscaleTextToImageTask(TaskProviderHelper):
         return "/v1/images/generations"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         mapped_model = provider_mapping_info.provider_id
         # Combine all parameters except inputs and parameters
         parameters = filter_none(parameters)
@@ -39,6 +39,6 @@ class NscaleTextToImageTask(TaskProviderHelper):
         }
         return payload
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         return base64.b64decode(response_dict["data"][0]["b64_json"])
huggingface_hub/inference/_providers/replicate.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict, _as_url
@@ -14,7 +14,7 @@ class ReplicateTask(TaskProviderHelper):
     def __init__(self, task: str):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task=task)
 
-    def _prepare_headers(self, headers: Dict, api_key: str) -> Dict[str, Any]:
+    def _prepare_headers(self, headers: dict, api_key: str) -> dict[str, Any]:
         headers = super()._prepare_headers(headers, api_key)
         headers["Prefer"] = "wait"
         return headers
@@ -25,16 +25,16 @@ class ReplicateTask(TaskProviderHelper):
         return f"/v1/models/{mapped_model}/predictions"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         mapped_model = provider_mapping_info.provider_id
-        payload: Dict[str, Any] = {"input": {"prompt": inputs, **filter_none(parameters)}}
+        payload: dict[str, Any] = {"input": {"prompt": inputs, **filter_none(parameters)}}
         if ":" in mapped_model:
             version = mapped_model.split(":", 1)[1]
             payload["version"] = version
         return payload
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         if response_dict.get("output") is None:
             raise TimeoutError(
@@ -52,9 +52,9 @@ class ReplicateTextToImageTask(ReplicateTask):
         super().__init__("text-to-image")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
-        payload: Dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        payload: dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
         if provider_mapping_info.adapter_weights_path is not None:
             payload["input"]["lora_weights"] = f"https://huggingface.co/{provider_mapping_info.hf_model_id}"
         return payload
@@ -65,9 +65,9 @@ class ReplicateTextToSpeechTask(ReplicateTask):
         super().__init__("text-to-speech")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
-        payload: Dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
+        payload: dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)  # type: ignore[assignment]
         payload["input"]["text"] = payload["input"].pop("prompt")  # rename "prompt" to "text" for TTS
         return payload
 
@@ -77,11 +77,11 @@ class ReplicateImageToImageTask(ReplicateTask):
         super().__init__("image-to-image")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         image_url = _as_url(inputs, default_mime_type="image/jpeg")
 
-        payload: Dict[str, Any] = {"input": {"input_image": image_url, **filter_none(parameters)}}
+        payload: dict[str, Any] = {"input": {"input_image": image_url, **filter_none(parameters)}}
 
         mapped_model = provider_mapping_info.provider_id
         if ":" in mapped_model:
huggingface_hub/inference/_providers/sambanova.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -10,8 +10,8 @@ class SambanovaConversationalTask(BaseConversationalTask):
         super().__init__(provider="sambanova", base_url="https://api.sambanova.ai")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         response_format_config = parameters.get("response_format")
         if isinstance(response_format_config, dict):
             if response_format_config.get("type") == "json_schema":
@@ -32,11 +32,11 @@ class SambanovaFeatureExtractionTask(TaskProviderHelper):
         return "/v1/embeddings"
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         parameters = filter_none(parameters)
         return {"input": inputs, "model": provider_mapping_info.provider_id, **parameters}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         embeddings = _as_dict(response)["data"]
         return [embedding["embedding"] for embedding in embeddings]
huggingface_hub/inference/_providers/together.py CHANGED
@@ -1,6 +1,6 @@
 import base64
 from abc import ABC
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union
 
 from huggingface_hub.hf_api import InferenceProviderMapping
 from huggingface_hub.inference._common import RequestParameters, _as_dict
@@ -36,7 +36,7 @@ class TogetherTextGenerationTask(BaseTextGenerationTask):
     def __init__(self):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL)
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         output = _as_dict(response)["choices"][0]
         return {
             "generated_text": output["text"],
@@ -52,8 +52,8 @@ class TogetherConversationalTask(BaseConversationalTask):
         super().__init__(provider=_PROVIDER, base_url=_BASE_URL)
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)
         response_format = parameters.get("response_format")
         if isinstance(response_format, dict) and response_format.get("type") == "json_schema":
@@ -72,8 +72,8 @@ class TogetherTextToImageTask(TogetherTask):
         super().__init__("text-to-image")
 
     def _prepare_payload_as_dict(
-        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
-    ) -> Optional[Dict]:
+        self, inputs: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[dict]:
         mapped_model = provider_mapping_info.provider_id
         parameters = filter_none(parameters)
         if "num_inference_steps" in parameters:
@@ -83,6 +83,6 @@ class TogetherTextToImageTask(TogetherTask):
 
         return {"prompt": inputs, "response_format": "base64", **parameters, "model": mapped_model}
 
-    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
         response_dict = _as_dict(response)
         return base64.b64decode(response_dict["data"][0]["b64_json"])
huggingface_hub/lfs.py CHANGED
@@ -14,7 +14,6 @@
 # limitations under the License.
 """Git LFS related type definitions and utilities"""
 
-import inspect
 import io
 import re
 import warnings
@@ -22,7 +21,7 @@ from dataclasses import dataclass
 from math import ceil
 from os.path import getsize
 from pathlib import Path
-from typing import TYPE_CHECKING, BinaryIO, Dict, Iterable, List, Optional, Tuple, TypedDict
+from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, TypedDict
 from urllib.parse import unquote
 
 from huggingface_hub import constants
@@ -107,8 +106,8 @@ def post_lfs_batch_info(
     repo_id: str,
     revision: Optional[str] = None,
     endpoint: Optional[str] = None,
-    headers: Optional[Dict[str, str]] = None,
-) -> Tuple[List[dict], List[dict]]:
+    headers: Optional[dict[str, str]] = None,
+) -> tuple[list[dict], list[dict]]:
     """
     Requests the LFS batch endpoint to retrieve upload instructions
 
@@ -136,7 +135,7 @@ def post_lfs_batch_info(
     Raises:
         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
             If an argument is invalid or the server response is malformed.
-        [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
+        [`HfHubHTTPError`]
            If the server returned an error.
     """
     endpoint = endpoint if endpoint is not None else constants.ENDPOINT
@@ -144,7 +143,7 @@
     if repo_type in constants.REPO_TYPES_URL_PREFIXES:
         url_prefix = constants.REPO_TYPES_URL_PREFIXES[repo_type]
     batch_url = f"{endpoint}/{url_prefix}{repo_id}.git/info/lfs/objects/batch"
-    payload: Dict = {
+    payload: dict = {
         "operation": "upload",
         "transfers": ["basic", "multipart"],
         "objects": [
@@ -187,14 +186,14 @@ class CompletionPayloadT(TypedDict):
     """Payload that will be sent to the Hub when uploading multi-part."""
 
     oid: str
-    parts: List[PayloadPartT]
+    parts: list[PayloadPartT]
 
 
 def lfs_upload(
     operation: "CommitOperationAdd",
-    lfs_batch_action: Dict,
+    lfs_batch_action: dict,
     token: Optional[str] = None,
-    headers: Optional[Dict[str, str]] = None,
+    headers: Optional[dict[str, str]] = None,
     endpoint: Optional[str] = None,
 ) -> None:
     """
@@ -214,7 +213,7 @@ def lfs_upload(
     Raises:
         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
             If `lfs_batch_action` is improperly formatted
-        [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
+        [`HfHubHTTPError`]
            If the upload resulted in an error
     """
     # 0. If LFS file is already present, skip upload
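The docstrings above now point at `HfHubHTTPError` (from `huggingface_hub.errors`) instead of `requests.HTTPError`, consistent with 1.0's HTTP-layer overhaul visible in `utils/_http.py`. Catching the library's own exception type keeps calling code decoupled from the underlying HTTP client. A minimal sketch (the repo id is hypothetical):

    from huggingface_hub import HfApi
    from huggingface_hub.errors import HfHubHTTPError

    try:
        HfApi().model_info("some-user/definitely-missing-repo")  # hypothetical repo id
    except HfHubHTTPError as err:
        print(err)  # includes the Hub's error message alongside the HTTP status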
@@ -308,19 +307,17 @@ def _upload_single_part(operation: "CommitOperationAdd", upload_url: str) -> None:
         fileobj:
             The file-like object holding the data to upload.
 
-    Returns: `requests.Response`
-
     Raises:
-        [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
-            If the upload resulted in an error.
+        [`HfHubHTTPError`]
+            If the upload resulted in an error.
     """
     with operation.as_file(with_tqdm=True) as fileobj:
         # S3 might raise a transient 500 error -> let's retry if that happens
-        response = http_backoff("PUT", upload_url, data=fileobj)
+        response = http_backoff("PUT", upload_url, data=fileobj, retry_on_status_codes=(500, 502, 503, 504))
         hf_raise_for_status(response)
 
 
-def _upload_multi_part(operation: "CommitOperationAdd", header: Dict, chunk_size: int, upload_url: str) -> None:
+def _upload_multi_part(operation: "CommitOperationAdd", header: dict, chunk_size: int, upload_url: str) -> None:
     """
     Uploads file using HF multipart LFS transfer protocol.
     """
@@ -355,7 +352,7 @@ def _upload_multi_part(operation: "CommitOperationAdd", header: Dict, chunk_size: int, upload_url: str) -> None:
     hf_raise_for_status(completion_res)
 
 
-def _get_sorted_parts_urls(header: Dict, upload_info: UploadInfo, chunk_size: int) -> List[str]:
+def _get_sorted_parts_urls(header: dict, upload_info: UploadInfo, chunk_size: int) -> list[str]:
     sorted_part_upload_urls = [
         upload_url
         for _, upload_url in sorted(
@@ -373,8 +370,8 @@ def _get_sorted_parts_urls(header: Dict, upload_info: UploadInfo, chunk_size: int) -> List[str]:
     return sorted_part_upload_urls
 
 
-def _get_completion_payload(response_headers: List[Dict], oid: str) -> CompletionPayloadT:
-    parts: List[PayloadPartT] = []
+def _get_completion_payload(response_headers: list[dict], oid: str) -> CompletionPayloadT:
+    parts: list[PayloadPartT] = []
     for part_number, header in enumerate(response_headers):
         etag = header.get("etag")
         if etag is None or etag == "":
@@ -389,8 +386,8 @@ def _get_completion_payload(response_headers: List[Dict], oid: str) -> CompletionPayloadT:
 
 
 def _upload_parts_iteratively(
-    operation: "CommitOperationAdd", sorted_parts_urls: List[str], chunk_size: int
-) -> List[Dict]:
+    operation: "CommitOperationAdd", sorted_parts_urls: list[str], chunk_size: int
+) -> list[dict]:
     headers = []
     with operation.as_file(with_tqdm=True) as fileobj:
         for part_idx, part_upload_url in enumerate(sorted_parts_urls):
@@ -400,15 +397,17 @@
                 read_limit=chunk_size,
             ) as fileobj_slice:
                 # S3 might raise a transient 500 error -> let's retry if that happens
-                part_upload_res = http_backoff("PUT", part_upload_url, data=fileobj_slice)
+                part_upload_res = http_backoff(
+                    "PUT", part_upload_url, data=fileobj_slice, retry_on_status_codes=(500, 502, 503, 504)
+                )
                 hf_raise_for_status(part_upload_res)
                 headers.append(part_upload_res.headers)
     return headers  # type: ignore
 
 
 def _upload_parts_hf_transfer(
-    operation: "CommitOperationAdd", sorted_parts_urls: List[str], chunk_size: int
-) -> List[Dict]:
+    operation: "CommitOperationAdd", sorted_parts_urls: list[str], chunk_size: int
+) -> list[dict]:
     # Upload file using an external Rust-based package. Upload is faster but support less features (no progress bars).
     try:
         from hf_transfer import multipart_upload
@@ -418,12 +417,6 @@
             " not available in your environment. Try `pip install hf_transfer`."
         )
 
-    supports_callback = "callback" in inspect.signature(multipart_upload).parameters
-    if not supports_callback:
-        warnings.warn(
-            "You are using an outdated version of `hf_transfer`. Consider upgrading to latest version to enable progress bars using `pip install -U hf_transfer`."
-        )
-
     total = operation.upload_info.size
     desc = operation.path_in_repo
     if len(desc) > 40:
@@ -446,13 +439,11 @@
             max_files=128,
             parallel_failures=127,  # could be removed
             max_retries=5,
-            **({"callback": progress.update} if supports_callback else {}),
+            callback=progress.update,
         )
     except Exception as e:
         raise RuntimeError(
             "An error occurred while uploading using `hf_transfer`. Consider disabling HF_HUB_ENABLE_HF_TRANSFER for"
             " better error handling."
         ) from e
-    if not supports_callback:
-        progress.update(total)
     return output
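With the `inspect.signature` shim removed, `callback=progress.update` is passed unconditionally, so an old `hf_transfer` build without callback support should now fail with a `TypeError` instead of silently losing progress reporting. A quick way to check what is installed before enabling `HF_HUB_ENABLE_HF_TRANSFER` (the exact minimum version is not stated in this diff):

    import importlib.metadata

    try:
        version = importlib.metadata.version("hf_transfer")
    except importlib.metadata.PackageNotFoundError:
        version = None
    print(f"hf_transfer: {version or 'not installed'} (upgrade with `pip install -U hf_transfer`)")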