huggingface-hub 0.27.0rc1__py3-none-any.whl → 0.28.0__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only and reflects the changes between those versions.

Potentially problematic release: this version of huggingface-hub has been flagged as potentially problematic.
Files changed (40)
  1. huggingface_hub/__init__.py +418 -12
  2. huggingface_hub/_commit_api.py +33 -4
  3. huggingface_hub/_inference_endpoints.py +8 -2
  4. huggingface_hub/_local_folder.py +14 -3
  5. huggingface_hub/commands/scan_cache.py +1 -1
  6. huggingface_hub/commands/upload_large_folder.py +1 -1
  7. huggingface_hub/constants.py +7 -2
  8. huggingface_hub/file_download.py +1 -2
  9. huggingface_hub/hf_api.py +65 -84
  10. huggingface_hub/hub_mixin.py +12 -9
  11. huggingface_hub/inference/_client.py +706 -450
  12. huggingface_hub/inference/_common.py +32 -64
  13. huggingface_hub/inference/_generated/_async_client.py +722 -470
  14. huggingface_hub/inference/_generated/types/__init__.py +1 -0
  15. huggingface_hub/inference/_generated/types/image_to_image.py +3 -3
  16. huggingface_hub/inference/_generated/types/text_to_audio.py +1 -2
  17. huggingface_hub/inference/_generated/types/text_to_image.py +3 -3
  18. huggingface_hub/inference/_generated/types/text_to_speech.py +3 -6
  19. huggingface_hub/inference/_generated/types/text_to_video.py +47 -0
  20. huggingface_hub/inference/_generated/types/visual_question_answering.py +1 -1
  21. huggingface_hub/inference/_providers/__init__.py +89 -0
  22. huggingface_hub/inference/_providers/fal_ai.py +159 -0
  23. huggingface_hub/inference/_providers/hf_inference.py +202 -0
  24. huggingface_hub/inference/_providers/replicate.py +148 -0
  25. huggingface_hub/inference/_providers/sambanova.py +89 -0
  26. huggingface_hub/inference/_providers/together.py +153 -0
  27. huggingface_hub/py.typed +0 -0
  28. huggingface_hub/repocard.py +1 -1
  29. huggingface_hub/repocard_data.py +2 -1
  30. huggingface_hub/serialization/_base.py +1 -1
  31. huggingface_hub/serialization/_torch.py +1 -1
  32. huggingface_hub/utils/_fixes.py +25 -13
  33. huggingface_hub/utils/_http.py +3 -3
  34. huggingface_hub/utils/logging.py +1 -1
  35. {huggingface_hub-0.27.0rc1.dist-info → huggingface_hub-0.28.0.dist-info}/METADATA +4 -4
  36. {huggingface_hub-0.27.0rc1.dist-info → huggingface_hub-0.28.0.dist-info}/RECORD +40 -32
  37. {huggingface_hub-0.27.0rc1.dist-info → huggingface_hub-0.28.0.dist-info}/LICENSE +0 -0
  38. {huggingface_hub-0.27.0rc1.dist-info → huggingface_hub-0.28.0.dist-info}/WHEEL +0 -0
  39. {huggingface_hub-0.27.0rc1.dist-info → huggingface_hub-0.28.0.dist-info}/entry_points.txt +0 -0
  40. {huggingface_hub-0.27.0rc1.dist-info → huggingface_hub-0.28.0.dist-info}/top_level.txt +0 -0
huggingface_hub/inference/_providers/replicate.py
@@ -0,0 +1,148 @@
+ from typing import Any, Dict, Optional, Union
+
+ from huggingface_hub import constants
+ from huggingface_hub.inference._common import RequestParameters, TaskProviderHelper, _as_dict
+ from huggingface_hub.utils import build_hf_headers, get_session, get_token, logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ BASE_URL = "https://api.replicate.com"
+
+ SUPPORTED_MODELS = {
+     "text-to-image": {
+         "black-forest-labs/FLUX.1-dev": "black-forest-labs/flux-dev",
+         "black-forest-labs/FLUX.1-schnell": "black-forest-labs/flux-schnell",
+         "ByteDance/Hyper-SD": "bytedance/hyper-flux-16step:382cf8959fb0f0d665b26e7e80b8d6dc3faaef1510f14ce017e8c732bb3d1eb7",
+         "ByteDance/SDXL-Lightning": "bytedance/sdxl-lightning-4step:5599ed30703defd1d160a25a63321b4dec97101d98b4674bcc56e41f62f35637",
+         "playgroundai/playground-v2.5-1024px-aesthetic": "playgroundai/playground-v2.5-1024px-aesthetic:a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24",
+         "stabilityai/stable-diffusion-3.5-large-turbo": "stability-ai/stable-diffusion-3.5-large-turbo",
+         "stabilityai/stable-diffusion-3.5-large": "stability-ai/stable-diffusion-3.5-large",
+         "stabilityai/stable-diffusion-3.5-medium": "stability-ai/stable-diffusion-3.5-medium",
+         "stabilityai/stable-diffusion-xl-base-1.0": "stability-ai/sdxl:7762fd07cf82c948538e41f63f77d685e02b063e37e496e96eefd46c929f9bdc",
+     },
+     "text-to-speech": {
+         "OuteAI/OuteTTS-0.3-500M": "jbilcke/oute-tts:39a59319327b27327fa3095149c5a746e7f2aee18c75055c3368237a6503cd26",
+     },
+     "text-to-video": {
+         "genmo/mochi-1-preview": "genmoai/mochi-1:1944af04d098ef69bed7f9d335d102e652203f268ec4aaa2d836f6217217e460",
+     },
+ }
+
+
+ def _build_url(base_url: str, model: str) -> str:
+     if ":" in model:
+         return f"{base_url}/v1/predictions"
+     return f"{base_url}/v1/models/{model}/predictions"
+
+
+ class ReplicateTask(TaskProviderHelper):
+     def __init__(self, task: str):
+         self.task = task
+
+     def prepare_request(
+         self,
+         *,
+         inputs: Any,
+         parameters: Dict[str, Any],
+         headers: Dict,
+         model: Optional[str],
+         api_key: Optional[str],
+         extra_payload: Optional[Dict[str, Any]] = None,
+     ) -> RequestParameters:
+         if api_key is None:
+             api_key = get_token()
+         if api_key is None:
+             raise ValueError(
+                 "You must provide an api_key to work with Replicate API or log in with `huggingface-cli login`."
+             )
+
+         # Route to the proxy if the api_key is a HF TOKEN
+         if api_key.startswith("hf_"):
+             base_url = constants.INFERENCE_PROXY_TEMPLATE.format(provider="replicate")
+             logger.info("Calling Replicate provider through Hugging Face proxy.")
+         else:
+             base_url = BASE_URL
+             logger.info("Calling Replicate provider directly.")
+         mapped_model = self._map_model(model)
+         url = _build_url(base_url, mapped_model)
+
+         headers = {
+             **build_hf_headers(token=api_key),
+             **headers,
+             "Prefer": "wait",
+         }
+
+         payload = self._prepare_payload(inputs, parameters=parameters, model=mapped_model)
+
+         return RequestParameters(
+             url=url,
+             task=self.task,
+             model=mapped_model,
+             json=payload,
+             data=None,
+             headers=headers,
+         )
+
+     def _map_model(self, model: Optional[str]) -> str:
+         if model is None:
+             raise ValueError("Please provide a model available on Replicate.")
+         if self.task not in SUPPORTED_MODELS:
+             raise ValueError(f"Task {self.task} not supported with Replicate.")
+         mapped_model = SUPPORTED_MODELS[self.task].get(model)
+         if mapped_model is None:
+             raise ValueError(f"Model {model} is not supported with Replicate for task {self.task}.")
+         return mapped_model
+
+     def _prepare_payload(
+         self,
+         inputs: Any,
+         parameters: Dict[str, Any],
+         model: str,
+     ) -> Dict[str, Any]:
+         payload: Dict[str, Any] = {
+             "input": {
+                 "prompt": inputs,
+                 **{k: v for k, v in parameters.items() if v is not None},
+             }
+         }
+         if ":" in model:
+             version = model.split(":", 1)[1]
+             payload["version"] = version
+         return payload
+
+     def get_response(self, response: Union[bytes, Dict]) -> Any:
+         response_dict = _as_dict(response)
+         if response_dict.get("output") is None:
+             raise TimeoutError(
+                 f"Inference request timed out after 60 seconds. No output generated for model {response_dict.get('model')}"
+                 "The model might be in cold state or starting up. Please try again later."
+             )
+         output_url = (
+             response_dict["output"] if isinstance(response_dict["output"], str) else response_dict["output"][0]
+         )
+         return get_session().get(output_url).content
+
+
+ class ReplicateTextToSpeechTask(ReplicateTask):
+     def __init__(self):
+         super().__init__("text-to-speech")
+
+     def _prepare_payload(
+         self,
+         inputs: Any,
+         parameters: Dict[str, Any],
+         model: str,
+     ) -> Dict[str, Any]:
+         # The following payload might work only for a subset of text-to-speech Replicate models.
+         payload: Dict[str, Any] = {
+             "input": {
+                 "inputs": inputs,
+                 **{k: v for k, v in parameters.items() if v is not None},
+             },
+         }
+         if ":" in model:
+             version = model.split(":", 1)[1]
+             payload["version"] = version
+         return payload
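
Not part of the diff: a minimal usage sketch of this new provider through the high-level client, assuming the `provider` argument on `InferenceClient` introduced in this release and a valid Hugging Face token (or Replicate API key). Prompt, model choice, and output path are illustrative only.

```python
# Illustrative sketch only: route a text-to-image call through the new Replicate provider.
from huggingface_hub import InferenceClient

client = InferenceClient(provider="replicate")  # uses the cached HF token; an hf_ token goes through the HF proxy
image = client.text_to_image(
    "An astronaut riding a horse on the moon",
    model="black-forest-labs/FLUX.1-schnell",  # one of the mapped entries in SUPPORTED_MODELS above
)
image.save("astronaut.png")  # text_to_image returns a PIL image
```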
huggingface_hub/inference/_providers/sambanova.py
@@ -0,0 +1,89 @@
+ from typing import Any, Dict, Optional, Union
+
+ from huggingface_hub import constants
+ from huggingface_hub.inference._common import RequestParameters, TaskProviderHelper
+ from huggingface_hub.utils import build_hf_headers, get_token, logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ BASE_URL = "https://api.sambanova.ai"
+
+ SUPPORTED_MODELS = {
+     "conversational": {
+         "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
+         "Qwen/Qwen2.5-72B-Instruct": "Qwen2.5-72B-Instruct",
+         "Qwen/QwQ-32B-Preview": "QwQ-32B-Preview",
+         "meta-llama/Llama-3.3-70B-Instruct": "Meta-Llama-3.3-70B-Instruct",
+         "meta-llama/Llama-3.2-1B": "Meta-Llama-3.2-1B-Instruct",
+         "meta-llama/Llama-3.2-3B": "Meta-Llama-3.2-3B-Instruct",
+         "meta-llama/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
+         "meta-llama/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
+         "meta-llama/Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
+         "meta-llama/Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
+         "meta-llama/Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
+         "meta-llama/Llama-Guard-3-8B": "Meta-Llama-Guard-3-8B",
+     },
+ }
+
+
+ class SambanovaConversationalTask(TaskProviderHelper):
+     def __init__(self):
+         # TODO: adapt in a base class when supporting multiple tasks
+         self.task = "conversational"
+
+     def prepare_request(
+         self,
+         *,
+         inputs: Any,
+         parameters: Dict[str, Any],
+         headers: Dict,
+         model: Optional[str],
+         api_key: Optional[str],
+         extra_payload: Optional[Dict[str, Any]] = None,
+     ) -> RequestParameters:
+         if api_key is None:
+             api_key = get_token()
+         if api_key is None:
+             raise ValueError(
+                 "You must provide an api_key to work with Sambanova API or log in with `huggingface-cli login`."
+             )
+
+         # Route to the proxy if the api_key is a HF TOKEN
+         if api_key.startswith("hf_"):
+             base_url = constants.INFERENCE_PROXY_TEMPLATE.format(provider="sambanova")
+             logger.info("Calling Sambanova provider through Hugging Face proxy.")
+         else:
+             base_url = BASE_URL
+             logger.info("Calling Sambanova provider directly.")
+         headers = {**build_hf_headers(token=api_key), **headers}
+
+         mapped_model = self._map_model(model)
+         payload = {
+             "messages": inputs,
+             **{k: v for k, v in parameters.items() if v is not None},
+             "model": mapped_model,
+         }
+
+         return RequestParameters(
+             url=f"{base_url}/v1/chat/completions",
+             task=self.task,
+             model=mapped_model,
+             json=payload,
+             data=None,
+             headers=headers,
+         )
+
+     def _map_model(self, model: Optional[str]) -> str:
+         if model is None:
+             raise ValueError("Please provide a model available on Sambanova.")
+         if self.task not in SUPPORTED_MODELS:
+             raise ValueError(f"Task {self.task} not supported with Sambanova.")
+         mapped_model = SUPPORTED_MODELS[self.task].get(model)
+         if mapped_model is None:
+             raise ValueError(f"Model {model} is not supported with Sambanova for task {self.task}.")
+         return mapped_model
+
+     def get_response(self, response: Union[bytes, Dict]) -> Any:
+         return response
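
Not part of the diff: a hedged sketch of a chat completion routed through this provider, again assuming the new `provider` argument on `InferenceClient` and a logged-in token. The message content and `max_tokens` value are illustrative.

```python
# Illustrative sketch only: chat completion via the Sambanova provider mapping above.
from huggingface_hub import InferenceClient

client = InferenceClient(provider="sambanova")  # an hf_ token is routed through the Hugging Face proxy
completion = client.chat_completion(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    model="meta-llama/Llama-3.1-8B-Instruct",  # mapped to "Meta-Llama-3.1-8B-Instruct" by SUPPORTED_MODELS
    max_tokens=64,
)
print(completion.choices[0].message.content)
```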
huggingface_hub/inference/_providers/together.py
@@ -0,0 +1,153 @@
+ import base64
+ from abc import ABC, abstractmethod
+ from typing import Any, Dict, Optional, Union
+
+ from huggingface_hub import constants
+ from huggingface_hub.inference._common import RequestParameters, TaskProviderHelper, _as_dict
+ from huggingface_hub.utils import build_hf_headers, get_token, logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ BASE_URL = "https://api.together.xyz"
+
+ SUPPORTED_MODELS = {
+     "conversational": {
+         "databricks/dbrx-instruct": "databricks/dbrx-instruct",
+         "deepseek-ai/DeepSeek-R1": "deepseek-ai/DeepSeek-R1",
+         "deepseek-ai/DeepSeek-V3": "deepseek-ai/DeepSeek-V3",
+         "deepseek-ai/deepseek-llm-67b-chat": "deepseek-ai/deepseek-llm-67b-chat",
+         "google/gemma-2-9b-it": "google/gemma-2-9b-it",
+         "google/gemma-2b-it": "google/gemma-2-27b-it",
+         "meta-llama/Llama-2-13b-chat-hf": "meta-llama/Llama-2-13b-chat-hf",
+         "meta-llama/Llama-2-7b-chat-hf": "meta-llama/Llama-2-7b-chat-hf",
+         "meta-llama/Llama-3.2-11B-Vision-Instruct": "meta-llama/Llama-Vision-Free",
+         "meta-llama/Llama-3.2-3B-Instruct": "meta-llama/Llama-3.2-3B-Instruct-Turbo",
+         "meta-llama/Llama-3.2-90B-Vision-Instruct": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+         "meta-llama/Llama-3.3-70B-Instruct": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
+         "meta-llama/Meta-Llama-3-70B-Instruct": "meta-llama/Llama-3-70b-chat-hf",
+         "meta-llama/Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
+         "meta-llama/Meta-Llama-3.1-405B-Instruct": "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
+         "meta-llama/Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+         "meta-llama/Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+         "microsoft/WizardLM-2-8x22B": "microsoft/WizardLM-2-8x22B",
+         "mistralai/Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
+         "mistralai/Mixtral-8x22B-Instruct-v0.1": "mistralai/Mixtral-8x22B-Instruct-v0.1",
+         "mistralai/Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+         "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+         "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+         "Qwen/Qwen2-72B-Instruct": "Qwen/Qwen2-72B-Instruct",
+         "Qwen/Qwen2.5-72B-Instruct": "Qwen/Qwen2.5-72B-Instruct-Turbo",
+         "Qwen/Qwen2.5-7B-Instruct": "Qwen/Qwen2.5-7B-Instruct-Turbo",
+         "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen/Qwen2.5-Coder-32B-Instruct",
+         "Qwen/QwQ-32B-Preview": "Qwen/QwQ-32B-Preview",
+         "scb10x/llama-3-typhoon-v1.5-8b-instruct": "scb10x/scb10x-llama3-typhoon-v1-5-8b-instruct",
+         "scb10x/llama-3-typhoon-v1.5x-70b-instruct-awq": "scb10x/scb10x-llama3-typhoon-v1-5x-4f316",
+     },
+     "text-generation": {
+         "meta-llama/Llama-2-70b-hf": "meta-llama/Llama-2-70b-hf",
+         "meta-llama/Meta-Llama-3-8B": "meta-llama/Meta-Llama-3-8B",
+         "mistralai/Mixtral-8x7B-v0.1": "mistralai/Mixtral-8x7B-v0.1",
+     },
+     "text-to-image": {
+         "black-forest-labs/FLUX.1-Canny-dev": "black-forest-labs/FLUX.1-canny",
+         "black-forest-labs/FLUX.1-Depth-dev": "black-forest-labs/FLUX.1-depth",
+         "black-forest-labs/FLUX.1-dev": "black-forest-labs/FLUX.1-dev",
+         "black-forest-labs/FLUX.1-Redux-dev": "black-forest-labs/FLUX.1-redux",
+         "black-forest-labs/FLUX.1-schnell": "black-forest-labs/FLUX.1-pro",
+         "stabilityai/stable-diffusion-xl-base-1.0": "stabilityai/stable-diffusion-xl-base-1.0",
+     },
+ }
+
+
+ PER_TASK_ROUTES = {
+     "conversational": "v1/chat/completions",
+     "text-generation": "v1/completions",
+     "text-to-image": "v1/images/generations",
+ }
+
+
+ class TogetherTask(TaskProviderHelper, ABC):
+     """Base class for Together API tasks."""
+
+     def __init__(self, task: str):
+         self.task = task
+
+     def prepare_request(
+         self,
+         *,
+         inputs: Any,
+         parameters: Dict[str, Any],
+         headers: Dict,
+         model: Optional[str],
+         api_key: Optional[str],
+         extra_payload: Optional[Dict[str, Any]] = None,
+     ) -> RequestParameters:
+         if api_key is None:
+             api_key = get_token()
+         if api_key is None:
+             raise ValueError(
+                 "You must provide an api_key to work with Together API or log in with `huggingface-cli login`."
+             )
+         headers = {**build_hf_headers(token=api_key), **headers}
+
+         # Route to the proxy if the api_key is a HF TOKEN
+         if api_key.startswith("hf_"):
+             base_url = constants.INFERENCE_PROXY_TEMPLATE.format(provider="together")
+             logger.info("Calling Together provider through Hugging Face proxy.")
+         else:
+             base_url = BASE_URL
+             logger.info("Calling Together provider directly.")
+         mapped_model = self._map_model(model)
+         if "model" in parameters:
+             parameters["model"] = mapped_model
+         payload = self._prepare_payload(inputs, parameters=parameters)
+
+         return RequestParameters(
+             url=f"{base_url}/{PER_TASK_ROUTES[self.task]}",
+             task=self.task,
+             model=mapped_model,
+             json=payload,
+             data=None,
+             headers=headers,
+         )
+
+     def _map_model(self, model: Optional[str]) -> str:
+         if model is None:
+             raise ValueError("Please provide a model available on Together.")
+         if self.task not in SUPPORTED_MODELS:
+             raise ValueError(f"Task {self.task} not supported with Together.")
+         mapped_model = SUPPORTED_MODELS[self.task].get(model)
+         if mapped_model is None:
+             raise ValueError(f"Model {model} is not supported with Together for task {self.task}.")
+         return mapped_model
+
+     def get_response(self, response: Union[bytes, Dict]) -> Any:
+         return response
+
+     @abstractmethod
+     def _prepare_payload(self, inputs: Any, parameters: Dict[str, Any]) -> Dict[str, Any]: ...
+
+
+ class TogetherTextGenerationTask(TogetherTask):
+     # Handle both "text-generation" and "conversational"
+     def _prepare_payload(self, inputs: Any, parameters: Dict[str, Any]) -> Dict[str, Any]:
+         return {"messages": inputs, **{k: v for k, v in parameters.items() if v is not None}}
+
+
+ class TogetherTextToImageTask(TogetherTask):
+     def __init__(self):
+         super().__init__("text-to-image")
+
+     def _prepare_payload(self, inputs: Any, parameters: Dict[str, Any]) -> Dict[str, Any]:
+         payload = {
+             "prompt": inputs,
+             "response_format": "base64",
+             **{k: v for k, v in parameters.items() if v is not None},
+         }
+         return payload
+
+     def get_response(self, response: Union[bytes, Dict]) -> Any:
+         response_dict = _as_dict(response)
+         return base64.b64decode(response_dict["data"][0]["b64_json"])
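
Not part of the diff: a hedged sketch of the text-to-image path, which requests `response_format="base64"` and lets `get_response()` decode the payload to raw image bytes before the client builds an image object. It assumes the new `provider` argument on `InferenceClient`; prompt and output path are illustrative.

```python
# Illustrative sketch only: Together text-to-image, with base64 output decoded by the helper above.
from huggingface_hub import InferenceClient

client = InferenceClient(provider="together")
image = client.text_to_image(
    "A watercolor painting of a lighthouse at dawn",
    model="black-forest-labs/FLUX.1-dev",  # present in the text-to-image mapping above
)
image.save("lighthouse.png")
```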
huggingface_hub/py.typed (file without changes)
huggingface_hub/repocard.py
@@ -171,7 +171,7 @@ class RepoCard:
         ```
         """
 
-        if Path(repo_id_or_path).exists():
+        if Path(repo_id_or_path).is_file():
             card_path = Path(repo_id_or_path)
         elif isinstance(repo_id_or_path, str):
             card_path = Path(
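
Not part of the diff: a hedged illustration of what the `exists()` to `is_file()` switch changes. The local path is hypothetical; the repo_id example follows the usual `RepoCard.load` usage.

```python
# Illustrative sketch only. With is_file(), only an actual file is treated as a local card;
# a string that happens to match an existing *directory* now falls through to the repo_id
# branch and is resolved on the Hub instead of being opened as a local card.
from huggingface_hub import RepoCard

local_card = RepoCard.load("path/to/README.md")  # hypothetical local file: read from disk
remote_card = RepoCard.load("gpt2")              # repo_id: the README is fetched from the Hub
```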
huggingface_hub/repocard_data.py
@@ -221,7 +221,8 @@ class CardData:
 
     def get(self, key: str, default: Any = None) -> Any:
         """Get value for a given metadata key."""
-        return self.__dict__.get(key, default)
+        value = self.__dict__.get(key)
+        return default if value is None else value
 
     def pop(self, key: str, default: Any = None) -> Any:
         """Pop value for a given metadata key."""
huggingface_hub/serialization/_base.py
@@ -164,7 +164,7 @@ def split_state_dict_into_shards_factory(
     tensor_name_to_filename = {}
     filename_to_tensors = {}
     for idx, shard in enumerate(shard_list):
-        filename = filename_pattern.format(suffix=f"-{idx+1:05d}-of-{nb_shards:05d}")
+        filename = filename_pattern.format(suffix=f"-{idx + 1:05d}-of-{nb_shards:05d}")
         for key in shard:
             tensor_name_to_filename[key] = filename
         filename_to_tensors[filename] = list(shard.keys())
huggingface_hub/serialization/_torch.py
@@ -649,7 +649,7 @@ def load_state_dict_from_file(
         from torch import load
     except ImportError as e:
         raise ImportError(
-            "Please install `torch` to load torch tensors. " "You can install it with `pip install torch`."
+            "Please install `torch` to load torch tensors. You can install it with `pip install torch`."
         ) from e
     # Add additional kwargs, mmap is only supported in torch >= 2.1.0
     additional_kwargs = {}
huggingface_hub/utils/_fixes.py
@@ -13,6 +13,7 @@ import os
 import shutil
 import stat
 import tempfile
+import time
 from functools import partial
 from pathlib import Path
 from typing import Callable, Generator, Optional, Union
@@ -83,7 +84,9 @@ def _set_write_permission_and_retry(func, path, excinfo):
 
 
 @contextlib.contextmanager
-def WeakFileLock(lock_file: Union[str, Path]) -> Generator[BaseFileLock, None, None]:
+def WeakFileLock(
+    lock_file: Union[str, Path], *, timeout: Optional[float] = None
+) -> Generator[BaseFileLock, None, None]:
     """A filelock with some custom logic.
 
     This filelock is weaker than the default filelock in that:
@@ -91,31 +94,40 @@ def WeakFileLock(lock_file: Union[str, Path]) -> Generator[BaseFileLock, None, N
     2. It will default to a SoftFileLock if the filesystem does not support flock.
 
     An INFO log message is emitted every 10 seconds if the lock is not acquired immediately.
+    If a timeout is provided, a `filelock.Timeout` exception is raised if the lock is not acquired within the timeout.
     """
-    lock = FileLock(lock_file, timeout=constants.FILELOCK_LOG_EVERY_SECONDS)
+    log_interval = constants.FILELOCK_LOG_EVERY_SECONDS
+    lock = FileLock(lock_file, timeout=log_interval)
+    start_time = time.time()
+
     while True:
+        elapsed_time = time.time() - start_time
+        if timeout is not None and elapsed_time >= timeout:
+            raise Timeout(str(lock_file))
+
         try:
-            lock.acquire()
+            lock.acquire(timeout=min(log_interval, timeout - elapsed_time) if timeout else log_interval)
         except Timeout:
-            logger.info("still waiting to acquire lock on %s", lock_file)
+            logger.info(
+                f"Still waiting to acquire lock on {lock_file} (elapsed: {time.time() - start_time:.1f} seconds)"
+            )
         except NotImplementedError as e:
             if "use SoftFileLock instead" in str(e):
-                # It's possible that the system does support flock, expect for one partition or filesystem.
-                # In this case, let's default to a SoftFileLock.
                 logger.warning(
                     "FileSystem does not appear to support flock. Falling back to SoftFileLock for %s", lock_file
                 )
-                lock = SoftFileLock(lock_file, timeout=constants.FILELOCK_LOG_EVERY_SECONDS)
+                lock = SoftFileLock(lock_file, timeout=log_interval)
                 continue
         else:
            break
 
-    yield lock
-
     try:
-        return lock.release()
-    except OSError:
+        yield lock
+    finally:
        try:
-            Path(lock_file).unlink()
+            lock.release()
        except OSError:
-            pass
+            try:
+                Path(lock_file).unlink()
+            except OSError:
+                pass
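
Not part of the diff: a sketch of how the new `timeout` parameter behaves. `WeakFileLock` is an internal helper, so the import path below points at its private module and the lock path is purely illustrative.

```python
# Illustrative sketch only: bounded wait on the weak file lock introduced by this change.
from pathlib import Path

from filelock import Timeout

from huggingface_hub.utils._fixes import WeakFileLock  # internal helper, not public API

lock_path = Path("/tmp/example-download.lock")
try:
    with WeakFileLock(lock_path, timeout=5.0):
        pass  # critical section; at most ~5 seconds are spent waiting for the lock
except Timeout:
    print(f"Could not acquire {lock_path} within 5 seconds")
```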
huggingface_hub/utils/_http.py
@@ -338,9 +338,9 @@ def fix_hf_endpoint_in_url(url: str, endpoint: Optional[str]) -> str:
 
     This is useful when using a proxy and the Hugging Face Hub returns a URL with the default endpoint.
     """
-    endpoint = endpoint or constants.ENDPOINT
+    endpoint = endpoint.rstrip("/") if endpoint else constants.ENDPOINT
     # check if a proxy has been set => if yes, update the returned URL to use the proxy
-    if endpoint not in (None, constants._HF_DEFAULT_ENDPOINT, constants._HF_DEFAULT_STAGING_ENDPOINT):
+    if endpoint not in (constants._HF_DEFAULT_ENDPOINT, constants._HF_DEFAULT_STAGING_ENDPOINT):
         url = url.replace(constants._HF_DEFAULT_ENDPOINT, endpoint)
         url = url.replace(constants._HF_DEFAULT_STAGING_ENDPOINT, endpoint)
     return url
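
Not part of the diff: a small sketch of the trailing-slash normalization, calling the private helper directly. The proxy endpoint URL is made up for illustration.

```python
# Illustrative sketch only: a custom endpoint with a trailing slash no longer produces a double slash.
from huggingface_hub.utils._http import fix_hf_endpoint_in_url  # private helper shown in the hunk above

url = "https://huggingface.co/api/models/bert-base-uncased"
print(fix_hf_endpoint_in_url(url, endpoint="https://hub-proxy.example.com/"))
# 0.27.x: https://hub-proxy.example.com//api/models/bert-base-uncased
# 0.28.0: https://hub-proxy.example.com/api/models/bert-base-uncased
```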
@@ -513,7 +513,7 @@ def _format(error_type: Type[HfHubHTTPError], custom_message: str, response: Res
             server_errors.append(response.text)
 
     # Strip all server messages
-    server_errors = [line.strip() for line in server_errors if line.strip()]
+    server_errors = [str(line).strip() for line in server_errors if str(line).strip()]
 
     # Deduplicate server messages (keep order)
     # taken from https://stackoverflow.com/a/17016257
huggingface_hub/utils/logging.py
@@ -59,7 +59,7 @@ def _get_default_logging_level():
             return log_levels[env_level_str]
         else:
             logging.getLogger().warning(
-                f"Unknown option HF_HUB_VERBOSITY={env_level_str}, has to be one of: { ', '.join(log_levels.keys()) }"
+                f"Unknown option HF_HUB_VERBOSITY={env_level_str}, has to be one of: {', '.join(log_levels.keys())}"
             )
     return _default_log_level
 
{huggingface_hub-0.27.0rc1.dist-info → huggingface_hub-0.28.0.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: huggingface-hub
-Version: 0.27.0rc1
+Version: 0.28.0
 Summary: Client library to download and publish models, datasets and other repos on the huggingface.co hub
 Home-page: https://github.com/huggingface/huggingface_hub
 Author: Hugging Face, Inc.
@@ -51,7 +51,7 @@ Requires-Dist: Pillow; extra == "all"
 Requires-Dist: gradio>=4.0.0; extra == "all"
 Requires-Dist: numpy; extra == "all"
 Requires-Dist: fastapi; extra == "all"
-Requires-Dist: ruff>=0.5.0; extra == "all"
+Requires-Dist: ruff>=0.9.0; extra == "all"
 Requires-Dist: mypy==1.5.1; extra == "all"
 Requires-Dist: libcst==1.4.0; extra == "all"
 Requires-Dist: typing-extensions>=4.8.0; extra == "all"
@@ -82,7 +82,7 @@ Requires-Dist: Pillow; extra == "dev"
 Requires-Dist: gradio>=4.0.0; extra == "dev"
 Requires-Dist: numpy; extra == "dev"
 Requires-Dist: fastapi; extra == "dev"
-Requires-Dist: ruff>=0.5.0; extra == "dev"
+Requires-Dist: ruff>=0.9.0; extra == "dev"
 Requires-Dist: mypy==1.5.1; extra == "dev"
 Requires-Dist: libcst==1.4.0; extra == "dev"
 Requires-Dist: typing-extensions>=4.8.0; extra == "dev"
@@ -101,7 +101,7 @@ Requires-Dist: hf-transfer>=0.1.4; extra == "hf-transfer"
 Provides-Extra: inference
 Requires-Dist: aiohttp; extra == "inference"
 Provides-Extra: quality
-Requires-Dist: ruff>=0.5.0; extra == "quality"
+Requires-Dist: ruff>=0.9.0; extra == "quality"
 Requires-Dist: mypy==1.5.1; extra == "quality"
 Requires-Dist: libcst==1.4.0; extra == "quality"
 Provides-Extra: tensorflow