huggingface-hub 0.35.2 (py3-none-any.whl) → 0.36.0rc0 (py3-none-any.whl)

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of huggingface-hub has been flagged for review by the registry.

Files changed (43)
  1. huggingface_hub/__init__.py +7 -1
  2. huggingface_hub/_commit_api.py +125 -65
  3. huggingface_hub/_commit_scheduler.py +4 -7
  4. huggingface_hub/_jobs_api.py +1 -1
  5. huggingface_hub/_login.py +9 -15
  6. huggingface_hub/_tensorboard_logger.py +2 -5
  7. huggingface_hub/_webhooks_server.py +9 -21
  8. huggingface_hub/cli/download.py +2 -2
  9. huggingface_hub/cli/repo_files.py +1 -1
  10. huggingface_hub/cli/upload.py +1 -1
  11. huggingface_hub/cli/upload_large_folder.py +1 -1
  12. huggingface_hub/community.py +16 -8
  13. huggingface_hub/fastai_utils.py +22 -32
  14. huggingface_hub/file_download.py +17 -20
  15. huggingface_hub/hf_api.py +514 -541
  16. huggingface_hub/hf_file_system.py +45 -40
  17. huggingface_hub/inference/_client.py +28 -49
  18. huggingface_hub/inference/_generated/_async_client.py +28 -49
  19. huggingface_hub/inference/_generated/types/image_to_image.py +6 -2
  20. huggingface_hub/inference/_mcp/agent.py +2 -5
  21. huggingface_hub/inference/_mcp/mcp_client.py +6 -8
  22. huggingface_hub/inference/_providers/__init__.py +5 -0
  23. huggingface_hub/inference/_providers/_common.py +1 -0
  24. huggingface_hub/inference/_providers/clarifai.py +13 -0
  25. huggingface_hub/inference/_providers/fal_ai.py +2 -0
  26. huggingface_hub/keras_mixin.py +3 -6
  27. huggingface_hub/lfs.py +12 -4
  28. huggingface_hub/repocard.py +12 -16
  29. huggingface_hub/repository.py +15 -21
  30. huggingface_hub/serialization/_base.py +3 -6
  31. huggingface_hub/serialization/_tensorflow.py +3 -6
  32. huggingface_hub/serialization/_torch.py +17 -35
  33. huggingface_hub/utils/_cache_manager.py +41 -71
  34. huggingface_hub/utils/_chunk_utils.py +2 -3
  35. huggingface_hub/utils/_http.py +29 -34
  36. huggingface_hub/utils/_validators.py +2 -2
  37. huggingface_hub/utils/logging.py +8 -11
  38. {huggingface_hub-0.35.2.dist-info → huggingface_hub-0.36.0rc0.dist-info}/METADATA +2 -2
  39. {huggingface_hub-0.35.2.dist-info → huggingface_hub-0.36.0rc0.dist-info}/RECORD +43 -42
  40. {huggingface_hub-0.35.2.dist-info → huggingface_hub-0.36.0rc0.dist-info}/LICENSE +0 -0
  41. {huggingface_hub-0.35.2.dist-info → huggingface_hub-0.36.0rc0.dist-info}/WHEEL +0 -0
  42. {huggingface_hub-0.35.2.dist-info → huggingface_hub-0.36.0rc0.dist-info}/entry_points.txt +0 -0
  43. {huggingface_hub-0.35.2.dist-info → huggingface_hub-0.36.0rc0.dist-info}/top_level.txt +0 -0
huggingface_hub/lfs.py CHANGED
@@ -108,7 +108,8 @@ def post_lfs_batch_info(
     revision: Optional[str] = None,
     endpoint: Optional[str] = None,
     headers: Optional[Dict[str, str]] = None,
-) -> Tuple[List[dict], List[dict]]:
+    transfers: Optional[List[str]] = None,
+) -> Tuple[List[dict], List[dict], Optional[str]]:
     """
     Requests the LFS batch endpoint to retrieve upload instructions

@@ -127,11 +128,14 @@ def post_lfs_batch_info(
             The git revision to upload to.
         headers (`dict`, *optional*):
             Additional headers to include in the request
+        transfers (`list`, *optional*):
+            List of transfer methods to use. Defaults to ["basic", "multipart"].

     Returns:
-        `LfsBatchInfo`: 2-tuple:
+        `LfsBatchInfo`: 3-tuple:
             - First element is the list of upload instructions from the server
-            - Second element is an list of errors, if any
+            - Second element is a list of errors, if any
+            - Third element is the chosen transfer adapter if provided by the server (e.g. "basic", "multipart", "xet")

     Raises:
         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
@@ -146,7 +150,7 @@ def post_lfs_batch_info(
     batch_url = f"{endpoint}/{url_prefix}{repo_id}.git/info/lfs/objects/batch"
     payload: Dict = {
         "operation": "upload",
-        "transfers": ["basic", "multipart"],
+        "transfers": transfers if transfers is not None else ["basic", "multipart"],
         "objects": [
             {
                 "oid": upload.sha256.hex(),
@@ -172,9 +176,13 @@ def post_lfs_batch_info(
     if not isinstance(objects, list):
         raise ValueError("Malformed response from server")

+    chosen_transfer = batch_info.get("transfer")
+    chosen_transfer = chosen_transfer if isinstance(chosen_transfer, str) else None
+
     return (
         [_validate_batch_actions(obj) for obj in objects if "error" not in obj],
         [_validate_batch_error(obj) for obj in objects if "error" in obj],
+        chosen_transfer,
     )

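Editor's note: the new `transfers` parameter and third tuple element change the call contract for `post_lfs_batch_info`. A minimal sketch of the adapted call pattern follows; the file name and repo id are illustrative placeholders, and parameters not visible in the hunks above are assumed from the library's public source:

```python
from huggingface_hub.lfs import UploadInfo, post_lfs_batch_info

upload_infos = [UploadInfo.from_path("model.safetensors")]  # illustrative local file

# Offer "xet" in addition to the defaults; the server answers with the
# transfer adapter it actually selected (or None if it omitted the field).
batch_actions, batch_errors, chosen_transfer = post_lfs_batch_info(
    upload_infos=upload_infos,
    token=None,                  # fall back to the locally saved token
    repo_type="model",
    repo_id="my-user/my-model",  # illustrative repo id
    transfers=["basic", "multipart", "xet"],
)
if chosen_transfer == "xet":
    ...  # route the upload through the xet-specific path
```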
huggingface_hub/repocard.py CHANGED
@@ -65,13 +65,11 @@ class RepoCard:
     '\\n# My repo\\n'

     ```
-    <Tip>
-    Raises the following error:
-
-        - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
-          when the content of the repo card metadata is not a dictionary.
-
-    </Tip>
+    > [!TIP]
+    > Raises the following error:
+    >
+    > - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+    >   when the content of the repo card metadata is not a dictionary.
     """

     # Set the content of the RepoCard, as well as underlying .data and .text attributes.
@@ -199,15 +197,13 @@ class RepoCard:
             The type of Hugging Face repo to push to. Options are "model", "dataset", and "space".
             If this function is called from a child class, the default will be the child class's `repo_type`.

-        <Tip>
-        Raises the following errors:
-
-            - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
-              if the card fails validation checks.
-            - [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
-              if the request to the Hub API fails for any other reason.
-
-        </Tip>
+        > [!TIP]
+        > Raises the following errors:
+        >
+        > - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+        >   if the card fails validation checks.
+        > - [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
+        >   if the request to the Hub API fails for any other reason.
         """

         # If repo type is provided, otherwise, use the repo type of the card.
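Editor's note: for context, a small sketch of the `RepoCard` API these docstrings describe; the YAML content and repo id are illustrative:

```python
from huggingface_hub import RepoCard

# The metadata block must parse to a dictionary, otherwise a ValueError is raised.
card = RepoCard("---\nlicense: mit\n---\n\n# My repo\n")
print(card.data.license)  # "mit"
# card.push_to_hub("my-user/my-model")  # validates the card, then uploads it as README.md
```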
huggingface_hub/repository.py CHANGED
@@ -431,14 +431,11 @@ class Repository:
     dataset repos, though not a lot here (if any) is actually specific to
     huggingface.co.

-    <Tip warning={true}>
-
-    [`Repository`] is deprecated in favor of the http-based alternatives implemented in
-    [`HfApi`]. Given its large adoption in legacy code, the complete removal of
-    [`Repository`] will only happen in release `v1.0`. For more details, please read
-    https://huggingface.co/docs/huggingface_hub/concepts/git_vs_http.
-
-    </Tip>
+    > [!WARNING]
+    > [`Repository`] is deprecated in favor of the http-based alternatives implemented in
+    > [`HfApi`]. Given its large adoption in legacy code, the complete removal of
+    > [`Repository`] will only happen in release `v1.0`. For more details, please read
+    > https://huggingface.co/docs/huggingface_hub/concepts/git_vs_http.
     """

     command_queue: List[CommandInProgress]
@@ -620,19 +617,16 @@ class Repository:
                 - `None`, which would retrieve the value of
                   `self.huggingface_token`.

-        <Tip>
-
-        Raises the following error:
-
-            - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
-              if an organization token (starts with "api_org") is passed. Use must use
-              your own personal access token (see https://hf.co/settings/tokens).
-
-            - [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
-              if you are trying to clone the repository in a non-empty folder, or if the
-              `git` operations raise errors.
-
-        </Tip>
+        > [!TIP]
+        > Raises the following error:
+        >
+        > - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+        >   if an organization token (starts with "api_org") is passed. Use must use
+        >   your own personal access token (see https://hf.co/settings/tokens).
+        >
+        > - [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
+        >   if you are trying to clone the repository in a non-empty folder, or if the
+        >   `git` operations raise errors.
         """
         token = (
             token  # str -> use it
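Editor's note: since the warning above points users away from `Repository`, here is a hedged sketch of the HTTP-based replacement it recommends; the repo id and file names are illustrative:

```python
from huggingface_hub import HfApi

api = HfApi()
# Rough equivalent of clone + commit + push with the deprecated Repository class:
api.upload_file(
    path_or_fileobj="README.md",
    path_in_repo="README.md",
    repo_id="my-user/my-model",  # illustrative
    commit_message="Update README",
)
```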
huggingface_hub/serialization/_base.py CHANGED
@@ -62,12 +62,9 @@ def split_state_dict_into_shards_factory(
     have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
     [6+2+2GB], [6+2GB], [6GB].

-    <Tip warning={true}>
-
-    If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
-    size greater than `max_shard_size`.
-
-    </Tip>
+    > [!WARNING]
+    > If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
+    > size greater than `max_shard_size`.

     Args:
         state_dict (`Dict[str, Tensor]`):
huggingface_hub/serialization/_tensorflow.py CHANGED
@@ -39,12 +39,9 @@ def split_tf_state_dict_into_shards(
     have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
     [6+2+2GB], [6+2GB], [6GB].

-    <Tip warning={true}>
-
-    If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
-    size greater than `max_shard_size`.
-
-    </Tip>
+    > [!WARNING]
+    > If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
+    > size greater than `max_shard_size`.

     Args:
         state_dict (`Dict[str, Tensor]`):
huggingface_hub/serialization/_torch.py CHANGED
@@ -63,18 +63,12 @@ def save_torch_model(

     Before saving the model, the `save_directory` is cleaned from any previous shard files.

-    <Tip warning={true}>
+    > [!WARNING]
+    > If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
+    > size greater than `max_shard_size`.

-    If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
-    size greater than `max_shard_size`.
-
-    </Tip>
-
-    <Tip warning={true}>
-
-    If your model is a `transformers.PreTrainedModel`, you should pass `model._tied_weights_keys` as `shared_tensors_to_discard` to properly handle shared tensors saving. This ensures the correct duplicate tensors are discarded during saving.
-
-    </Tip>
+    > [!WARNING]
+    > If your model is a `transformers.PreTrainedModel`, you should pass `model._tied_weights_keys` as `shared_tensors_to_discard` to properly handle shared tensors saving. This ensures the correct duplicate tensors are discarded during saving.

     Args:
         model (`torch.nn.Module`):
@@ -163,18 +157,12 @@ def save_torch_state_dict(

     Before saving the model, the `save_directory` is cleaned from any previous shard files.

-    <Tip warning={true}>
-
-    If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
-    size greater than `max_shard_size`.
-
-    </Tip>
+    > [!WARNING]
+    > If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
+    > size greater than `max_shard_size`.

-    <Tip warning={true}>
-
-    If your model is a `transformers.PreTrainedModel`, you should pass `model._tied_weights_keys` as `shared_tensors_to_discard` to properly handle shared tensors saving. This ensures the correct duplicate tensors are discarded during saving.
-
-    </Tip>
+    > [!WARNING]
+    > If your model is a `transformers.PreTrainedModel`, you should pass `model._tied_weights_keys` as `shared_tensors_to_discard` to properly handle shared tensors saving. This ensures the correct duplicate tensors are discarded during saving.

     Args:
         state_dict (`Dict[str, torch.Tensor]`):
@@ -278,7 +266,7 @@ def save_torch_state_dict(
     safe_file_kwargs = {"metadata": per_file_metadata} if safe_serialization else {}
     for filename, tensors in state_dict_split.filename_to_tensors.items():
         shard = {tensor: state_dict[tensor] for tensor in tensors}
-        save_file_fn(shard, os.path.join(save_directory, filename), **safe_file_kwargs)
+        save_file_fn(shard, os.path.join(save_directory, filename), **safe_file_kwargs)  # ty: ignore[invalid-argument-type]
         logger.debug(f"Shard saved to {filename}")

     # Save the index (if any)
@@ -314,19 +302,13 @@ def split_torch_state_dict_into_shards(
     [6+2+2GB], [6+2GB], [6GB].


-    <Tip>
-
-    To save a model state dictionary to the disk, see [`save_torch_state_dict`]. This helper uses
-    `split_torch_state_dict_into_shards` under the hood.
-
-    </Tip>
-
-    <Tip warning={true}>
-
-    If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
-    size greater than `max_shard_size`.
+    > [!TIP]
+    > To save a model state dictionary to the disk, see [`save_torch_state_dict`]. This helper uses
+    > `split_torch_state_dict_into_shards` under the hood.

-    </Tip>
+    > [!WARNING]
+    > If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
+    > size greater than `max_shard_size`.

     Args:
         state_dict (`Dict[str, torch.Tensor]`):
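Editor's note: the sharding warnings repeated in this file are easiest to see in use. A hedged sketch of `save_torch_state_dict`; the directory and shard size are illustrative, and `_tied_weights_keys` only exists on `transformers` models:

```python
import torch
from huggingface_hub import save_torch_state_dict

model = torch.nn.Linear(8, 8)  # stand-in for any torch.nn.Module

save_torch_state_dict(
    state_dict=model.state_dict(),
    save_directory="./checkpoint",  # previous shard files here are cleaned first
    max_shard_size="5GB",           # a single tensor above 5GB still becomes its own, larger shard
    # For a transformers.PreTrainedModel, also pass:
    # shared_tensors_to_discard=model._tied_weights_keys,
)
```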
huggingface_hub/utils/_cache_manager.py CHANGED
@@ -57,13 +57,10 @@ class CachedFileInfo:
         blob_last_modified (`float`):
             Timestamp of the last time the blob file has been modified/created.

-    <Tip warning={true}>
-
-    `blob_last_accessed` and `blob_last_modified` reliability can depend on the OS you
-    are using. See [python documentation](https://docs.python.org/3/library/os.html#os.stat_result)
-    for more details.
-
-    </Tip>
+    > [!WARNING]
+    > `blob_last_accessed` and `blob_last_modified` reliability can depend on the OS you
+    > are using. See [python documentation](https://docs.python.org/3/library/os.html#os.stat_result)
+    > for more details.
     """

     file_name: str
@@ -130,20 +127,14 @@ class CachedRevisionInfo:
         last_modified (`float`):
             Timestamp of the last time the revision has been created/modified.

-    <Tip warning={true}>
-
-    `last_accessed` cannot be determined correctly on a single revision as blob files
-    are shared across revisions.
-
-    </Tip>
-
-    <Tip warning={true}>
-
-    `size_on_disk` is not necessarily the sum of all file sizes because of possible
-    duplicated files. Besides, only blobs are taken into account, not the (negligible)
-    size of folders and symlinks.
+    > [!WARNING]
+    > `last_accessed` cannot be determined correctly on a single revision as blob files
+    > are shared across revisions.

-    </Tip>
+    > [!WARNING]
+    > `size_on_disk` is not necessarily the sum of all file sizes because of possible
+    > duplicated files. Besides, only blobs are taken into account, not the (negligible)
+    > size of folders and symlinks.
     """

     commit_hash: str
@@ -203,21 +194,15 @@ class CachedRepoInfo:
         last_modified (`float`):
             Timestamp of the last time a blob file of the repo has been modified/created.

-    <Tip warning={true}>
+    > [!WARNING]
+    > `size_on_disk` is not necessarily the sum of all revisions sizes because of
+    > duplicated files. Besides, only blobs are taken into account, not the (negligible)
+    > size of folders and symlinks.

-    `size_on_disk` is not necessarily the sum of all revisions sizes because of
-    duplicated files. Besides, only blobs are taken into account, not the (negligible)
-    size of folders and symlinks.
-
-    </Tip>
-
-    <Tip warning={true}>
-
-    `last_accessed` and `last_modified` reliability can depend on the OS you are using.
-    See [python documentation](https://docs.python.org/3/library/os.html#os.stat_result)
-    for more details.
-
-    </Tip>
+    > [!WARNING]
+    > `last_accessed` and `last_modified` reliability can depend on the OS you are using.
+    > See [python documentation](https://docs.python.org/3/library/os.html#os.stat_result)
+    > for more details.
     """

     repo_id: str
@@ -305,20 +290,14 @@ class DeleteCacheStrategy:
     def execute(self) -> None:
         """Execute the defined strategy.

-        <Tip warning={true}>
-
-        If this method is interrupted, the cache might get corrupted. Deletion order is
-        implemented so that references and symlinks are deleted before the actual blob
-        files.
+        > [!WARNING]
+        > If this method is interrupted, the cache might get corrupted. Deletion order is
+        > implemented so that references and symlinks are deleted before the actual blob
+        > files.

-        </Tip>
-
-        <Tip warning={true}>
-
-        This method is irreversible. If executed, cached files are erased and must be
-        downloaded again.
-
-        </Tip>
+        > [!WARNING]
+        > This method is irreversible. If executed, cached files are erased and must be
+        > downloaded again.
         """
         # Deletion order matters. Blobs are deleted in last so that the user can't end
         # up in a state where a `ref`` refers to a missing snapshot or a snapshot
@@ -360,12 +339,9 @@ class HFCacheInfo:
     Those exceptions are captured so that the scan can continue. Corrupted repos
     are skipped from the scan.

-    <Tip warning={true}>
-
-    Here `size_on_disk` is equal to the sum of all repo sizes (only blobs). However if
-    some cached repos are corrupted, their sizes are not taken into account.
-
-    </Tip>
+    > [!WARNING]
+    > Here `size_on_disk` is equal to the sum of all repo sizes (only blobs). However if
+    > some cached repos are corrupted, their sizes are not taken into account.
     """

     size_on_disk: int
@@ -412,13 +388,10 @@ class HFCacheInfo:
         Cache deletion done. Saved 8.6G.
         ```

-        <Tip warning={true}>
-
-        `delete_revisions` returns a [`~utils.DeleteCacheStrategy`] object that needs to
-        be executed. The [`~utils.DeleteCacheStrategy`] is not meant to be modified but
-        allows having a dry run before actually executing the deletion.
-
-        </Tip>
+        > [!WARNING]
+        > `delete_revisions` returns a [`~utils.DeleteCacheStrategy`] object that needs to
+        > be executed. The [`~utils.DeleteCacheStrategy`] is not meant to be modified but
+        > allows having a dry run before actually executing the deletion.
         """
         hashes_to_delete: Set[str] = set(revisions)

@@ -652,17 +625,14 @@ def scan_cache_dir(cache_dir: Optional[Union[str, Path]] = None) -> HFCacheInfo:
         cache_dir (`str` or `Path`, `optional`):
             Cache directory to cache. Defaults to the default HF cache directory.

-    <Tip warning={true}>
-
-    Raises:
-
-        `CacheNotFound`
-          If the cache directory does not exist.
-
-        [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
-          If the cache directory is a file, instead of a directory.
-
-    </Tip>
+    > [!WARNING]
+    > Raises:
+    >
+    > `CacheNotFound`
+    >   If the cache directory does not exist.
+    >
+    > [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+    >   If the cache directory is a file, instead of a directory.

     Returns: a [`~HFCacheInfo`] object.
     """
huggingface_hub/utils/_chunk_utils.py CHANGED
@@ -49,9 +49,8 @@ def chunk_iterable(iterable: Iterable[T], chunk_size: int) -> Iterable[Iterable[T]]:
         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
             If `chunk_size` <= 0.

-    <Tip warning={true}>
-    The last chunk can be smaller than `chunk_size`.
-    </Tip>
+    > [!WARNING]
+    > The last chunk can be smaller than `chunk_size`.
     """
     if not isinstance(chunk_size, int) or chunk_size <= 0:
         raise ValueError("`chunk_size` must be a strictly positive integer (>0).")
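Editor's note: a quick sketch of the behavior the warning describes, assuming `chunk_iterable` is re-exported from `huggingface_hub.utils` as in recent releases:

```python
from huggingface_hub.utils import chunk_iterable

for chunk in chunk_iterable(range(10), chunk_size=4):
    print(list(chunk))
# [0, 1, 2, 3]
# [4, 5, 6, 7]
# [8, 9]  <- the last chunk can be smaller than chunk_size
```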
huggingface_hub/utils/_http.py CHANGED
@@ -219,6 +219,7 @@ def http_backoff(
     retry_on_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]] = (
         requests.Timeout,
         requests.ConnectionError,
+        requests.exceptions.ChunkedEncodingError,
     ),
     retry_on_status_codes: Union[int, Tuple[int, ...]] = (500, 502, 503, 504),
     **kwargs,
@@ -248,7 +249,7 @@ def http_backoff(
             Maximum duration (in seconds) to wait before retrying.
         retry_on_exceptions (`Type[Exception]` or `Tuple[Type[Exception]]`, *optional*):
             Define which exceptions must be caught to retry the request. Can be a single type or a tuple of types.
-            By default, retry on `requests.Timeout` and `requests.ConnectionError`.
+            By default, retry on `requests.Timeout`, `requests.ConnectionError` and `requests.exceptions.ChunkedEncodingError`.
         retry_on_status_codes (`int` or `Tuple[int]`, *optional*, defaults to `(500, 502, 503, 504)`):
             Define on which status codes the request must be retried. By default, 5xx errors are retried.
         **kwargs (`dict`, *optional*):
@@ -267,17 +268,14 @@ def http_backoff(
     >>> response.raise_for_status()
     ```

-    <Tip warning={true}>
-
-    When using `requests` it is possible to stream data by passing an iterator to the
-    `data` argument. On http backoff this is a problem as the iterator is not reset
-    after a failed call. This issue is mitigated for file objects or any IO streams
-    by saving the initial position of the cursor (with `data.tell()`) and resetting the
-    cursor between each call (with `data.seek()`). For arbitrary iterators, http backoff
-    will fail. If this is a hard constraint for you, please let us know by opening an
-    issue on [Github](https://github.com/huggingface/huggingface_hub).
-
-    </Tip>
+    > [!WARNING]
+    > When using `requests` it is possible to stream data by passing an iterator to the
+    > `data` argument. On http backoff this is a problem as the iterator is not reset
+    > after a failed call. This issue is mitigated for file objects or any IO streams
+    > by saving the initial position of the cursor (with `data.tell()`) and resetting the
+    > cursor between each call (with `data.seek()`). For arbitrary iterators, http backoff
+    > will fail. If this is a hard constraint for you, please let us know by opening an
+    > issue on [Github](https://github.com/huggingface/huggingface_hub).
     """
     if isinstance(retry_on_exceptions, type):  # Tuple from single exception type
         retry_on_exceptions = (retry_on_exceptions,)
@@ -380,28 +378,25 @@ def hf_raise_for_status(response: Response, endpoint_name: Optional[str] = None)
             Name of the endpoint that has been called. If provided, the error message
             will be more complete.

-    <Tip warning={true}>
-
-    Raises when the request has failed:
-
-        - [`~utils.RepositoryNotFoundError`]
-          If the repository to download from cannot be found. This may be because it
-          doesn't exist, because `repo_type` is not set correctly, or because the repo
-          is `private` and you do not have access.
-        - [`~utils.GatedRepoError`]
-          If the repository exists but is gated and the user is not on the authorized
-          list.
-        - [`~utils.RevisionNotFoundError`]
-          If the repository exists but the revision couldn't be find.
-        - [`~utils.EntryNotFoundError`]
-          If the repository exists but the entry (e.g. the requested file) couldn't be
-          find.
-        - [`~utils.BadRequestError`]
-          If request failed with a HTTP 400 BadRequest error.
-        - [`~utils.HfHubHTTPError`]
-          If request failed for a reason not listed above.
-
-    </Tip>
+    > [!WARNING]
+    > Raises when the request has failed:
+    >
+    > - [`~utils.RepositoryNotFoundError`]
+    >   If the repository to download from cannot be found. This may be because it
+    >   doesn't exist, because `repo_type` is not set correctly, or because the repo
+    >   is `private` and you do not have access.
+    > - [`~utils.GatedRepoError`]
+    >   If the repository exists but is gated and the user is not on the authorized
+    >   list.
+    > - [`~utils.RevisionNotFoundError`]
+    >   If the repository exists but the revision couldn't be find.
+    > - [`~utils.EntryNotFoundError`]
+    >   If the repository exists but the entry (e.g. the requested file) couldn't be
+    >   find.
+    > - [`~utils.BadRequestError`]
+    >   If request failed with a HTTP 400 BadRequest error.
+    > - [`~utils.HfHubHTTPError`]
+    >   If request failed for a reason not listed above.
     """
     try:
         response.raise_for_status()
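Editor's note: a short sketch of the retry helper with its widened default exception tuple; the URL is illustrative:

```python
from huggingface_hub.utils import hf_raise_for_status, http_backoff

# Retries on Timeout, ConnectionError, ChunkedEncodingError and 5xx status codes,
# with exponential backoff between attempts.
response = http_backoff("GET", "https://huggingface.co/api/models")
hf_raise_for_status(response)  # raises a typed HfHubHTTPError subclass on failure
```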
huggingface_hub/utils/_validators.py CHANGED
@@ -158,8 +158,8 @@ def validate_repo_id(repo_id: str) -> None:

     if not REPO_ID_REGEX.match(repo_id):
         raise HFValidationError(
-            "Repo id must use alphanumeric chars or '-', '_', '.', '--' and '..' are"
-            " forbidden, '-' and '.' cannot start or end the name, max length is 96:"
+            "Repo id must use alphanumeric chars, '-', '_' or '.'."
+            " The name cannot start or end with '-' or '.' and the maximum length is 96:"
             f" '{repo_id}'."
         )

huggingface_hub/utils/logging.py CHANGED
@@ -109,17 +109,14 @@ def get_verbosity() -> int:
         Logging level, e.g., `huggingface_hub.logging.DEBUG` and
         `huggingface_hub.logging.INFO`.

-    <Tip>
-
-    HuggingFace Hub has following logging levels:
-
-    - `huggingface_hub.logging.CRITICAL`, `huggingface_hub.logging.FATAL`
-    - `huggingface_hub.logging.ERROR`
-    - `huggingface_hub.logging.WARNING`, `huggingface_hub.logging.WARN`
-    - `huggingface_hub.logging.INFO`
-    - `huggingface_hub.logging.DEBUG`
-
-    </Tip>
+    > [!TIP]
+    > HuggingFace Hub has following logging levels:
+    >
+    > - `huggingface_hub.logging.CRITICAL`, `huggingface_hub.logging.FATAL`
+    > - `huggingface_hub.logging.ERROR`
+    > - `huggingface_hub.logging.WARNING`, `huggingface_hub.logging.WARN`
+    > - `huggingface_hub.logging.INFO`
+    > - `huggingface_hub.logging.DEBUG`
     """
     return _get_library_root_logger().getEffectiveLevel()
 
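Editor's note: a sketch of the getter alongside its setter counterparts:

```python
from huggingface_hub import logging

logging.set_verbosity_debug()
assert logging.get_verbosity() == logging.DEBUG
```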
{huggingface_hub-0.35.2.dist-info → huggingface_hub-0.36.0rc0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: huggingface-hub
-Version: 0.35.2
+Version: 0.36.0rc0
 Summary: Client library to download and publish models, datasets and other repos on the huggingface.co hub
 Home-page: https://github.com/huggingface/huggingface_hub
 Author: Hugging Face, Inc.
@@ -236,7 +236,7 @@ If you prefer, you can also install it with [conda](https://huggingface.co/docs/
 In order to keep the package minimal by default, `huggingface_hub` comes with optional dependencies useful for some use cases. For example, if you want have a complete experience for Inference, run:

 ```bash
-pip install huggingface_hub[inference]
+pip install "huggingface_hub[inference]"
 ```

 To learn more installation and optional dependencies, check out the [installation guide](https://huggingface.co/docs/huggingface_hub/en/installation).