huggingface-hub 0.23.4__py3-none-any.whl → 0.24.0__py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.

Files changed (43)
  1. huggingface_hub/__init__.py +47 -15
  2. huggingface_hub/_commit_api.py +38 -8
  3. huggingface_hub/_inference_endpoints.py +11 -4
  4. huggingface_hub/_local_folder.py +22 -13
  5. huggingface_hub/_snapshot_download.py +12 -7
  6. huggingface_hub/_webhooks_server.py +3 -1
  7. huggingface_hub/commands/huggingface_cli.py +4 -3
  8. huggingface_hub/commands/repo_files.py +128 -0
  9. huggingface_hub/constants.py +12 -0
  10. huggingface_hub/file_download.py +127 -91
  11. huggingface_hub/hf_api.py +976 -341
  12. huggingface_hub/hf_file_system.py +30 -3
  13. huggingface_hub/hub_mixin.py +17 -6
  14. huggingface_hub/inference/_client.py +379 -43
  15. huggingface_hub/inference/_common.py +0 -2
  16. huggingface_hub/inference/_generated/_async_client.py +396 -49
  17. huggingface_hub/inference/_generated/types/__init__.py +4 -1
  18. huggingface_hub/inference/_generated/types/chat_completion.py +41 -21
  19. huggingface_hub/inference/_generated/types/feature_extraction.py +23 -5
  20. huggingface_hub/inference/_generated/types/text_generation.py +29 -0
  21. huggingface_hub/lfs.py +11 -6
  22. huggingface_hub/repocard_data.py +3 -3
  23. huggingface_hub/repository.py +6 -6
  24. huggingface_hub/serialization/__init__.py +8 -3
  25. huggingface_hub/serialization/_base.py +13 -16
  26. huggingface_hub/serialization/_tensorflow.py +4 -3
  27. huggingface_hub/serialization/_torch.py +399 -22
  28. huggingface_hub/utils/__init__.py +0 -1
  29. huggingface_hub/utils/_errors.py +1 -1
  30. huggingface_hub/utils/_fixes.py +14 -3
  31. huggingface_hub/utils/_paths.py +17 -6
  32. huggingface_hub/utils/_subprocess.py +0 -1
  33. huggingface_hub/utils/_telemetry.py +9 -1
  34. huggingface_hub/utils/endpoint_helpers.py +2 -186
  35. huggingface_hub/utils/sha.py +36 -1
  36. huggingface_hub/utils/tqdm.py +0 -1
  37. {huggingface_hub-0.23.4.dist-info → huggingface_hub-0.24.0.dist-info}/METADATA +12 -9
  38. {huggingface_hub-0.23.4.dist-info → huggingface_hub-0.24.0.dist-info}/RECORD +42 -42
  39. huggingface_hub/serialization/_numpy.py +0 -68
  40. {huggingface_hub-0.23.4.dist-info → huggingface_hub-0.24.0.dist-info}/LICENSE +0 -0
  41. {huggingface_hub-0.23.4.dist-info → huggingface_hub-0.24.0.dist-info}/WHEEL +0 -0
  42. {huggingface_hub-0.23.4.dist-info → huggingface_hub-0.24.0.dist-info}/entry_points.txt +0 -0
  43. {huggingface_hub-0.23.4.dist-info → huggingface_hub-0.24.0.dist-info}/top_level.txt +0 -0
@@ -14,20 +14,256 @@
  """Contains pytorch-specific helpers."""
 
  import importlib
+ import json
+ import os
+ import re
+ from collections import defaultdict
  from functools import lru_cache
- from typing import TYPE_CHECKING, Dict, Tuple, Union
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union
 
- from ._base import FILENAME_PATTERN, MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory
+ from .. import constants, logging
+ from ._base import MAX_SHARD_SIZE, StateDictSplit, split_state_dict_into_shards_factory
 
 
+ logger = logging.get_logger(__file__)
+
  if TYPE_CHECKING:
      import torch
 
 
+ def save_torch_model(
+     model: "torch.nn.Module",
+     save_directory: Union[str, Path],
+     *,
+     filename_pattern: Optional[str] = None,
+     force_contiguous: bool = True,
+     max_shard_size: Union[int, str] = MAX_SHARD_SIZE,
+     metadata: Optional[Dict[str, str]] = None,
+     safe_serialization: bool = True,
+ ):
+     """
+     Saves a given torch model to disk, handling sharding and shared tensors issues.
+
+     See also [`save_torch_state_dict`] to save a state dict with more flexibility.
+
+     For more information about tensor sharing, check out [this guide](https://huggingface.co/docs/safetensors/torch_shared_tensors).
+
+     The model state dictionary is split into shards so that each shard is smaller than a given size. The shards are
+     saved in the `save_directory` with the given `filename_pattern`. If the model is too big to fit in a single shard,
+     an index file is saved in the `save_directory` to indicate where each tensor is saved. This helper uses
+     [`split_torch_state_dict_into_shards`] under the hood. If `safe_serialization` is `True`, the shards are saved as
+     safetensors (the default). Otherwise, the shards are saved as pickle.
+
+     Before saving the model, the `save_directory` is cleaned from any previous shard files.
+
+     <Tip warning={true}>
+
+     If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
+     size greater than `max_shard_size`.
+
+     </Tip>
+
+     Args:
+         model (`torch.nn.Module`):
+             The model to save on disk.
+         save_directory (`str` or `Path`):
+             The directory in which the model will be saved.
+         filename_pattern (`str`, *optional*):
+             The pattern to generate the files names in which the model will be saved. Pattern must be a string that
+             can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix`
+             Defaults to `"model{suffix}.safetensors"` or `pytorch_model{suffix}.bin` depending on `safe_serialization`
+             parameter.
+         force_contiguous (`boolean`, *optional*):
+             Forcing the state_dict to be saved as contiguous tensors. This has no effect on the correctness of the
+             model, but it could potentially change performance if the layout of the tensor was chosen specifically for
+             that reason. Defaults to `True`.
+         max_shard_size (`int` or `str`, *optional*):
+             The maximum size of each shard, in bytes. Defaults to 5GB.
+         metadata (`Dict[str, str]`, *optional*):
+             Extra information to save along with the model. Some metadata will be added for each dropped tensors.
+             This information will not be enough to recover the entire shared structure but might help understanding
+             things.
+         safe_serialization (`bool`, *optional*):
+             Whether to save as safetensors, which is the default behavior. If `False`, the shards are saved as pickle.
+             Safe serialization is recommended for security reasons. Saving as pickle is deprecated and will be removed
+             in a future version.
+
+     Example:
+
+     ```py
+     >>> from huggingface_hub import save_torch_model
+     >>> model = ... # A PyTorch model
+
+     # Save state dict to "path/to/folder". The model will be split into shards of 5GB each and saved as safetensors.
+     >>> save_torch_model(model, "path/to/folder")
+
+     # Load model back
+     >>> from huggingface_hub import load_torch_model # TODO
+     >>> load_torch_model(model, "path/to/folder")
+     >>>
+     ```
+     """
+     save_torch_state_dict(
+         state_dict=model.state_dict(),
+         filename_pattern=filename_pattern,
+         force_contiguous=force_contiguous,
+         max_shard_size=max_shard_size,
+         metadata=metadata,
+         safe_serialization=safe_serialization,
+         save_directory=save_directory,
+     )
+
+
+ def save_torch_state_dict(
+     state_dict: Dict[str, "torch.Tensor"],
+     save_directory: Union[str, Path],
+     *,
+     filename_pattern: Optional[str] = None,
+     force_contiguous: bool = True,
+     max_shard_size: Union[int, str] = MAX_SHARD_SIZE,
+     metadata: Optional[Dict[str, str]] = None,
+     safe_serialization: bool = True,
+ ) -> None:
+     """
+     Save a model state dictionary to the disk, handling sharding and shared tensors issues.
+
+     See also [`save_torch_model`] to directly save a PyTorch model.
+
+     For more information about tensor sharing, check out [this guide](https://huggingface.co/docs/safetensors/torch_shared_tensors).
+
+     The model state dictionary is split into shards so that each shard is smaller than a given size. The shards are
+     saved in the `save_directory` with the given `filename_pattern`. If the model is too big to fit in a single shard,
+     an index file is saved in the `save_directory` to indicate where each tensor is saved. This helper uses
+     [`split_torch_state_dict_into_shards`] under the hood. If `safe_serialization` is `True`, the shards are saved as
+     safetensors (the default). Otherwise, the shards are saved as pickle.
+
+     Before saving the model, the `save_directory` is cleaned from any previous shard files.
+
+     <Tip warning={true}>
+
+     If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
+     size greater than `max_shard_size`.
+
+     </Tip>
+
+     Args:
+         state_dict (`Dict[str, torch.Tensor]`):
+             The state dictionary to save.
+         save_directory (`str` or `Path`):
+             The directory in which the model will be saved.
+         filename_pattern (`str`, *optional*):
+             The pattern to generate the files names in which the model will be saved. Pattern must be a string that
+             can be formatted with `filename_pattern.format(suffix=...)` and must contain the keyword `suffix`
+             Defaults to `"model{suffix}.safetensors"` or `pytorch_model{suffix}.bin` depending on `safe_serialization`
+             parameter.
+         force_contiguous (`boolean`, *optional*):
+             Forcing the state_dict to be saved as contiguous tensors. This has no effect on the correctness of the
+             model, but it could potentially change performance if the layout of the tensor was chosen specifically for
+             that reason. Defaults to `True`.
+         max_shard_size (`int` or `str`, *optional*):
+             The maximum size of each shard, in bytes. Defaults to 5GB.
+         metadata (`Dict[str, str]`, *optional*):
+             Extra information to save along with the model. Some metadata will be added for each dropped tensors.
+             This information will not be enough to recover the entire shared structure but might help understanding
+             things.
+         safe_serialization (`bool`, *optional*):
+             Whether to save as safetensors, which is the default behavior. If `False`, the shards are saved as pickle.
+             Safe serialization is recommended for security reasons. Saving as pickle is deprecated and will be removed
+             in a future version.
+
+     Example:
+
+     ```py
+     >>> from huggingface_hub import save_torch_state_dict
+     >>> model = ... # A PyTorch model
+
+     # Save state dict to "path/to/folder". The model will be split into shards of 5GB each and saved as safetensors.
+     >>> state_dict = model_to_save.state_dict()
+     >>> save_torch_state_dict(state_dict, "path/to/folder")
+     ```
+     """
+     save_directory = str(save_directory)
+
+     if filename_pattern is None:
+         filename_pattern = (
+             constants.SAFETENSORS_WEIGHTS_FILE_PATTERN
+             if safe_serialization
+             else constants.PYTORCH_WEIGHTS_FILE_PATTERN
+         )
+
+     # Imports correct library
+     if safe_serialization:
+         try:
+             from safetensors.torch import save_file as save_file_fn
+         except ImportError as e:
+             raise ImportError(
+                 "Please install `safetensors` to use safe serialization. "
+                 "You can install it with `pip install safetensors`."
+             ) from e
+
+     else:
+         from torch import save as save_file_fn  # type: ignore[assignment]
+
+         logger.warning(
+             "You are using unsafe serialization. Due to security reasons, it is recommended not to load "
+             "pickled models from untrusted sources. If you intend to share your model, we strongly recommend "
+             "using safe serialization by installing `safetensors` with `pip install safetensors`."
+         )
+
+     # Clean state dict for safetensors
+     if metadata is None:
+         metadata = {}
+     if safe_serialization:
+         state_dict = _clean_state_dict_for_safetensors(state_dict, metadata, force_contiguous=force_contiguous)
+
+     # Split dict
+     state_dict_split = split_torch_state_dict_into_shards(
+         state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size
+     )
+
+     # Clean the folder from previous save
+     existing_files_regex = re.compile(filename_pattern.format(suffix=r"(-\d{5}-of-\d{5})?") + r"(\.index\.json)?")
+     for filename in os.listdir(save_directory):
+         if existing_files_regex.match(filename):
+             try:
+                 logger.debug(f"Removing existing file '{filename}' from folder.")
+                 os.remove(os.path.join(save_directory, filename))
+             except Exception as e:
+                 logger.warning(f"Error when trying to remove existing '{filename}' from folder: {e}. Continuing...")
+
+     # Save each shard
+     per_file_metadata = {"format": "pt"}
+     if not state_dict_split.is_sharded:
+         per_file_metadata.update(metadata)
+     safe_file_kwargs = {"metadata": per_file_metadata} if safe_serialization else {}
+     for filename, tensors in state_dict_split.filename_to_tensors.items():
+         shard = {tensor: state_dict[tensor] for tensor in tensors}
+         save_file_fn(shard, os.path.join(save_directory, filename), **safe_file_kwargs)
+         logger.debug(f"Shard saved to {filename}")
+
+     # Save the index (if any)
+     if state_dict_split.is_sharded:
+         index_path = filename_pattern.format(suffix="") + ".index.json"
+         index = {
+             "metadata": {**state_dict_split.metadata, **metadata},
+             "weight_map": state_dict_split.tensor_to_filename,
+         }
+         with open(os.path.join(save_directory, index_path), "w") as f:
+             json.dump(index, f, indent=2)
+         logger.info(
+             f"The model is bigger than the maximum size per checkpoint ({max_shard_size}). "
+             f"Model weighs have been saved in {len(state_dict_split.filename_to_tensors)} checkpoint shards. "
+             f"You can find where each parameters has been saved in the index located at {index_path}."
+         )
+
+     logger.info(f"Model weights successfully saved to {save_directory}!")
+
+
  def split_torch_state_dict_into_shards(
      state_dict: Dict[str, "torch.Tensor"],
      *,
-     filename_pattern: str = FILENAME_PATTERN,
+     filename_pattern: str = constants.SAFETENSORS_WEIGHTS_FILE_PATTERN,
      max_shard_size: Union[int, str] = MAX_SHARD_SIZE,
  ) -> StateDictSplit:
      """
@@ -38,6 +274,14 @@ def split_torch_state_dict_into_shards(
      have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
      [6+2+2GB], [6+2GB], [6GB].
 
+
+     <Tip>
+
+     To save a model state dictionary to the disk, see [`save_torch_state_dict`]. This helper uses
+     `split_torch_state_dict_into_shards` under the hood.
+
+     </Tip>
+
      <Tip warning={true}>
 
      If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
@@ -87,12 +331,12 @@ def split_torch_state_dict_into_shards(
          state_dict,
          max_shard_size=max_shard_size,
          filename_pattern=filename_pattern,
-         get_tensor_size=get_tensor_size,
-         get_storage_id=get_storage_id,
+         get_storage_size=get_torch_storage_size,
+         get_storage_id=get_torch_storage_id,
      )
 
 
- def get_storage_id(tensor: "torch.Tensor") -> Tuple["torch.device", int, int]:
+ def get_torch_storage_id(tensor: "torch.Tensor") -> Tuple["torch.device", int, int]:
      """
      Return unique identifier to a tensor storage.
 
@@ -114,11 +358,23 @@ def get_storage_id(tensor: "torch.Tensor") -> Tuple["torch.device", int, int]:
      else:
          unique_id = storage_ptr(tensor)
 
-     return tensor.device, unique_id, get_storage_size(tensor)
+     return tensor.device, unique_id, get_torch_storage_size(tensor)
 
 
- def get_tensor_size(tensor: "torch.Tensor") -> int:
-     return tensor.numel() * tensor.element_size()
+ def get_torch_storage_size(tensor: "torch.Tensor") -> int:
+     """
+     Taken from https://github.com/huggingface/safetensors/blob/08db34094e9e59e2f9218f2df133b7b4aaff5a99/bindings/python/py_src/safetensors/torch.py#L31C1-L41C59
+     """
+     try:
+         return tensor.untyped_storage().nbytes()
+     except AttributeError:
+         # Fallback for torch==1.10
+         try:
+             return tensor.storage().size() * _get_dtype_size(tensor.dtype)
+         except NotImplementedError:
+             # Fallback for meta storage
+             # On torch >=2.0 this is the tensor size
+             return tensor.nelement() * _get_dtype_size(tensor.dtype)
 
 
  @lru_cache()
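Note: the renamed helpers can be probed directly to see how shared storage is detected before sharding. A minimal sketch, importing from the private module this diff touches (the tensors are illustrative, not part of the diff):

```py
import torch

from huggingface_hub.serialization._torch import get_torch_storage_id, get_torch_storage_size

a = torch.arange(12, dtype=torch.float32)
b = a.view(3, 4)  # a view backed by the same storage as `a`

print(get_torch_storage_id(a) == get_torch_storage_id(b))  # True: same (device, pointer, size) triple
print(get_torch_storage_size(a))  # 48: 12 elements * 4 bytes per float32
```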
@@ -144,7 +400,7 @@ def is_torch_tpu_available(check_device=True):
 
  def storage_ptr(tensor: "torch.Tensor") -> int:
      """
-     Taken from https://github.com/huggingface/safetensors/blob/08db34094e9e59e2f9218f2df133b7b4aaff5a99/bindings/python/py_src/safetensors/torch.py#L11C1-L20C21.
+     Taken from https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L11.
      """
      try:
          return tensor.untyped_storage().data_ptr()
@@ -157,20 +413,141 @@ def storage_ptr(tensor: "torch.Tensor") -> int:
          return 0
 
 
- def get_storage_size(tensor: "torch.Tensor") -> int:
+ def _clean_state_dict_for_safetensors(
+     state_dict: Dict[str, "torch.Tensor"], metadata: Dict[str, str], force_contiguous: bool = True
+ ):
+     """Remove shared tensors from state_dict and update metadata accordingly (for reloading).
+
+     Warning: `state_dict` and `metadata` are mutated in-place!
+
+     Taken from https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L155.
      """
-     Taken from https://github.com/huggingface/safetensors/blob/08db34094e9e59e2f9218f2df133b7b4aaff5a99/bindings/python/py_src/safetensors/torch.py#L31C1-L41C59
+     to_removes = _remove_duplicate_names(state_dict)
+     for kept_name, to_remove_group in to_removes.items():
+         for to_remove in to_remove_group:
+             if metadata is None:
+                 metadata = {}
+
+             if to_remove not in metadata:
+                 # Do not override user data
+                 metadata[to_remove] = kept_name
+             del state_dict[to_remove]
+     if force_contiguous:
+         state_dict = {k: v.contiguous() for k, v in state_dict.items()}
+     return state_dict
+
+
+ def _end_ptr(tensor: "torch.Tensor") -> int:
      """
-     try:
-         return tensor.untyped_storage().nbytes()
-     except AttributeError:
-         # Fallback for torch==1.10
-         try:
-             return tensor.storage().size() * _get_dtype_size(tensor.dtype)
-         except NotImplementedError:
-             # Fallback for meta storage
-             # On torch >=2.0 this is the tensor size
-             return tensor.nelement() * _get_dtype_size(tensor.dtype)
+     Taken from https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L23.
+     """
+     if tensor.nelement():
+         stop = tensor.view(-1)[-1].data_ptr() + _get_dtype_size(tensor.dtype)
+     else:
+         stop = tensor.data_ptr()
+     return stop
+
+
+ def _filter_shared_not_shared(tensors: List[Set[str]], state_dict: Dict[str, "torch.Tensor"]) -> List[Set[str]]:
+     """
+     Taken from https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L44
+     """
+     filtered_tensors = []
+     for shared in tensors:
+         if len(shared) < 2:
+             filtered_tensors.append(shared)
+             continue
+
+         areas = []
+         for name in shared:
+             tensor = state_dict[name]
+             areas.append((tensor.data_ptr(), _end_ptr(tensor), name))
+         areas.sort()
+
+         _, last_stop, last_name = areas[0]
+         filtered_tensors.append({last_name})
+         for start, stop, name in areas[1:]:
+             if start >= last_stop:
+                 filtered_tensors.append({name})
+             else:
+                 filtered_tensors[-1].add(name)
+             last_stop = stop
+
+     return filtered_tensors
+
+
+ def _find_shared_tensors(state_dict: Dict[str, "torch.Tensor"]) -> List[Set[str]]:
+     """
+     Taken from https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L69.
+     """
+     import torch
+
+     tensors_dict = defaultdict(set)
+     for k, v in state_dict.items():
+         if v.device != torch.device("meta") and storage_ptr(v) != 0 and get_torch_storage_size(v) != 0:
+             # Need to add device as key because of multiple GPU.
+             tensors_dict[(v.device, storage_ptr(v), get_torch_storage_size(v))].add(k)
+     tensors = list(sorted(tensors_dict.values()))
+     tensors = _filter_shared_not_shared(tensors, state_dict)
+     return tensors
+
+
+ def _is_complete(tensor: "torch.Tensor") -> bool:
+     """
+     Taken from https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L80
+     """
+     return tensor.data_ptr() == storage_ptr(tensor) and tensor.nelement() * _get_dtype_size(
+         tensor.dtype
+     ) == get_torch_storage_size(tensor)
+
+
+ def _remove_duplicate_names(
+     state_dict: Dict[str, "torch.Tensor"],
+     *,
+     preferred_names: Optional[List[str]] = None,
+     discard_names: Optional[List[str]] = None,
+ ) -> Dict[str, List[str]]:
+     """
+     Taken from https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L80
+     """
+     if preferred_names is None:
+         preferred_names = []
+     unique_preferred_names = set(preferred_names)
+     if discard_names is None:
+         discard_names = []
+     unique_discard_names = set(discard_names)
+
+     shareds = _find_shared_tensors(state_dict)
+     to_remove = defaultdict(list)
+     for shared in shareds:
+         complete_names = set([name for name in shared if _is_complete(state_dict[name])])
+         if not complete_names:
+             raise RuntimeError(
+                 "Error while trying to find names to remove to save state dict, but found no suitable name to keep"
+                 f" for saving amongst: {shared}. None is covering the entire storage. Refusing to save/load the model"
+                 " since you could be storing much more memory than needed. Please refer to"
+                 " https://huggingface.co/docs/safetensors/torch_shared_tensors for more information. Or open an"
+                 " issue."
+             )
+
+         keep_name = sorted(list(complete_names))[0]
+
+         # Mechanism to preferentially select keys to keep
+         # coming from the on-disk file to allow
+         # loading models saved with a different choice
+         # of keep_name
+         preferred = complete_names.difference(unique_discard_names)
+         if preferred:
+             keep_name = sorted(list(preferred))[0]
+
+         if unique_preferred_names:
+             preferred = unique_preferred_names.intersection(complete_names)
+             if preferred:
+                 keep_name = sorted(list(preferred))[0]
+         for name in sorted(shared):
+             if name != keep_name:
+                 to_remove[keep_name].append(name)
+     return to_remove
 
 
  @lru_cache()
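Note: taken together, the helpers above back the two new public functions. A minimal end-to-end sketch, assuming `safetensors` is installed and using an illustrative folder path and model (neither comes from this diff):

```py
import os

import torch

from huggingface_hub import save_torch_model, save_torch_state_dict

os.makedirs("path/to/folder", exist_ok=True)  # the target folder must already exist

model = torch.nn.Linear(16, 4)  # any torch.nn.Module stands in here
save_torch_model(model, "path/to/folder")
# A model this small lands in a single "model.safetensors"; larger ones are split into
# "model-00001-of-0000N.safetensors" shards plus a "model.safetensors.index.json" index.

# Tied weights: two names backed by the same storage are written only once, and the
# dropped name is recorded in the safetensors metadata so the sharing survives a reload.
weight = torch.zeros(8, 8)
save_torch_state_dict({"decoder.weight": weight, "encoder.weight": weight}, "path/to/folder")
```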
@@ -1,4 +1,3 @@
- #!/usr/bin/env python
  # coding=utf-8
  # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
  #
@@ -361,7 +361,7 @@ def hf_raise_for_status(response: Response, endpoint_name: Optional[str] = None)
          message = (
              f"\n\n{response.status_code} Forbidden: {error_message}."
              + f"\nCannot access content at: {response.url}."
-             + "\nIf you are trying to create or update content,"
+             + "\nIf you are trying to create or update content, "
              + "make sure you have a token with the `write` role."
          )
          raise HfHubHTTPError(message, response=response) from e
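Note: the reworded message surfaces through `hf_raise_for_status`, which callers use roughly like this (the URL is illustrative):

```py
import requests

from huggingface_hub.utils import hf_raise_for_status

response = requests.get("https://huggingface.co/api/models/some-org/some-private-repo")
hf_raise_for_status(response)  # a 403 now raises HfHubHTTPError with the corrected spacing
```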
@@ -18,8 +18,13 @@ from pathlib import Path
  from typing import Callable, Generator, Optional, Union
 
  import yaml
- from filelock import BaseFileLock, FileLock
+ from filelock import BaseFileLock, FileLock, Timeout
 
+ from .. import constants
+ from . import logging
+
+
+ logger = logging.get_logger(__name__)
 
  # Wrap `yaml.dump` to set `allow_unicode=True` by default.
  #
@@ -80,8 +85,14 @@ def _set_write_permission_and_retry(func, path, excinfo):
  @contextlib.contextmanager
  def WeakFileLock(lock_file: Union[str, Path]) -> Generator[BaseFileLock, None, None]:
      """A filelock that won't raise an exception if release fails."""
-     lock = FileLock(lock_file)
-     lock.acquire()
+     lock = FileLock(lock_file, timeout=constants.FILELOCK_LOG_EVERY_SECONDS)
+     while True:
+         try:
+             lock.acquire()
+         except Timeout:
+             logger.info("still waiting to acquire lock on %s", lock_file)
+         else:
+             break
 
      yield lock
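Note: `WeakFileLock` remains a context manager, so the new retry loop is invisible to callers. A minimal usage sketch against the private module shown here (the lock path is illustrative):

```py
from huggingface_hub.utils._fixes import WeakFileLock

with WeakFileLock("/tmp/demo.lock"):
    # If another process holds the lock, an INFO line is logged every
    # constants.FILELOCK_LOG_EVERY_SECONDS seconds until the lock is acquired.
    ...
```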
 
@@ -21,19 +21,19 @@ from typing import Callable, Generator, Iterable, List, Optional, TypeVar, Union
 
  T = TypeVar("T")
 
- # Always ignore `.git` and `.huggingface` folders in commits
+ # Always ignore `.git` and `.cache/huggingface` folders in commits
  DEFAULT_IGNORE_PATTERNS = [
      ".git",
      ".git/*",
      "*/.git",
      "**/.git/**",
-     ".huggingface",
-     ".huggingface/*",
-     "*/.huggingface",
-     "**/.huggingface/**",
+     ".cache/huggingface",
+     ".cache/huggingface/*",
+     "*/.cache/huggingface",
+     "**/.cache/huggingface/**",
  ]
  # Forbidden to commit these folders
- FORBIDDEN_FOLDERS = [".git", ".huggingface"]
+ FORBIDDEN_FOLDERS = [".git", ".cache"]
 
 
  def filter_repo_objects(
@@ -105,6 +105,11 @@ def filter_repo_objects(
      if isinstance(ignore_patterns, str):
          ignore_patterns = [ignore_patterns]
 
+     if allow_patterns is not None:
+         allow_patterns = [_add_wildcard_to_directories(p) for p in allow_patterns]
+     if ignore_patterns is not None:
+         ignore_patterns = [_add_wildcard_to_directories(p) for p in ignore_patterns]
+
      if key is None:
 
          def _identity(item: T) -> str:
@@ -128,3 +133,9 @@ def filter_repo_objects(
              continue
 
          yield item
+
+
+ def _add_wildcard_to_directories(pattern: str) -> str:
+     if pattern[-1] == "/":
+         return pattern + "*"
+     return pattern
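Note: the practical effect of `_add_wildcard_to_directories` is that a trailing-slash pattern now matches everything under that directory. A small sketch (the paths are illustrative):

```py
from huggingface_hub.utils import filter_repo_objects

paths = ["data/train.csv", "data/test.csv", "README.md"]
print(list(filter_repo_objects(paths, allow_patterns=["data/"])))
# ['data/train.csv', 'data/test.csv']  ("data/" is expanded to "data/*" before matching)
```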
@@ -1,4 +1,3 @@
- #!/usr/bin/env python
  # coding=utf-8
  # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
  #
@@ -100,7 +100,15 @@ def _send_telemetry_in_thread(
      library_version: Optional[str] = None,
      user_agent: Union[Dict, str, None] = None,
  ) -> None:
-     """Contains the actual data sending data to the Hub."""
+     """Contains the actual data sending data to the Hub.
+
+     This function is called directly in gradio's analytics because
+     it is not possible to send telemetry from a daemon thread.
+
+     See here: https://github.com/gradio-app/gradio/pull/8180
+
+     Please do not rename or remove this function.
+     """
      path = "/".join(quote(part) for part in topic.split("/") if len(part) > 0)
      try:
          r = get_session().head(