huggingface-hub 0.26.3__py3-none-any.whl → 0.27.0rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (61)
  1. huggingface_hub/__init__.py +49 -23
  2. huggingface_hub/_commit_scheduler.py +30 -4
  3. huggingface_hub/_local_folder.py +0 -4
  4. huggingface_hub/_login.py +38 -54
  5. huggingface_hub/_snapshot_download.py +6 -3
  6. huggingface_hub/_tensorboard_logger.py +2 -3
  7. huggingface_hub/_upload_large_folder.py +1 -1
  8. huggingface_hub/errors.py +19 -0
  9. huggingface_hub/fastai_utils.py +3 -2
  10. huggingface_hub/file_download.py +10 -12
  11. huggingface_hub/hf_api.py +102 -498
  12. huggingface_hub/hf_file_system.py +274 -35
  13. huggingface_hub/hub_mixin.py +5 -25
  14. huggingface_hub/inference/_client.py +185 -136
  15. huggingface_hub/inference/_common.py +2 -2
  16. huggingface_hub/inference/_generated/_async_client.py +186 -137
  17. huggingface_hub/inference/_generated/types/__init__.py +31 -10
  18. huggingface_hub/inference/_generated/types/audio_classification.py +3 -5
  19. huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +4 -8
  20. huggingface_hub/inference/_generated/types/chat_completion.py +8 -5
  21. huggingface_hub/inference/_generated/types/depth_estimation.py +1 -1
  22. huggingface_hub/inference/_generated/types/document_question_answering.py +2 -6
  23. huggingface_hub/inference/_generated/types/feature_extraction.py +1 -1
  24. huggingface_hub/inference/_generated/types/fill_mask.py +2 -4
  25. huggingface_hub/inference/_generated/types/image_classification.py +3 -5
  26. huggingface_hub/inference/_generated/types/image_segmentation.py +2 -4
  27. huggingface_hub/inference/_generated/types/image_to_image.py +2 -4
  28. huggingface_hub/inference/_generated/types/image_to_text.py +4 -8
  29. huggingface_hub/inference/_generated/types/object_detection.py +2 -4
  30. huggingface_hub/inference/_generated/types/question_answering.py +2 -4
  31. huggingface_hub/inference/_generated/types/sentence_similarity.py +1 -1
  32. huggingface_hub/inference/_generated/types/summarization.py +2 -4
  33. huggingface_hub/inference/_generated/types/table_question_answering.py +21 -3
  34. huggingface_hub/inference/_generated/types/text2text_generation.py +2 -4
  35. huggingface_hub/inference/_generated/types/text_classification.py +4 -10
  36. huggingface_hub/inference/_generated/types/text_to_audio.py +6 -10
  37. huggingface_hub/inference/_generated/types/text_to_image.py +2 -4
  38. huggingface_hub/inference/_generated/types/text_to_speech.py +6 -10
  39. huggingface_hub/inference/_generated/types/token_classification.py +11 -12
  40. huggingface_hub/inference/_generated/types/translation.py +2 -4
  41. huggingface_hub/inference/_generated/types/video_classification.py +3 -4
  42. huggingface_hub/inference/_generated/types/visual_question_answering.py +2 -5
  43. huggingface_hub/inference/_generated/types/zero_shot_classification.py +8 -18
  44. huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +9 -19
  45. huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +7 -9
  46. huggingface_hub/keras_mixin.py +3 -2
  47. huggingface_hub/lfs.py +2 -5
  48. huggingface_hub/repocard_data.py +4 -4
  49. huggingface_hub/serialization/__init__.py +2 -0
  50. huggingface_hub/serialization/_dduf.py +387 -0
  51. huggingface_hub/serialization/_torch.py +407 -25
  52. huggingface_hub/utils/_cache_manager.py +1 -1
  53. huggingface_hub/utils/_headers.py +9 -25
  54. huggingface_hub/utils/tqdm.py +15 -0
  55. {huggingface_hub-0.26.3.dist-info → huggingface_hub-0.27.0rc0.dist-info}/METADATA +8 -3
  56. {huggingface_hub-0.26.3.dist-info → huggingface_hub-0.27.0rc0.dist-info}/RECORD +60 -60
  57. huggingface_hub/_multi_commits.py +0 -306
  58. {huggingface_hub-0.26.3.dist-info → huggingface_hub-0.27.0rc0.dist-info}/LICENSE +0 -0
  59. {huggingface_hub-0.26.3.dist-info → huggingface_hub-0.27.0rc0.dist-info}/WHEEL +0 -0
  60. {huggingface_hub-0.26.3.dist-info → huggingface_hub-0.27.0rc0.dist-info}/entry_points.txt +0 -0
  61. {huggingface_hub-0.26.3.dist-info → huggingface_hub-0.27.0rc0.dist-info}/top_level.txt +0 -0
huggingface_hub/inference/_generated/types/text_to_speech.py CHANGED
@@ -14,9 +14,7 @@ TextToSpeechEarlyStoppingEnum = Literal["never"]
 
 @dataclass
 class TextToSpeechGenerationParameters(BaseInferenceType):
-    """Parametrization of the text generation process
-    Ad-hoc parametrization of the text generation process
-    """
+    """Parametrization of the text generation process"""
 
     do_sample: Optional[bool] = None
     """Whether to use sampling instead of greedy decoding when generating new tokens."""
@@ -40,11 +38,11 @@ class TextToSpeechGenerationParameters(BaseInferenceType):
     max_length: Optional[int] = None
     """The maximum length (in tokens) of the generated text, including the input."""
     max_new_tokens: Optional[int] = None
-    """The maximum number of tokens to generate. Takes precedence over maxLength."""
+    """The maximum number of tokens to generate. Takes precedence over max_length."""
     min_length: Optional[int] = None
     """The minimum length (in tokens) of the generated text, including the input."""
     min_new_tokens: Optional[int] = None
-    """The minimum number of tokens to generate. Takes precedence over maxLength."""
+    """The minimum number of tokens to generate. Takes precedence over min_length."""
     num_beam_groups: Optional[int] = None
     """Number of groups to divide num_beams into in order to ensure diversity among different
     groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
@@ -76,11 +74,9 @@ class TextToSpeechGenerationParameters(BaseInferenceType):
 
 @dataclass
 class TextToSpeechParameters(BaseInferenceType):
-    """Additional inference parameters
-    Additional inference parameters for Text To Speech
-    """
+    """Additional inference parameters for Text To Speech"""
 
-    generate: Optional[TextToSpeechGenerationParameters] = None
+    generation_parameters: Optional[TextToSpeechGenerationParameters] = None
     """Parametrization of the text generation process"""
 
 
@@ -91,7 +87,7 @@ class TextToSpeechInput(BaseInferenceType):
     inputs: str
     """The input text data"""
     parameters: Optional[TextToSpeechParameters] = None
-    """Additional inference parameters"""
+    """Additional inference parameters for Text To Speech"""
 
 
 @dataclass
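
Renaming `generate` to `generation_parameters` changes the payload shape for anyone constructing these dataclasses by hand. A minimal sketch of the new shape, using only the fields visible in this hunk (the import path follows the file layout listed above):

```python
from huggingface_hub.inference._generated.types.text_to_speech import (
    TextToSpeechGenerationParameters,
    TextToSpeechInput,
    TextToSpeechParameters,
)

# Build a text-to-speech payload with the renamed `generation_parameters`
# field (previously `generate`). `max_new_tokens` takes precedence over
# `max_length`.
payload = TextToSpeechInput(
    inputs="Hello from the release candidate.",
    parameters=TextToSpeechParameters(
        generation_parameters=TextToSpeechGenerationParameters(
            do_sample=True,
            max_new_tokens=256,
        )
    ),
)
```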
huggingface_hub/inference/_generated/types/token_classification.py CHANGED
@@ -4,7 +4,7 @@
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
 from dataclasses import dataclass
-from typing import Any, List, Literal, Optional
+from typing import List, Literal, Optional
 
 from .base import BaseInferenceType
 
@@ -14,9 +14,7 @@ TokenClassificationAggregationStrategy = Literal["none", "simple", "first", "ave
 
 @dataclass
 class TokenClassificationParameters(BaseInferenceType):
-    """Additional inference parameters
-    Additional inference parameters for Token Classification
-    """
+    """Additional inference parameters for Token Classification"""
 
     aggregation_strategy: Optional["TokenClassificationAggregationStrategy"] = None
     """The strategy used to fuse tokens based on model predictions"""
@@ -33,21 +31,22 @@ class TokenClassificationInput(BaseInferenceType):
     inputs: str
     """The input text data"""
     parameters: Optional[TokenClassificationParameters] = None
-    """Additional inference parameters"""
+    """Additional inference parameters for Token Classification"""
 
 
 @dataclass
 class TokenClassificationOutputElement(BaseInferenceType):
     """Outputs of inference for the Token Classification task"""
 
-    label: Any
+    end: int
+    """The character position in the input where this group ends."""
     score: float
     """The associated score / probability"""
-    end: Optional[int] = None
-    """The character position in the input where this group ends."""
-    entity_group: Optional[str] = None
-    """The predicted label for that group of tokens"""
-    start: Optional[int] = None
+    start: int
     """The character position in the input where this group begins."""
-    word: Optional[str] = None
+    word: str
     """The corresponding text"""
+    entity: Optional[str] = None
+    """The predicted label for a single token"""
+    entity_group: Optional[str] = None
+    """The predicted label for a group of one or more tokens"""
huggingface_hub/inference/_generated/types/translation.py CHANGED
@@ -14,9 +14,7 @@ TranslationTruncationStrategy = Literal["do_not_truncate", "longest_first", "onl
 
 @dataclass
 class TranslationParameters(BaseInferenceType):
-    """Additional inference parameters
-    Additional inference parameters for Translation
-    """
+    """Additional inference parameters for Translation"""
 
     clean_up_tokenization_spaces: Optional[bool] = None
     """Whether to clean up the potential extra spaces in the text output."""
@@ -41,7 +39,7 @@ class TranslationInput(BaseInferenceType):
     inputs: str
     """The text to translate."""
     parameters: Optional[TranslationParameters] = None
-    """Additional inference parameters"""
+    """Additional inference parameters for Translation"""
 
 
 @dataclass
huggingface_hub/inference/_generated/types/video_classification.py CHANGED
@@ -14,13 +14,12 @@ VideoClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]
 
 @dataclass
 class VideoClassificationParameters(BaseInferenceType):
-    """Additional inference parameters
-    Additional inference parameters for Video Classification
-    """
+    """Additional inference parameters for Video Classification"""
 
     frame_sampling_rate: Optional[int] = None
     """The sampling rate used to select frames from the video."""
     function_to_apply: Optional["VideoClassificationOutputTransform"] = None
+    """The function to apply to the model outputs in order to retrieve the scores."""
     num_frames: Optional[int] = None
     """The number of sampled frames to consider for classification."""
     top_k: Optional[int] = None
@@ -34,7 +33,7 @@ class VideoClassificationInput(BaseInferenceType):
     inputs: Any
     """The input video data"""
     parameters: Optional[VideoClassificationParameters] = None
-    """Additional inference parameters"""
+    """Additional inference parameters for Video Classification"""
 
 
 @dataclass
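
The field set of `VideoClassificationParameters` is unchanged here; only the docstrings move and `function_to_apply` gains one. For reference, a sketch of constructing the parameters as they appear in this hunk:

```python
from huggingface_hub.inference._generated.types.video_classification import (
    VideoClassificationParameters,
)

# All fields stay optional; `function_to_apply` is one of the
# VideoClassificationOutputTransform literals ("sigmoid", "softmax", "none").
params = VideoClassificationParameters(
    frame_sampling_rate=4,
    function_to_apply="softmax",
    num_frames=16,
    top_k=3,
)
```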
huggingface_hub/inference/_generated/types/visual_question_answering.py CHANGED
@@ -21,9 +21,7 @@ class VisualQuestionAnsweringInputData(BaseInferenceType):
 
 @dataclass
 class VisualQuestionAnsweringParameters(BaseInferenceType):
-    """Additional inference parameters
-    Additional inference parameters for Visual Question Answering
-    """
+    """Additional inference parameters for Visual Question Answering"""
 
     top_k: Optional[int] = None
     """The number of answers to return (will be chosen by order of likelihood). Note that we
@@ -39,14 +37,13 @@ class VisualQuestionAnsweringInput(BaseInferenceType):
     inputs: VisualQuestionAnsweringInputData
     """One (image, question) pair to answer"""
     parameters: Optional[VisualQuestionAnsweringParameters] = None
-    """Additional inference parameters"""
+    """Additional inference parameters for Visual Question Answering"""
 
 
 @dataclass
 class VisualQuestionAnsweringOutputElement(BaseInferenceType):
     """Outputs of inference for the Visual Question Answering task"""
 
-    label: Any
     score: float
     """The associated score / probability"""
     answer: Optional[str] = None
huggingface_hub/inference/_generated/types/zero_shot_classification.py CHANGED
@@ -10,24 +10,14 @@ from .base import BaseInferenceType
 
 
 @dataclass
-class ZeroShotClassificationInputData(BaseInferenceType):
-    """The input text data, with candidate labels"""
+class ZeroShotClassificationParameters(BaseInferenceType):
+    """Additional inference parameters for Zero Shot Classification"""
 
     candidate_labels: List[str]
     """The set of possible class labels to classify the text into."""
-    text: str
-    """The text to classify"""
-
-
-@dataclass
-class ZeroShotClassificationParameters(BaseInferenceType):
-    """Additional inference parameters
-    Additional inference parameters for Zero Shot Classification
-    """
-
     hypothesis_template: Optional[str] = None
-    """The sentence used in conjunction with candidateLabels to attempt the text classification
-    by replacing the placeholder with the candidate labels.
+    """The sentence used in conjunction with `candidate_labels` to attempt the text
+    classification by replacing the placeholder with the candidate labels.
     """
     multi_label: Optional[bool] = None
     """Whether multiple candidate labels can be true. If false, the scores are normalized such
@@ -40,10 +30,10 @@ class ZeroShotClassificationParameters(BaseInferenceType):
 class ZeroShotClassificationInput(BaseInferenceType):
     """Inputs for Zero Shot Classification inference"""
 
-    inputs: ZeroShotClassificationInputData
-    """The input text data, with candidate labels"""
-    parameters: Optional[ZeroShotClassificationParameters] = None
-    """Additional inference parameters"""
+    inputs: str
+    """The text to classify"""
+    parameters: ZeroShotClassificationParameters
+    """Additional inference parameters for Zero Shot Classification"""
 
 
 @dataclass
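
This is a real schema change: the nested `ZeroShotClassificationInputData` wrapper is removed, `inputs` becomes the raw text, and `candidate_labels` moves into the now-required parameters. A sketch of the new request shape, based only on the fields shown above:

```python
from huggingface_hub.inference._generated.types.zero_shot_classification import (
    ZeroShotClassificationInput,
    ZeroShotClassificationParameters,
)

# `candidate_labels` now lives in the parameters, which are required rather
# than Optional; `inputs` is the plain text to classify.
request = ZeroShotClassificationInput(
    inputs="The new release candidate fixes the upload progress bar.",
    parameters=ZeroShotClassificationParameters(
        candidate_labels=["bug report", "release note", "question"],
        multi_label=False,
    ),
)
```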
huggingface_hub/inference/_generated/types/zero_shot_image_classification.py CHANGED
@@ -4,30 +4,20 @@
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
 from dataclasses import dataclass
-from typing import Any, List, Optional
+from typing import List, Optional
 
 from .base import BaseInferenceType
 
 
 @dataclass
-class ZeroShotImageClassificationInputData(BaseInferenceType):
-    """The input image data, with candidate labels"""
+class ZeroShotImageClassificationParameters(BaseInferenceType):
+    """Additional inference parameters for Zero Shot Image Classification"""
 
     candidate_labels: List[str]
     """The candidate labels for this image"""
-    image: Any
-    """The image data to classify"""
-
-
-@dataclass
-class ZeroShotImageClassificationParameters(BaseInferenceType):
-    """Additional inference parameters
-    Additional inference parameters for Zero Shot Image Classification
-    """
-
     hypothesis_template: Optional[str] = None
-    """The sentence used in conjunction with candidateLabels to attempt the text classification
-    by replacing the placeholder with the candidate labels.
+    """The sentence used in conjunction with `candidate_labels` to attempt the image
+    classification by replacing the placeholder with the candidate labels.
     """
 
 
@@ -35,10 +25,10 @@ class ZeroShotImageClassificationParameters(BaseInferenceType):
 class ZeroShotImageClassificationInput(BaseInferenceType):
     """Inputs for Zero Shot Image Classification inference"""
 
-    inputs: ZeroShotImageClassificationInputData
-    """The input image data, with candidate labels"""
-    parameters: Optional[ZeroShotImageClassificationParameters] = None
-    """Additional inference parameters"""
+    inputs: str
+    """The input image data to classify as a base64-encoded string."""
+    parameters: ZeroShotImageClassificationParameters
+    """Additional inference parameters for Zero Shot Image Classification"""
 
 
 @dataclass
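
Same pattern as zero-shot text classification: the `ZeroShotImageClassificationInputData` wrapper disappears and the image is passed as a base64-encoded string in `inputs`. A sketch (the local `cat.png` file is a placeholder):

```python
import base64

from huggingface_hub.inference._generated.types.zero_shot_image_classification import (
    ZeroShotImageClassificationInput,
    ZeroShotImageClassificationParameters,
)

# Encode the image as base64 and pass it directly; candidate labels move into
# the now-required parameters dataclass.
with open("cat.png", "rb") as f:  # placeholder image file
    encoded = base64.b64encode(f.read()).decode()

request = ZeroShotImageClassificationInput(
    inputs=encoded,
    parameters=ZeroShotImageClassificationParameters(
        candidate_labels=["cat", "dog", "neither"],
    ),
)
```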
huggingface_hub/inference/_generated/types/zero_shot_object_detection.py CHANGED
@@ -4,29 +4,27 @@
 # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
 # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
 from dataclasses import dataclass
-from typing import Any, Dict, List, Optional
+from typing import List
 
 from .base import BaseInferenceType
 
 
 @dataclass
-class ZeroShotObjectDetectionInputData(BaseInferenceType):
-    """The input image data, with candidate labels"""
+class ZeroShotObjectDetectionParameters(BaseInferenceType):
+    """Additional inference parameters for Zero Shot Object Detection"""
 
     candidate_labels: List[str]
     """The candidate labels for this image"""
-    image: Any
-    """The image data to generate bounding boxes from"""
 
 
 @dataclass
 class ZeroShotObjectDetectionInput(BaseInferenceType):
     """Inputs for Zero Shot Object Detection inference"""
 
-    inputs: ZeroShotObjectDetectionInputData
-    """The input image data, with candidate labels"""
-    parameters: Optional[Dict[str, Any]] = None
-    """Additional inference parameters"""
+    inputs: str
+    """The input image data as a base64-encoded string."""
+    parameters: ZeroShotObjectDetectionParameters
+    """Additional inference parameters for Zero Shot Object Detection"""
 
 
 @dataclass
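
Here the free-form `Dict[str, Any]` parameters also become a typed dataclass. A sketch of the new request shape (the base64 string is a placeholder for real image bytes):

```python
from huggingface_hub.inference._generated.types.zero_shot_object_detection import (
    ZeroShotObjectDetectionInput,
    ZeroShotObjectDetectionParameters,
)

# Parameters are now typed instead of a plain dict, and the image travels as a
# base64-encoded string in `inputs`.
request = ZeroShotObjectDetectionInput(
    inputs="<base64-encoded image bytes>",  # placeholder
    parameters=ZeroShotObjectDetectionParameters(
        candidate_labels=["person", "bicycle", "traffic light"],
    ),
)
```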
huggingface_hub/keras_mixin.py CHANGED
@@ -301,7 +301,7 @@ def push_to_hub_keras(
     *,
     config: Optional[dict] = None,
     commit_message: str = "Push Keras model using huggingface_hub.",
-    private: bool = False,
+    private: Optional[bool] = None,
     api_endpoint: Optional[str] = None,
     token: Optional[str] = None,
     branch: Optional[str] = None,
@@ -330,8 +330,9 @@
             ID of the repository to push to (example: `"username/my-model"`).
         commit_message (`str`, *optional*, defaults to "Add Keras model"):
             Message to commit while pushing.
-        private (`bool`, *optional*, defaults to `False`):
+        private (`bool`, *optional*):
             Whether the repository created should be private.
+            If `None` (default), the repo will be public unless the organization's default is private.
         api_endpoint (`str`, *optional*):
             The API endpoint to use when pushing the model to the hub.
         token (`str`, *optional*):
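
The `private=None` default means an unset value defers to the owning organization's default visibility instead of forcing a public repo. A hedged sketch of a call under the new default (the repo id is a placeholder and pushing requires a valid token):

```python
from tensorflow import keras

from huggingface_hub import push_to_hub_keras

# Minimal model just for illustration.
model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse")

push_to_hub_keras(
    model,
    repo_id="my-username/tiny-keras-demo",  # placeholder repo id
    private=None,  # same as omitting it: the org default decides visibility
)
```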
huggingface_hub/lfs.py CHANGED
@@ -39,6 +39,7 @@ from .utils import (
 )
 from .utils._lfs import SliceFileObj
 from .utils.sha import sha256, sha_fileobj
+from .utils.tqdm import is_tqdm_disabled
 
 
 if TYPE_CHECKING:
@@ -430,17 +431,13 @@ def _upload_parts_hf_transfer(
     if len(desc) > 40:
         desc = f"(…){desc[-40:]}"
 
-    # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached
-    # see https://github.com/huggingface/huggingface_hub/pull/2000
-    disable = True if (logger.getEffectiveLevel() == logging.NOTSET) else None
-
     with tqdm(
         unit="B",
         unit_scale=True,
         total=total,
         initial=0,
         desc=desc,
-        disable=disable,
+        disable=is_tqdm_disabled(logger.getEffectiveLevel()),
         name="huggingface_hub.lfs_upload",
     ) as progress:
         try:
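
The inline TTY/log-level heuristic moves into the shared `is_tqdm_disabled` helper (the +15-line change to `huggingface_hub/utils/tqdm.py` in the file list). The removed lines imply logic equivalent to the sketch below; this is not necessarily the helper's exact implementation:

```python
import logging
from typing import Optional


def is_tqdm_disabled_sketch(log_level: int) -> Optional[bool]:
    """Reimplementation of the removed inline logic, for illustration only.

    Returning None (rather than False) lets tqdm auto-disable the bar when no
    TTY is attached; returning True force-disables it when the logger's
    effective level is NOTSET.
    See https://github.com/huggingface/huggingface_hub/pull/2000.
    """
    return True if log_level == logging.NOTSET else None
```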
huggingface_hub/repocard_data.py CHANGED
@@ -185,7 +185,7 @@ class CardData:
 
         data_dict = copy.deepcopy(self.__dict__)
         self._to_dict(data_dict)
-        return _remove_none(data_dict)
+        return {key: value for key, value in data_dict.items() if value is not None}
 
     def _to_dict(self, data_dict):
         """Use this method in child classes to alter the dict representation of the data. Alter the dict in-place.
@@ -252,8 +252,8 @@ class ModelCardData(CardData):
            The identifier of the base model from which the model derives. This is applicable for example if your model is a
            fine-tune or adapter of an existing model. The value must be the ID of a model on the Hub (or a list of IDs
            if your model derives from multiple models). Defaults to None.
-        datasets (`List[str]`, *optional*):
-            List of datasets that were used to train this model. Should be a dataset ID
+        datasets (`Union[str, List[str]]`, *optional*):
+            Dataset or list of datasets that were used to train this model. Should be a dataset ID
            found on https://hf.co/datasets. Defaults to None.
        eval_results (`Union[List[EvalResult], EvalResult]`, *optional*):
            List of `huggingface_hub.EvalResult` that define evaluation results of the model. If provided,
@@ -312,7 +312,7 @@ class ModelCardData(CardData):
        self,
        *,
        base_model: Optional[Union[str, List[str]]] = None,
-        datasets: Optional[List[str]] = None,
+        datasets: Optional[Union[str, List[str]]] = None,
        eval_results: Optional[List[EvalResult]] = None,
        language: Optional[Union[str, List[str]]] = None,
        library_name: Optional[str] = None,
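
`datasets` now accepts a single dataset ID as well as a list, and `to_dict()` still drops None-valued fields (now via an inline dict comprehension instead of `_remove_none`). A small sketch (the dataset ID is illustrative):

```python
from huggingface_hub import ModelCardData

# A single dataset ID is now accepted in addition to a list of IDs.
card_data = ModelCardData(
    language="en",
    license="mit",
    datasets="imdb",  # illustrative dataset ID; a list still works too
)
print(card_data.to_dict())  # None-valued fields are omitted from the output
```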
huggingface_hub/serialization/__init__.py CHANGED
@@ -19,6 +19,8 @@ from ._tensorflow import get_tf_storage_size, split_tf_state_dict_into_shards
 from ._torch import (
     get_torch_storage_id,
     get_torch_storage_size,
+    load_state_dict_from_file,
+    load_torch_model,
     save_torch_model,
     save_torch_state_dict,
     split_torch_state_dict_into_shards,
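
`load_state_dict_from_file` and `load_torch_model` are the new public counterparts to the existing save helpers (implemented in the large `_torch.py` change listed above). A hedged sketch of a save/load round trip, assuming the basic `(model, directory)` call shapes; exact keyword arguments may differ:

```python
from pathlib import Path

import torch

from huggingface_hub.serialization import load_torch_model, save_torch_model

# Tiny model for illustration.
model = torch.nn.Linear(4, 2)

# Save the weights (safetensors by default), then load them back into a
# freshly constructed model of the same architecture.
checkpoint_dir = Path("tmp_checkpoint")
checkpoint_dir.mkdir(exist_ok=True)
save_torch_model(model, checkpoint_dir)

reloaded = torch.nn.Linear(4, 2)
load_torch_model(reloaded, checkpoint_dir)

# `load_state_dict_from_file` is the lower-level helper that reads a single
# checkpoint file (e.g. one .safetensors shard) into a plain state dict.
```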