huggingface-hub 0.25.2__py3-none-any.whl → 0.26.0rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of huggingface-hub has been flagged as potentially problematic.

Files changed (45)
  1. huggingface_hub/__init__.py +45 -11
  2. huggingface_hub/_login.py +172 -33
  3. huggingface_hub/commands/user.py +125 -9
  4. huggingface_hub/constants.py +1 -1
  5. huggingface_hub/errors.py +6 -9
  6. huggingface_hub/file_download.py +2 -372
  7. huggingface_hub/hf_api.py +170 -13
  8. huggingface_hub/hf_file_system.py +3 -3
  9. huggingface_hub/hub_mixin.py +2 -1
  10. huggingface_hub/inference/_client.py +500 -145
  11. huggingface_hub/inference/_common.py +42 -4
  12. huggingface_hub/inference/_generated/_async_client.py +499 -144
  13. huggingface_hub/inference/_generated/types/__init__.py +37 -7
  14. huggingface_hub/inference/_generated/types/audio_classification.py +8 -5
  15. huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +9 -7
  16. huggingface_hub/inference/_generated/types/chat_completion.py +23 -4
  17. huggingface_hub/inference/_generated/types/image_classification.py +8 -5
  18. huggingface_hub/inference/_generated/types/image_segmentation.py +9 -7
  19. huggingface_hub/inference/_generated/types/image_to_image.py +7 -5
  20. huggingface_hub/inference/_generated/types/image_to_text.py +4 -4
  21. huggingface_hub/inference/_generated/types/object_detection.py +11 -5
  22. huggingface_hub/inference/_generated/types/summarization.py +11 -13
  23. huggingface_hub/inference/_generated/types/text_classification.py +10 -5
  24. huggingface_hub/inference/_generated/types/text_generation.py +1 -0
  25. huggingface_hub/inference/_generated/types/text_to_audio.py +2 -2
  26. huggingface_hub/inference/_generated/types/text_to_image.py +9 -7
  27. huggingface_hub/inference/_generated/types/text_to_speech.py +107 -0
  28. huggingface_hub/inference/_generated/types/translation.py +17 -11
  29. huggingface_hub/inference/_generated/types/video_classification.py +2 -2
  30. huggingface_hub/repocard.py +2 -1
  31. huggingface_hub/repocard_data.py +10 -2
  32. huggingface_hub/serialization/_torch.py +7 -4
  33. huggingface_hub/utils/__init__.py +4 -20
  34. huggingface_hub/utils/{_token.py → _auth.py} +86 -3
  35. huggingface_hub/utils/_headers.py +1 -1
  36. huggingface_hub/utils/_hf_folder.py +1 -1
  37. huggingface_hub/utils/_http.py +10 -4
  38. huggingface_hub/utils/_runtime.py +1 -10
  39. {huggingface_hub-0.25.2.dist-info → huggingface_hub-0.26.0rc0.dist-info}/METADATA +12 -12
  40. {huggingface_hub-0.25.2.dist-info → huggingface_hub-0.26.0rc0.dist-info}/RECORD +44 -44
  41. huggingface_hub/inference/_templating.py +0 -102
  42. {huggingface_hub-0.25.2.dist-info → huggingface_hub-0.26.0rc0.dist-info}/LICENSE +0 -0
  43. {huggingface_hub-0.25.2.dist-info → huggingface_hub-0.26.0rc0.dist-info}/WHEEL +0 -0
  44. {huggingface_hub-0.25.2.dist-info → huggingface_hub-0.26.0rc0.dist-info}/entry_points.txt +0 -0
  45. {huggingface_hub-0.25.2.dist-info → huggingface_hub-0.26.0rc0.dist-info}/top_level.txt +0 -0

huggingface_hub/inference/_generated/types/text_to_speech.py (new file)
@@ -0,0 +1,107 @@
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
+ #
+ # See:
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+ from dataclasses import dataclass
+ from typing import Any, Literal, Optional, Union
+
+ from .base import BaseInferenceType
+
+
+ TextToSpeechEarlyStoppingEnum = Literal["never"]
+
+
+ @dataclass
+ class TextToSpeechGenerationParameters(BaseInferenceType):
+     """Parametrization of the text generation process
+     Ad-hoc parametrization of the text generation process
+     """
+
+     do_sample: Optional[bool] = None
+     """Whether to use sampling instead of greedy decoding when generating new tokens."""
+     early_stopping: Optional[Union[bool, "TextToSpeechEarlyStoppingEnum"]] = None
+     """Controls the stopping condition for beam-based methods."""
+     epsilon_cutoff: Optional[float] = None
+     """If set to float strictly between 0 and 1, only tokens with a conditional probability
+     greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
+     3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
+     Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
+     """
+     eta_cutoff: Optional[float] = None
+     """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
+     float strictly between 0 and 1, a token is only considered if it is greater than either
+     eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
+     term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
+     the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
+     See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
+     for more details.
+     """
+     max_length: Optional[int] = None
+     """The maximum length (in tokens) of the generated text, including the input."""
+     max_new_tokens: Optional[int] = None
+     """The maximum number of tokens to generate. Takes precedence over maxLength."""
+     min_length: Optional[int] = None
+     """The minimum length (in tokens) of the generated text, including the input."""
+     min_new_tokens: Optional[int] = None
+     """The minimum number of tokens to generate. Takes precedence over maxLength."""
+     num_beam_groups: Optional[int] = None
+     """Number of groups to divide num_beams into in order to ensure diversity among different
+     groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
+     """
+     num_beams: Optional[int] = None
+     """Number of beams to use for beam search."""
+     penalty_alpha: Optional[float] = None
+     """The value balances the model confidence and the degeneration penalty in contrastive
+     search decoding.
+     """
+     temperature: Optional[float] = None
+     """The value used to modulate the next token probabilities."""
+     top_k: Optional[int] = None
+     """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
+     top_p: Optional[float] = None
+     """If set to float < 1, only the smallest set of most probable tokens with probabilities
+     that add up to top_p or higher are kept for generation.
+     """
+     typical_p: Optional[float] = None
+     """Local typicality measures how similar the conditional probability of predicting a target
+     token next is to the expected conditional probability of predicting a random token next,
+     given the partial text already generated. If set to float < 1, the smallest set of the
+     most locally typical tokens with probabilities that add up to typical_p or higher are
+     kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
+     """
+     use_cache: Optional[bool] = None
+     """Whether the model should use the past last key/values attentions to speed up decoding"""
+
+
+ @dataclass
+ class TextToSpeechParameters(BaseInferenceType):
+     """Additional inference parameters
+     Additional inference parameters for Text To Speech
+     """
+
+     generate: Optional[TextToSpeechGenerationParameters] = None
+     """Parametrization of the text generation process"""
+
+
+ @dataclass
+ class TextToSpeechInput(BaseInferenceType):
+     """Inputs for Text To Speech inference"""
+
+     inputs: str
+     """The input text data"""
+     parameters: Optional[TextToSpeechParameters] = None
+     """Additional inference parameters"""
+
+
+ @dataclass
+ class TextToSpeechOutput(BaseInferenceType):
+     """Outputs for Text to Speech inference
+     Outputs of inference for the Text To Audio task
+     """
+
+     audio: Any
+     """The generated audio waveform."""
+     sampling_rate: Any
+     text_to_speech_output_sampling_rate: Optional[float] = None
+     """The sampling rate of the generated audio waveform."""
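
For orientation, a minimal sketch of how these generated dataclasses can be assembled into a request payload (the input text and parameter values below are illustrative and not part of this diff):

    from huggingface_hub.inference._generated.types.text_to_speech import (
        TextToSpeechGenerationParameters,
        TextToSpeechInput,
        TextToSpeechParameters,
    )

    # Illustrative values only: generation is tuned the same way as for text models.
    generation = TextToSpeechGenerationParameters(do_sample=True, temperature=0.8, max_new_tokens=256)
    payload = TextToSpeechInput(
        inputs="Hello from huggingface_hub 0.26!",
        parameters=TextToSpeechParameters(generate=generation),
    )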

huggingface_hub/inference/_generated/types/translation.py
@@ -9,32 +9,38 @@ from typing import Any, Dict, Literal, Optional
  from .base import BaseInferenceType


- TranslationGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]
+ TranslationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]


  @dataclass
- class TranslationGenerationParameters(BaseInferenceType):
+ class TranslationParameters(BaseInferenceType):
      """Additional inference parameters
-     Additional inference parameters for Text2text Generation
+     Additional inference parameters for Translation
      """

      clean_up_tokenization_spaces: Optional[bool] = None
      """Whether to clean up the potential extra spaces in the text output."""
      generate_parameters: Optional[Dict[str, Any]] = None
-     """Additional parametrization of the text generation algorithm"""
-     truncation: Optional["TranslationGenerationTruncationStrategy"] = None
-     """The truncation strategy to use"""
+     """Additional parametrization of the text generation algorithm."""
+     src_lang: Optional[str] = None
+     """The source language of the text. Required for models that can translate from multiple
+     languages.
+     """
+     tgt_lang: Optional[str] = None
+     """Target language to translate to. Required for models that can translate to multiple
+     languages.
+     """
+     truncation: Optional["TranslationTruncationStrategy"] = None
+     """The truncation strategy to use."""


  @dataclass
  class TranslationInput(BaseInferenceType):
-     """Inputs for Translation inference
-     Inputs for Text2text Generation inference
-     """
+     """Inputs for Translation inference"""

      inputs: str
-     """The input text data"""
-     parameters: Optional[TranslationGenerationParameters] = None
+     """The text to translate."""
+     parameters: Optional[TranslationParameters] = None
      """Additional inference parameters"""

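
The new src_lang / tgt_lang fields surface the language codes of multilingual checkpoints as typed parameters. A small sketch, with illustrative language codes (not taken from the diff):

    from huggingface_hub.inference._generated.types.translation import (
        TranslationInput,
        TranslationParameters,
    )

    payload = TranslationInput(
        inputs="Bonjour le monde",
        parameters=TranslationParameters(src_lang="fr", tgt_lang="en", clean_up_tokenization_spaces=True),
    )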

huggingface_hub/inference/_generated/types/video_classification.py
@@ -9,7 +9,7 @@ from typing import Any, Literal, Optional
  from .base import BaseInferenceType


- ClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]
+ VideoClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]


  @dataclass
@@ -20,7 +20,7 @@ class VideoClassificationParameters(BaseInferenceType):

      frame_sampling_rate: Optional[int] = None
      """The sampling rate used to select frames from the video."""
-     function_to_apply: Optional["ClassificationOutputTransform"] = None
+     function_to_apply: Optional["VideoClassificationOutputTransform"] = None
      num_frames: Optional[int] = None
      """The number of sampled frames to consider for classification."""
      top_k: Optional[int] = None
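
The rename is a typing-level change only; field usage is unchanged. A quick sketch with illustrative values:

    from huggingface_hub.inference._generated.types.video_classification import VideoClassificationParameters

    params = VideoClassificationParameters(function_to_apply="softmax", num_frames=16, top_k=5)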

huggingface_hub/repocard.py
@@ -83,7 +83,7 @@ class RepoCard:
      def content(self):
          """The content of the RepoCard, including the YAML block and the Markdown body."""
          line_break = _detect_line_ending(self._content) or "\n"
-         return f"---{line_break}{self.data.to_yaml(line_break=line_break)}{line_break}---{line_break}{self.text}"
+         return f"---{line_break}{self.data.to_yaml(line_break=line_break, original_order=self._original_order)}{line_break}---{line_break}{self.text}"

      @content.setter
      def content(self, content: str):
@@ -110,6 +110,7 @@ class RepoCard:
          self.text = content

          self.data = self.card_data_class(**data_dict, ignore_metadata_errors=self.ignore_metadata_errors)
+         self._original_order = list(data_dict.keys())

      def __str__(self):
          return self.content

huggingface_hub/repocard_data.py
@@ -175,7 +175,7 @@ class CardData:
      def __init__(self, ignore_metadata_errors: bool = False, **kwargs):
          self.__dict__.update(kwargs)

-     def to_dict(self) -> Dict[str, Any]:
+     def to_dict(self):
          """Converts CardData to a dict.

          Returns:
@@ -195,7 +195,7 @@ class CardData:
          """
          pass

-     def to_yaml(self, line_break=None) -> str:
+     def to_yaml(self, line_break=None, original_order: Optional[List[str]] = None) -> str:
          """Dumps CardData to a YAML block for inclusion in a README.md file.

          Args:
@@ -205,6 +205,12 @@ class CardData:
          Returns:
              `str`: CardData represented as a YAML block.
          """
+         if original_order:
+             self.__dict__ = {
+                 k: self.__dict__[k]
+                 for k in original_order + list(set(self.__dict__.keys()) - set(original_order))
+                 if k in self.__dict__
+             }
          return yaml_dump(self.to_dict(), sort_keys=False, line_break=line_break).strip()

      def __repr__(self):
@@ -276,6 +282,8 @@ class ModelCardData(CardData):
              `eval_results` to construct the `model-index` within the card's metadata. The name
              you supply here is what will be used on PapersWithCode's leaderboards. If None is provided
              then the repo name is used as a default. Defaults to None.
+         pipeline_tag (`str`, *optional*):
+             The pipeline tag associated with the model. Example: "text-classification".
          tags (`List[str]`, *optional*):
              List of tags to add to your model that can be used when filtering on the Hugging
              Face Hub. Defaults to None.
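
Together with the RepoCard change above, a card's YAML metadata now keeps the key order it was authored in when the card is re-serialized. A small sketch (the card content is illustrative):

    from huggingface_hub import ModelCard

    text = "---\nlicense: mit\nlanguage: en\ntags:\n- demo\n---\n\n# My model\n"
    card = ModelCard(text)
    # card.content re-dumps the YAML block with `original_order`, so `license`
    # stays ahead of `language` instead of being reordered on round-trip.
    print(card.content)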

huggingface_hub/serialization/_torch.py
@@ -368,18 +368,21 @@ def _get_unique_id(tensor: "torch.Tensor") -> Union[int, Tuple[Any, ...]]:
      return unique_id


- def get_torch_storage_id(tensor: "torch.Tensor") -> Tuple["torch.device", Union[int, Tuple[Any, ...]], int]:
+ def get_torch_storage_id(tensor: "torch.Tensor") -> Optional[Tuple["torch.device", Union[int, Tuple[Any, ...]], int]]:
      """
      Return unique identifier to a tensor storage.

-     Multiple different tensors can share the same underlying storage. For
-     example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is
+     Multiple different tensors can share the same underlying storage. This identifier is
      guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with
      non-overlapping lifetimes may have the same id.
+     In the case of meta tensors, we return None since we can't tell if they share the same storage.

      Taken from https://github.com/huggingface/transformers/blob/1ecf5f7c982d761b4daaa96719d162c324187c64/src/transformers/pytorch_utils.py#L278.
      """
-     return tensor.device, _get_unique_id(tensor), get_torch_storage_size(tensor)
+     if tensor.device.type == "meta":
+         return None
+     else:
+         return tensor.device, _get_unique_id(tensor), get_torch_storage_size(tensor)


  def get_torch_storage_size(tensor: "torch.Tensor") -> int:
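
In practice callers can no longer assume the storage id is always a non-None tuple. A sketch of the new behavior (requires torch; tensor shapes are illustrative):

    import torch

    from huggingface_hub.serialization._torch import get_torch_storage_id

    meta = torch.empty((2, 2), device="meta")
    real = torch.zeros((2, 2))
    print(get_torch_storage_id(meta))  # None: meta tensors carry no real storage to compare
    print(get_torch_storage_id(real))  # (device, unique storage id, storage size)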

huggingface_hub/utils/__init__.py
@@ -35,6 +35,7 @@ from huggingface_hub.errors import (
  )

  from . import tqdm as _tqdm  # _tqdm is the module
+ from ._auth import get_stored_tokens, get_token
  from ._cache_assets import cached_assets_path
  from ._cache_manager import (
      CachedFileInfo,
@@ -72,7 +73,6 @@ from ._runtime import (
      get_hf_hub_version,
      get_hf_transfer_version,
      get_jinja_version,
-     get_minijinja_version,
      get_numpy_version,
      get_pillow_version,
      get_pydantic_version,
@@ -91,7 +91,6 @@ from ._runtime import (
      is_graphviz_available,
      is_hf_transfer_available,
      is_jinja_available,
-     is_minijinja_available,
      is_notebook,
      is_numpy_available,
      is_package_available,
@@ -103,24 +102,9 @@ from ._runtime import (
      is_tf_available,
      is_torch_available,
  )
- from ._safetensors import (
-     SafetensorsFileMetadata,
-     SafetensorsRepoMetadata,
-     TensorInfo,
- )
+ from ._safetensors import SafetensorsFileMetadata, SafetensorsRepoMetadata, TensorInfo
  from ._subprocess import capture_output, run_interactive_subprocess, run_subprocess
  from ._telemetry import send_telemetry
- from ._token import get_token
  from ._typing import is_jsonable, is_simple_optional_type, unwrap_simple_optional_type
- from ._validators import (
-     smoothly_deprecate_use_auth_token,
-     validate_hf_hub_args,
-     validate_repo_id,
- )
- from .tqdm import (
-     are_progress_bars_disabled,
-     disable_progress_bars,
-     enable_progress_bars,
-     tqdm,
-     tqdm_stream_file,
- )
+ from ._validators import smoothly_deprecate_use_auth_token, validate_hf_hub_args, validate_repo_id
+ from .tqdm import are_progress_bars_disabled, disable_progress_bars, enable_progress_bars, tqdm, tqdm_stream_file

huggingface_hub/utils/_token.py → huggingface_hub/utils/_auth.py
@@ -13,11 +13,13 @@
  # limitations under the License.
  """Contains an helper to get the token from machine (env variable, secret or config file)."""

+ import configparser
+ import logging
  import os
  import warnings
  from pathlib import Path
  from threading import Lock
- from typing import Optional
+ from typing import Dict, Optional

  from .. import constants
  from ._runtime import is_colab_enterprise, is_google_colab
@@ -27,6 +29,8 @@ _IS_GOOGLE_COLAB_CHECKED = False
  _GOOGLE_COLAB_SECRET_LOCK = Lock()
  _GOOGLE_COLAB_SECRET: Optional[str] = None

+ logger = logging.getLogger(__name__)
+

  def get_token() -> Optional[str]:
      """
@@ -68,8 +72,8 @@ def _get_token_from_google_colab() -> Optional[str]:
          return _GOOGLE_COLAB_SECRET

      try:
-         from google.colab import userdata
-         from google.colab.errors import Error as ColabError
+         from google.colab import userdata  # type: ignore
+         from google.colab.errors import Error as ColabError  # type: ignore
      except ImportError:
          return None

@@ -121,6 +125,85 @@ def _get_token_from_file() -> Optional[str]:
      return None


+ def get_stored_tokens() -> Dict[str, str]:
+     """
+     Returns the parsed INI file containing the access tokens.
+     The file is located at `HF_STORED_TOKENS_PATH`, defaulting to `~/.cache/huggingface/stored_tokens`.
+     If the file does not exist, an empty dictionary is returned.
+
+     Returns: `Dict[str, str]`
+         Key is the token name and value is the token.
+     """
+     tokens_path = Path(constants.HF_STORED_TOKENS_PATH)
+     if not tokens_path.exists():
+         stored_tokens = {}
+     config = configparser.ConfigParser()
+     try:
+         config.read(tokens_path)
+         stored_tokens = {token_name: config.get(token_name, "hf_token") for token_name in config.sections()}
+     except configparser.Error as e:
+         logger.error(f"Error parsing stored tokens file: {e}")
+         stored_tokens = {}
+     return stored_tokens
+
+
+ def _save_stored_tokens(stored_tokens: Dict[str, str]) -> None:
+     """
+     Saves the given configuration to the stored tokens file.
+
+     Args:
+         stored_tokens (`Dict[str, str]`):
+             The stored tokens to save. Key is the token name and value is the token.
+     """
+     stored_tokens_path = Path(constants.HF_STORED_TOKENS_PATH)
+
+     # Write the stored tokens into an INI file
+     config = configparser.ConfigParser()
+     for token_name in sorted(stored_tokens.keys()):
+         config.add_section(token_name)
+         config.set(token_name, "hf_token", stored_tokens[token_name])
+
+     stored_tokens_path.parent.mkdir(parents=True, exist_ok=True)
+     with stored_tokens_path.open("w") as config_file:
+         config.write(config_file)
+
+
+ def _get_token_by_name(token_name: str) -> Optional[str]:
+     """
+     Get the token by name.
+
+     Args:
+         token_name (`str`):
+             The name of the token to get.
+
+     Returns:
+         `str` or `None`: The token, `None` if it doesn't exist.
+
+     """
+     stored_tokens = get_stored_tokens()
+     if token_name not in stored_tokens:
+         return None
+     return _clean_token(stored_tokens[token_name])
+
+
+ def _save_token(token: str, token_name: str) -> None:
+     """
+     Save the given token.
+
+     If the stored tokens file does not exist, it will be created.
+     Args:
+         token (`str`):
+             The token to save.
+         token_name (`str`):
+             The name of the token.
+     """
+     tokens_path = Path(constants.HF_STORED_TOKENS_PATH)
+     stored_tokens = get_stored_tokens()
+     stored_tokens[token_name] = token
+     _save_stored_tokens(stored_tokens)
+     logger.info(f"The token `{token_name}` has been saved to {tokens_path}")
+
+
  def _clean_token(token: Optional[str]) -> Optional[str]:
      """Clean token by removing trailing and leading spaces and newlines.

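
The stored-tokens file is a plain INI file with one section per token name and a single hf_token key, so several named tokens can coexist side by side. A sketch using the helpers added above (the token values are fake placeholders; _save_token is an internal helper normally driven by the login flow):

    from huggingface_hub.utils._auth import _save_token, get_stored_tokens

    _save_token(token="hf_fake_personal_token", token_name="personal")
    _save_token(token="hf_fake_work_token", token_name="work")
    print(get_stored_tokens())
    # {'personal': 'hf_fake_personal_token', 'work': 'hf_fake_work_token'}
    #
    # On disk (HF_STORED_TOKENS_PATH), roughly:
    #   [personal]
    #   hf_token = hf_fake_personal_token
    #   [work]
    #   hf_token = hf_fake_work_token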

huggingface_hub/utils/_headers.py
@@ -19,6 +19,7 @@ from typing import Dict, Optional, Union
  from huggingface_hub.errors import LocalTokenNotFoundError

  from .. import constants
+ from ._auth import get_token
  from ._runtime import (
      get_fastai_version,
      get_fastcore_version,
@@ -31,7 +32,6 @@ from ._runtime import (
      is_tf_available,
      is_torch_available,
  )
- from ._token import get_token
  from ._validators import validate_hf_hub_args


huggingface_hub/utils/_hf_folder.py
@@ -19,7 +19,7 @@ from pathlib import Path
  from typing import Optional

  from .. import constants
- from ._token import get_token
+ from ._auth import get_token


  class HfFolder:

huggingface_hub/utils/_http.py
@@ -507,8 +507,9 @@ def _format(error_type: Type[HfHubHTTPError], custom_message: str, response: Res
                      server_errors.append(error["message"])

      except JSONDecodeError:
-         # Case error is directly returned as text
-         if response.text:
+         # If content is not JSON and not HTML, append the text
+         content_type = response.headers.get("Content-Type", "")
+         if response.text and "html" not in content_type.lower():
              server_errors.append(response.text)

      # Strip all server messages
@@ -528,11 +529,16 @@ def _format(error_type: Type[HfHubHTTPError], custom_message: str, response: Res
              final_error_message += "\n" + server_message
          else:
              final_error_message += "\n\n" + server_message
-
      # Add Request ID
      request_id = str(response.headers.get(X_REQUEST_ID, ""))
-     if len(request_id) > 0 and request_id.lower() not in final_error_message.lower():
+     if request_id:
          request_id_message = f" (Request ID: {request_id})"
+     else:
+         # Fallback to X-Amzn-Trace-Id
+         request_id = str(response.headers.get(X_AMZN_TRACE_ID, ""))
+         if request_id:
+             request_id_message = f" (Amzn Trace ID: {request_id})"
+     if request_id and request_id.lower() not in final_error_message.lower():
          if "\n" in final_error_message:
              newline_index = final_error_message.index("\n")
              final_error_message = (
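
For users this only changes the text of raised errors: hf_raise_for_status() still raises the same exception types, but the appended identifier can now come from the Amazon trace header when the Hub's own request-id header is missing, and raw HTML error pages are no longer pasted into the message. A sketch (the URL is illustrative):

    import requests

    from huggingface_hub.utils import hf_raise_for_status

    response = requests.get("https://huggingface.co/api/models/some-org/does-not-exist")
    try:
        hf_raise_for_status(response)
    except Exception as error:
        # Message ends with "(Request ID: ...)" or, as a fallback, "(Amzn Trace ID: ...)"
        print(error)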

huggingface_hub/utils/_runtime.py
@@ -38,7 +38,6 @@ _CANDIDATES = {
      "hf_transfer": {"hf_transfer"},
      "jinja": {"Jinja2"},
      "keras": {"keras"},
-     "minijinja": {"minijinja"},
      "numpy": {"numpy"},
      "pillow": {"Pillow"},
      "pydantic": {"pydantic"},
@@ -161,15 +160,6 @@ def get_keras_version() -> str:
      return _get_version("keras")


- # Minijinja
- def is_minijinja_available() -> bool:
-     return is_package_available("minijinja")
-
-
- def get_minijinja_version() -> str:
-     return _get_version("minijinja")
-
-
  # Numpy
  def is_numpy_available() -> bool:
      return is_package_available("numpy")
@@ -373,6 +363,7 @@ def dump_environment_info() -> Dict[str, Any]:
      info["HF_HUB_CACHE"] = constants.HF_HUB_CACHE
      info["HF_ASSETS_CACHE"] = constants.HF_ASSETS_CACHE
      info["HF_TOKEN_PATH"] = constants.HF_TOKEN_PATH
+     info["HF_STORED_TOKENS_PATH"] = constants.HF_STORED_TOKENS_PATH
      info["HF_HUB_OFFLINE"] = constants.HF_HUB_OFFLINE
      info["HF_HUB_DISABLE_TELEMETRY"] = constants.HF_HUB_DISABLE_TELEMETRY
      info["HF_HUB_DISABLE_PROGRESS_BARS"] = constants.HF_HUB_DISABLE_PROGRESS_BARS
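
The new entry simply surfaces the stored-tokens location when users report their environment, e.g. via `huggingface-cli env` or the equivalent Python call (sketch):

    from huggingface_hub.utils import dump_environment_info

    info = dump_environment_info()  # prints the environment; the dict now includes "HF_STORED_TOKENS_PATH"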

{huggingface_hub-0.25.2.dist-info → huggingface_hub-0.26.0rc0.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: huggingface-hub
- Version: 0.25.2
+ Version: 0.26.0rc0
  Summary: Client library to download and publish models, datasets and other repos on the huggingface.co hub
  Home-page: https://github.com/huggingface/huggingface_hub
  Author: Hugging Face, Inc.
@@ -19,6 +19,7 @@ Classifier: Programming Language :: Python :: 3.8
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Requires-Python: >=3.8.0
  Description-Content-Type: text/markdown
@@ -33,7 +34,6 @@ Requires-Dist: typing-extensions>=3.7.4.3
  Provides-Extra: all
  Requires-Dist: InquirerPy==0.3.4; extra == "all"
  Requires-Dist: aiohttp; extra == "all"
- Requires-Dist: minijinja>=1.0; extra == "all"
  Requires-Dist: jedi; extra == "all"
  Requires-Dist: Jinja2; extra == "all"
  Requires-Dist: pytest<8.2.2,>=8.1.1; extra == "all"
@@ -47,11 +47,12 @@ Requires-Dist: pytest-mock; extra == "all"
  Requires-Dist: urllib3<2.0; extra == "all"
  Requires-Dist: soundfile; extra == "all"
  Requires-Dist: Pillow; extra == "all"
- Requires-Dist: gradio; extra == "all"
+ Requires-Dist: gradio>=4.0.0; extra == "all"
  Requires-Dist: numpy; extra == "all"
  Requires-Dist: fastapi; extra == "all"
  Requires-Dist: ruff>=0.5.0; extra == "all"
  Requires-Dist: mypy==1.5.1; extra == "all"
+ Requires-Dist: libcst==1.4.0; extra == "all"
  Requires-Dist: typing-extensions>=4.8.0; extra == "all"
  Requires-Dist: types-PyYAML; extra == "all"
  Requires-Dist: types-requests; extra == "all"
@@ -64,7 +65,6 @@ Requires-Dist: InquirerPy==0.3.4; extra == "cli"
  Provides-Extra: dev
  Requires-Dist: InquirerPy==0.3.4; extra == "dev"
  Requires-Dist: aiohttp; extra == "dev"
- Requires-Dist: minijinja>=1.0; extra == "dev"
  Requires-Dist: jedi; extra == "dev"
  Requires-Dist: Jinja2; extra == "dev"
  Requires-Dist: pytest<8.2.2,>=8.1.1; extra == "dev"
@@ -78,11 +78,12 @@ Requires-Dist: pytest-mock; extra == "dev"
  Requires-Dist: urllib3<2.0; extra == "dev"
  Requires-Dist: soundfile; extra == "dev"
  Requires-Dist: Pillow; extra == "dev"
- Requires-Dist: gradio; extra == "dev"
+ Requires-Dist: gradio>=4.0.0; extra == "dev"
  Requires-Dist: numpy; extra == "dev"
  Requires-Dist: fastapi; extra == "dev"
  Requires-Dist: ruff>=0.5.0; extra == "dev"
  Requires-Dist: mypy==1.5.1; extra == "dev"
+ Requires-Dist: libcst==1.4.0; extra == "dev"
  Requires-Dist: typing-extensions>=4.8.0; extra == "dev"
  Requires-Dist: types-PyYAML; extra == "dev"
  Requires-Dist: types-requests; extra == "dev"
@@ -98,10 +99,10 @@ Provides-Extra: hf_transfer
  Requires-Dist: hf-transfer>=0.1.4; extra == "hf-transfer"
  Provides-Extra: inference
  Requires-Dist: aiohttp; extra == "inference"
- Requires-Dist: minijinja>=1.0; extra == "inference"
  Provides-Extra: quality
  Requires-Dist: ruff>=0.5.0; extra == "quality"
  Requires-Dist: mypy==1.5.1; extra == "quality"
+ Requires-Dist: libcst==1.4.0; extra == "quality"
  Provides-Extra: tensorflow
  Requires-Dist: tensorflow; extra == "tensorflow"
  Requires-Dist: pydot; extra == "tensorflow"
@@ -112,7 +113,6 @@ Requires-Dist: keras<3.0; extra == "tensorflow-testing"
  Provides-Extra: testing
  Requires-Dist: InquirerPy==0.3.4; extra == "testing"
  Requires-Dist: aiohttp; extra == "testing"
- Requires-Dist: minijinja>=1.0; extra == "testing"
  Requires-Dist: jedi; extra == "testing"
  Requires-Dist: Jinja2; extra == "testing"
  Requires-Dist: pytest<8.2.2,>=8.1.1; extra == "testing"
@@ -126,7 +126,7 @@ Requires-Dist: pytest-mock; extra == "testing"
  Requires-Dist: urllib3<2.0; extra == "testing"
  Requires-Dist: soundfile; extra == "testing"
  Requires-Dist: Pillow; extra == "testing"
- Requires-Dist: gradio; extra == "testing"
+ Requires-Dist: gradio>=4.0.0; extra == "testing"
  Requires-Dist: numpy; extra == "testing"
  Requires-Dist: fastapi; extra == "testing"
  Provides-Extra: torch
@@ -162,10 +162,10 @@ Requires-Dist: types-urllib3; extra == "typing"
  <h4 align="center">
      <p>
          <b>English</b> |
-         <a href="https://github.com/huggingface/huggingface_hub/blob/main/README_de.md">Deutsch</a> |
-         <a href="https://github.com/huggingface/huggingface_hub/blob/main/README_hi.md">हिंदी</a> |
-         <a href="https://github.com/huggingface/huggingface_hub/blob/main/README_ko.md">한국어</a> |
-         <a href="https://github.com/huggingface/huggingface_hub/blob/main/README_cn.md">中文(简体)</a>
+         <a href="https://github.com/huggingface/huggingface_hub/blob/main/i18n/README_de.md">Deutsch</a> |
+         <a href="https://github.com/huggingface/huggingface_hub/blob/main/i18n/README_hi.md">हिंदी</a> |
+         <a href="https://github.com/huggingface/huggingface_hub/blob/main/i18n/README_ko.md">한국어</a> |
+         <a href="https://github.com/huggingface/huggingface_hub/blob/main/i18n/README_cn.md">中文(简体)</a>
      <p>
  </h4>