huggingface-hub 0.28.0rc5__py3-none-any.whl → 0.29.0rc0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of huggingface-hub might be problematic.

Files changed (63):
  1. huggingface_hub/__init__.py +1 -4
  2. huggingface_hub/constants.py +16 -11
  3. huggingface_hub/file_download.py +10 -6
  4. huggingface_hub/hf_api.py +53 -23
  5. huggingface_hub/inference/_client.py +151 -84
  6. huggingface_hub/inference/_common.py +3 -27
  7. huggingface_hub/inference/_generated/_async_client.py +147 -83
  8. huggingface_hub/inference/_generated/types/__init__.py +1 -1
  9. huggingface_hub/inference/_generated/types/audio_classification.py +4 -5
  10. huggingface_hub/inference/_generated/types/audio_to_audio.py +3 -4
  11. huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +7 -8
  12. huggingface_hub/inference/_generated/types/base.py +21 -0
  13. huggingface_hub/inference/_generated/types/chat_completion.py +29 -30
  14. huggingface_hub/inference/_generated/types/depth_estimation.py +3 -4
  15. huggingface_hub/inference/_generated/types/document_question_answering.py +5 -6
  16. huggingface_hub/inference/_generated/types/feature_extraction.py +5 -6
  17. huggingface_hub/inference/_generated/types/fill_mask.py +4 -5
  18. huggingface_hub/inference/_generated/types/image_classification.py +4 -5
  19. huggingface_hub/inference/_generated/types/image_segmentation.py +4 -5
  20. huggingface_hub/inference/_generated/types/image_to_image.py +5 -6
  21. huggingface_hub/inference/_generated/types/image_to_text.py +5 -6
  22. huggingface_hub/inference/_generated/types/object_detection.py +5 -6
  23. huggingface_hub/inference/_generated/types/question_answering.py +5 -6
  24. huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -4
  25. huggingface_hub/inference/_generated/types/summarization.py +4 -5
  26. huggingface_hub/inference/_generated/types/table_question_answering.py +5 -6
  27. huggingface_hub/inference/_generated/types/text2text_generation.py +4 -5
  28. huggingface_hub/inference/_generated/types/text_classification.py +4 -5
  29. huggingface_hub/inference/_generated/types/text_generation.py +12 -13
  30. huggingface_hub/inference/_generated/types/text_to_audio.py +5 -6
  31. huggingface_hub/inference/_generated/types/text_to_image.py +8 -15
  32. huggingface_hub/inference/_generated/types/text_to_speech.py +5 -6
  33. huggingface_hub/inference/_generated/types/text_to_video.py +4 -5
  34. huggingface_hub/inference/_generated/types/token_classification.py +4 -5
  35. huggingface_hub/inference/_generated/types/translation.py +4 -5
  36. huggingface_hub/inference/_generated/types/video_classification.py +4 -5
  37. huggingface_hub/inference/_generated/types/visual_question_answering.py +5 -6
  38. huggingface_hub/inference/_generated/types/zero_shot_classification.py +4 -5
  39. huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +4 -5
  40. huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +5 -6
  41. huggingface_hub/inference/_providers/__init__.py +44 -8
  42. huggingface_hub/inference/_providers/_common.py +239 -0
  43. huggingface_hub/inference/_providers/black_forest_labs.py +66 -0
  44. huggingface_hub/inference/_providers/fal_ai.py +31 -100
  45. huggingface_hub/inference/_providers/fireworks_ai.py +6 -0
  46. huggingface_hub/inference/_providers/hf_inference.py +58 -142
  47. huggingface_hub/inference/_providers/hyperbolic.py +43 -0
  48. huggingface_hub/inference/_providers/nebius.py +41 -0
  49. huggingface_hub/inference/_providers/novita.py +26 -0
  50. huggingface_hub/inference/_providers/replicate.py +24 -119
  51. huggingface_hub/inference/_providers/sambanova.py +3 -86
  52. huggingface_hub/inference/_providers/together.py +36 -130
  53. huggingface_hub/utils/_headers.py +5 -0
  54. huggingface_hub/utils/_hf_folder.py +4 -32
  55. huggingface_hub/utils/_http.py +85 -2
  56. huggingface_hub/utils/_typing.py +1 -1
  57. huggingface_hub/utils/logging.py +6 -0
  58. {huggingface_hub-0.28.0rc5.dist-info → huggingface_hub-0.29.0rc0.dist-info}/METADATA +1 -1
  59. {huggingface_hub-0.28.0rc5.dist-info → huggingface_hub-0.29.0rc0.dist-info}/RECORD +63 -57
  60. {huggingface_hub-0.28.0rc5.dist-info → huggingface_hub-0.29.0rc0.dist-info}/LICENSE +0 -0
  61. {huggingface_hub-0.28.0rc5.dist-info → huggingface_hub-0.29.0rc0.dist-info}/WHEEL +0 -0
  62. {huggingface_hub-0.28.0rc5.dist-info → huggingface_hub-0.29.0rc0.dist-info}/entry_points.txt +0 -0
  63. {huggingface_hub-0.28.0rc5.dist-info → huggingface_hub-0.29.0rc0.dist-info}/top_level.txt +0 -0
huggingface_hub/__init__.py CHANGED
@@ -46,7 +46,7 @@ import sys
 from typing import TYPE_CHECKING
 
 
-__version__ = "0.28.0.rc5"
+__version__ = "0.29.0.rc0"
 
 # Alphabetical order of definitions is ensured in tests
 # WARNING: any comment added in this dictionary definition will be lost when
@@ -392,7 +392,6 @@ _SUBMOD_ATTRS = {
         "TextToImageInput",
         "TextToImageOutput",
         "TextToImageParameters",
-        "TextToImageTargetSize",
         "TextToSpeechEarlyStoppingEnum",
         "TextToSpeechGenerationParameters",
         "TextToSpeechInput",
@@ -702,7 +701,6 @@ __all__ = [
     "TextToImageInput",
     "TextToImageOutput",
     "TextToImageParameters",
-    "TextToImageTargetSize",
    "TextToSpeechEarlyStoppingEnum",
    "TextToSpeechGenerationParameters",
    "TextToSpeechInput",
@@ -1334,7 +1332,6 @@ if TYPE_CHECKING: # pragma: no cover
         TextToImageInput,  # noqa: F401
         TextToImageOutput,  # noqa: F401
         TextToImageParameters,  # noqa: F401
-        TextToImageTargetSize,  # noqa: F401
         TextToSpeechEarlyStoppingEnum,  # noqa: F401
         TextToSpeechGenerationParameters,  # noqa: F401
         TextToSpeechInput,  # noqa: F401
huggingface_hub/constants.py CHANGED
@@ -2,7 +2,6 @@ import os
 import re
 import typing
 from typing import Literal, Optional, Tuple
-from urllib.parse import urljoin
 
 
 # Possible values for env variables
@@ -64,11 +63,13 @@ _staging_mode = _is_true(os.environ.get("HUGGINGFACE_CO_STAGING"))
 
 _HF_DEFAULT_ENDPOINT = "https://huggingface.co"
 _HF_DEFAULT_STAGING_ENDPOINT = "https://hub-ci.huggingface.co"
-ENDPOINT = os.getenv("HF_ENDPOINT", "").rstrip("/") or (
-    _HF_DEFAULT_STAGING_ENDPOINT if _staging_mode else _HF_DEFAULT_ENDPOINT
-)
+ENDPOINT = os.getenv("HF_ENDPOINT", _HF_DEFAULT_ENDPOINT).rstrip("/")
+HUGGINGFACE_CO_URL_TEMPLATE = ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
+
+if _staging_mode:
+    ENDPOINT = _HF_DEFAULT_STAGING_ENDPOINT
+    HUGGINGFACE_CO_URL_TEMPLATE = _HF_DEFAULT_STAGING_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
 
-HUGGINGFACE_CO_URL_TEMPLATE = urljoin(ENDPOINT, "/{repo_id}/resolve/{revision}/{filename}")
 HUGGINGFACE_HEADER_X_REPO_COMMIT = "X-Repo-Commit"
 HUGGINGFACE_HEADER_X_LINKED_ETAG = "X-Linked-Etag"
 HUGGINGFACE_HEADER_X_LINKED_SIZE = "X-Linked-Size"
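
Note on the ENDPOINT/template change above: `urljoin` treats a second argument that starts with "/" as an absolute path, so any path prefix in a custom `HF_ENDPOINT` (for example a reverse-proxy mount point) was silently dropped. A quick illustration (the mirror URL is made up):

    from urllib.parse import urljoin

    # Old construction: the "/hf" prefix disappears.
    print(urljoin("https://mirror.example.com/hf", "/{repo_id}/resolve/{revision}/{filename}"))
    # https://mirror.example.com/{repo_id}/resolve/{revision}/{filename}

    # New construction: plain concatenation keeps the prefix.
    print("https://mirror.example.com/hf" + "/{repo_id}/resolve/{revision}/{filename}")
    # https://mirror.example.com/hf/{repo_id}/resolve/{revision}/{filename}
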
@@ -79,7 +80,7 @@ INFERENCE_ENDPOINT = os.environ.get("HF_INFERENCE_ENDPOINT", "https://api-infere
 INFERENCE_ENDPOINTS_ENDPOINT = "https://api.endpoints.huggingface.cloud/v2"
 
 # Proxy for third-party providers
-INFERENCE_PROXY_TEMPLATE = urljoin(ENDPOINT, "/api/inference-proxy/{provider}")
+INFERENCE_PROXY_TEMPLATE = "https://router.huggingface.co/{provider}"
 
 REPO_ID_SEPARATOR = "--"
 # ^ this substring is not allowed in repo_ids on hf.co
@@ -133,6 +134,10 @@ HF_ASSETS_CACHE = os.getenv("HF_ASSETS_CACHE", HUGGINGFACE_ASSETS_CACHE)
 
 HF_HUB_OFFLINE = _is_true(os.environ.get("HF_HUB_OFFLINE") or os.environ.get("TRANSFORMERS_OFFLINE"))
 
+# If set, log level will be set to DEBUG and all requests made to the Hub will be logged
+# as curl commands for reproducibility.
+HF_DEBUG = _is_true(os.environ.get("HF_DEBUG"))
+
 # Opt-out from telemetry requests
 HF_HUB_DISABLE_TELEMETRY = (
     _is_true(os.environ.get("HF_HUB_DISABLE_TELEMETRY"))  # HF-specific env variable
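
A minimal usage sketch for the new `HF_DEBUG` flag. Since the constant is read from the environment when `huggingface_hub` is imported, the variable must be set beforehand; the repo and file names are just illustrative:

    import os

    os.environ["HF_DEBUG"] = "1"  # must be set before the import below

    from huggingface_hub import hf_hub_download

    # Per the comment in constants.py, the library now switches its logger to DEBUG
    # and logs every request to the Hub as a curl command for reproducibility.
    hf_hub_download("gpt2", "config.json")
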
@@ -140,18 +145,15 @@ HF_HUB_DISABLE_TELEMETRY = (
     or _is_true(os.environ.get("DO_NOT_TRACK"))  # https://consoledonottrack.com/
 )
 
-# In the past, token was stored in a hardcoded location
-# `_OLD_HF_TOKEN_PATH` is deprecated and will be removed "at some point".
-# See https://github.com/huggingface/huggingface_hub/issues/1232
-_OLD_HF_TOKEN_PATH = os.path.expanduser("~/.huggingface/token")
 HF_TOKEN_PATH = os.environ.get("HF_TOKEN_PATH", os.path.join(HF_HOME, "token"))
 HF_STORED_TOKENS_PATH = os.path.join(os.path.dirname(HF_TOKEN_PATH), "stored_tokens")
 
 if _staging_mode:
     # In staging mode, we use a different cache to ensure we don't mix up production and staging data or tokens
+    # In practice in `huggingface_hub` tests, we monkeypatch these values with temporary directories. The following
+    # lines are only used in third-party libraries tests (e.g. `transformers`, `diffusers`, etc.).
     _staging_home = os.path.join(os.path.expanduser("~"), ".cache", "huggingface_staging")
     HUGGINGFACE_HUB_CACHE = os.path.join(_staging_home, "hub")
-    _OLD_HF_TOKEN_PATH = os.path.join(_staging_home, "_old_token")
     HF_TOKEN_PATH = os.path.join(_staging_home, "token")
 
 # Here, `True` will disable progress bars globally without possibility of enabling it
@@ -192,6 +194,9 @@ HF_HUB_ETAG_TIMEOUT: int = _as_int(os.environ.get("HF_HUB_ETAG_TIMEOUT")) or DEF
 # Used to override the get request timeout on a system level
 HF_HUB_DOWNLOAD_TIMEOUT: int = _as_int(os.environ.get("HF_HUB_DOWNLOAD_TIMEOUT")) or DEFAULT_DOWNLOAD_TIMEOUT
 
+# Allows to add information about the requester in the user-agent (eg. partner name)
+HF_HUB_USER_AGENT_ORIGIN: Optional[str] = os.environ.get("HF_HUB_USER_AGENT_ORIGIN")
+
 # List frameworks that are handled by the InferenceAPI service. Useful to scan endpoints and check which models are
 # deployed and running. Since 95% of the models are using the top 4 frameworks listed below, we scan only those by
 # default. We still keep the full list of supported frameworks in case we want to scan all of them.
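
A hedged sketch for `HF_HUB_USER_AGENT_ORIGIN`; the value below is illustrative, and the actual header assembly lives in `huggingface_hub/utils/_headers.py` (also touched in this release, +5 lines):

    import os

    os.environ["HF_HUB_USER_AGENT_ORIGIN"] = "my-partner-app"  # illustrative origin

    from huggingface_hub import HfApi

    # Requests issued from here on carry the origin in their user-agent string,
    # which lets the Hub attribute traffic to the requester (e.g. a partner).
    HfApi().model_info("gpt2")
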
huggingface_hub/file_download.py CHANGED
@@ -29,6 +29,7 @@ from .errors import (
     EntryNotFoundError,
     FileMetadataError,
     GatedRepoError,
+    HfHubHTTPError,
     LocalEntryNotFoundError,
     RepositoryNotFoundError,
     RevisionNotFoundError,
@@ -59,6 +60,7 @@ from .utils import (
     tqdm,
     validate_hf_hub_args,
 )
+from .utils._http import _adjust_range_header
 from .utils._runtime import _PY_VERSION  # noqa: F401 # for backward compatibility
 from .utils._typing import HTTP_METHOD_T
 from .utils.sha import sha_fileobj
@@ -308,8 +310,8 @@ def http_get(
     temp_file: BinaryIO,
     *,
     proxies: Optional[Dict] = None,
-    resume_size: float = 0,
-    headers: Optional[Dict[str, str]] = None,
+    resume_size: int = 0,
+    headers: Optional[Dict[str, Any]] = None,
     expected_size: Optional[int] = None,
     displayed_filename: Optional[str] = None,
     _nb_retries: int = 5,
@@ -329,7 +331,7 @@ def http_get(
             The file-like object where to save the file.
         proxies (`dict`, *optional*):
             Dictionary mapping protocol to the URL of the proxy passed to `requests.request`.
-        resume_size (`float`, *optional*):
+        resume_size (`int`, *optional*):
             The number of bytes already downloaded. If set to 0 (default), the whole file is download. If set to a
             positive number, the download will resume at the given position.
         headers (`dict`, *optional*):
@@ -364,7 +366,7 @@ def http_get(
     initial_headers = headers
     headers = copy.deepcopy(headers) or {}
     if resume_size > 0:
-        headers["Range"] = "bytes=%d-" % (resume_size,)
+        headers["Range"] = _adjust_range_header(headers.get("Range"), resume_size)
 
     r = _request_wrapper(
         method="GET", url=url, stream=True, proxies=proxies, headers=headers, timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT
@@ -1461,7 +1463,6 @@ def _get_metadata_or_catch_error(
 
 def _raise_on_head_call_error(head_call_error: Exception, force_download: bool, local_files_only: bool) -> NoReturn:
     """Raise an appropriate error when the HEAD call failed and we cannot locate a local file."""
-
     # No head call => we cannot force download.
     if force_download:
         if local_files_only:
@@ -1477,8 +1478,11 @@ def _raise_on_head_call_error(head_call_error: Exception, force_download: bool,
             "Cannot find the requested files in the disk cache and outgoing traffic has been disabled. To enable"
             " hf.co look-ups and downloads online, set 'local_files_only' to False."
         )
-    elif isinstance(head_call_error, RepositoryNotFoundError) or isinstance(head_call_error, GatedRepoError):
+    elif isinstance(head_call_error, (RepositoryNotFoundError, GatedRepoError)) or (
+        isinstance(head_call_error, HfHubHTTPError) and head_call_error.response.status_code == 401
+    ):
         # Repo not found or gated => let's raise the actual error
+        # Unauthorized => likely a token issue => let's raise the actual error
         raise head_call_error
     else:
         # Otherwise: most likely a connection issue or Hub downtime => let's warn the user
huggingface_hub/hf_api.py CHANGED
@@ -115,6 +115,7 @@ from .utils import (
     filter_repo_objects,
     fix_hf_endpoint_in_url,
     get_session,
+    get_token,
     hf_raise_for_status,
     logging,
     paginate,
@@ -122,6 +123,7 @@ from .utils import (
     validate_hf_hub_args,
 )
 from .utils import tqdm as hf_tqdm
+from .utils._auth import _get_token_from_environment, _get_token_from_file, _get_token_from_google_colab
 from .utils._deprecation import _deprecate_method
 from .utils._typing import CallableT
 from .utils.endpoint_helpers import _is_emission_within_threshold
@@ -143,6 +145,7 @@ ExpandModelProperty_T = Literal[
     "gated",
     "gguf",
     "inference",
+    "inferenceProviderMapping",
     "lastModified",
     "library_name",
     "likes",
@@ -150,6 +153,7 @@ ExpandModelProperty_T = Literal[
     "model-index",
     "pipeline_tag",
     "private",
+    "resourceGroup",
     "safetensors",
     "sha",
     "siblings",
@@ -157,9 +161,8 @@ ExpandModelProperty_T = Literal[
     "tags",
     "transformersInfo",
     "trendingScore",
-    "widgetData",
     "usedStorage",
-    "resourceGroup",
+    "widgetData",
 ]
 
 ExpandDatasetProperty_T = Literal[
@@ -167,8 +170,8 @@ ExpandDatasetProperty_T = Literal[
     "cardData",
     "citation",
     "createdAt",
-    "disabled",
     "description",
+    "disabled",
     "downloads",
     "downloadsAllTime",
     "gated",
@@ -176,12 +179,12 @@ ExpandDatasetProperty_T = Literal[
     "likes",
     "paperswithcode_id",
     "private",
-    "siblings",
+    "resourceGroup",
     "sha",
-    "trendingScore",
+    "siblings",
     "tags",
+    "trendingScore",
     "usedStorage",
-    "resourceGroup",
 ]
 
 ExpandSpaceProperty_T = Literal[
@@ -194,15 +197,15 @@ ExpandSpaceProperty_T = Literal[
     "likes",
     "models",
     "private",
+    "resourceGroup",
     "runtime",
     "sdk",
-    "siblings",
     "sha",
+    "siblings",
     "subdomain",
     "tags",
     "trendingScore",
     "usedStorage",
-    "resourceGroup",
 ]
 
 USERNAME_PLACEHOLDER = "hf_user"
@@ -695,6 +698,19 @@ class RepoFolder:
         self.last_commit = last_commit
 
 
+@dataclass
+class InferenceProviderMapping:
+    status: Literal["live", "staging"]
+    provider_id: str
+    task: str
+
+    def __init__(self, **kwargs):
+        self.status = kwargs.pop("status")
+        self.provider_id = kwargs.pop("providerId")
+        self.task = kwargs.pop("task")
+        self.__dict__.update(**kwargs)
+
+
 @dataclass
 class ModelInfo:
     """
@@ -737,6 +753,8 @@ class ModelInfo:
             Status of the model on the inference API.
             Warm models are available for immediate use. Cold models will be loaded on first inference call.
             Frozen models are not available in Inference API.
+        inference_provider_mapping (`Dict`, *optional*):
+            Model's inference provider mapping.
         likes (`int`):
             Number of likes of the model.
         library_name (`str`, *optional*):
@@ -782,6 +800,7 @@ class ModelInfo:
     gated: Optional[Literal["auto", "manual", False]]
     gguf: Optional[Dict]
     inference: Optional[Literal["warm", "cold", "frozen"]]
+    inference_provider_mapping: Optional[Dict[str, InferenceProviderMapping]]
     likes: Optional[int]
     library_name: Optional[str]
     tags: Optional[List[str]]
@@ -814,7 +833,15 @@ class ModelInfo:
         self.likes = kwargs.pop("likes", None)
         self.library_name = kwargs.pop("library_name", None)
         self.gguf = kwargs.pop("gguf", None)
+
         self.inference = kwargs.pop("inference", None)
+        self.inference_provider_mapping = kwargs.pop("inferenceProviderMapping", None)
+        if self.inference_provider_mapping:
+            self.inference_provider_mapping = {
+                provider: InferenceProviderMapping(**value)
+                for provider, value in self.inference_provider_mapping.items()
+            }
+
         self.tags = kwargs.pop("tags", None)
         self.pipeline_tag = kwargs.pop("pipeline_tag", None)
         self.mask_token = kwargs.pop("mask_token", None)
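
Combined with the new `"inferenceProviderMapping"` expand property (see the docstring hunks further down), the field can be requested explicitly. The model id below is illustrative and the providers returned depend on the Hub's current mapping:

    from huggingface_hub import HfApi

    info = HfApi().model_info("deepseek-ai/DeepSeek-R1", expand=["inferenceProviderMapping"])
    if info.inference_provider_mapping:
        for provider, mapping in info.inference_provider_mapping.items():
            print(provider, mapping.status, mapping.provider_id, mapping.task)
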
@@ -1627,24 +1654,27 @@ class HfApi:
                 https://huggingface.co/docs/huggingface_hub/quick-start#authentication).
                 To disable authentication, pass `False`.
         """
+        # Get the effective token using the helper function get_token
+        effective_token = token or self.token or get_token() or True
         r = get_session().get(
             f"{self.endpoint}/api/whoami-v2",
-            headers=self._build_hf_headers(
-                # If `token` is provided and not `None`, it will be used by default.
-                # Otherwise, the token must be retrieved from cache or env variable.
-                token=(token or self.token or True),
-            ),
+            headers=self._build_hf_headers(token=effective_token),
         )
         try:
             hf_raise_for_status(r)
         except HTTPError as e:
-            raise HTTPError(
-                "Invalid user token. If you didn't pass a user token, make sure you "
-                "are properly logged in by executing `huggingface-cli login`, and "
-                "if you did pass a user token, double-check it's correct.",
-                request=e.request,
-                response=e.response,
-            ) from e
+            error_message = "Invalid user token."
+            # Check which token is the effective one and generate the error message accordingly
+            if effective_token == _get_token_from_google_colab():
+                error_message += " The token from Google Colab vault is invalid. Please update it from the UI."
+            elif effective_token == _get_token_from_environment():
+                error_message += (
+                    " The token from HF_TOKEN environment variable is invalid. "
+                    "Note that HF_TOKEN takes precedence over `huggingface-cli login`."
+                )
+            elif effective_token == _get_token_from_file():
+                error_message += " The token stored is invalid. Please run `huggingface-cli login` to update it."
+            raise HTTPError(error_message, request=e.request, response=e.response) from e
         return r.json()
 
     @_deprecate_method(
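
The `effective_token` line above makes the resolution order explicit: the `token` argument, then the instance token, then `get_token()` (which checks the Google Colab vault, the `HF_TOKEN` environment variable, and the stored token file, mirroring the error branches), and finally `True` as a sentinel. A short illustration; the token string is a placeholder:

    from huggingface_hub import HfApi

    api = HfApi()
    api.whoami()                # falls back to get_token(): Colab vault, HF_TOKEN, token file
    api.whoami(token="hf_xxx")  # an explicit token always wins; if it is invalid, the
                                # HTTPError message now points at the token source when it can
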
@@ -1788,7 +1818,7 @@ class HfApi:
             expand (`List[ExpandModelProperty_T]`, *optional*):
                 List properties to return in the response. When used, only the properties in the list will be returned.
                 This parameter cannot be used if `full`, `cardData` or `fetch_config` are passed.
-                Possible values are `"author"`, `"baseModels"`, `"cardData"`, `"childrenModelCount"`, `"config"`, `"createdAt"`, `"disabled"`, `"downloads"`, `"downloadsAllTime"`, `"gated"`, `"gguf"`, `"inference"`, `"lastModified"`, `"library_name"`, `"likes"`, `"mask_token"`, `"model-index"`, `"pipeline_tag"`, `"private"`, `"safetensors"`, `"sha"`, `"siblings"`, `"spaces"`, `"tags"`, `"transformersInfo"`, `"trendingScore"`, `"widgetData"`, `"usedStorage"` and `"resourceGroup"`.
+                Possible values are `"author"`, `"baseModels"`, `"cardData"`, `"childrenModelCount"`, `"config"`, `"createdAt"`, `"disabled"`, `"downloads"`, `"downloadsAllTime"`, `"gated"`, `"gguf"`, `"inference"`, `"inferenceProviderMapping"`, `"lastModified"`, `"library_name"`, `"likes"`, `"mask_token"`, `"model-index"`, `"pipeline_tag"`, `"private"`, `"safetensors"`, `"sha"`, `"siblings"`, `"spaces"`, `"tags"`, `"transformersInfo"`, `"trendingScore"`, `"widgetData"`, `"usedStorage"` and `"resourceGroup"`.
             full (`bool`, *optional*):
                 Whether to fetch all model data, including the `last_modified`,
                 the `sha`, the files and the `tags`. This is set to `True` by
@@ -2447,7 +2477,7 @@ class HfApi:
             expand (`List[ExpandModelProperty_T]`, *optional*):
                 List properties to return in the response. When used, only the properties in the list will be returned.
                 This parameter cannot be used if `securityStatus` or `files_metadata` are passed.
-                Possible values are `"author"`, `"baseModels"`, `"cardData"`, `"childrenModelCount"`, `"config"`, `"createdAt"`, `"disabled"`, `"downloads"`, `"downloadsAllTime"`, `"gated"`, `"gguf"`, `"inference"`, `"lastModified"`, `"library_name"`, `"likes"`, `"mask_token"`, `"model-index"`, `"pipeline_tag"`, `"private"`, `"safetensors"`, `"sha"`, `"siblings"`, `"spaces"`, `"tags"`, `"transformersInfo"`, `"trendingScore"`, `"widgetData"`, `"usedStorage"` and `"resourceGroup"`.
+                Possible values are `"author"`, `"baseModels"`, `"cardData"`, `"childrenModelCount"`, `"config"`, `"createdAt"`, `"disabled"`, `"downloads"`, `"downloadsAllTime"`, `"gated"`, `"gguf"`, `"inference"`, `"inferenceProviderMapping"`, `"lastModified"`, `"library_name"`, `"likes"`, `"mask_token"`, `"model-index"`, `"pipeline_tag"`, `"private"`, `"safetensors"`, `"sha"`, `"siblings"`, `"spaces"`, `"tags"`, `"transformersInfo"`, `"trendingScore"`, `"widgetData"`, `"usedStorage"` and `"resourceGroup"`.
             token (Union[bool, str, None], optional):
                 A valid user access token (string). Defaults to the locally saved
                 token, which is the recommended method for authentication (see
@@ -3548,7 +3578,7 @@ class HfApi:
         if not missing_ok:
             raise
 
-    @_deprecate_method(version="0.29", message="Please use `update_repo_settings` instead.")
+    @_deprecate_method(version="0.32", message="Please use `update_repo_settings` instead.")
     @validate_hf_hub_args
     def update_repo_visibility(
         self,