huggingface-hub 0.31.0rc0__py3-none-any.whl → 1.1.3__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to their respective public registries. It is provided for informational purposes only.
Files changed (150)
  1. huggingface_hub/__init__.py +145 -46
  2. huggingface_hub/_commit_api.py +168 -119
  3. huggingface_hub/_commit_scheduler.py +15 -15
  4. huggingface_hub/_inference_endpoints.py +15 -12
  5. huggingface_hub/_jobs_api.py +301 -0
  6. huggingface_hub/_local_folder.py +18 -3
  7. huggingface_hub/_login.py +31 -63
  8. huggingface_hub/_oauth.py +460 -0
  9. huggingface_hub/_snapshot_download.py +239 -80
  10. huggingface_hub/_space_api.py +5 -5
  11. huggingface_hub/_tensorboard_logger.py +15 -19
  12. huggingface_hub/_upload_large_folder.py +172 -76
  13. huggingface_hub/_webhooks_payload.py +3 -3
  14. huggingface_hub/_webhooks_server.py +13 -25
  15. huggingface_hub/{commands → cli}/__init__.py +1 -15
  16. huggingface_hub/cli/_cli_utils.py +173 -0
  17. huggingface_hub/cli/auth.py +147 -0
  18. huggingface_hub/cli/cache.py +841 -0
  19. huggingface_hub/cli/download.py +189 -0
  20. huggingface_hub/cli/hf.py +60 -0
  21. huggingface_hub/cli/inference_endpoints.py +377 -0
  22. huggingface_hub/cli/jobs.py +772 -0
  23. huggingface_hub/cli/lfs.py +175 -0
  24. huggingface_hub/cli/repo.py +315 -0
  25. huggingface_hub/cli/repo_files.py +94 -0
  26. huggingface_hub/{commands/env.py → cli/system.py} +10 -13
  27. huggingface_hub/cli/upload.py +294 -0
  28. huggingface_hub/cli/upload_large_folder.py +117 -0
  29. huggingface_hub/community.py +20 -12
  30. huggingface_hub/constants.py +38 -53
  31. huggingface_hub/dataclasses.py +609 -0
  32. huggingface_hub/errors.py +80 -30
  33. huggingface_hub/fastai_utils.py +30 -41
  34. huggingface_hub/file_download.py +435 -351
  35. huggingface_hub/hf_api.py +2050 -1124
  36. huggingface_hub/hf_file_system.py +269 -152
  37. huggingface_hub/hub_mixin.py +43 -63
  38. huggingface_hub/inference/_client.py +347 -434
  39. huggingface_hub/inference/_common.py +133 -121
  40. huggingface_hub/inference/_generated/_async_client.py +397 -541
  41. huggingface_hub/inference/_generated/types/__init__.py +5 -1
  42. huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +3 -3
  43. huggingface_hub/inference/_generated/types/base.py +10 -7
  44. huggingface_hub/inference/_generated/types/chat_completion.py +59 -23
  45. huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
  46. huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
  47. huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
  48. huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
  49. huggingface_hub/inference/_generated/types/image_to_image.py +6 -2
  50. huggingface_hub/inference/_generated/types/image_to_video.py +60 -0
  51. huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
  52. huggingface_hub/inference/_generated/types/summarization.py +2 -2
  53. huggingface_hub/inference/_generated/types/table_question_answering.py +5 -5
  54. huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
  55. huggingface_hub/inference/_generated/types/text_generation.py +10 -10
  56. huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
  57. huggingface_hub/inference/_generated/types/token_classification.py +2 -2
  58. huggingface_hub/inference/_generated/types/translation.py +2 -2
  59. huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
  60. huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
  61. huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
  62. huggingface_hub/inference/_mcp/__init__.py +0 -0
  63. huggingface_hub/inference/_mcp/_cli_hacks.py +88 -0
  64. huggingface_hub/inference/_mcp/agent.py +100 -0
  65. huggingface_hub/inference/_mcp/cli.py +247 -0
  66. huggingface_hub/inference/_mcp/constants.py +81 -0
  67. huggingface_hub/inference/_mcp/mcp_client.py +395 -0
  68. huggingface_hub/inference/_mcp/types.py +45 -0
  69. huggingface_hub/inference/_mcp/utils.py +128 -0
  70. huggingface_hub/inference/_providers/__init__.py +82 -7
  71. huggingface_hub/inference/_providers/_common.py +129 -27
  72. huggingface_hub/inference/_providers/black_forest_labs.py +6 -6
  73. huggingface_hub/inference/_providers/cerebras.py +1 -1
  74. huggingface_hub/inference/_providers/clarifai.py +13 -0
  75. huggingface_hub/inference/_providers/cohere.py +20 -3
  76. huggingface_hub/inference/_providers/fal_ai.py +183 -56
  77. huggingface_hub/inference/_providers/featherless_ai.py +38 -0
  78. huggingface_hub/inference/_providers/fireworks_ai.py +18 -0
  79. huggingface_hub/inference/_providers/groq.py +9 -0
  80. huggingface_hub/inference/_providers/hf_inference.py +69 -30
  81. huggingface_hub/inference/_providers/hyperbolic.py +4 -4
  82. huggingface_hub/inference/_providers/nebius.py +33 -5
  83. huggingface_hub/inference/_providers/novita.py +5 -5
  84. huggingface_hub/inference/_providers/nscale.py +44 -0
  85. huggingface_hub/inference/_providers/openai.py +3 -1
  86. huggingface_hub/inference/_providers/publicai.py +6 -0
  87. huggingface_hub/inference/_providers/replicate.py +31 -13
  88. huggingface_hub/inference/_providers/sambanova.py +18 -4
  89. huggingface_hub/inference/_providers/scaleway.py +28 -0
  90. huggingface_hub/inference/_providers/together.py +20 -5
  91. huggingface_hub/inference/_providers/wavespeed.py +138 -0
  92. huggingface_hub/inference/_providers/zai_org.py +17 -0
  93. huggingface_hub/lfs.py +33 -100
  94. huggingface_hub/repocard.py +34 -38
  95. huggingface_hub/repocard_data.py +57 -57
  96. huggingface_hub/serialization/__init__.py +0 -1
  97. huggingface_hub/serialization/_base.py +12 -15
  98. huggingface_hub/serialization/_dduf.py +8 -8
  99. huggingface_hub/serialization/_torch.py +69 -69
  100. huggingface_hub/utils/__init__.py +19 -8
  101. huggingface_hub/utils/_auth.py +7 -7
  102. huggingface_hub/utils/_cache_manager.py +92 -147
  103. huggingface_hub/utils/_chunk_utils.py +2 -3
  104. huggingface_hub/utils/_deprecation.py +1 -1
  105. huggingface_hub/utils/_dotenv.py +55 -0
  106. huggingface_hub/utils/_experimental.py +7 -5
  107. huggingface_hub/utils/_fixes.py +0 -10
  108. huggingface_hub/utils/_git_credential.py +5 -5
  109. huggingface_hub/utils/_headers.py +8 -30
  110. huggingface_hub/utils/_http.py +398 -239
  111. huggingface_hub/utils/_pagination.py +4 -4
  112. huggingface_hub/utils/_parsing.py +98 -0
  113. huggingface_hub/utils/_paths.py +5 -5
  114. huggingface_hub/utils/_runtime.py +61 -24
  115. huggingface_hub/utils/_safetensors.py +21 -21
  116. huggingface_hub/utils/_subprocess.py +9 -9
  117. huggingface_hub/utils/_telemetry.py +4 -4
  118. huggingface_hub/{commands/_cli_utils.py → utils/_terminal.py} +4 -4
  119. huggingface_hub/utils/_typing.py +25 -5
  120. huggingface_hub/utils/_validators.py +55 -74
  121. huggingface_hub/utils/_verification.py +167 -0
  122. huggingface_hub/utils/_xet.py +64 -17
  123. huggingface_hub/utils/_xet_progress_reporting.py +162 -0
  124. huggingface_hub/utils/insecure_hashlib.py +3 -5
  125. huggingface_hub/utils/logging.py +8 -11
  126. huggingface_hub/utils/tqdm.py +5 -4
  127. {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/METADATA +94 -85
  128. huggingface_hub-1.1.3.dist-info/RECORD +155 -0
  129. {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/WHEEL +1 -1
  130. huggingface_hub-1.1.3.dist-info/entry_points.txt +6 -0
  131. huggingface_hub/commands/delete_cache.py +0 -474
  132. huggingface_hub/commands/download.py +0 -200
  133. huggingface_hub/commands/huggingface_cli.py +0 -61
  134. huggingface_hub/commands/lfs.py +0 -200
  135. huggingface_hub/commands/repo_files.py +0 -128
  136. huggingface_hub/commands/scan_cache.py +0 -181
  137. huggingface_hub/commands/tag.py +0 -159
  138. huggingface_hub/commands/upload.py +0 -314
  139. huggingface_hub/commands/upload_large_folder.py +0 -129
  140. huggingface_hub/commands/user.py +0 -304
  141. huggingface_hub/commands/version.py +0 -37
  142. huggingface_hub/inference_api.py +0 -217
  143. huggingface_hub/keras_mixin.py +0 -500
  144. huggingface_hub/repository.py +0 -1477
  145. huggingface_hub/serialization/_tensorflow.py +0 -95
  146. huggingface_hub/utils/_hf_folder.py +0 -68
  147. huggingface_hub-0.31.0rc0.dist-info/RECORD +0 -135
  148. huggingface_hub-0.31.0rc0.dist-info/entry_points.txt +0 -6
  149. {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info/licenses}/LICENSE +0 -0
  150. {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/top_level.txt +0 -0
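Most of the hunks below are a mechanical modernization of type annotations: the package drops `typing.Dict`, `List`, and `Tuple` in favor of the builtin generics (`dict`, `list`, `tuple`) available on Python 3.9+, keeping only `Any`, `Optional`, `Union`, and similar imports. A minimal sketch of the before/after style, using a hypothetical helper that is not part of huggingface_hub:

```python
from typing import Any, Optional  # Dict/List/Tuple imports are no longer needed

# Hypothetical helper (not from huggingface_hub) written in the post-migration
# style: builtin generics instead of typing.Dict/List/Tuple (requires Python 3.9+).
def summarize_metrics(
    metric_args: Optional[dict[str, Any]] = None,
    tags: Optional[list[str]] = None,
) -> tuple[int, list[str]]:
    """Return the number of metric arguments and a de-duplicated, sorted copy of the tags."""
    args: dict[str, Any] = metric_args or {}
    unique_tags: list[str] = sorted(set(tags or []))
    return len(args), unique_tags


print(summarize_metrics({"max_order": 4}, ["bleu", "wer", "bleu"]))  # (1, ['bleu', 'wer'])
```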
@@ -1,7 +1,7 @@
  import copy
  from collections import defaultdict
  from dataclasses import dataclass
- from typing import Any, Dict, List, Optional, Tuple, Union
+ from typing import Any, Optional, Union

  from huggingface_hub.utils import logging, yaml_dump

@@ -38,7 +38,7 @@ class EvalResult:
  dataset_revision (`str`, *optional*):
  The revision (AKA Git Sha) of the dataset used in `load_dataset()`.
  Example: 5503434ddd753f426f4b38109466949a1217c2bb
- dataset_args (`Dict[str, Any]`, *optional*):
+ dataset_args (`dict[str, Any]`, *optional*):
  The arguments passed during `Metric.compute()`. Example for `bleu`: `{"max_order": 4}`
  metric_name (`str`, *optional*):
  A pretty name for the metric. Example: "Test WER".
@@ -46,7 +46,7 @@ class EvalResult:
  The name of the metric configuration used in `load_metric()`.
  Example: bleurt-large-512 in `load_metric("bleurt", "bleurt-large-512")`.
  See the `datasets` docs for more info: https://huggingface.co/docs/datasets/v2.1.0/en/loading#load-configurations
- metric_args (`Dict[str, Any]`, *optional*):
+ metric_args (`dict[str, Any]`, *optional*):
  The arguments passed during `Metric.compute()`. Example for `bleu`: max_order: 4
  verified (`bool`, *optional*):
  Indicates whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
@@ -102,7 +102,7 @@ class EvalResult:

  # The arguments passed during `Metric.compute()`.
  # Example for `bleu`: max_order: 4
- dataset_args: Optional[Dict[str, Any]] = None
+ dataset_args: Optional[dict[str, Any]] = None

  # A pretty name for the metric.
  # Example: Test WER
@@ -115,7 +115,7 @@ class EvalResult:

  # The arguments passed during `Metric.compute()`.
  # Example for `bleu`: max_order: 4
- metric_args: Optional[Dict[str, Any]] = None
+ metric_args: Optional[dict[str, Any]] = None

  # Indicates whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
  verified: Optional[bool] = None
@@ -195,7 +195,7 @@ class CardData:
  """
  pass

- def to_yaml(self, line_break=None, original_order: Optional[List[str]] = None) -> str:
+ def to_yaml(self, line_break=None, original_order: Optional[list[str]] = None) -> str:
  """Dumps CardData to a YAML block for inclusion in a README.md file.

  Args:
@@ -246,9 +246,9 @@ class CardData:


  def _validate_eval_results(
- eval_results: Optional[Union[EvalResult, List[EvalResult]]],
+ eval_results: Optional[Union[EvalResult, list[EvalResult]]],
  model_name: Optional[str],
- ) -> List[EvalResult]:
+ ) -> list[EvalResult]:
  if eval_results is None:
  return []
  if isinstance(eval_results, EvalResult):
@@ -266,17 +266,17 @@ class ModelCardData(CardData):
  """Model Card Metadata that is used by Hugging Face Hub when included at the top of your README.md

  Args:
- base_model (`str` or `List[str]`, *optional*):
+ base_model (`str` or `list[str]`, *optional*):
  The identifier of the base model from which the model derives. This is applicable for example if your model is a
  fine-tune or adapter of an existing model. The value must be the ID of a model on the Hub (or a list of IDs
  if your model derives from multiple models). Defaults to None.
- datasets (`Union[str, List[str]]`, *optional*):
+ datasets (`Union[str, list[str]]`, *optional*):
  Dataset or list of datasets that were used to train this model. Should be a dataset ID
  found on https://hf.co/datasets. Defaults to None.
- eval_results (`Union[List[EvalResult], EvalResult]`, *optional*):
+ eval_results (`Union[list[EvalResult], EvalResult]`, *optional*):
  List of `huggingface_hub.EvalResult` that define evaluation results of the model. If provided,
  `model_name` is used to as a name on PapersWithCode's leaderboards. Defaults to `None`.
- language (`Union[str, List[str]]`, *optional*):
+ language (`Union[str, list[str]]`, *optional*):
  Language of model's training data or metadata. It must be an ISO 639-1, 639-2 or
  639-3 code (two/three letters), or a special value like "code", "multilingual". Defaults to `None`.
  library_name (`str`, *optional*):
@@ -292,7 +292,7 @@ class ModelCardData(CardData):
  license_link (`str`, *optional*):
  Link to the license of this model. Defaults to None. To be used in conjunction with `license_name`.
  Common licenses (Apache-2.0, MIT, CC-BY-SA-4.0) do not need a link. In that case, use `license` instead.
- metrics (`List[str]`, *optional*):
+ metrics (`list[str]`, *optional*):
  List of metrics used to evaluate this model. Should be a metric name that can be found
  at https://hf.co/metrics. Example: 'accuracy'. Defaults to None.
  model_name (`str`, *optional*):
@@ -302,7 +302,7 @@ class ModelCardData(CardData):
  then the repo name is used as a default. Defaults to None.
  pipeline_tag (`str`, *optional*):
  The pipeline tag associated with the model. Example: "text-classification".
- tags (`List[str]`, *optional*):
+ tags (`list[str]`, *optional*):
  List of tags to add to your model that can be used when filtering on the Hugging
  Face Hub. Defaults to None.
  ignore_metadata_errors (`str`):
@@ -329,18 +329,18 @@ class ModelCardData(CardData):
  def __init__(
  self,
  *,
- base_model: Optional[Union[str, List[str]]] = None,
- datasets: Optional[Union[str, List[str]]] = None,
- eval_results: Optional[List[EvalResult]] = None,
- language: Optional[Union[str, List[str]]] = None,
+ base_model: Optional[Union[str, list[str]]] = None,
+ datasets: Optional[Union[str, list[str]]] = None,
+ eval_results: Optional[list[EvalResult]] = None,
+ language: Optional[Union[str, list[str]]] = None,
  library_name: Optional[str] = None,
  license: Optional[str] = None,
  license_name: Optional[str] = None,
  license_link: Optional[str] = None,
- metrics: Optional[List[str]] = None,
+ metrics: Optional[list[str]] = None,
  model_name: Optional[str] = None,
  pipeline_tag: Optional[str] = None,
- tags: Optional[List[str]] = None,
+ tags: Optional[list[str]] = None,
  ignore_metadata_errors: bool = False,
  **kwargs,
  ):
@@ -387,7 +387,7 @@ class ModelCardData(CardData):
  def _to_dict(self, data_dict):
  """Format the internal data dict. In this case, we convert eval results to a valid model index"""
  if self.eval_results is not None:
- data_dict["model-index"] = eval_results_to_model_index(self.model_name, self.eval_results)
+ data_dict["model-index"] = eval_results_to_model_index(self.model_name, self.eval_results) # type: ignore
  del data_dict["eval_results"], data_dict["model_name"]


@@ -395,58 +395,58 @@ class DatasetCardData(CardData):
  """Dataset Card Metadata that is used by Hugging Face Hub when included at the top of your README.md

  Args:
- language (`List[str]`, *optional*):
+ language (`list[str]`, *optional*):
  Language of dataset's data or metadata. It must be an ISO 639-1, 639-2 or
  639-3 code (two/three letters), or a special value like "code", "multilingual".
- license (`Union[str, List[str]]`, *optional*):
+ license (`Union[str, list[str]]`, *optional*):
  License(s) of this dataset. Example: apache-2.0 or any license from
  https://huggingface.co/docs/hub/repositories-licenses.
- annotations_creators (`Union[str, List[str]]`, *optional*):
+ annotations_creators (`Union[str, list[str]]`, *optional*):
  How the annotations for the dataset were created.
  Options are: 'found', 'crowdsourced', 'expert-generated', 'machine-generated', 'no-annotation', 'other'.
- language_creators (`Union[str, List[str]]`, *optional*):
+ language_creators (`Union[str, list[str]]`, *optional*):
  How the text-based data in the dataset was created.
  Options are: 'found', 'crowdsourced', 'expert-generated', 'machine-generated', 'other'
- multilinguality (`Union[str, List[str]]`, *optional*):
+ multilinguality (`Union[str, list[str]]`, *optional*):
  Whether the dataset is multilingual.
  Options are: 'monolingual', 'multilingual', 'translation', 'other'.
- size_categories (`Union[str, List[str]]`, *optional*):
+ size_categories (`Union[str, list[str]]`, *optional*):
  The number of examples in the dataset. Options are: 'n<1K', '1K<n<10K', '10K<n<100K',
  '100K<n<1M', '1M<n<10M', '10M<n<100M', '100M<n<1B', '1B<n<10B', '10B<n<100B', '100B<n<1T', 'n>1T', and 'other'.
- source_datasets (`List[str]]`, *optional*):
+ source_datasets (`list[str]]`, *optional*):
  Indicates whether the dataset is an original dataset or extended from another existing dataset.
  Options are: 'original' and 'extended'.
- task_categories (`Union[str, List[str]]`, *optional*):
+ task_categories (`Union[str, list[str]]`, *optional*):
  What categories of task does the dataset support?
- task_ids (`Union[str, List[str]]`, *optional*):
+ task_ids (`Union[str, list[str]]`, *optional*):
  What specific tasks does the dataset support?
  paperswithcode_id (`str`, *optional*):
  ID of the dataset on PapersWithCode.
  pretty_name (`str`, *optional*):
  A more human-readable name for the dataset. (ex. "Cats vs. Dogs")
- train_eval_index (`Dict`, *optional*):
+ train_eval_index (`dict`, *optional*):
  A dictionary that describes the necessary spec for doing evaluation on the Hub.
  If not provided, it will be gathered from the 'train-eval-index' key of the kwargs.
- config_names (`Union[str, List[str]]`, *optional*):
+ config_names (`Union[str, list[str]]`, *optional*):
  A list of the available dataset configs for the dataset.
  """

  def __init__(
  self,
  *,
- language: Optional[Union[str, List[str]]] = None,
- license: Optional[Union[str, List[str]]] = None,
- annotations_creators: Optional[Union[str, List[str]]] = None,
- language_creators: Optional[Union[str, List[str]]] = None,
- multilinguality: Optional[Union[str, List[str]]] = None,
- size_categories: Optional[Union[str, List[str]]] = None,
- source_datasets: Optional[List[str]] = None,
- task_categories: Optional[Union[str, List[str]]] = None,
- task_ids: Optional[Union[str, List[str]]] = None,
+ language: Optional[Union[str, list[str]]] = None,
+ license: Optional[Union[str, list[str]]] = None,
+ annotations_creators: Optional[Union[str, list[str]]] = None,
+ language_creators: Optional[Union[str, list[str]]] = None,
+ multilinguality: Optional[Union[str, list[str]]] = None,
+ size_categories: Optional[Union[str, list[str]]] = None,
+ source_datasets: Optional[list[str]] = None,
+ task_categories: Optional[Union[str, list[str]]] = None,
+ task_ids: Optional[Union[str, list[str]]] = None,
  paperswithcode_id: Optional[str] = None,
  pretty_name: Optional[str] = None,
- train_eval_index: Optional[Dict] = None,
- config_names: Optional[Union[str, List[str]]] = None,
+ train_eval_index: Optional[dict] = None,
+ config_names: Optional[Union[str, list[str]]] = None,
  ignore_metadata_errors: bool = False,
  **kwargs,
  ):
@@ -495,11 +495,11 @@ class SpaceCardData(CardData):
  https://huggingface.co/docs/hub/repositories-licenses.
  duplicated_from (`str`, *optional*)
  ID of the original Space if this is a duplicated Space.
- models (List[`str`], *optional*)
+ models (list[`str`], *optional*)
  List of models related to this Space. Should be a dataset ID found on https://hf.co/models.
- datasets (`List[str]`, *optional*)
+ datasets (`list[str]`, *optional*)
  List of datasets related to this Space. Should be a dataset ID found on https://hf.co/datasets.
- tags (`List[str]`, *optional*)
+ tags (`list[str]`, *optional*)
  List of tags to add to your Space that can be used when filtering on the Hub.
  ignore_metadata_errors (`str`):
  If True, errors while parsing the metadata section will be ignored. Some information might be lost during
@@ -532,9 +532,9 @@ class SpaceCardData(CardData):
  app_port: Optional[int] = None,
  license: Optional[str] = None,
  duplicated_from: Optional[str] = None,
- models: Optional[List[str]] = None,
- datasets: Optional[List[str]] = None,
- tags: Optional[List[str]] = None,
+ models: Optional[list[str]] = None,
+ datasets: Optional[list[str]] = None,
+ tags: Optional[list[str]] = None,
  ignore_metadata_errors: bool = False,
  **kwargs,
  ):
@@ -552,14 +552,14 @@ class SpaceCardData(CardData):
  super().__init__(**kwargs)


- def model_index_to_eval_results(model_index: List[Dict[str, Any]]) -> Tuple[str, List[EvalResult]]:
+ def model_index_to_eval_results(model_index: list[dict[str, Any]]) -> tuple[str, list[EvalResult]]:
  """Takes in a model index and returns the model name and a list of `huggingface_hub.EvalResult` objects.

  A detailed spec of the model index can be found here:
  https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1

  Args:
- model_index (`List[Dict[str, Any]]`):
+ model_index (`list[dict[str, Any]]`):
  A model index data structure, likely coming from a README.md file on the
  Hugging Face Hub.

@@ -567,7 +567,7 @@ def model_index_to_eval_results(model_index: List[Dict[str, Any]]) -> Tuple[str,
  model_name (`str`):
  The name of the model as found in the model index. This is used as the
  identifier for the model on leaderboards like PapersWithCode.
- eval_results (`List[EvalResult]`):
+ eval_results (`list[EvalResult]`):
  A list of `huggingface_hub.EvalResult` objects containing the metrics
  reported in the provided model_index.

@@ -668,7 +668,7 @@ def _remove_none(obj):
  return obj


- def eval_results_to_model_index(model_name: str, eval_results: List[EvalResult]) -> List[Dict[str, Any]]:
+ def eval_results_to_model_index(model_name: str, eval_results: list[EvalResult]) -> list[dict[str, Any]]:
  """Takes in given model name and list of `huggingface_hub.EvalResult` and returns a
  valid model-index that will be compatible with the format expected by the
  Hugging Face Hub.
@@ -677,12 +677,12 @@ def eval_results_to_model_index(model_name: str, eval_results: List[EvalResult])
  model_name (`str`):
  Name of the model (ex. "my-cool-model"). This is used as the identifier
  for the model on leaderboards like PapersWithCode.
- eval_results (`List[EvalResult]`):
+ eval_results (`list[EvalResult]`):
  List of `huggingface_hub.EvalResult` objects containing the metrics to be
  reported in the model-index.

  Returns:
- model_index (`List[Dict[str, Any]]`): The eval_results converted to a model-index.
+ model_index (`list[dict[str, Any]]`): The eval_results converted to a model-index.

  Example:
  ```python
@@ -705,7 +705,7 @@ def eval_results_to_model_index(model_name: str, eval_results: List[EvalResult])

  # Metrics are reported on a unique task-and-dataset basis.
  # Here, we make a map of those pairs and the associated EvalResults.
- task_and_ds_types_map: Dict[Any, List[EvalResult]] = defaultdict(list)
+ task_and_ds_types_map: dict[Any, list[EvalResult]] = defaultdict(list)
  for eval_result in eval_results:
  task_and_ds_types_map[eval_result.unique_identifier].append(eval_result)

@@ -760,7 +760,7 @@ def eval_results_to_model_index(model_name: str, eval_results: List[EvalResult])
  return _remove_none(model_index)


- def _to_unique_list(tags: Optional[List[str]]) -> Optional[List[str]]:
+ def _to_unique_list(tags: Optional[list[str]]) -> Optional[list[str]]:
  if tags is None:
  return tags
  unique_tags = [] # make tags unique + keep order explicitly
@@ -15,7 +15,6 @@
  """Contains helpers to serialize tensors."""

  from ._base import StateDictSplit, split_state_dict_into_shards_factory
- from ._tensorflow import get_tf_storage_size, split_tf_state_dict_into_shards
  from ._torch import (
  get_torch_storage_id,
  get_torch_storage_size,
@@ -14,7 +14,7 @@
  """Contains helpers to split tensors into shards."""

  from dataclasses import dataclass, field
- from typing import Any, Callable, Dict, List, Optional, TypeVar, Union
+ from typing import Any, Callable, Optional, TypeVar, Union

  from .. import logging

@@ -38,16 +38,16 @@ logger = logging.get_logger(__file__)
  @dataclass
  class StateDictSplit:
  is_sharded: bool = field(init=False)
- metadata: Dict[str, Any]
- filename_to_tensors: Dict[str, List[str]]
- tensor_to_filename: Dict[str, str]
+ metadata: dict[str, Any]
+ filename_to_tensors: dict[str, list[str]]
+ tensor_to_filename: dict[str, str]

  def __post_init__(self):
  self.is_sharded = len(self.filename_to_tensors) > 1


  def split_state_dict_into_shards_factory(
- state_dict: Dict[str, TensorT],
+ state_dict: dict[str, TensorT],
  *,
  get_storage_size: TensorSizeFn_T,
  filename_pattern: str,
@@ -62,15 +62,12 @@ def split_state_dict_into_shards_factory(
  have tensors of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not
  [6+2+2GB], [6+2GB], [6GB].

- <Tip warning={true}>
-
- If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
- size greater than `max_shard_size`.
-
- </Tip>
+ > [!WARNING]
+ > If one of the model's tensor is bigger than `max_shard_size`, it will end up in its own shard which will have a
+ > size greater than `max_shard_size`.

  Args:
- state_dict (`Dict[str, Tensor]`):
+ state_dict (`dict[str, Tensor]`):
  The state dictionary to save.
  get_storage_size (`Callable[[Tensor], int]`):
  A function that returns the size of a tensor when saved on disk in bytes.
@@ -87,10 +84,10 @@ def split_state_dict_into_shards_factory(
  Returns:
  [`StateDictSplit`]: A `StateDictSplit` object containing the shards and the index to retrieve them.
  """
- storage_id_to_tensors: Dict[Any, List[str]] = {}
+ storage_id_to_tensors: dict[Any, list[str]] = {}

- shard_list: List[Dict[str, TensorT]] = []
- current_shard: Dict[str, TensorT] = {}
+ shard_list: list[dict[str, TensorT]] = []
+ current_shard: dict[str, TensorT] = {}
  current_shard_size = 0
  total_size = 0

@@ -7,7 +7,7 @@ import zipfile
  from contextlib import contextmanager
  from dataclasses import dataclass, field
  from pathlib import Path
- from typing import Any, Dict, Generator, Iterable, Tuple, Union
+ from typing import Any, Generator, Iterable, Union

  from ..errors import DDUFCorruptedFileError, DDUFExportError, DDUFInvalidEntryNameError

@@ -87,7 +87,7 @@ class DDUFEntry:
  return f.read(self.length).decode(encoding=encoding)


- def read_dduf_file(dduf_path: Union[os.PathLike, str]) -> Dict[str, DDUFEntry]:
+ def read_dduf_file(dduf_path: Union[os.PathLike, str]) -> dict[str, DDUFEntry]:
  """
  Read a DDUF file and return a dictionary of entries.

@@ -98,7 +98,7 @@ def read_dduf_file(dduf_path: Union[os.PathLike, str]) -> Dict[str, DDUFEntry]:
  The path to the DDUF file to read.

  Returns:
- `Dict[str, DDUFEntry]`:
+ `dict[str, DDUFEntry]`:
  A dictionary of [`DDUFEntry`] indexed by filename.

  Raises:
@@ -157,7 +157,7 @@ def read_dduf_file(dduf_path: Union[os.PathLike, str]) -> Dict[str, DDUFEntry]:


  def export_entries_as_dduf(
- dduf_path: Union[str, os.PathLike], entries: Iterable[Tuple[str, Union[str, Path, bytes]]]
+ dduf_path: Union[str, os.PathLike], entries: Iterable[tuple[str, Union[str, Path, bytes]]]
  ) -> None:
  """Write a DDUF file from an iterable of entries.

@@ -167,7 +167,7 @@ def export_entries_as_dduf(
  Args:
  dduf_path (`str` or `os.PathLike`):
  The path to the DDUF file to write.
- entries (`Iterable[Tuple[str, Union[str, Path, bytes]]]`):
+ entries (`Iterable[tuple[str, Union[str, Path, bytes]]]`):
  An iterable of entries to write in the DDUF file. Each entry is a tuple with the filename and the content.
  The filename should be the path to the file in the DDUF archive.
  The content can be a string or a pathlib.Path representing a path to a file on the local disk or directly the content as bytes.
@@ -201,8 +201,8 @@ def export_entries_as_dduf(
  >>> pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
  ... # ... do some work with the pipeline

- >>> def as_entries(pipe: DiffusionPipeline) -> Generator[Tuple[str, bytes], None, None]:
- ... # Build an generator that yields the entries to add to the DDUF file.
+ >>> def as_entries(pipe: DiffusionPipeline) -> Generator[tuple[str, bytes], None, None]:
+ ... # Build a generator that yields the entries to add to the DDUF file.
  ... # The first element of the tuple is the filename in the DDUF archive (must use UNIX separator!). The second element is the content of the file.
  ... # Entries will be evaluated lazily when the DDUF file is created (only 1 entry is loaded in memory at a time)
  ... yield "vae/config.json", pipe.vae.to_json_string().encode()
@@ -267,7 +267,7 @@ def export_folder_as_dduf(dduf_path: Union[str, os.PathLike], folder_path: Union
  """
  folder_path = Path(folder_path)

- def _iterate_over_folder() -> Iterable[Tuple[str, Path]]:
+ def _iterate_over_folder() -> Iterable[tuple[str, Path]]:
  for path in Path(folder_path).glob("**/*"):
  if not path.is_file():
  continue