huggingface-hub 0.21.2__py3-none-any.whl → 0.22.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of huggingface-hub might be problematic.

Files changed (97)
  1. huggingface_hub/__init__.py +217 -1
  2. huggingface_hub/_commit_api.py +14 -15
  3. huggingface_hub/_inference_endpoints.py +12 -11
  4. huggingface_hub/_login.py +1 -0
  5. huggingface_hub/_multi_commits.py +1 -0
  6. huggingface_hub/_snapshot_download.py +9 -1
  7. huggingface_hub/_tensorboard_logger.py +1 -0
  8. huggingface_hub/_webhooks_payload.py +1 -0
  9. huggingface_hub/_webhooks_server.py +1 -0
  10. huggingface_hub/commands/_cli_utils.py +1 -0
  11. huggingface_hub/commands/delete_cache.py +1 -0
  12. huggingface_hub/commands/download.py +1 -0
  13. huggingface_hub/commands/env.py +1 -0
  14. huggingface_hub/commands/scan_cache.py +1 -0
  15. huggingface_hub/commands/upload.py +1 -0
  16. huggingface_hub/community.py +1 -0
  17. huggingface_hub/constants.py +3 -1
  18. huggingface_hub/errors.py +38 -0
  19. huggingface_hub/file_download.py +102 -95
  20. huggingface_hub/hf_api.py +47 -35
  21. huggingface_hub/hf_file_system.py +77 -3
  22. huggingface_hub/hub_mixin.py +230 -61
  23. huggingface_hub/inference/_client.py +554 -239
  24. huggingface_hub/inference/_common.py +195 -41
  25. huggingface_hub/inference/_generated/_async_client.py +558 -239
  26. huggingface_hub/inference/_generated/types/__init__.py +115 -0
  27. huggingface_hub/inference/_generated/types/audio_classification.py +43 -0
  28. huggingface_hub/inference/_generated/types/audio_to_audio.py +31 -0
  29. huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +116 -0
  30. huggingface_hub/inference/_generated/types/base.py +149 -0
  31. huggingface_hub/inference/_generated/types/chat_completion.py +106 -0
  32. huggingface_hub/inference/_generated/types/depth_estimation.py +29 -0
  33. huggingface_hub/inference/_generated/types/document_question_answering.py +85 -0
  34. huggingface_hub/inference/_generated/types/feature_extraction.py +19 -0
  35. huggingface_hub/inference/_generated/types/fill_mask.py +50 -0
  36. huggingface_hub/inference/_generated/types/image_classification.py +43 -0
  37. huggingface_hub/inference/_generated/types/image_segmentation.py +52 -0
  38. huggingface_hub/inference/_generated/types/image_to_image.py +55 -0
  39. huggingface_hub/inference/_generated/types/image_to_text.py +105 -0
  40. huggingface_hub/inference/_generated/types/object_detection.py +55 -0
  41. huggingface_hub/inference/_generated/types/question_answering.py +77 -0
  42. huggingface_hub/inference/_generated/types/sentence_similarity.py +28 -0
  43. huggingface_hub/inference/_generated/types/summarization.py +46 -0
  44. huggingface_hub/inference/_generated/types/table_question_answering.py +45 -0
  45. huggingface_hub/inference/_generated/types/text2text_generation.py +45 -0
  46. huggingface_hub/inference/_generated/types/text_classification.py +43 -0
  47. huggingface_hub/inference/_generated/types/text_generation.py +161 -0
  48. huggingface_hub/inference/_generated/types/text_to_audio.py +105 -0
  49. huggingface_hub/inference/_generated/types/text_to_image.py +57 -0
  50. huggingface_hub/inference/_generated/types/token_classification.py +53 -0
  51. huggingface_hub/inference/_generated/types/translation.py +46 -0
  52. huggingface_hub/inference/_generated/types/video_classification.py +47 -0
  53. huggingface_hub/inference/_generated/types/visual_question_answering.py +53 -0
  54. huggingface_hub/inference/_generated/types/zero_shot_classification.py +56 -0
  55. huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +51 -0
  56. huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +55 -0
  57. huggingface_hub/inference/_templating.py +105 -0
  58. huggingface_hub/inference/_types.py +4 -152
  59. huggingface_hub/keras_mixin.py +39 -17
  60. huggingface_hub/lfs.py +20 -8
  61. huggingface_hub/repocard.py +11 -3
  62. huggingface_hub/repocard_data.py +12 -2
  63. huggingface_hub/serialization/__init__.py +1 -0
  64. huggingface_hub/serialization/_base.py +1 -0
  65. huggingface_hub/serialization/_numpy.py +1 -0
  66. huggingface_hub/serialization/_tensorflow.py +1 -0
  67. huggingface_hub/serialization/_torch.py +1 -0
  68. huggingface_hub/utils/__init__.py +4 -1
  69. huggingface_hub/utils/_cache_manager.py +7 -0
  70. huggingface_hub/utils/_chunk_utils.py +1 -0
  71. huggingface_hub/utils/_datetime.py +1 -0
  72. huggingface_hub/utils/_errors.py +10 -1
  73. huggingface_hub/utils/_experimental.py +1 -0
  74. huggingface_hub/utils/_fixes.py +19 -3
  75. huggingface_hub/utils/_git_credential.py +1 -0
  76. huggingface_hub/utils/_headers.py +10 -3
  77. huggingface_hub/utils/_hf_folder.py +1 -0
  78. huggingface_hub/utils/_http.py +1 -0
  79. huggingface_hub/utils/_pagination.py +1 -0
  80. huggingface_hub/utils/_paths.py +1 -0
  81. huggingface_hub/utils/_runtime.py +22 -0
  82. huggingface_hub/utils/_subprocess.py +1 -0
  83. huggingface_hub/utils/_token.py +1 -0
  84. huggingface_hub/utils/_typing.py +29 -1
  85. huggingface_hub/utils/_validators.py +1 -0
  86. huggingface_hub/utils/endpoint_helpers.py +1 -0
  87. huggingface_hub/utils/logging.py +1 -1
  88. huggingface_hub/utils/sha.py +1 -0
  89. huggingface_hub/utils/tqdm.py +1 -0
  90. {huggingface_hub-0.21.2.dist-info → huggingface_hub-0.22.0.dist-info}/METADATA +14 -15
  91. huggingface_hub-0.22.0.dist-info/RECORD +113 -0
  92. {huggingface_hub-0.21.2.dist-info → huggingface_hub-0.22.0.dist-info}/WHEEL +1 -1
  93. huggingface_hub/inference/_text_generation.py +0 -551
  94. huggingface_hub-0.21.2.dist-info/RECORD +0 -81
  95. {huggingface_hub-0.21.2.dist-info → huggingface_hub-0.22.0.dist-info}/LICENSE +0 -0
  96. {huggingface_hub-0.21.2.dist-info → huggingface_hub-0.22.0.dist-info}/entry_points.txt +0 -0
  97. {huggingface_hub-0.21.2.dist-info → huggingface_hub-0.22.0.dist-info}/top_level.txt +0 -0
huggingface_hub/__init__.py CHANGED
@@ -46,7 +46,7 @@ import sys
 from typing import TYPE_CHECKING
 
 
-__version__ = "0.21.2"
+__version__ = "0.22.0"
 
 # Alphabetical order of definitions is ensured in tests
 # WARNING: any comment added in this dictionary definition will be lost when
@@ -259,6 +259,114 @@ _SUBMOD_ATTRS = {
     "inference._generated._async_client": [
         "AsyncInferenceClient",
     ],
+    "inference._generated.types": [
+        "AudioClassificationInput",
+        "AudioClassificationOutputElement",
+        "AudioClassificationParameters",
+        "AudioToAudioInput",
+        "AudioToAudioOutputElement",
+        "AutomaticSpeechRecognitionGenerationParameters",
+        "AutomaticSpeechRecognitionInput",
+        "AutomaticSpeechRecognitionOutput",
+        "AutomaticSpeechRecognitionOutputChunk",
+        "AutomaticSpeechRecognitionParameters",
+        "ChatCompletionInput",
+        "ChatCompletionInputMessage",
+        "ChatCompletionOutput",
+        "ChatCompletionOutputChoice",
+        "ChatCompletionOutputChoiceMessage",
+        "ChatCompletionStreamOutput",
+        "ChatCompletionStreamOutputChoice",
+        "ChatCompletionStreamOutputDelta",
+        "DepthEstimationInput",
+        "DepthEstimationOutput",
+        "DocumentQuestionAnsweringInput",
+        "DocumentQuestionAnsweringInputData",
+        "DocumentQuestionAnsweringOutputElement",
+        "DocumentQuestionAnsweringParameters",
+        "FeatureExtractionInput",
+        "FillMaskInput",
+        "FillMaskOutputElement",
+        "FillMaskParameters",
+        "ImageClassificationInput",
+        "ImageClassificationOutputElement",
+        "ImageClassificationParameters",
+        "ImageSegmentationInput",
+        "ImageSegmentationOutputElement",
+        "ImageSegmentationParameters",
+        "ImageToImageInput",
+        "ImageToImageOutput",
+        "ImageToImageParameters",
+        "ImageToImageTargetSize",
+        "ImageToTextGenerationParameters",
+        "ImageToTextInput",
+        "ImageToTextOutput",
+        "ImageToTextParameters",
+        "ObjectDetectionBoundingBox",
+        "ObjectDetectionInput",
+        "ObjectDetectionOutputElement",
+        "ObjectDetectionParameters",
+        "QuestionAnsweringInput",
+        "QuestionAnsweringInputData",
+        "QuestionAnsweringOutputElement",
+        "QuestionAnsweringParameters",
+        "SentenceSimilarityInput",
+        "SentenceSimilarityInputData",
+        "SummarizationGenerationParameters",
+        "SummarizationInput",
+        "SummarizationOutput",
+        "TableQuestionAnsweringInput",
+        "TableQuestionAnsweringInputData",
+        "TableQuestionAnsweringOutputElement",
+        "Text2TextGenerationInput",
+        "Text2TextGenerationOutput",
+        "Text2TextGenerationParameters",
+        "TextClassificationInput",
+        "TextClassificationOutputElement",
+        "TextClassificationParameters",
+        "TextGenerationInput",
+        "TextGenerationOutput",
+        "TextGenerationOutputDetails",
+        "TextGenerationOutputSequenceDetails",
+        "TextGenerationOutputToken",
+        "TextGenerationParameters",
+        "TextGenerationPrefillToken",
+        "TextGenerationStreamDetails",
+        "TextGenerationStreamOutput",
+        "TextToAudioGenerationParameters",
+        "TextToAudioInput",
+        "TextToAudioOutput",
+        "TextToAudioParameters",
+        "TextToImageInput",
+        "TextToImageOutput",
+        "TextToImageParameters",
+        "TextToImageTargetSize",
+        "TokenClassificationInput",
+        "TokenClassificationOutputElement",
+        "TokenClassificationParameters",
+        "TranslationGenerationParameters",
+        "TranslationInput",
+        "TranslationOutput",
+        "VideoClassificationInput",
+        "VideoClassificationOutputElement",
+        "VideoClassificationParameters",
+        "VisualQuestionAnsweringInput",
+        "VisualQuestionAnsweringInputData",
+        "VisualQuestionAnsweringOutputElement",
+        "VisualQuestionAnsweringParameters",
+        "ZeroShotClassificationInput",
+        "ZeroShotClassificationInputData",
+        "ZeroShotClassificationOutputElement",
+        "ZeroShotClassificationParameters",
+        "ZeroShotImageClassificationInput",
+        "ZeroShotImageClassificationInputData",
+        "ZeroShotImageClassificationOutputElement",
+        "ZeroShotImageClassificationParameters",
+        "ZeroShotObjectDetectionBoundingBox",
+        "ZeroShotObjectDetectionInput",
+        "ZeroShotObjectDetectionInputData",
+        "ZeroShotObjectDetectionOutputElement",
+    ],
     "inference_api": [
         "InferenceApi",
     ],
@@ -613,6 +721,114 @@ if TYPE_CHECKING: # pragma: no cover
         InferenceTimeoutError,  # noqa: F401
     )
     from .inference._generated._async_client import AsyncInferenceClient  # noqa: F401
+    from .inference._generated.types import (
+        AudioClassificationInput,  # noqa: F401
+        AudioClassificationOutputElement,  # noqa: F401
+        AudioClassificationParameters,  # noqa: F401
+        AudioToAudioInput,  # noqa: F401
+        AudioToAudioOutputElement,  # noqa: F401
+        AutomaticSpeechRecognitionGenerationParameters,  # noqa: F401
+        AutomaticSpeechRecognitionInput,  # noqa: F401
+        AutomaticSpeechRecognitionOutput,  # noqa: F401
+        AutomaticSpeechRecognitionOutputChunk,  # noqa: F401
+        AutomaticSpeechRecognitionParameters,  # noqa: F401
+        ChatCompletionInput,  # noqa: F401
+        ChatCompletionInputMessage,  # noqa: F401
+        ChatCompletionOutput,  # noqa: F401
+        ChatCompletionOutputChoice,  # noqa: F401
+        ChatCompletionOutputChoiceMessage,  # noqa: F401
+        ChatCompletionStreamOutput,  # noqa: F401
+        ChatCompletionStreamOutputChoice,  # noqa: F401
+        ChatCompletionStreamOutputDelta,  # noqa: F401
+        DepthEstimationInput,  # noqa: F401
+        DepthEstimationOutput,  # noqa: F401
+        DocumentQuestionAnsweringInput,  # noqa: F401
+        DocumentQuestionAnsweringInputData,  # noqa: F401
+        DocumentQuestionAnsweringOutputElement,  # noqa: F401
+        DocumentQuestionAnsweringParameters,  # noqa: F401
+        FeatureExtractionInput,  # noqa: F401
+        FillMaskInput,  # noqa: F401
+        FillMaskOutputElement,  # noqa: F401
+        FillMaskParameters,  # noqa: F401
+        ImageClassificationInput,  # noqa: F401
+        ImageClassificationOutputElement,  # noqa: F401
+        ImageClassificationParameters,  # noqa: F401
+        ImageSegmentationInput,  # noqa: F401
+        ImageSegmentationOutputElement,  # noqa: F401
+        ImageSegmentationParameters,  # noqa: F401
+        ImageToImageInput,  # noqa: F401
+        ImageToImageOutput,  # noqa: F401
+        ImageToImageParameters,  # noqa: F401
+        ImageToImageTargetSize,  # noqa: F401
+        ImageToTextGenerationParameters,  # noqa: F401
+        ImageToTextInput,  # noqa: F401
+        ImageToTextOutput,  # noqa: F401
+        ImageToTextParameters,  # noqa: F401
+        ObjectDetectionBoundingBox,  # noqa: F401
+        ObjectDetectionInput,  # noqa: F401
+        ObjectDetectionOutputElement,  # noqa: F401
+        ObjectDetectionParameters,  # noqa: F401
+        QuestionAnsweringInput,  # noqa: F401
+        QuestionAnsweringInputData,  # noqa: F401
+        QuestionAnsweringOutputElement,  # noqa: F401
+        QuestionAnsweringParameters,  # noqa: F401
+        SentenceSimilarityInput,  # noqa: F401
+        SentenceSimilarityInputData,  # noqa: F401
+        SummarizationGenerationParameters,  # noqa: F401
+        SummarizationInput,  # noqa: F401
+        SummarizationOutput,  # noqa: F401
+        TableQuestionAnsweringInput,  # noqa: F401
+        TableQuestionAnsweringInputData,  # noqa: F401
+        TableQuestionAnsweringOutputElement,  # noqa: F401
+        Text2TextGenerationInput,  # noqa: F401
+        Text2TextGenerationOutput,  # noqa: F401
+        Text2TextGenerationParameters,  # noqa: F401
+        TextClassificationInput,  # noqa: F401
+        TextClassificationOutputElement,  # noqa: F401
+        TextClassificationParameters,  # noqa: F401
+        TextGenerationInput,  # noqa: F401
+        TextGenerationOutput,  # noqa: F401
+        TextGenerationOutputDetails,  # noqa: F401
+        TextGenerationOutputSequenceDetails,  # noqa: F401
+        TextGenerationOutputToken,  # noqa: F401
+        TextGenerationParameters,  # noqa: F401
+        TextGenerationPrefillToken,  # noqa: F401
+        TextGenerationStreamDetails,  # noqa: F401
+        TextGenerationStreamOutput,  # noqa: F401
+        TextToAudioGenerationParameters,  # noqa: F401
+        TextToAudioInput,  # noqa: F401
+        TextToAudioOutput,  # noqa: F401
+        TextToAudioParameters,  # noqa: F401
+        TextToImageInput,  # noqa: F401
+        TextToImageOutput,  # noqa: F401
+        TextToImageParameters,  # noqa: F401
+        TextToImageTargetSize,  # noqa: F401
+        TokenClassificationInput,  # noqa: F401
+        TokenClassificationOutputElement,  # noqa: F401
+        TokenClassificationParameters,  # noqa: F401
+        TranslationGenerationParameters,  # noqa: F401
+        TranslationInput,  # noqa: F401
+        TranslationOutput,  # noqa: F401
+        VideoClassificationInput,  # noqa: F401
+        VideoClassificationOutputElement,  # noqa: F401
+        VideoClassificationParameters,  # noqa: F401
+        VisualQuestionAnsweringInput,  # noqa: F401
+        VisualQuestionAnsweringInputData,  # noqa: F401
+        VisualQuestionAnsweringOutputElement,  # noqa: F401
+        VisualQuestionAnsweringParameters,  # noqa: F401
+        ZeroShotClassificationInput,  # noqa: F401
+        ZeroShotClassificationInputData,  # noqa: F401
+        ZeroShotClassificationOutputElement,  # noqa: F401
+        ZeroShotClassificationParameters,  # noqa: F401
+        ZeroShotImageClassificationInput,  # noqa: F401
+        ZeroShotImageClassificationInputData,  # noqa: F401
+        ZeroShotImageClassificationOutputElement,  # noqa: F401
+        ZeroShotImageClassificationParameters,  # noqa: F401
+        ZeroShotObjectDetectionBoundingBox,  # noqa: F401
+        ZeroShotObjectDetectionInput,  # noqa: F401
+        ZeroShotObjectDetectionInputData,  # noqa: F401
+        ZeroShotObjectDetectionOutputElement,  # noqa: F401
+    )
     from .inference_api import InferenceApi  # noqa: F401
     from .keras_mixin import (
         KerasModelHubMixin,  # noqa: F401
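
The two hunks above register the new generated inference types in the package's lazy-import table (`_SUBMOD_ATTRS`) and in the `TYPE_CHECKING` block, making them importable from the package root. A minimal sketch of what that enables, assuming huggingface-hub 0.22.0 is installed:

```python
# Both imports resolve to the same generated dataclass: the first is looked up
# lazily through _SUBMOD_ATTRS, the second targets the submodule directly.
from huggingface_hub import ChatCompletionOutput
from huggingface_hub.inference._generated.types import ChatCompletionOutput as DirectOutput

assert ChatCompletionOutput is DirectOutput
```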
huggingface_hub/_commit_api.py CHANGED
@@ -1,6 +1,7 @@
 """
 Type definitions and utilities for the `create_commit` API
 """
+
 import base64
 import io
 import os
@@ -21,7 +22,6 @@ from .file_download import hf_hub_url
 from .lfs import UploadInfo, lfs_upload, post_lfs_batch_info
 from .utils import (
     EntryNotFoundError,
-    build_hf_headers,
     chunk_iterable,
     hf_raise_for_status,
     logging,
@@ -318,7 +318,7 @@ def _upload_lfs_files(
     additions: List[CommitOperationAdd],
     repo_type: str,
     repo_id: str,
-    token: Optional[str],
+    headers: Dict[str, str],
     endpoint: Optional[str] = None,
     num_threads: int = 5,
     revision: Optional[str] = None,
@@ -337,8 +337,8 @@
         repo_id (`str`):
             A namespace (user or an organization) and a repo name separated
             by a `/`.
-        token (`str`, *optional*):
-            An authentication token ( See https://huggingface.co/settings/tokens )
+        headers (`Dict[str, str]`):
+            Headers to use for the request, including authorization headers and user agent.
         num_threads (`int`, *optional*):
             The number of concurrent threads to use when uploading. Defaults to 5.
         revision (`str`, *optional*):
@@ -359,11 +359,12 @@
     for chunk in chunk_iterable(additions, chunk_size=256):
         batch_actions_chunk, batch_errors_chunk = post_lfs_batch_info(
             upload_infos=[op.upload_info for op in chunk],
-            token=token,
             repo_id=repo_id,
             repo_type=repo_type,
             revision=revision,
             endpoint=endpoint,
+            headers=headers,
+            token=None,  # already passed in 'headers'
         )
 
     # If at least 1 error, we do not retrieve information for other chunks
@@ -398,7 +399,7 @@
     def _wrapped_lfs_upload(batch_action) -> None:
         try:
             operation = oid2addop[batch_action["oid"]]
-            lfs_upload(operation=operation, lfs_batch_action=batch_action, token=token)
+            lfs_upload(operation=operation, lfs_batch_action=batch_action, headers=headers)
         except Exception as exc:
             raise RuntimeError(f"Error while uploading '{operation.path_in_repo}' to the Hub.") from exc
 
@@ -442,7 +443,7 @@ def _fetch_upload_modes(
     additions: Iterable[CommitOperationAdd],
     repo_type: str,
     repo_id: str,
-    token: Optional[str],
+    headers: Dict[str, str],
     revision: str,
     endpoint: Optional[str] = None,
     create_pr: bool = False,
@@ -461,8 +462,8 @@
         repo_id (`str`):
             A namespace (user or an organization) and a repo name separated
             by a `/`.
-        token (`str`, *optional*):
-            An authentication token ( See https://huggingface.co/settings/tokens )
+        headers (`Dict[str, str]`):
+            Headers to use for the request, including authorization headers and user agent.
         revision (`str`):
             The git revision to upload the files to. Can be any valid git revision.
         gitignore_content (`str`, *optional*):
@@ -477,7 +478,6 @@
             If the Hub API response is improperly formatted.
     """
     endpoint = endpoint if endpoint is not None else ENDPOINT
-    headers = build_hf_headers(token=token)
 
     # Fetch upload mode (LFS or regular) chunk by chunk.
     upload_modes: Dict[str, UploadMode] = {}
@@ -526,7 +526,7 @@ def _fetch_files_to_copy(
     copies: Iterable[CommitOperationCopy],
     repo_type: str,
     repo_id: str,
-    token: Optional[str],
+    headers: Dict[str, str],
     revision: str,
     endpoint: Optional[str] = None,
 ) -> Dict[Tuple[str, Optional[str]], Union["RepoFile", bytes]]:
@@ -545,8 +545,8 @@
         repo_id (`str`):
             A namespace (user or an organization) and a repo name separated
             by a `/`.
-        token (`str`, *optional*):
-            An authentication token ( See https://huggingface.co/settings/tokens )
+        headers (`Dict[str, str]`):
+            Headers to use for the request, including authorization headers and user agent.
         revision (`str`):
             The git revision to upload the files to. Can be any valid git revision.
 
@@ -562,7 +562,7 @@
     """
     from .hf_api import HfApi, RepoFolder
 
-    hf_api = HfApi(endpoint=endpoint, token=token)
+    hf_api = HfApi(endpoint=endpoint, headers=headers)
     files_to_copy: Dict[Tuple[str, Optional[str]], Union["RepoFile", bytes]] = {}
     for src_revision, operations in groupby(copies, key=lambda op: op.src_revision):
         operations = list(operations)  # type: ignore
@@ -581,7 +581,6 @@
                     files_to_copy[(src_repo_file.path, src_revision)] = src_repo_file
                 else:
                     # TODO: (optimization) download regular files to copy concurrently
-                    headers = build_hf_headers(token=token)
                     url = hf_hub_url(
                         endpoint=endpoint,
                         repo_type=repo_type,
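
The common thread in the `_commit_api.py` hunks is that the private commit helpers (`_upload_lfs_files`, `_fetch_upload_modes`, `_fetch_files_to_copy`) now receive ready-made request headers instead of a raw token, which is why `build_hf_headers` disappears from this module's imports. A rough sketch of the new calling convention, assuming the caller builds the headers once and using placeholder argument values:

```python
from huggingface_hub.utils import build_hf_headers

# Build the authorization + user-agent headers a single time...
headers = build_hf_headers(token=None, library_name="my-lib", library_version="0.1.0")

# ...and hand them to the private helpers instead of a token (placeholder values):
# _fetch_upload_modes(additions=operations, repo_type="model", repo_id="user/repo",
#                     headers=headers, revision="main")
```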
huggingface_hub/_inference_endpoints.py CHANGED
@@ -2,7 +2,7 @@ import time
 from dataclasses import dataclass, field
 from datetime import datetime
 from enum import Enum
-from typing import TYPE_CHECKING, Dict, Optional
+from typing import TYPE_CHECKING, Dict, Optional, Union
 
 from .inference._client import InferenceClient
 from .inference._generated._async_client import AsyncInferenceClient
@@ -71,8 +71,9 @@ class InferenceEndpoint:
            The type of the Inference Endpoint (public, protected, private).
        raw (`Dict`):
            The raw dictionary data returned from the API.
-        token (`str`, *optional*):
-            Authentication token for the Inference Endpoint, if set when requesting the API.
+        token (`str` or `bool`, *optional*):
+            Authentication token for the Inference Endpoint, if set when requesting the API. Will default to the
+            locally saved token if not provided. Pass `token=False` if you don't want to send your token to the server.
 
     Example:
     ```python
@@ -120,12 +121,12 @@ class InferenceEndpoint:
     raw: Dict = field(repr=False)
 
     # Internal fields
-    _token: Optional[str] = field(repr=False, compare=False)
+    _token: Union[str, bool, None] = field(repr=False, compare=False)
     _api: "HfApi" = field(repr=False, compare=False)
 
     @classmethod
     def from_raw(
-        cls, raw: Dict, namespace: str, token: Optional[str] = None, api: Optional["HfApi"] = None
+        cls, raw: Dict, namespace: str, token: Union[str, bool, None] = None, api: Optional["HfApi"] = None
     ) -> "InferenceEndpoint":
         """Initialize object from raw dictionary."""
         if api is None:
@@ -230,7 +231,7 @@ class InferenceEndpoint:
         Returns:
             [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
         """
-        obj = self._api.get_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)
+        obj = self._api.get_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)  # type: ignore [arg-type]
         self.raw = obj.raw
         self._populate_from_raw()
         return self
@@ -295,7 +296,7 @@ class InferenceEndpoint:
             framework=framework,
             revision=revision,
             task=task,
-            token=self._token,
+            token=self._token,  # type: ignore [arg-type]
         )
 
         # Mutate current object
@@ -316,7 +317,7 @@ class InferenceEndpoint:
         Returns:
             [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
         """
-        obj = self._api.pause_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)
+        obj = self._api.pause_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)  # type: ignore [arg-type]
         self.raw = obj.raw
         self._populate_from_raw()
         return self
@@ -330,7 +331,7 @@
         Returns:
             [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
         """
-        obj = self._api.resume_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)
+        obj = self._api.resume_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)  # type: ignore [arg-type]
         self.raw = obj.raw
         self._populate_from_raw()
         return self
@@ -348,7 +349,7 @@
         Returns:
             [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
         """
-        obj = self._api.scale_to_zero_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)
+        obj = self._api.scale_to_zero_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)  # type: ignore [arg-type]
         self.raw = obj.raw
         self._populate_from_raw()
         return self
@@ -361,7 +362,7 @@
 
         This is an alias for [`HfApi.delete_inference_endpoint`].
         """
-        self._api.delete_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)
+        self._api.delete_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)  # type: ignore [arg-type]
 
     def _populate_from_raw(self) -> None:
         """Populate fields from raw dictionary.
huggingface_hub/_login.py CHANGED
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Contains methods to login to the Hub."""
+
 import os
 import subprocess
 from functools import partial
huggingface_hub/_multi_commits.py CHANGED
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Contains utilities to multi-commits (i.e. push changes iteratively on a PR)."""
+
 import re
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple, Union
huggingface_hub/_snapshot_download.py CHANGED
@@ -53,6 +53,7 @@ def snapshot_download(
     ignore_patterns: Optional[Union[List[str], str]] = None,
     max_workers: int = 8,
     tqdm_class: Optional[base_tqdm] = None,
+    headers: Optional[Dict[str, str]] = None,
     endpoint: Optional[str] = None,
 ) -> str:
     """Download repo files.
@@ -120,6 +121,8 @@
             - If `True`, the token is read from the HuggingFace config
               folder.
             - If a string, it's used as the authentication token.
+        headers (`dict`, *optional*):
+            Additional headers to include in the request. Those headers take precedence over the others.
         local_files_only (`bool`, *optional*, defaults to `False`):
             If `True`, avoid downloading the file and return the path to the
             local cached file if it exists.
@@ -174,7 +177,11 @@
     try:
         # if we have internet connection we want to list files to download
         api = HfApi(
-            library_name=library_name, library_version=library_version, user_agent=user_agent, endpoint=endpoint
+            library_name=library_name,
+            library_version=library_version,
+            user_agent=user_agent,
+            endpoint=endpoint,
+            headers=headers,
         )
         repo_info = api.repo_info(repo_id=repo_id, repo_type=repo_type, revision=revision, token=token)
     except (requests.exceptions.SSLError, requests.exceptions.ProxyError):
@@ -297,6 +304,7 @@
             resume_download=resume_download,
             force_download=force_download,
             token=token,
+            headers=headers,
         )
 
     if HF_HUB_ENABLE_HF_TRANSFER:
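
`snapshot_download` gains a `headers` argument that is forwarded both to the `HfApi` instance used to list the repository and to each underlying download call. An illustrative call (the extra header is a placeholder):

```python
from huggingface_hub import snapshot_download

local_path = snapshot_download(
    "gpt2",
    headers={"X-Custom-Header": "value"},  # placeholder; merged into every Hub request
)
```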
huggingface_hub/_tensorboard_logger.py CHANGED
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Contains a logger to push training logs to the Hub, using Tensorboard."""
+
 from pathlib import Path
 from typing import TYPE_CHECKING, List, Optional, Union
 
huggingface_hub/_webhooks_payload.py CHANGED
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Contains data structures to parse the webhooks payload."""
+
 from typing import List, Literal, Optional
 
 from pydantic import BaseModel
huggingface_hub/_webhooks_server.py CHANGED
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Contains `WebhooksServer` and `webhook_endpoint` to create a webhook server easily."""
+
 import atexit
 import inspect
 import os
huggingface_hub/commands/_cli_utils.py CHANGED
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Contains a utility for good-looking prints."""
+
 import os
 from typing import List, Union
 
huggingface_hub/commands/delete_cache.py CHANGED
@@ -55,6 +55,7 @@ TODO: add "--limit" arg to limit to X repos ?
 TODO: add "-y" arg for immediate deletion ?
 See discussions in https://github.com/huggingface/huggingface_hub/issues/1025.
 """
+
 import os
 from argparse import Namespace, _SubParsersAction
 from functools import wraps
huggingface_hub/commands/download.py CHANGED
@@ -35,6 +35,7 @@ Usage:
     # Download to local dir
     huggingface-cli download gpt2 --local-dir=./models/gpt2
 """
+
 import warnings
 from argparse import Namespace, _SubParsersAction
 from typing import List, Literal, Optional, Union
huggingface_hub/commands/env.py CHANGED
@@ -16,6 +16,7 @@
 Usage:
     huggingface-cli env
 """
+
 from argparse import _SubParsersAction
 
 from ..utils import dump_environment_info
huggingface_hub/commands/scan_cache.py CHANGED
@@ -20,6 +20,7 @@ Usage:
     huggingface-cli scan-cache -vvv
     huggingface-cli scan-cache --dir ~/.cache/huggingface/hub
 """
+
 import time
 from argparse import Namespace, _SubParsersAction
 from typing import Optional
huggingface_hub/commands/upload.py CHANGED
@@ -42,6 +42,7 @@ Usage:
     # Schedule commits every 30 minutes
     huggingface-cli upload Wauplin/my-cool-model --every=30
 """
+
 import os
 import time
 import warnings
huggingface_hub/community.py CHANGED
@@ -4,6 +4,7 @@ Data structures to interact with Discussions and Pull Requests on the Hub.
 See [the Discussions and Pull Requests guide](https://huggingface.co/docs/hub/repositories-pull-requests-discussions)
 for more information on Pull Requests, Discussions, and the community tab.
 """
+
 from dataclasses import dataclass
 from datetime import datetime
 from typing import List, Literal, Optional, Union
huggingface_hub/constants.py CHANGED
@@ -52,7 +52,9 @@ HUGGINGFACE_CO_URL_HOME = "https://huggingface.co/"
 
 _staging_mode = _is_true(os.environ.get("HUGGINGFACE_CO_STAGING"))
 
-ENDPOINT = os.getenv("HF_ENDPOINT") or ("https://hub-ci.huggingface.co" if _staging_mode else "https://huggingface.co")
+_HF_DEFAULT_ENDPOINT = "https://huggingface.co"
+_HF_DEFAULT_STAGING_ENDPOINT = "https://hub-ci.huggingface.co"
+ENDPOINT = os.getenv("HF_ENDPOINT") or (_HF_DEFAULT_STAGING_ENDPOINT if _staging_mode else _HF_DEFAULT_ENDPOINT)
 
 HUGGINGFACE_CO_URL_TEMPLATE = ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
 HUGGINGFACE_HEADER_X_REPO_COMMIT = "X-Repo-Commit"
huggingface_hub/errors.py ADDED
@@ -0,0 +1,38 @@
+"""Contains all custom errors."""
+
+from requests import HTTPError
+
+
+# INFERENCE CLIENT ERRORS
+
+
+class InferenceTimeoutError(HTTPError, TimeoutError):
+    """Error raised when a model is unavailable or the request times out."""
+
+
+# TEXT GENERATION ERRORS
+
+
+class TextGenerationError(HTTPError):
+    """Generic error raised if text-generation went wrong."""
+
+
+# Text Generation Inference Errors
+class ValidationError(TextGenerationError):
+    """Server-side validation error."""
+
+
+class GenerationError(TextGenerationError):
+    pass
+
+
+class OverloadedError(TextGenerationError):
+    pass
+
+
+class IncompleteGenerationError(TextGenerationError):
+    pass
+
+
+class UnknownError(TextGenerationError):
+    pass
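
The new `huggingface_hub/errors.py` module gathers the client's custom exceptions in one place; the text-generation errors were previously defined in `inference/_text_generation.py`, which this release removes (file 93 in the list above). A hedged usage sketch (the model id is a placeholder):

```python
from huggingface_hub import InferenceClient
from huggingface_hub.errors import InferenceTimeoutError, ValidationError

client = InferenceClient(timeout=30)
try:
    client.text_generation("Once upon a time", model="my-org/my-model")  # placeholder model id
except InferenceTimeoutError:
    print("Model unavailable or the request timed out.")
except ValidationError as err:
    print(f"Server-side validation of the request failed: {err}")
```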