huggingface-hub 0.21.3__tar.gz → 0.22.0rc0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of huggingface-hub might be problematic.
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/PKG-INFO +3 -2
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/README.md +1 -1
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/pyproject.toml +0 -3
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/setup.py +15 -12
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/__init__.py +217 -1
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_commit_api.py +14 -15
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_inference_endpoints.py +12 -11
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_login.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_multi_commits.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_snapshot_download.py +9 -1
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_tensorboard_logger.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_webhooks_payload.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_webhooks_server.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/commands/_cli_utils.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/commands/delete_cache.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/commands/download.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/commands/env.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/commands/scan_cache.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/commands/upload.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/community.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/constants.py +3 -1
- huggingface_hub-0.22.0rc0/src/huggingface_hub/errors.py +38 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/file_download.py +24 -24
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/hf_api.py +47 -35
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/hub_mixin.py +224 -63
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/inference/_client.py +554 -239
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/inference/_common.py +195 -41
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/inference/_generated/_async_client.py +558 -239
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/__init__.py +115 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/audio_classification.py +43 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/audio_to_audio.py +31 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +116 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/base.py +149 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/chat_completion.py +106 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/depth_estimation.py +29 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/document_question_answering.py +85 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/feature_extraction.py +19 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/fill_mask.py +50 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/image_classification.py +43 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/image_segmentation.py +52 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/image_to_image.py +55 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/image_to_text.py +105 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/object_detection.py +55 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/question_answering.py +77 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/sentence_similarity.py +28 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/summarization.py +46 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/table_question_answering.py +45 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/text2text_generation.py +45 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/text_classification.py +43 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/text_generation.py +161 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/text_to_audio.py +105 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/text_to_image.py +57 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/token_classification.py +53 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/translation.py +46 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/video_classification.py +47 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/visual_question_answering.py +53 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/zero_shot_classification.py +56 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +51 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +55 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_templating.py +105 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/inference/_types.py +52 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/keras_mixin.py +39 -17
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/lfs.py +20 -8
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/repocard.py +11 -3
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/repocard_data.py +12 -2
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/serialization/__init__.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/serialization/_base.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/serialization/_numpy.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/serialization/_tensorflow.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/serialization/_torch.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/__init__.py +4 -1
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_cache_manager.py +7 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_chunk_utils.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_datetime.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_errors.py +10 -1
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_experimental.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_fixes.py +19 -3
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_git_credential.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_headers.py +10 -3
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_hf_folder.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_http.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_pagination.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_paths.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_runtime.py +22 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_subprocess.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_token.py +1 -0
- huggingface_hub-0.22.0rc0/src/huggingface_hub/utils/_typing.py +50 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_validators.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/endpoint_helpers.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/logging.py +1 -1
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/sha.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/tqdm.py +1 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub.egg-info/PKG-INFO +3 -2
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub.egg-info/SOURCES.txt +33 -1
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub.egg-info/requires.txt +13 -29
- huggingface_hub-0.21.3/src/huggingface_hub/inference/_text_generation.py +0 -551
- huggingface_hub-0.21.3/src/huggingface_hub/inference/_types.py +0 -200
- huggingface_hub-0.21.3/src/huggingface_hub/utils/_typing.py +0 -22
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/LICENSE +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/MANIFEST.in +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/setup.cfg +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_commit_scheduler.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_space_api.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/commands/__init__.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/commands/huggingface_cli.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/commands/lfs.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/commands/user.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/fastai_utils.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/hf_file_system.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/inference/__init__.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/inference/_generated/__init__.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/inference_api.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/repository.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/templates/datasetcard_template.md +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/templates/modelcard_template.md +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_cache_assets.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_deprecation.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_safetensors.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/_telemetry.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/utils/insecure_hashlib.py +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub.egg-info/dependency_links.txt +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub.egg-info/entry_points.txt +0 -0
- {huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub.egg-info/top_level.txt +0 -0
{huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: huggingface_hub
-Version: 0.21.3
+Version: 0.22.0rc0
 Summary: Client library to download and publish models, datasets and other repos on the huggingface.co hub
 Home-page: https://github.com/huggingface/huggingface_hub
 Author: Hugging Face, Inc.
@@ -28,6 +28,7 @@ Provides-Extra: torch
 Provides-Extra: hf_transfer
 Provides-Extra: fastai
 Provides-Extra: tensorflow
+Provides-Extra: tensorflow-testing
 Provides-Extra: testing
 Provides-Extra: typing
 Provides-Extra: quality
@@ -178,7 +179,7 @@ The advantages are:

 - Free model or dataset hosting for libraries and their users.
 - Built-in file versioning, even with very large files, thanks to a git-based approach.
--
+- Serverless inference API for all models publicly available.
 - In-browser widgets to play with the uploaded models.
 - Anyone can upload a new model for your library, they just need to add the corresponding tag for the model to be discoverable.
 - Fast downloads! We use Cloudfront (a CDN) to geo-replicate downloads so they're blazing fast from anywhere on the globe.
{huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/README.md
RENAMED
@@ -141,7 +141,7 @@ The advantages are:

 - Free model or dataset hosting for libraries and their users.
 - Built-in file versioning, even with very large files, thanks to a git-based approach.
--
+- Serverless inference API for all models publicly available.
 - In-browser widgets to play with the uploaded models.
 - Anyone can upload a new model for your library, they just need to add the corresponding tag for the model to be discoverable.
 - Fast downloads! We use Cloudfront (a CDN) to geo-replicate downloads so they're blazing fast from anywhere on the globe.
{huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/setup.py
RENAMED
@@ -14,28 +14,22 @@ def get_version() -> str:
 install_requires = [
     "filelock",
     "fsspec>=2023.5.0",
+    "packaging>=20.9",
+    "pyyaml>=5.1",
     "requests",
     "tqdm>=4.42.1",
-    "pyyaml>=5.1",
     "typing-extensions>=3.7.4.3",  # to be able to import TypeAlias
-    "packaging>=20.9",
 ]

 extras = {}

 extras["cli"] = [
-    "InquirerPy==0.3.4",
-    # Note: installs `prompt-toolkit` in the background
+    "InquirerPy==0.3.4",  # Note: installs `prompt-toolkit` in the background
 ]

 extras["inference"] = [
     "aiohttp",  # for AsyncInferenceClient
-
-    # Let's limit pydantic to 1.x for now. Since Tensorflow 2.14, Python3.8 is not supported anyway so impact should be
-    # limited. We still trigger some CIs on Python 3.8 so we need this workaround.
-    # NOTE: when relaxing constraint to support v3.x, make sure to adapt `src/huggingface_hub/inference/_text_generation.py`.
-    "pydantic>1.1,<3.0; python_version>'3.8'",
-    "pydantic>1.1,<2.0; python_version=='3.8'",
+    "minijinja>=1.0",  # for chat-completion if not TGI-served
 ]

 extras["torch"] = [
@@ -51,7 +45,16 @@ extras["fastai"] = [
     "fastcore>=1.3.27",
 ]

-extras["tensorflow"] = [
+extras["tensorflow"] = [
+    "tensorflow",
+    "pydot",
+    "graphviz",
+]
+
+extras["tensorflow-testing"] = [
+    "tensorflow",
+    "keras<3.0",
+]


 extras["testing"] = (
@@ -88,7 +91,7 @@ extras["typing"] = [
 ]

 extras["quality"] = [
-    "ruff>=0.
+    "ruff>=0.3.0",
     "mypy==1.5.1",
 ]

{huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/__init__.py
RENAMED
@@ -46,7 +46,7 @@ import sys
 from typing import TYPE_CHECKING


-__version__ = "0.21.3"
+__version__ = "0.22.0.rc0"

 # Alphabetical order of definitions is ensured in tests
 # WARNING: any comment added in this dictionary definition will be lost when
@@ -259,6 +259,114 @@ _SUBMOD_ATTRS = {
     "inference._generated._async_client": [
         "AsyncInferenceClient",
     ],
+    "inference._generated.types": [
+        "AudioClassificationInput",
+        "AudioClassificationOutputElement",
+        "AudioClassificationParameters",
+        "AudioToAudioInput",
+        "AudioToAudioOutputElement",
+        "AutomaticSpeechRecognitionGenerationParameters",
+        "AutomaticSpeechRecognitionInput",
+        "AutomaticSpeechRecognitionOutput",
+        "AutomaticSpeechRecognitionOutputChunk",
+        "AutomaticSpeechRecognitionParameters",
+        "ChatCompletionInput",
+        "ChatCompletionInputMessage",
+        "ChatCompletionOutput",
+        "ChatCompletionOutputChoice",
+        "ChatCompletionOutputChoiceMessage",
+        "ChatCompletionStreamOutput",
+        "ChatCompletionStreamOutputChoice",
+        "ChatCompletionStreamOutputDelta",
+        "DepthEstimationInput",
+        "DepthEstimationOutput",
+        "DocumentQuestionAnsweringInput",
+        "DocumentQuestionAnsweringInputData",
+        "DocumentQuestionAnsweringOutputElement",
+        "DocumentQuestionAnsweringParameters",
+        "FeatureExtractionInput",
+        "FillMaskInput",
+        "FillMaskOutputElement",
+        "FillMaskParameters",
+        "ImageClassificationInput",
+        "ImageClassificationOutputElement",
+        "ImageClassificationParameters",
+        "ImageSegmentationInput",
+        "ImageSegmentationOutputElement",
+        "ImageSegmentationParameters",
+        "ImageToImageInput",
+        "ImageToImageOutput",
+        "ImageToImageParameters",
+        "ImageToImageTargetSize",
+        "ImageToTextGenerationParameters",
+        "ImageToTextInput",
+        "ImageToTextOutput",
+        "ImageToTextParameters",
+        "ObjectDetectionBoundingBox",
+        "ObjectDetectionInput",
+        "ObjectDetectionOutputElement",
+        "ObjectDetectionParameters",
+        "QuestionAnsweringInput",
+        "QuestionAnsweringInputData",
+        "QuestionAnsweringOutputElement",
+        "QuestionAnsweringParameters",
+        "SentenceSimilarityInput",
+        "SentenceSimilarityInputData",
+        "SummarizationGenerationParameters",
+        "SummarizationInput",
+        "SummarizationOutput",
+        "TableQuestionAnsweringInput",
+        "TableQuestionAnsweringInputData",
+        "TableQuestionAnsweringOutputElement",
+        "Text2TextGenerationInput",
+        "Text2TextGenerationOutput",
+        "Text2TextGenerationParameters",
+        "TextClassificationInput",
+        "TextClassificationOutputElement",
+        "TextClassificationParameters",
+        "TextGenerationInput",
+        "TextGenerationOutput",
+        "TextGenerationOutputDetails",
+        "TextGenerationOutputSequenceDetails",
+        "TextGenerationOutputToken",
+        "TextGenerationParameters",
+        "TextGenerationPrefillToken",
+        "TextGenerationStreamDetails",
+        "TextGenerationStreamOutput",
+        "TextToAudioGenerationParameters",
+        "TextToAudioInput",
+        "TextToAudioOutput",
+        "TextToAudioParameters",
+        "TextToImageInput",
+        "TextToImageOutput",
+        "TextToImageParameters",
+        "TextToImageTargetSize",
+        "TokenClassificationInput",
+        "TokenClassificationOutputElement",
+        "TokenClassificationParameters",
+        "TranslationGenerationParameters",
+        "TranslationInput",
+        "TranslationOutput",
+        "VideoClassificationInput",
+        "VideoClassificationOutputElement",
+        "VideoClassificationParameters",
+        "VisualQuestionAnsweringInput",
+        "VisualQuestionAnsweringInputData",
+        "VisualQuestionAnsweringOutputElement",
+        "VisualQuestionAnsweringParameters",
+        "ZeroShotClassificationInput",
+        "ZeroShotClassificationInputData",
+        "ZeroShotClassificationOutputElement",
+        "ZeroShotClassificationParameters",
+        "ZeroShotImageClassificationInput",
+        "ZeroShotImageClassificationInputData",
+        "ZeroShotImageClassificationOutputElement",
+        "ZeroShotImageClassificationParameters",
+        "ZeroShotObjectDetectionBoundingBox",
+        "ZeroShotObjectDetectionInput",
+        "ZeroShotObjectDetectionInputData",
+        "ZeroShotObjectDetectionOutputElement",
+    ],
     "inference_api": [
         "InferenceApi",
     ],
@@ -613,6 +721,114 @@ if TYPE_CHECKING: # pragma: no cover
         InferenceTimeoutError,  # noqa: F401
     )
     from .inference._generated._async_client import AsyncInferenceClient  # noqa: F401
+    from .inference._generated.types import (
+        AudioClassificationInput,  # noqa: F401
+        AudioClassificationOutputElement,  # noqa: F401
+        AudioClassificationParameters,  # noqa: F401
+        AudioToAudioInput,  # noqa: F401
+        AudioToAudioOutputElement,  # noqa: F401
+        AutomaticSpeechRecognitionGenerationParameters,  # noqa: F401
+        AutomaticSpeechRecognitionInput,  # noqa: F401
+        AutomaticSpeechRecognitionOutput,  # noqa: F401
+        AutomaticSpeechRecognitionOutputChunk,  # noqa: F401
+        AutomaticSpeechRecognitionParameters,  # noqa: F401
+        ChatCompletionInput,  # noqa: F401
+        ChatCompletionInputMessage,  # noqa: F401
+        ChatCompletionOutput,  # noqa: F401
+        ChatCompletionOutputChoice,  # noqa: F401
+        ChatCompletionOutputChoiceMessage,  # noqa: F401
+        ChatCompletionStreamOutput,  # noqa: F401
+        ChatCompletionStreamOutputChoice,  # noqa: F401
+        ChatCompletionStreamOutputDelta,  # noqa: F401
+        DepthEstimationInput,  # noqa: F401
+        DepthEstimationOutput,  # noqa: F401
+        DocumentQuestionAnsweringInput,  # noqa: F401
+        DocumentQuestionAnsweringInputData,  # noqa: F401
+        DocumentQuestionAnsweringOutputElement,  # noqa: F401
+        DocumentQuestionAnsweringParameters,  # noqa: F401
+        FeatureExtractionInput,  # noqa: F401
+        FillMaskInput,  # noqa: F401
+        FillMaskOutputElement,  # noqa: F401
+        FillMaskParameters,  # noqa: F401
+        ImageClassificationInput,  # noqa: F401
+        ImageClassificationOutputElement,  # noqa: F401
+        ImageClassificationParameters,  # noqa: F401
+        ImageSegmentationInput,  # noqa: F401
+        ImageSegmentationOutputElement,  # noqa: F401
+        ImageSegmentationParameters,  # noqa: F401
+        ImageToImageInput,  # noqa: F401
+        ImageToImageOutput,  # noqa: F401
+        ImageToImageParameters,  # noqa: F401
+        ImageToImageTargetSize,  # noqa: F401
+        ImageToTextGenerationParameters,  # noqa: F401
+        ImageToTextInput,  # noqa: F401
+        ImageToTextOutput,  # noqa: F401
+        ImageToTextParameters,  # noqa: F401
+        ObjectDetectionBoundingBox,  # noqa: F401
+        ObjectDetectionInput,  # noqa: F401
+        ObjectDetectionOutputElement,  # noqa: F401
+        ObjectDetectionParameters,  # noqa: F401
+        QuestionAnsweringInput,  # noqa: F401
+        QuestionAnsweringInputData,  # noqa: F401
+        QuestionAnsweringOutputElement,  # noqa: F401
+        QuestionAnsweringParameters,  # noqa: F401
+        SentenceSimilarityInput,  # noqa: F401
+        SentenceSimilarityInputData,  # noqa: F401
+        SummarizationGenerationParameters,  # noqa: F401
+        SummarizationInput,  # noqa: F401
+        SummarizationOutput,  # noqa: F401
+        TableQuestionAnsweringInput,  # noqa: F401
+        TableQuestionAnsweringInputData,  # noqa: F401
+        TableQuestionAnsweringOutputElement,  # noqa: F401
+        Text2TextGenerationInput,  # noqa: F401
+        Text2TextGenerationOutput,  # noqa: F401
+        Text2TextGenerationParameters,  # noqa: F401
+        TextClassificationInput,  # noqa: F401
+        TextClassificationOutputElement,  # noqa: F401
+        TextClassificationParameters,  # noqa: F401
+        TextGenerationInput,  # noqa: F401
+        TextGenerationOutput,  # noqa: F401
+        TextGenerationOutputDetails,  # noqa: F401
+        TextGenerationOutputSequenceDetails,  # noqa: F401
+        TextGenerationOutputToken,  # noqa: F401
+        TextGenerationParameters,  # noqa: F401
+        TextGenerationPrefillToken,  # noqa: F401
+        TextGenerationStreamDetails,  # noqa: F401
+        TextGenerationStreamOutput,  # noqa: F401
+        TextToAudioGenerationParameters,  # noqa: F401
+        TextToAudioInput,  # noqa: F401
+        TextToAudioOutput,  # noqa: F401
+        TextToAudioParameters,  # noqa: F401
+        TextToImageInput,  # noqa: F401
+        TextToImageOutput,  # noqa: F401
+        TextToImageParameters,  # noqa: F401
+        TextToImageTargetSize,  # noqa: F401
+        TokenClassificationInput,  # noqa: F401
+        TokenClassificationOutputElement,  # noqa: F401
+        TokenClassificationParameters,  # noqa: F401
+        TranslationGenerationParameters,  # noqa: F401
+        TranslationInput,  # noqa: F401
+        TranslationOutput,  # noqa: F401
+        VideoClassificationInput,  # noqa: F401
+        VideoClassificationOutputElement,  # noqa: F401
+        VideoClassificationParameters,  # noqa: F401
+        VisualQuestionAnsweringInput,  # noqa: F401
+        VisualQuestionAnsweringInputData,  # noqa: F401
+        VisualQuestionAnsweringOutputElement,  # noqa: F401
+        VisualQuestionAnsweringParameters,  # noqa: F401
+        ZeroShotClassificationInput,  # noqa: F401
+        ZeroShotClassificationInputData,  # noqa: F401
+        ZeroShotClassificationOutputElement,  # noqa: F401
+        ZeroShotClassificationParameters,  # noqa: F401
+        ZeroShotImageClassificationInput,  # noqa: F401
+        ZeroShotImageClassificationInputData,  # noqa: F401
+        ZeroShotImageClassificationOutputElement,  # noqa: F401
+        ZeroShotImageClassificationParameters,  # noqa: F401
+        ZeroShotObjectDetectionBoundingBox,  # noqa: F401
+        ZeroShotObjectDetectionInput,  # noqa: F401
+        ZeroShotObjectDetectionInputData,  # noqa: F401
+        ZeroShotObjectDetectionOutputElement,  # noqa: F401
+    )
     from .inference_api import InferenceApi  # noqa: F401
     from .keras_mixin import (
         KerasModelHubMixin,  # noqa: F401
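Note: the `__init__.py` hunks above wire the new generated task types into the package's lazy-import table (`_SUBMOD_ATTRS`) and into the `TYPE_CHECKING` block, so they resolve from the package root. A minimal sketch of what this enables for downstream code, assuming huggingface-hub 0.22.0rc0 is installed (nothing below is part of the diff itself):

# Sketch only: the types listed in _SUBMOD_ATTRS become importable from the root
# package, either lazily via attribute access or with a regular import statement.
import huggingface_hub
from huggingface_hub import ChatCompletionOutput, TextGenerationOutput

print(huggingface_hub.TextGenerationStreamOutput)  # resolved lazily on first attribute access
print(ChatCompletionOutput, TextGenerationOutput)  # direct imports work as well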
{huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_commit_api.py
RENAMED
@@ -1,6 +1,7 @@
 """
 Type definitions and utilities for the `create_commit` API
 """
+
 import base64
 import io
 import os
@@ -21,7 +22,6 @@ from .file_download import hf_hub_url
 from .lfs import UploadInfo, lfs_upload, post_lfs_batch_info
 from .utils import (
     EntryNotFoundError,
-    build_hf_headers,
     chunk_iterable,
     hf_raise_for_status,
     logging,
@@ -318,7 +318,7 @@ def _upload_lfs_files(
     additions: List[CommitOperationAdd],
     repo_type: str,
     repo_id: str,
-
+    headers: Dict[str, str],
     endpoint: Optional[str] = None,
     num_threads: int = 5,
     revision: Optional[str] = None,
@@ -337,8 +337,8 @@ def _upload_lfs_files(
         repo_id (`str`):
             A namespace (user or an organization) and a repo name separated
             by a `/`.
-
-
+        headers (`Dict[str, str]`):
+            Headers to use for the request, including authorization headers and user agent.
         num_threads (`int`, *optional*):
             The number of concurrent threads to use when uploading. Defaults to 5.
         revision (`str`, *optional*):
@@ -359,11 +359,12 @@ def _upload_lfs_files(
     for chunk in chunk_iterable(additions, chunk_size=256):
         batch_actions_chunk, batch_errors_chunk = post_lfs_batch_info(
             upload_infos=[op.upload_info for op in chunk],
-            token=token,
             repo_id=repo_id,
             repo_type=repo_type,
             revision=revision,
             endpoint=endpoint,
+            headers=headers,
+            token=None,  # already passed in 'headers'
         )

         # If at least 1 error, we do not retrieve information for other chunks
@@ -398,7 +399,7 @@ def _upload_lfs_files(
     def _wrapped_lfs_upload(batch_action) -> None:
         try:
             operation = oid2addop[batch_action["oid"]]
-            lfs_upload(operation=operation, lfs_batch_action=batch_action,
+            lfs_upload(operation=operation, lfs_batch_action=batch_action, headers=headers)
         except Exception as exc:
             raise RuntimeError(f"Error while uploading '{operation.path_in_repo}' to the Hub.") from exc

@@ -442,7 +443,7 @@ def _fetch_upload_modes(
     additions: Iterable[CommitOperationAdd],
     repo_type: str,
     repo_id: str,
-
+    headers: Dict[str, str],
     revision: str,
     endpoint: Optional[str] = None,
     create_pr: bool = False,
@@ -461,8 +462,8 @@ def _fetch_upload_modes(
         repo_id (`str`):
             A namespace (user or an organization) and a repo name separated
             by a `/`.
-
-
+        headers (`Dict[str, str]`):
+            Headers to use for the request, including authorization headers and user agent.
         revision (`str`):
             The git revision to upload the files to. Can be any valid git revision.
         gitignore_content (`str`, *optional*):
@@ -477,7 +478,6 @@ def _fetch_upload_modes(
             If the Hub API response is improperly formatted.
     """
     endpoint = endpoint if endpoint is not None else ENDPOINT
-    headers = build_hf_headers(token=token)

     # Fetch upload mode (LFS or regular) chunk by chunk.
     upload_modes: Dict[str, UploadMode] = {}
@@ -526,7 +526,7 @@ def _fetch_files_to_copy(
     copies: Iterable[CommitOperationCopy],
     repo_type: str,
     repo_id: str,
-
+    headers: Dict[str, str],
     revision: str,
     endpoint: Optional[str] = None,
 ) -> Dict[Tuple[str, Optional[str]], Union["RepoFile", bytes]]:
@@ -545,8 +545,8 @@ def _fetch_files_to_copy(
         repo_id (`str`):
             A namespace (user or an organization) and a repo name separated
             by a `/`.
-
-
+        headers (`Dict[str, str]`):
+            Headers to use for the request, including authorization headers and user agent.
         revision (`str`):
             The git revision to upload the files to. Can be any valid git revision.

@@ -562,7 +562,7 @@ def _fetch_files_to_copy(
     """
     from .hf_api import HfApi, RepoFolder

-    hf_api = HfApi(endpoint=endpoint,
+    hf_api = HfApi(endpoint=endpoint, headers=headers)
     files_to_copy: Dict[Tuple[str, Optional[str]], Union["RepoFile", bytes]] = {}
     for src_revision, operations in groupby(copies, key=lambda op: op.src_revision):
         operations = list(operations)  # type: ignore
@@ -581,7 +581,6 @@ def _fetch_files_to_copy(
                 files_to_copy[(src_repo_file.path, src_revision)] = src_repo_file
         else:
             # TODO: (optimization) download regular files to copy concurrently
-            headers = build_hf_headers(token=token)
             url = hf_hub_url(
                 endpoint=endpoint,
                 repo_type=repo_type,
{huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_inference_endpoints.py
RENAMED
@@ -2,7 +2,7 @@ import time
 from dataclasses import dataclass, field
 from datetime import datetime
 from enum import Enum
-from typing import TYPE_CHECKING, Dict, Optional
+from typing import TYPE_CHECKING, Dict, Optional, Union

 from .inference._client import InferenceClient
 from .inference._generated._async_client import AsyncInferenceClient
@@ -71,8 +71,9 @@ class InferenceEndpoint:
             The type of the Inference Endpoint (public, protected, private).
         raw (`Dict`):
             The raw dictionary data returned from the API.
-        token (`str`, *optional*):
-            Authentication token for the Inference Endpoint, if set when requesting the API.
+        token (`str` or `bool`, *optional*):
+            Authentication token for the Inference Endpoint, if set when requesting the API. Will default to the
+            locally saved token if not provided. Pass `token=False` if you don't want to send your token to the server.

     Example:
     ```python
@@ -120,12 +121,12 @@ class InferenceEndpoint:
     raw: Dict = field(repr=False)

     # Internal fields
-    _token:
+    _token: Union[str, bool, None] = field(repr=False, compare=False)
     _api: "HfApi" = field(repr=False, compare=False)

     @classmethod
     def from_raw(
-        cls, raw: Dict, namespace: str, token:
+        cls, raw: Dict, namespace: str, token: Union[str, bool, None] = None, api: Optional["HfApi"] = None
     ) -> "InferenceEndpoint":
         """Initialize object from raw dictionary."""
         if api is None:
@@ -230,7 +231,7 @@ class InferenceEndpoint:
         Returns:
             [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
         """
-        obj = self._api.get_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)
+        obj = self._api.get_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)  # type: ignore [arg-type]
         self.raw = obj.raw
         self._populate_from_raw()
         return self
@@ -295,7 +296,7 @@ class InferenceEndpoint:
             framework=framework,
             revision=revision,
             task=task,
-            token=self._token,
+            token=self._token,  # type: ignore [arg-type]
         )

         # Mutate current object
@@ -316,7 +317,7 @@ class InferenceEndpoint:
         Returns:
             [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
         """
-        obj = self._api.pause_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)
+        obj = self._api.pause_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)  # type: ignore [arg-type]
         self.raw = obj.raw
         self._populate_from_raw()
         return self
@@ -330,7 +331,7 @@ class InferenceEndpoint:
         Returns:
             [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
         """
-        obj = self._api.resume_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)
+        obj = self._api.resume_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)  # type: ignore [arg-type]
         self.raw = obj.raw
         self._populate_from_raw()
         return self
@@ -348,7 +349,7 @@ class InferenceEndpoint:
         Returns:
             [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
         """
-        obj = self._api.scale_to_zero_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)
+        obj = self._api.scale_to_zero_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)  # type: ignore [arg-type]
         self.raw = obj.raw
         self._populate_from_raw()
         return self
@@ -361,7 +362,7 @@ class InferenceEndpoint:

         This is an alias for [`HfApi.delete_inference_endpoint`].
         """
-        self._api.delete_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)
+        self._api.delete_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token)  # type: ignore [arg-type]

     def _populate_from_raw(self) -> None:
         """Populate fields from raw dictionary.
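Note on the `_inference_endpoints.py` hunks above: `token` is widened to `Union[str, bool, None]`, where `token=False` means "do not send the locally saved token". A short, hedged usage sketch (endpoint name and namespace are placeholders; `get_inference_endpoint` is the existing public helper, not something introduced by this diff):

from huggingface_hub import get_inference_endpoint

# token=False explicitly skips the locally saved token; token=None (the default)
# falls back to it. Unauthenticated calls only succeed for endpoints the caller may access.
endpoint = get_inference_endpoint("my-endpoint-name", namespace="my-org", token=False)
endpoint.fetch()  # forwards self._token to HfApi.get_inference_endpoint, as in the hunk above
print(endpoint.status)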
{huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_multi_commits.py
RENAMED
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Contains utilities to multi-commits (i.e. push changes iteratively on a PR)."""
+
 import re
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple, Union
{huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_snapshot_download.py
RENAMED
@@ -53,6 +53,7 @@ def snapshot_download(
     ignore_patterns: Optional[Union[List[str], str]] = None,
     max_workers: int = 8,
     tqdm_class: Optional[base_tqdm] = None,
+    headers: Optional[Dict[str, str]] = None,
     endpoint: Optional[str] = None,
 ) -> str:
     """Download repo files.
@@ -120,6 +121,8 @@ def snapshot_download(
             - If `True`, the token is read from the HuggingFace config
               folder.
             - If a string, it's used as the authentication token.
+        headers (`dict`, *optional*):
+            Additional headers to include in the request. Those headers take precedence over the others.
         local_files_only (`bool`, *optional*, defaults to `False`):
             If `True`, avoid downloading the file and return the path to the
             local cached file if it exists.
@@ -174,7 +177,11 @@ def snapshot_download(
     try:
         # if we have internet connection we want to list files to download
         api = HfApi(
-            library_name=library_name,
+            library_name=library_name,
+            library_version=library_version,
+            user_agent=user_agent,
+            endpoint=endpoint,
+            headers=headers,
         )
         repo_info = api.repo_info(repo_id=repo_id, repo_type=repo_type, revision=revision, token=token)
     except (requests.exceptions.SSLError, requests.exceptions.ProxyError):
@@ -297,6 +304,7 @@ def snapshot_download(
             resume_download=resume_download,
             force_download=force_download,
             token=token,
+            headers=headers,
         )

         if HF_HUB_ENABLE_HF_TRANSFER:
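Note on the `_snapshot_download.py` hunks above: `snapshot_download` gains a `headers` argument that is forwarded both to the `HfApi` instance used to list the repo and to each individual file download. A minimal usage sketch (repo id and header values are placeholders, not taken from the diff):

from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="gpt2",
    # Extra headers are merged into the outgoing requests and, per the new
    # docstring entry, take precedence over the auto-generated ones.
    headers={"X-Custom-Header": "my-value"},
)
print(local_dir)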
{huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/_tensorboard_logger.py
RENAMED
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Contains a logger to push training logs to the Hub, using Tensorboard."""
+
 from pathlib import Path
 from typing import TYPE_CHECKING, List, Optional, Union

{huggingface_hub-0.21.3 → huggingface_hub-0.22.0rc0}/src/huggingface_hub/commands/delete_cache.py
RENAMED
@@ -55,6 +55,7 @@ TODO: add "--limit" arg to limit to X repos ?
 TODO: add "-y" arg for immediate deletion ?
 See discussions in https://github.com/huggingface/huggingface_hub/issues/1025.
 """
+
 import os
 from argparse import Namespace, _SubParsersAction
 from functools import wraps
|