huggingface-hub 0.18.0rc0__py3-none-any.whl → 0.19.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- huggingface_hub/__init__.py +31 -5
- huggingface_hub/_commit_api.py +7 -11
- huggingface_hub/_inference_endpoints.py +348 -0
- huggingface_hub/_login.py +9 -7
- huggingface_hub/_multi_commits.py +1 -1
- huggingface_hub/_snapshot_download.py +6 -7
- huggingface_hub/_space_api.py +7 -4
- huggingface_hub/_tensorboard_logger.py +1 -0
- huggingface_hub/_webhooks_payload.py +7 -7
- huggingface_hub/commands/lfs.py +3 -6
- huggingface_hub/commands/user.py +1 -4
- huggingface_hub/constants.py +27 -0
- huggingface_hub/file_download.py +142 -134
- huggingface_hub/hf_api.py +1058 -503
- huggingface_hub/hf_file_system.py +57 -12
- huggingface_hub/hub_mixin.py +3 -5
- huggingface_hub/inference/_client.py +43 -8
- huggingface_hub/inference/_common.py +8 -16
- huggingface_hub/inference/_generated/_async_client.py +41 -8
- huggingface_hub/inference/_text_generation.py +43 -0
- huggingface_hub/inference_api.py +1 -1
- huggingface_hub/lfs.py +32 -14
- huggingface_hub/repocard_data.py +7 -0
- huggingface_hub/repository.py +19 -3
- huggingface_hub/templates/datasetcard_template.md +83 -43
- huggingface_hub/templates/modelcard_template.md +4 -3
- huggingface_hub/utils/__init__.py +1 -1
- huggingface_hub/utils/_cache_assets.py +3 -3
- huggingface_hub/utils/_cache_manager.py +6 -7
- huggingface_hub/utils/_datetime.py +3 -1
- huggingface_hub/utils/_errors.py +10 -0
- huggingface_hub/utils/_hf_folder.py +4 -2
- huggingface_hub/utils/_http.py +10 -1
- huggingface_hub/utils/_runtime.py +4 -2
- huggingface_hub/utils/endpoint_helpers.py +27 -175
- huggingface_hub/utils/insecure_hashlib.py +34 -0
- huggingface_hub/utils/logging.py +4 -6
- huggingface_hub/utils/sha.py +2 -1
- {huggingface_hub-0.18.0rc0.dist-info → huggingface_hub-0.19.0.dist-info}/METADATA +16 -15
- huggingface_hub-0.19.0.dist-info/RECORD +74 -0
- {huggingface_hub-0.18.0rc0.dist-info → huggingface_hub-0.19.0.dist-info}/WHEEL +1 -1
- huggingface_hub-0.18.0rc0.dist-info/RECORD +0 -72
- {huggingface_hub-0.18.0rc0.dist-info → huggingface_hub-0.19.0.dist-info}/LICENSE +0 -0
- {huggingface_hub-0.18.0rc0.dist-info → huggingface_hub-0.19.0.dist-info}/entry_points.txt +0 -0
- {huggingface_hub-0.18.0rc0.dist-info → huggingface_hub-0.19.0.dist-info}/top_level.txt +0 -0
huggingface_hub/_webhooks_payload.py
CHANGED

@@ -55,7 +55,7 @@ class ObjectId(BaseModel):
 
 class WebhookPayloadUrl(BaseModel):
     web: str
-    api: Optional[str]
+    api: Optional[str] = None
 
 
 class WebhookPayloadMovedTo(BaseModel):
@@ -74,7 +74,7 @@ class WebhookPayloadEvent(BaseModel):
 
 class WebhookPayloadDiscussionChanges(BaseModel):
     base: str
-    mergeCommitId: Optional[str]
+    mergeCommitId: Optional[str] = None
 
 
 class WebhookPayloadComment(ObjectId):
@@ -92,16 +92,16 @@ class WebhookPayloadDiscussion(ObjectId):
     isPullRequest: bool
     status: DiscussionStatus_T
     changes: Optional[WebhookPayloadDiscussionChanges]
-    pinned: Optional[bool]
+    pinned: Optional[bool] = None
 
 
 class WebhookPayloadRepo(ObjectId):
     owner: ObjectId
-    head_sha: Optional[str]
+    head_sha: Optional[str] = None
     name: str
     private: bool
-    subdomain: Optional[str]
-    tags: Optional[List[str]]
+    subdomain: Optional[str] = None
+    tags: Optional[List[str]] = None
     type: Literal["dataset", "model", "space"]
     url: WebhookPayloadUrl
 
@@ -112,4 +112,4 @@ class WebhookPayload(BaseModel):
     discussion: Optional[WebhookPayloadDiscussion]
     comment: Optional[WebhookPayloadComment]
     webhook: WebhookPayloadWebhook
-    movedTo: Optional[WebhookPayloadMovedTo]
+    movedTo: Optional[WebhookPayloadMovedTo] = None
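The `= None` defaults added throughout `_webhooks_payload.py` are presumably there so these fields stay optional under Pydantic v2, which, unlike v1, treats an `Optional[...]` annotation without an explicit default as a required field. A minimal sketch of the effect, mirroring the `WebhookPayloadUrl` model from the diff (the sample URL is only illustrative):

from typing import Optional
from pydantic import BaseModel

class WebhookPayloadUrl(BaseModel):
    web: str
    api: Optional[str] = None  # explicit default keeps the field optional under Pydantic v2

# A payload that omits "api" still validates, on both Pydantic v1 and v2.
payload = WebhookPayloadUrl(web="https://huggingface.co/models/user/repo")
assert payload.api is None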
huggingface_hub/commands/lfs.py
CHANGED

@@ -56,16 +56,13 @@ class LfsCommands(BaseHuggingfaceCLICommand):
     @staticmethod
     def register_subcommand(parser: _SubParsersAction):
         enable_parser = parser.add_parser(
-            "lfs-enable-largefiles",
-            help="Configure your repository to enable upload of files > 5GB.",
+            "lfs-enable-largefiles", help="Configure your repository to enable upload of files > 5GB."
         )
         enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.")
         enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args))
 
-        upload_parser = parser.add_parser(
-            LFS_MULTIPART_UPLOAD_COMMAND,
-            help="Command will get called by git-lfs, do not call it directly.",
-        )
+        # Command will get called by git-lfs, do not call it directly.
+        upload_parser = parser.add_parser(LFS_MULTIPART_UPLOAD_COMMAND, add_help=False)
         upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args))
 
 
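The rewritten registration drops the `help=` entry for the git-lfs custom-transfer command and passes `add_help=False`, which is the stock argparse way to keep an internal subcommand out of the help output while still making it callable. A minimal, standalone sketch of that pattern; the command names and help text mirror the diff, but the script itself is only illustrative:

import argparse

parser = argparse.ArgumentParser(prog="huggingface-cli")
subparsers = parser.add_subparsers(title="commands")

# Visible subcommand: the help= text gives it a description line in `huggingface-cli --help`.
subparsers.add_parser(
    "lfs-enable-largefiles", help="Configure your repository to enable upload of files > 5GB."
)

# Internal subcommand: omitting help= means it gets no description line in the parent's help,
# and add_help=False disables its own -h/--help flag. It can still be invoked by name.
subparsers.add_parser("lfs-multipart-upload", add_help=False)

parser.print_help()  # only lfs-enable-largefiles is described in the command list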
huggingface_hub/commands/user.py
CHANGED

@@ -58,10 +58,7 @@ class UserCommands(BaseHuggingfaceCLICommand):
         logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
 
         # new system: git-based repo system
-        repo_parser = parser.add_parser(
-            "repo",
-            help="{create, ls-files} Commands to interact with your huggingface.co repos.",
-        )
+        repo_parser = parser.add_parser("repo", help="{create} Commands to interact with your huggingface.co repos.")
         repo_subparsers = repo_parser.add_subparsers(help="huggingface.co repos related commands")
         repo_create_parser = repo_subparsers.add_parser("create", help="Create a new repo on huggingface.co")
         repo_create_parser.add_argument(
huggingface_hub/constants.py
CHANGED

@@ -5,6 +5,7 @@ from typing import Optional
 
 # Possible values for env variables
 
+
 ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
 ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
 
@@ -29,6 +30,11 @@ TF_WEIGHTS_NAME = "model.ckpt"
 FLAX_WEIGHTS_NAME = "flax_model.msgpack"
 CONFIG_NAME = "config.json"
 REPOCARD_NAME = "README.md"
+DEFAULT_ETAG_TIMEOUT = 10
+DEFAULT_DOWNLOAD_TIMEOUT = 10
+DEFAULT_REQUEST_TIMEOUT = 10
+DOWNLOAD_CHUNK_SIZE = 10 * 1024 * 1024
+HF_TRANSFER_CONCURRENCY = 100
 
 # Git-related constants
 
@@ -48,6 +54,10 @@ HUGGINGFACE_HEADER_X_LINKED_SIZE = "X-Linked-Size"
 
 INFERENCE_ENDPOINT = os.environ.get("HF_INFERENCE_ENDPOINT", "https://api-inference.huggingface.co")
 
+# See https://huggingface.co/docs/inference-endpoints/index
+INFERENCE_ENDPOINTS_ENDPOINT = "https://api.endpoints.huggingface.cloud/v2"
+
+
 REPO_ID_SEPARATOR = "--"
 # ^ this substring is not allowed in repo_ids on hf.co
 # and is the canonical one we use for serialization of repo ids elsewhere.
@@ -82,9 +92,14 @@ hf_cache_home = os.path.expanduser(
 default_cache_path = os.path.join(hf_cache_home, "hub")
 default_assets_cache_path = os.path.join(hf_cache_home, "assets")
 
+# Legacy env variables
 HUGGINGFACE_HUB_CACHE = os.getenv("HUGGINGFACE_HUB_CACHE", default_cache_path)
 HUGGINGFACE_ASSETS_CACHE = os.getenv("HUGGINGFACE_ASSETS_CACHE", default_assets_cache_path)
 
+# New env variables
+HF_HUB_CACHE = os.getenv("HF_HUB_CACHE", HUGGINGFACE_HUB_CACHE)
+HF_ASSETS_CACHE = os.getenv("HF_ASSETS_CACHE", HUGGINGFACE_ASSETS_CACHE)
+
 HF_HUB_OFFLINE = _is_true(os.environ.get("HF_HUB_OFFLINE") or os.environ.get("TRANSFORMERS_OFFLINE"))
 
 # Opt-out from telemetry requests
@@ -97,6 +112,12 @@ _OLD_HF_TOKEN_PATH = os.path.expanduser("~/.huggingface/token")
 HF_TOKEN_PATH = os.path.join(hf_cache_home, "token")
 
 
+if _staging_mode:
+    # In staging mode, we use a different cache to ensure we don't mix up production and staging data or tokens
+    _staging_home = os.path.join(os.path.expanduser("~"), ".cache", "huggingface_staging")
+    HUGGINGFACE_HUB_CACHE = os.path.join(_staging_home, "hub")
+    HF_TOKEN_PATH = os.path.join(_staging_home, "token")
+
 # Here, `True` will disable progress bars globally without possibility of enabling it
 # programmatically. `False` will enable them without possibility of disabling them.
 # If environment variable is not set (None), then the user is free to enable/disable
@@ -130,6 +151,12 @@ HF_HUB_LOCAL_DIR_AUTO_SYMLINK_THRESHOLD: int = (
     _as_int(os.environ.get("HF_HUB_LOCAL_DIR_AUTO_SYMLINK_THRESHOLD")) or 5 * 1024 * 1024
 )
 
+# Used to override the etag timeout on a system level
+HF_HUB_ETAG_TIMEOUT: int = _as_int(os.environ.get("HF_HUB_ETAG_TIMEOUT")) or DEFAULT_ETAG_TIMEOUT
+
+# Used to override the get request timeout on a system level
+HF_HUB_DOWNLOAD_TIMEOUT: int = _as_int(os.environ.get("HF_HUB_DOWNLOAD_TIMEOUT")) or DEFAULT_DOWNLOAD_TIMEOUT
+
 # List frameworks that are handled by the InferenceAPI service. Useful to scan endpoints and check which models are
 # deployed and running. Since 95% of the models are using the top 4 frameworks listed below, we scan only those by
 # default. We still keep the full list of supported frameworks in case we want to scan all of them.
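Because constants.py reads these variables from the environment at import time, the new timeouts and the new cache location can be overridden system-wide by exporting the corresponding variables before huggingface_hub is imported. A minimal sketch of that, using only the env var names from the diff (the cache path is just an example value):

import os

# Must be set before importing huggingface_hub: constants.py reads the environment at import time.
os.environ["HF_HUB_ETAG_TIMEOUT"] = "30"          # overrides DEFAULT_ETAG_TIMEOUT (10)
os.environ["HF_HUB_DOWNLOAD_TIMEOUT"] = "30"      # overrides DEFAULT_DOWNLOAD_TIMEOUT (10)
os.environ["HF_HUB_CACHE"] = "/tmp/hf-hub-cache"  # example path; falls back to legacy HUGGINGFACE_HUB_CACHE if unset

from huggingface_hub import constants

print(constants.HF_HUB_ETAG_TIMEOUT)      # 30
print(constants.HF_HUB_DOWNLOAD_TIMEOUT)  # 30
print(constants.HF_HUB_CACHE)             # /tmp/hf-hub-cache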