huggingface-hub 0.34.3__py3-none-any.whl → 0.34.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- huggingface_hub/__init__.py +1 -1
- huggingface_hub/_jobs_api.py +9 -10
- huggingface_hub/_upload_large_folder.py +108 -1
- huggingface_hub/cli/jobs.py +37 -12
- huggingface_hub/hf_api.py +25 -15
- huggingface_hub/inference/_client.py +81 -1
- huggingface_hub/inference/_generated/_async_client.py +82 -1
- huggingface_hub/inference/_providers/__init__.py +8 -0
- huggingface_hub/inference/_providers/_common.py +1 -0
- huggingface_hub/inference/_providers/fal_ai.py +31 -0
- huggingface_hub/inference/_providers/scaleway.py +28 -0
- huggingface_hub/utils/_dotenv.py +24 -20
- huggingface_hub/utils/_runtime.py +1 -0
- {huggingface_hub-0.34.3.dist-info → huggingface_hub-0.34.5.dist-info}/METADATA +1 -1
- {huggingface_hub-0.34.3.dist-info → huggingface_hub-0.34.5.dist-info}/RECORD +19 -18
- {huggingface_hub-0.34.3.dist-info → huggingface_hub-0.34.5.dist-info}/LICENSE +0 -0
- {huggingface_hub-0.34.3.dist-info → huggingface_hub-0.34.5.dist-info}/WHEEL +0 -0
- {huggingface_hub-0.34.3.dist-info → huggingface_hub-0.34.5.dist-info}/entry_points.txt +0 -0
- {huggingface_hub-0.34.3.dist-info → huggingface_hub-0.34.5.dist-info}/top_level.txt +0 -0
huggingface_hub/__init__.py
CHANGED
huggingface_hub/_jobs_api.py
CHANGED
@@ -47,15 +47,12 @@ class JobStatus:
     stage: JobStage
     message: Optional[str]

-    def __init__(self, **kwargs) -> None:
-        self.stage = kwargs["stage"]
-        self.message = kwargs.get("message")
-

 @dataclass
 class JobOwner:
     id: str
     name: str
+    type: str


 @dataclass
@@ -89,7 +86,7 @@ class JobInfo:
             Status of the Job, e.g. `JobStatus(stage="RUNNING", message=None)`
             See [`JobStage`] for possible stage values.
         status: (`JobOwner` or `None`):
-            Owner of the Job, e.g. `JobOwner(id="5e9ecfc04957053f60648a3e", name="lhoestq")`
+            Owner of the Job, e.g. `JobOwner(id="5e9ecfc04957053f60648a3e", name="lhoestq", type="user")`

     Example:

@@ -100,7 +97,7 @@ class JobInfo:
     ...     command=["python", "-c", "print('Hello from the cloud!')"]
     ... )
     >>> job
-    JobInfo(id='687fb701029421ae5549d998', created_at=datetime.datetime(2025, 7, 22, 16, 6, 25, 79000, tzinfo=datetime.timezone.utc), docker_image='python:3.12', space_id=None, command=['python', '-c', "print('Hello from the cloud!')"], arguments=[], environment={}, secrets={}, flavor='cpu-basic', status=JobStatus(stage='RUNNING', message=None), owner=JobOwner(id='5e9ecfc04957053f60648a3e', name='lhoestq'), endpoint='https://huggingface.co', url='https://huggingface.co/jobs/lhoestq/687fb701029421ae5549d998')
+    JobInfo(id='687fb701029421ae5549d998', created_at=datetime.datetime(2025, 7, 22, 16, 6, 25, 79000, tzinfo=datetime.timezone.utc), docker_image='python:3.12', space_id=None, command=['python', '-c', "print('Hello from the cloud!')"], arguments=[], environment={}, secrets={}, flavor='cpu-basic', status=JobStatus(stage='RUNNING', message=None), owner=JobOwner(id='5e9ecfc04957053f60648a3e', name='lhoestq', type='user'), endpoint='https://huggingface.co', url='https://huggingface.co/jobs/lhoestq/687fb701029421ae5549d998')
     >>> job.id
     '687fb701029421ae5549d998'
     >>> job.url
@@ -119,8 +116,8 @@ class JobInfo:
     environment: Optional[Dict[str, Any]]
     secrets: Optional[Dict[str, Any]]
     flavor: Optional[SpaceHardware]
-    status: Optional[JobStatus]
-    owner: Optional[JobOwner]
+    status: JobStatus
+    owner: JobOwner

     # Inferred fields
     endpoint: str
@@ -132,13 +129,15 @@ class JobInfo:
         self.created_at = parse_datetime(created_at) if created_at else None
         self.docker_image = kwargs.get("dockerImage") or kwargs.get("docker_image")
         self.space_id = kwargs.get("spaceId") or kwargs.get("space_id")
-
+        owner = kwargs.get("owner", {})
+        self.owner = JobOwner(id=owner["id"], name=owner["name"], type=owner["type"])
         self.command = kwargs.get("command")
         self.arguments = kwargs.get("arguments")
         self.environment = kwargs.get("environment")
         self.secrets = kwargs.get("secrets")
         self.flavor = kwargs.get("flavor")
-
+        status = kwargs.get("status", {})
+        self.status = JobStatus(stage=status["stage"], message=status.get("message"))

         # Inferred fields
         self.endpoint = kwargs.get("endpoint", constants.ENDPOINT)
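A hedged sketch of what the new parsing accepts, assuming a raw API payload shaped like the nested `owner` and `status` dicts read by the `__init__` above:

```python
from huggingface_hub._jobs_api import JobInfo

job = JobInfo(
    id="687fb701029421ae5549d998",
    owner={"id": "5e9ecfc04957053f60648a3e", "name": "lhoestq", "type": "user"},
    status={"stage": "RUNNING"},  # "message" is optional and defaults to None
)
print(job.owner.type, job.status.stage)  # -> user RUNNING
```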
huggingface_hub/_upload_large_folder.py
CHANGED

@@ -24,7 +24,7 @@ import traceback
 from datetime import datetime
 from pathlib import Path
 from threading import Lock
-from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
 from urllib.parse import quote

 from . import constants
@@ -49,6 +49,108 @@ COMMIT_SIZE_SCALE: List[int] = [20, 50, 75, 100, 125, 200, 250, 400, 600, 1000]
 UPLOAD_BATCH_SIZE_XET = 256  # Max 256 files per upload batch for XET-enabled repos
 UPLOAD_BATCH_SIZE_LFS = 1  # Otherwise, batches of 1 for regular LFS upload

+# Repository limits (from https://huggingface.co/docs/hub/repositories-recommendations)
+MAX_FILES_PER_REPO = 100_000  # Recommended maximum number of files per repository
+MAX_FILES_PER_FOLDER = 10_000  # Recommended maximum number of files per folder
+MAX_FILE_SIZE_GB = 50  # Hard limit for individual file size
+RECOMMENDED_FILE_SIZE_GB = 20  # Recommended maximum for individual file size
+
+
+def _validate_upload_limits(paths_list: List[LocalUploadFilePaths]) -> None:
+    """
+    Validate upload against repository limits and warn about potential issues.
+
+    Args:
+        paths_list: List of file paths to be uploaded
+
+    Warns about:
+        - Too many files in the repository (>100k)
+        - Too many entries (files or subdirectories) in a single folder (>10k)
+        - Files exceeding size limits (>20GB recommended, >50GB hard limit)
+    """
+    logger.info("Running validation checks on files to upload...")
+
+    # Check 1: Total file count
+    if len(paths_list) > MAX_FILES_PER_REPO:
+        logger.warning(
+            f"You are about to upload {len(paths_list):,} files. "
+            f"This exceeds the recommended limit of {MAX_FILES_PER_REPO:,} files per repository.\n"
+            f"Consider:\n"
+            f"  - Splitting your data into multiple repositories\n"
+            f"  - Using fewer, larger files (e.g., parquet files)\n"
+            f"  - See: https://huggingface.co/docs/hub/repositories-recommendations"
+        )
+
+    # Check 2: Files and subdirectories per folder
+    # Track immediate children (files and subdirs) for each folder
+    from collections import defaultdict
+
+    entries_per_folder: Dict[str, Any] = defaultdict(lambda: {"files": 0, "subdirs": set()})
+
+    for paths in paths_list:
+        path = Path(paths.path_in_repo)
+        parts = path.parts
+
+        # Count this file in its immediate parent directory
+        parent = str(path.parent) if str(path.parent) != "." else "."
+        entries_per_folder[parent]["files"] += 1
+
+        # Track immediate subdirectories for each parent folder
+        # Walk through the path components to track parent-child relationships
+        for i, child in enumerate(parts[:-1]):
+            parent = "." if i == 0 else "/".join(parts[:i])
+            entries_per_folder[parent]["subdirs"].add(child)
+
+    # Check limits for each folder
+    for folder, data in entries_per_folder.items():
+        file_count = data["files"]
+        subdir_count = len(data["subdirs"])
+        total_entries = file_count + subdir_count
+
+        if total_entries > MAX_FILES_PER_FOLDER:
+            folder_display = "root" if folder == "." else folder
+            logger.warning(
+                f"Folder '{folder_display}' contains {total_entries:,} entries "
+                f"({file_count:,} files and {subdir_count:,} subdirectories). "
+                f"This exceeds the recommended {MAX_FILES_PER_FOLDER:,} entries per folder.\n"
+                "Consider reorganising into sub-folders."
+            )
+
+    # Check 3: File sizes
+    large_files = []
+    very_large_files = []
+
+    for paths in paths_list:
+        size = paths.file_path.stat().st_size
+        size_gb = size / 1_000_000_000  # Use decimal GB as per Hub limits
+
+        if size_gb > MAX_FILE_SIZE_GB:
+            very_large_files.append((paths.path_in_repo, size_gb))
+        elif size_gb > RECOMMENDED_FILE_SIZE_GB:
+            large_files.append((paths.path_in_repo, size_gb))
+
+    # Warn about very large files (>50GB)
+    if very_large_files:
+        files_str = "\n  - ".join(f"{path}: {size:.1f}GB" for path, size in very_large_files[:5])
+        more_str = f"\n  ... and {len(very_large_files) - 5} more files" if len(very_large_files) > 5 else ""
+        logger.warning(
+            f"Found {len(very_large_files)} files exceeding the {MAX_FILE_SIZE_GB}GB hard limit:\n"
+            f"  - {files_str}{more_str}\n"
+            f"These files may fail to upload. Consider splitting them into smaller chunks."
+        )
+
+    # Warn about large files (>20GB)
+    if large_files:
+        files_str = "\n  - ".join(f"{path}: {size:.1f}GB" for path, size in large_files[:5])
+        more_str = f"\n  ... and {len(large_files) - 5} more files" if len(large_files) > 5 else ""
+        logger.warning(
+            f"Found {len(large_files)} files larger than {RECOMMENDED_FILE_SIZE_GB}GB (recommended limit):\n"
+            f"  - {files_str}{more_str}\n"
+            f"Large files may slow down loading and processing."
+        )
+
+    logger.info("Validation checks complete.")
+

 def upload_large_folder_internal(
     api: "HfApi",
@@ -118,6 +220,11 @@ def upload_large_folder_internal(
     paths_list = [get_local_upload_paths(folder_path, relpath) for relpath in filtered_paths_list]
     logger.info(f"Found {len(paths_list)} candidate files to upload")

+    # Validate upload against repository limits
+    _validate_upload_limits(paths_list)
+
+    logger.info("Starting upload...")
+
     # Read metadata for each file
     items = [
         (paths, read_upload_metadata(folder_path, paths.path_in_repo))
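The per-folder check counts a folder's immediate children only: each file increments its direct parent, and each intermediate path component registers as a subdirectory of the folder above it. A minimal standalone sketch of that bookkeeping, on hypothetical repo paths:

```python
from collections import defaultdict
from pathlib import PurePosixPath

paths_in_repo = ["data/train/0.parquet", "data/train/1.parquet", "data/test/0.parquet", "README.md"]

entries = defaultdict(lambda: {"files": 0, "subdirs": set()})
for p in paths_in_repo:
    path = PurePosixPath(p)
    parent = str(path.parent) if str(path.parent) != "." else "."
    entries[parent]["files"] += 1  # a file counts toward its direct parent only
    for i, child in enumerate(path.parts[:-1]):
        # each intermediate component is a subdir of the folder above it
        entries["." if i == 0 else "/".join(path.parts[:i])]["subdirs"].add(child)

for folder, data in sorted(entries.items()):
    print(folder, data["files"], sorted(data["subdirs"]))
# . 1 ['data'] | data 0 ['test', 'train'] | data/test 1 [] | data/train 2 []
```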
huggingface_hub/cli/jobs.py
CHANGED

@@ -40,7 +40,7 @@ from typing import Dict, List, Optional, Union

 import requests

-from huggingface_hub import HfApi, SpaceHardware
+from huggingface_hub import HfApi, SpaceHardware, get_token
 from huggingface_hub.utils import logging
 from huggingface_hub.utils._dotenv import load_dotenv

@@ -75,8 +75,16 @@ class RunCommand(BaseHuggingfaceCLICommand):
     def register_subcommand(parser: _SubParsersAction) -> None:
         run_parser = parser.add_parser("run", help="Run a Job")
         run_parser.add_argument("image", type=str, help="The Docker image to use.")
-        run_parser.add_argument("-e", "--env", action="append", help="Set environment variables.")
-        run_parser.add_argument(
+        run_parser.add_argument("-e", "--env", action="append", help="Set environment variables. E.g. --env ENV=value")
+        run_parser.add_argument(
+            "-s",
+            "--secrets",
+            action="append",
+            help=(
+                "Set secret environment variables. E.g. --secrets SECRET=value "
+                "or `--secrets HF_TOKEN` to pass your Hugging Face token."
+            ),
+        )
         run_parser.add_argument("--env-file", type=str, help="Read in a file of environment variables.")
         run_parser.add_argument("--secrets-file", type=str, help="Read in a file of secret environment variables.")
         run_parser.add_argument(
@@ -113,14 +121,15 @@ class RunCommand(BaseHuggingfaceCLICommand):
         self.command: List[str] = args.command
         self.env: dict[str, Optional[str]] = {}
         if args.env_file:
-            self.env.update(load_dotenv(Path(args.env_file).read_text()))
+            self.env.update(load_dotenv(Path(args.env_file).read_text(), environ=os.environ.copy()))
         for env_value in args.env or []:
-            self.env.update(load_dotenv(env_value))
+            self.env.update(load_dotenv(env_value, environ=os.environ.copy()))
         self.secrets: dict[str, Optional[str]] = {}
+        extended_environ = _get_extended_environ()
         if args.secrets_file:
-            self.secrets.update(load_dotenv(Path(args.secrets_file).read_text()))
+            self.secrets.update(load_dotenv(Path(args.secrets_file).read_text(), environ=extended_environ))
         for secret in args.secrets or []:
-            self.secrets.update(load_dotenv(secret))
+            self.secrets.update(load_dotenv(secret, environ=extended_environ))
         self.flavor: Optional[SpaceHardware] = args.flavor
         self.timeout: Optional[str] = args.timeout
         self.detach: bool = args.detach
@@ -449,7 +458,15 @@ class UvCommand(BaseHuggingfaceCLICommand):
             help=f"Flavor for the hardware, as in HF Spaces. Defaults to `cpu-basic`. Possible values: {', '.join(SUGGESTED_FLAVORS)}.",
         )
         run_parser.add_argument("-e", "--env", action="append", help="Environment variables")
-        run_parser.add_argument(
+        run_parser.add_argument(
+            "-s",
+            "--secrets",
+            action="append",
+            help=(
+                "Set secret environment variables. E.g. --secrets SECRET=value "
+                "or `--secrets HF_TOKEN` to pass your Hugging Face token."
+            ),
+        )
         run_parser.add_argument("--env-file", type=str, help="Read in a file of environment variables.")
         run_parser.add_argument(
             "--secrets-file",
@@ -480,14 +497,15 @@ class UvCommand(BaseHuggingfaceCLICommand):
         self.image = args.image
         self.env: dict[str, Optional[str]] = {}
         if args.env_file:
-            self.env.update(load_dotenv(Path(args.env_file).read_text()))
+            self.env.update(load_dotenv(Path(args.env_file).read_text(), environ=os.environ.copy()))
         for env_value in args.env or []:
-            self.env.update(load_dotenv(env_value))
+            self.env.update(load_dotenv(env_value, environ=os.environ.copy()))
         self.secrets: dict[str, Optional[str]] = {}
+        extended_environ = _get_extended_environ()
         if args.secrets_file:
-            self.secrets.update(load_dotenv(Path(args.secrets_file).read_text()))
+            self.secrets.update(load_dotenv(Path(args.secrets_file).read_text(), environ=extended_environ))
         for secret in args.secrets or []:
-            self.secrets.update(load_dotenv(secret))
+            self.secrets.update(load_dotenv(secret, environ=extended_environ))
         self.flavor: Optional[SpaceHardware] = args.flavor
         self.timeout: Optional[str] = args.timeout
         self.detach: bool = args.detach
@@ -523,3 +541,10 @@ class UvCommand(BaseHuggingfaceCLICommand):
         # Now let's stream the logs
         for log in api.fetch_job_logs(job_id=job.id):
             print(log)
+
+
+def _get_extended_environ() -> Dict[str, str]:
+    extended_environ = os.environ.copy()
+    if (token := get_token()) is not None:
+        extended_environ["HF_TOKEN"] = token
+    return extended_environ
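How `--secrets HF_TOKEN` resolves, in a minimal sketch: a bare key with no `=` falls back to the supplied `environ` mapping, which `_get_extended_environ()` seeds with the locally saved token:

```python
from huggingface_hub import get_token
from huggingface_hub.utils._dotenv import load_dotenv

environ = {"HF_TOKEN": get_token() or ""}
secrets = load_dotenv("HF_TOKEN", environ=environ)  # no '=' -> value taken from environ
print(sorted(secrets))  # -> ['HF_TOKEN']
```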
huggingface_hub/hf_api.py
CHANGED

@@ -132,7 +132,11 @@ from .utils import (
     validate_hf_hub_args,
 )
 from .utils import tqdm as hf_tqdm
-from .utils._auth import
+from .utils._auth import (
+    _get_token_from_environment,
+    _get_token_from_file,
+    _get_token_from_google_colab,
+)
 from .utils._deprecation import _deprecate_method
 from .utils._runtime import is_xet_available
 from .utils._typing import CallableT
@@ -1777,18 +1781,20 @@ class HfApi:
         try:
             hf_raise_for_status(r)
         except HTTPError as e:
-            [12 removed lines, truncated in the diff source]
+            if e.response.status_code == 401:
+                error_message = "Invalid user token."
+                # Check which token is the effective one and generate the error message accordingly
+                if effective_token == _get_token_from_google_colab():
+                    error_message += " The token from Google Colab vault is invalid. Please update it from the UI."
+                elif effective_token == _get_token_from_environment():
+                    error_message += (
+                        " The token from HF_TOKEN environment variable is invalid. "
+                        "Note that HF_TOKEN takes precedence over `hf auth login`."
+                    )
+                elif effective_token == _get_token_from_file():
+                    error_message += " The token stored is invalid. Please run `hf auth login` to update it."
+                raise HTTPError(error_message, request=e.request, response=e.response) from e
+            raise
         return r.json()

     @_deprecate_method(
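A hedged sketch of the precedence the new 401 handler reports on, using the same private helpers the diff imports (the Colab vault case is omitted here):

```python
from huggingface_hub.utils._auth import _get_token_from_environment, _get_token_from_file

env_token = _get_token_from_environment()  # HF_TOKEN env var wins ...
file_token = _get_token_from_file()        # ... over the `hf auth login` token file
effective = env_token or file_token
print("token source:", "HF_TOKEN env var" if env_token else "token file" if file_token else "none")
```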
@@ -5315,14 +5321,18 @@ class HfApi:
         1. (Check parameters and setup.)
         2. Create repo if missing.
         3. List local files to upload.
-        4. Start workers. Workers can perform the following tasks:
+        4. Run validation checks and display warnings if repository limits might be exceeded:
+            - Warns if the total number of files exceeds 100k (recommended limit).
+            - Warns if any folder contains more than 10k files (recommended limit).
+            - Warns about files larger than 20GB (recommended) or 50GB (hard limit).
+        5. Start workers. Workers can perform the following tasks:
             - Hash a file.
             - Get upload mode (regular or LFS) for a list of files.
             - Pre-upload an LFS file.
             - Commit a bunch of files.
           Once a worker finishes a task, it will move on to the next task based on the priority list (see below) until
           all files are uploaded and committed.
-        5. While workers are up, regularly print a report to sys.stdout.
+        6. While workers are up, regularly print a report to sys.stdout.

         Order of priority:
         1. Commit if more than 5 minutes since last commit attempt (and at least 1 file).
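Hedged usage sketch: the validation step (4.) only logs warnings, so the upload still proceeds. Repo id and folder path below are placeholders:

```python
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="username/my-dataset",  # hypothetical repo
    repo_type="dataset",
    folder_path="./my-dataset",
)
```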
huggingface_hub/inference/_client.py
CHANGED

@@ -81,6 +81,7 @@ from huggingface_hub.inference._generated.types import (
     ImageSegmentationSubtask,
     ImageToImageTargetSize,
     ImageToTextOutput,
+    ImageToVideoTargetSize,
     ObjectDetectionOutputElement,
     Padding,
     QuestionAnsweringOutputElement,
@@ -132,7 +133,7 @@ class InferenceClient:
         Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2
         arguments are mutually exclusive. If a URL is passed as `model` or `base_url` for chat completion, the `(/v1)/chat/completions` suffix path will be appended to the URL.
     provider (`str`, *optional*):
-        Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, "sambanova"` or `"together"`.
+        Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, `"sambanova"`, `"scaleway"` or `"together"`.
         Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
         If model is a URL or `base_url` is passed, then `provider` is not used.
     token (`str`, *optional*):
@@ -1339,6 +1340,85 @@ class InferenceClient:
         response = provider_helper.get_response(response, request_parameters)
         return _bytes_to_image(response)

+    def image_to_video(
+        self,
+        image: ContentT,
+        *,
+        model: Optional[str] = None,
+        prompt: Optional[str] = None,
+        negative_prompt: Optional[str] = None,
+        num_frames: Optional[float] = None,
+        num_inference_steps: Optional[int] = None,
+        guidance_scale: Optional[float] = None,
+        seed: Optional[int] = None,
+        target_size: Optional[ImageToVideoTargetSize] = None,
+        **kwargs,
+    ) -> bytes:
+        """
+        Generate a video from an input image.
+
+        Args:
+            image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
+                The input image to generate a video from. It can be raw bytes, an image file, a URL to an online image, or a PIL Image.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
+            prompt (`str`, *optional*):
+                The text prompt to guide the video generation.
+            negative_prompt (`str`, *optional*):
+                One prompt to guide what NOT to include in video generation.
+            num_frames (`float`, *optional*):
+                The num_frames parameter determines how many video frames are generated.
+            num_inference_steps (`int`, *optional*):
+                For diffusion models. The number of denoising steps. More denoising steps usually lead to a higher
+                quality image at the expense of slower inference.
+            guidance_scale (`float`, *optional*):
+                For diffusion models. A higher guidance scale value encourages the model to generate videos closely
+                linked to the text prompt at the expense of lower image quality.
+            seed (`int`, *optional*):
+                The seed to use for the video generation.
+            target_size (`ImageToVideoTargetSize`, *optional*):
+                The size in pixel of the output video frames.
+            num_inference_steps (`int`, *optional*):
+                The number of denoising steps. More denoising steps usually lead to a higher quality video at the
+                expense of slower inference.
+            seed (`int`, *optional*):
+                Seed for the random number generator.
+
+        Returns:
+            `bytes`: The generated video.
+
+        Examples:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+        >>> video = client.image_to_video("cat.jpg", model="Wan-AI/Wan2.2-I2V-A14B", prompt="turn the cat into a tiger")
+        >>> with open("tiger.mp4", "wb") as f:
+        ...     f.write(video)
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="image-to-video", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=image,
+            parameters={
+                "prompt": prompt,
+                "negative_prompt": negative_prompt,
+                "num_frames": num_frames,
+                "num_inference_steps": num_inference_steps,
+                "guidance_scale": guidance_scale,
+                "seed": seed,
+                "target_size": target_size,
+                **kwargs,
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = self._inner_post(request_parameters)
+        response = provider_helper.get_response(response, request_parameters)
+        return response
+
     def image_to_text(self, image: ContentT, *, model: Optional[str] = None) -> ImageToTextOutput:
         """
         Takes an input image and return text.
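A hedged usage sketch with explicit sizing, assuming `ImageToVideoTargetSize` carries `width`/`height` fields like its image-to-image counterpart; the model id comes from the docstring example above:

```python
from huggingface_hub import InferenceClient
from huggingface_hub.inference._generated.types import ImageToVideoTargetSize

client = InferenceClient(provider="fal-ai")  # image-to-video is mapped to fal-ai below
video = client.image_to_video(
    "cat.jpg",
    model="Wan-AI/Wan2.2-I2V-A14B",
    prompt="the cat slowly turns its head",
    target_size=ImageToVideoTargetSize(width=1280, height=720),
)
with open("cat.mp4", "wb") as f:
    f.write(video)
```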
huggingface_hub/inference/_generated/_async_client.py
CHANGED

@@ -66,6 +66,7 @@ from huggingface_hub.inference._generated.types import (
     ImageSegmentationSubtask,
     ImageToImageTargetSize,
     ImageToTextOutput,
+    ImageToVideoTargetSize,
     ObjectDetectionOutputElement,
     Padding,
     QuestionAnsweringOutputElement,
@@ -120,7 +121,7 @@ class AsyncInferenceClient:
         Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2
         arguments are mutually exclusive. If a URL is passed as `model` or `base_url` for chat completion, the `(/v1)/chat/completions` suffix path will be appended to the URL.
     provider (`str`, *optional*):
-        Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, "sambanova"` or `"together"`.
+        Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, `"sambanova"`, `"scaleway"` or `"together"`.
         Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
         If model is a URL or `base_url` is passed, then `provider` is not used.
     token (`str`, *optional*):
@@ -1385,6 +1386,86 @@ class AsyncInferenceClient:
         response = provider_helper.get_response(response, request_parameters)
         return _bytes_to_image(response)

+    async def image_to_video(
+        self,
+        image: ContentT,
+        *,
+        model: Optional[str] = None,
+        prompt: Optional[str] = None,
+        negative_prompt: Optional[str] = None,
+        num_frames: Optional[float] = None,
+        num_inference_steps: Optional[int] = None,
+        guidance_scale: Optional[float] = None,
+        seed: Optional[int] = None,
+        target_size: Optional[ImageToVideoTargetSize] = None,
+        **kwargs,
+    ) -> bytes:
+        """
+        Generate a video from an input image.
+
+        Args:
+            image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`):
+                The input image to generate a video from. It can be raw bytes, an image file, a URL to an online image, or a PIL Image.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
+            prompt (`str`, *optional*):
+                The text prompt to guide the video generation.
+            negative_prompt (`str`, *optional*):
+                One prompt to guide what NOT to include in video generation.
+            num_frames (`float`, *optional*):
+                The num_frames parameter determines how many video frames are generated.
+            num_inference_steps (`int`, *optional*):
+                For diffusion models. The number of denoising steps. More denoising steps usually lead to a higher
+                quality image at the expense of slower inference.
+            guidance_scale (`float`, *optional*):
+                For diffusion models. A higher guidance scale value encourages the model to generate videos closely
+                linked to the text prompt at the expense of lower image quality.
+            seed (`int`, *optional*):
+                The seed to use for the video generation.
+            target_size (`ImageToVideoTargetSize`, *optional*):
+                The size in pixel of the output video frames.
+            num_inference_steps (`int`, *optional*):
+                The number of denoising steps. More denoising steps usually lead to a higher quality video at the
+                expense of slower inference.
+            seed (`int`, *optional*):
+                Seed for the random number generator.
+
+        Returns:
+            `bytes`: The generated video.
+
+        Examples:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> video = await client.image_to_video("cat.jpg", model="Wan-AI/Wan2.2-I2V-A14B", prompt="turn the cat into a tiger")
+        >>> with open("tiger.mp4", "wb") as f:
+        ...     f.write(video)
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="image-to-video", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=image,
+            parameters={
+                "prompt": prompt,
+                "negative_prompt": negative_prompt,
+                "num_frames": num_frames,
+                "num_inference_steps": num_inference_steps,
+                "guidance_scale": guidance_scale,
+                "seed": seed,
+                "target_size": target_size,
+                **kwargs,
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        response = provider_helper.get_response(response, request_parameters)
+        return response
+
     async def image_to_text(self, image: ContentT, *, model: Optional[str] = None) -> ImageToTextOutput:
         """
         Takes an input image and return text.
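The async variant is awaitable; a minimal sketch of driving it from synchronous code:

```python
import asyncio

from huggingface_hub import AsyncInferenceClient

async def main() -> None:
    client = AsyncInferenceClient()
    video = await client.image_to_video("cat.jpg", model="Wan-AI/Wan2.2-I2V-A14B")
    with open("tiger.mp4", "wb") as f:
        f.write(video)

asyncio.run(main())
```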
huggingface_hub/inference/_providers/__init__.py
CHANGED

@@ -13,6 +13,7 @@ from .cohere import CohereConversationalTask
 from .fal_ai import (
     FalAIAutomaticSpeechRecognitionTask,
     FalAIImageToImageTask,
+    FalAIImageToVideoTask,
     FalAITextToImageTask,
     FalAITextToSpeechTask,
     FalAITextToVideoTask,
@@ -37,6 +38,7 @@ from .nscale import NscaleConversationalTask, NscaleTextToImageTask
 from .openai import OpenAIConversationalTask
 from .replicate import ReplicateImageToImageTask, ReplicateTask, ReplicateTextToImageTask, ReplicateTextToSpeechTask
 from .sambanova import SambanovaConversationalTask, SambanovaFeatureExtractionTask
+from .scaleway import ScalewayConversationalTask, ScalewayFeatureExtractionTask
 from .together import TogetherConversationalTask, TogetherTextGenerationTask, TogetherTextToImageTask


@@ -59,6 +61,7 @@ PROVIDER_T = Literal[
     "openai",
     "replicate",
     "sambanova",
+    "scaleway",
     "together",
 ]

@@ -79,6 +82,7 @@ PROVIDERS: Dict[PROVIDER_T, Dict[str, TaskProviderHelper]] = {
         "text-to-image": FalAITextToImageTask(),
         "text-to-speech": FalAITextToSpeechTask(),
         "text-to-video": FalAITextToVideoTask(),
+        "image-to-video": FalAIImageToVideoTask(),
         "image-to-image": FalAIImageToImageTask(),
     },
     "featherless-ai": {
@@ -152,6 +156,10 @@ PROVIDERS: Dict[PROVIDER_T, Dict[str, TaskProviderHelper]] = {
         "conversational": SambanovaConversationalTask(),
         "feature-extraction": SambanovaFeatureExtractionTask(),
     },
+    "scaleway": {
+        "conversational": ScalewayConversationalTask(),
+        "feature-extraction": ScalewayFeatureExtractionTask(),
+    },
     "together": {
         "text-to-image": TogetherTextToImageTask(),
         "conversational": TogetherConversationalTask(),
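Once registered, the provider is selectable like any other; a hedged sketch (whether a given model is actually served by Scaleway is an assumption here):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="scaleway")
out = client.chat_completion(
    messages=[{"role": "user", "content": "Say hello"}],
    model="meta-llama/Llama-3.1-8B-Instruct",  # hypothetical Scaleway-served model
    max_tokens=32,
)
print(out.choices[0].message.content)
```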
huggingface_hub/inference/_providers/fal_ai.py
CHANGED

@@ -213,3 +213,34 @@ class FalAIImageToImageTask(FalAIQueueTask):
         output = super().get_response(response, request_params)
         url = _as_dict(output)["images"][0]["url"]
         return get_session().get(url).content
+
+
+class FalAIImageToVideoTask(FalAIQueueTask):
+    def __init__(self):
+        super().__init__("image-to-video")
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[Dict]:
+        image_url = _as_url(inputs, default_mime_type="image/jpeg")
+        payload: Dict[str, Any] = {
+            "image_url": image_url,
+            **filter_none(parameters),
+        }
+        if provider_mapping_info.adapter_weights_path is not None:
+            lora_path = constants.HUGGINGFACE_CO_URL_TEMPLATE.format(
+                repo_id=provider_mapping_info.hf_model_id,
+                revision="main",
+                filename=provider_mapping_info.adapter_weights_path,
+            )
+            payload["loras"] = [{"path": lora_path, "scale": 1}]
+        return payload
+
+    def get_response(
+        self,
+        response: Union[bytes, Dict],
+        request_params: Optional[RequestParameters] = None,
+    ) -> Any:
+        output = super().get_response(response, request_params)
+        url = _as_dict(output)["video"]["url"]
+        return get_session().get(url).content
huggingface_hub/inference/_providers/scaleway.py
ADDED

@@ -0,0 +1,28 @@
+from typing import Any, Dict, Optional, Union
+
+from huggingface_hub.inference._common import RequestParameters, _as_dict
+
+from ._common import BaseConversationalTask, InferenceProviderMapping, TaskProviderHelper, filter_none
+
+
+class ScalewayConversationalTask(BaseConversationalTask):
+    def __init__(self):
+        super().__init__(provider="scaleway", base_url="https://api.scaleway.ai")
+
+
+class ScalewayFeatureExtractionTask(TaskProviderHelper):
+    def __init__(self):
+        super().__init__(provider="scaleway", base_url="https://api.scaleway.ai", task="feature-extraction")
+
+    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
+        return "/v1/embeddings"
+
+    def _prepare_payload_as_dict(
+        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
+    ) -> Optional[Dict]:
+        parameters = filter_none(parameters)
+        return {"input": inputs, "model": provider_mapping_info.provider_id, **parameters}
+
+    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
+        embeddings = _as_dict(response)["data"]
+        return [embedding["embedding"] for embedding in embeddings]
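A hedged sketch of the feature-extraction path: the helper posts to Scaleway's OpenAI-style `/v1/embeddings` route and unwraps one vector per input (the model id is a placeholder):

```python
from huggingface_hub import InferenceClient

client = InferenceClient(provider="scaleway")
embedding = client.feature_extraction(
    "Today is a sunny day",
    model="BAAI/bge-multilingual-gemma2",  # hypothetical Scaleway-served embedding model
)
print(len(embedding))  # embedding dimension
```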
huggingface_hub/utils/_dotenv.py
CHANGED

@@ -1,9 +1,9 @@
 # AI-generated module (ChatGPT)
 import re
-from typing import Dict
+from typing import Dict, Optional


-def load_dotenv(dotenv_str: str) -> Dict[str, str]:
+def load_dotenv(dotenv_str: str, environ: Optional[Dict[str, str]] = None) -> Dict[str, str]:
     """
     Parse a DOTENV-format string and return a dictionary of key-value pairs.
     Handles quoted values, comments, export keyword, and blank lines.
@@ -12,17 +12,17 @@ def load_dotenv(dotenv_str: str) -> Dict[str, str]:
     line_pattern = re.compile(
         r"""
         ^\s*
-        (?:export\
+        (?:export[^\S\n]+)? # optional export
         ([A-Za-z_][A-Za-z0-9_]*) # key
-        \
+        [^\S\n]*(=)?[^\S\n]*
         ( # value group
             (?:
                 '(?:\\'|[^'])*' # single-quoted value
-                | "(
+                | \"(?:\\\"|[^\"])*\" # double-quoted value
                 | [^#\n\r]+? # unquoted value
             )
         )?
-        \
+        [^\S\n]*(?:\#.*)?$ # optional inline comment
         """,
         re.VERBOSE,
     )
@@ -33,19 +33,23 @@ def load_dotenv(dotenv_str: str) -> Dict[str, str]:
             continue  # Skip comments and empty lines

         match = line_pattern.match(line)
-        [14 removed lines, truncated in the diff source]
+        if match:
+            key = match.group(1)
+            val = None
+            if match.group(2):  # if there is '='
+                raw_val = match.group(3) or ""
+                val = raw_val.strip()
+                # Remove surrounding quotes if quoted
+                if (val.startswith('"') and val.endswith('"')) or (val.startswith("'") and val.endswith("'")):
+                    val = val[1:-1]
+                    val = val.replace(r"\n", "\n").replace(r"\t", "\t").replace(r"\"", '"').replace(r"\\", "\\")
+                    if raw_val.startswith('"'):
+                        val = val.replace(r"\$", "$")  # only in double quotes
+            elif environ is not None:
+                # Get it from the current environment
+                val = environ.get(key)
+
+            if val is not None:
+                env[key] = val

     return env
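A minimal sketch of the new `environ` fallback: a bare key with no `=` now resolves from the supplied mapping instead of being dropped:

```python
from huggingface_hub.utils._dotenv import load_dotenv

print(load_dotenv("export API_KEY\nDEBUG=1\n# comment", environ={"API_KEY": "from-env"}))
# {'API_KEY': 'from-env', 'DEBUG': '1'}
```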
huggingface_hub/utils/_runtime.py
CHANGED

@@ -385,6 +385,7 @@ def dump_environment_info() -> Dict[str, Any]:
     info["HF_HUB_DISABLE_SYMLINKS_WARNING"] = constants.HF_HUB_DISABLE_SYMLINKS_WARNING
     info["HF_HUB_DISABLE_EXPERIMENTAL_WARNING"] = constants.HF_HUB_DISABLE_EXPERIMENTAL_WARNING
     info["HF_HUB_DISABLE_IMPLICIT_TOKEN"] = constants.HF_HUB_DISABLE_IMPLICIT_TOKEN
+    info["HF_HUB_DISABLE_XET"] = constants.HF_HUB_DISABLE_XET
     info["HF_HUB_ENABLE_HF_TRANSFER"] = constants.HF_HUB_ENABLE_HF_TRANSFER
     info["HF_HUB_ETAG_TIMEOUT"] = constants.HF_HUB_ETAG_TIMEOUT
     info["HF_HUB_DOWNLOAD_TIMEOUT"] = constants.HF_HUB_DOWNLOAD_TIMEOUT
{huggingface_hub-0.34.3.dist-info → huggingface_hub-0.34.5.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: huggingface-hub
-Version: 0.34.3
+Version: 0.34.5
 Summary: Client library to download and publish models, datasets and other repos on the huggingface.co hub
 Home-page: https://github.com/huggingface/huggingface_hub
 Author: Hugging Face, Inc.
{huggingface_hub-0.34.3.dist-info → huggingface_hub-0.34.5.dist-info}/RECORD
CHANGED

@@ -1,15 +1,15 @@
-huggingface_hub/__init__.py,sha256=
+huggingface_hub/__init__.py,sha256=rv3FvQosoKR8MvPywT4KVkRA8MmMN0CmBPLTjmVp_LI,51837
 huggingface_hub/_commit_api.py,sha256=68HxFnJE2s-QmGZRHQav5kOMTseYV_ZQi04ADaQmZUk,38979
 huggingface_hub/_commit_scheduler.py,sha256=tfIoO1xWHjTJ6qy6VS6HIoymDycFPg0d6pBSZprrU2U,14679
 huggingface_hub/_inference_endpoints.py,sha256=ahmbPcEXsJ_JcMb9TDgdkD8Z2z9uytkFG3_1o6dTm8g,17598
-huggingface_hub/_jobs_api.py,sha256=
+huggingface_hub/_jobs_api.py,sha256=Fm59WJ-P3E5mkCthQWgGugTlbXnQe4nxDKIwZ_jtpt8,5344
 huggingface_hub/_local_folder.py,sha256=2iHXNgIT3UdSt2PvCovd0NzgVxTRypKb-rvAFLK-gZU,17305
 huggingface_hub/_login.py,sha256=rcwx9EZdFUB3vuowC5QBiSYS4ImUnBzo04igR1Z8l40,20256
 huggingface_hub/_oauth.py,sha256=75ya9toHxC0WRKsLOAI212CrssRjTSxs16mHWWNMb3w,18714
 huggingface_hub/_snapshot_download.py,sha256=b-NzYQcvktsAirIfGQKgzQwu8w0S6lhBTvnJ5S6saw8,16166
 huggingface_hub/_space_api.py,sha256=jb6rF8qLtjaNU12D-8ygAPM26xDiHCu8CHXHowhGTmg,5470
 huggingface_hub/_tensorboard_logger.py,sha256=ZkYcAUiRC8RGL214QUYtp58O8G5tn-HF6DCWha9imcA,8358
-huggingface_hub/_upload_large_folder.py,sha256=
+huggingface_hub/_upload_large_folder.py,sha256=l2YWLZttOw69EGdihT3y_Nhr5mweLGooZG9L8smNoHY,30066
 huggingface_hub/_webhooks_payload.py,sha256=Xm3KaK7tCOGBlXkuZvbym6zjHXrT1XCrbUFWuXiBmNY,3617
 huggingface_hub/_webhooks_server.py,sha256=5J63wk9MUGKBNJVsOD9i60mJ-VMp0YYmlf87vQsl-L8,15767
 huggingface_hub/community.py,sha256=4MtcoxEI9_0lmmilBEnvUEi8_O1Ivfa8p6eKxYU5-ts,12198
@@ -18,7 +18,7 @@ huggingface_hub/dataclasses.py,sha256=sgPdEi2UDprhNPP2PPkiSlzsHdC1WcpwVTLwlHAEcr
 huggingface_hub/errors.py,sha256=D7Lw0Jjrf8vfmD0B26LEvg-JWkU8Zq0KDPJOzFY4QLw,11201
 huggingface_hub/fastai_utils.py,sha256=DpeH9d-6ut2k_nCAAwglM51XmRmgfbRe2SPifpVL5Yk,16745
 huggingface_hub/file_download.py,sha256=E-NWON01pprbAsw7Kz477JX6f8HTWsdpEdQAtA37t5c,78974
-huggingface_hub/hf_api.py,sha256=
+huggingface_hub/hf_api.py,sha256=a12ywN6Lc43A12B36xxK4dJzXsgtmjQTZqYAC4gjTzY,465077
 huggingface_hub/hf_file_system.py,sha256=nrNOoNHRwf1swebtQvZExSblRjQg9rHKxL7Cslk72uw,46899
 huggingface_hub/hub_mixin.py,sha256=MArtbUxjXiYeOvOmNBG9I_j5t02m2xCVAcge4waip1w,38091
 huggingface_hub/inference_api.py,sha256=b4-NhPSn9b44nYKV8tDKXodmE4JVdEymMWL4CVGkzlE,8323
@@ -34,7 +34,7 @@ huggingface_hub/cli/auth.py,sha256=BO6sJJcdHhjouMEH5JpUmC0qg3vaukX4I5DtA7ohLes,7
 huggingface_hub/cli/cache.py,sha256=zPLEWP5MidLElo0FeLvkUjCT0EYlX1pmC-2TkCa8kD0,15871
 huggingface_hub/cli/download.py,sha256=PUpW-nbu6ZAP6P9DpVhliAKSSlxvXWkVh0U2KZoukhQ,7115
 huggingface_hub/cli/hf.py,sha256=SQ73_SXEQnWVJkhKT_6bwNQBHQXGOdI5qqlTTtI0XH0,2328
-huggingface_hub/cli/jobs.py,sha256=
+huggingface_hub/cli/jobs.py,sha256=QPF8YT31hoyjXXp_pD-LK_VHNFXF09su-nLeP4b9WGQ,21538
 huggingface_hub/cli/lfs.py,sha256=J9MkKOGUW6GjBrKs2zZUCOaAGxpatxsEoSbBjuhDJV8,7230
 huggingface_hub/cli/repo.py,sha256=lNDEZbOpLW8SQVBYDQ1xofw9nJ7M8AUsd2kBIV_m_do,10576
 huggingface_hub/cli/repo_files.py,sha256=L-Ku52l2vZ04GCabp_OhVXqLzE9dsKQqaQKudGzjWg4,4831
@@ -57,10 +57,10 @@ huggingface_hub/commands/upload_large_folder.py,sha256=_1id84BFtbL8HgFRKZ-el_uPr
 huggingface_hub/commands/user.py,sha256=MjG1lwMq1p5QAlBolFnRX_pUxE3Kd3UiPl-nEEQSgXg,7537
 huggingface_hub/commands/version.py,sha256=rGpCbvxImY9eQqXrshYt609Iws27R75WARmKQrIo6Ok,1390
 huggingface_hub/inference/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-huggingface_hub/inference/_client.py,sha256=
+huggingface_hub/inference/_client.py,sha256=__SCWGGXY6tivLdWJUiL5YZXYEyaOXNOMmwo_6UksZo,165054
 huggingface_hub/inference/_common.py,sha256=6qAIauugyl1eHk0FhWdjBNEXBNF33_VXC8lc1GR8t7s,15874
 huggingface_hub/inference/_generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-huggingface_hub/inference/_generated/_async_client.py,sha256=
+huggingface_hub/inference/_generated/_async_client.py,sha256=7u6ZvDz6nDQ5fhs2qaBAVDw4QZgshGZiBeq-hdRZoL0,171289
 huggingface_hub/inference/_generated/types/__init__.py,sha256=9WvrGQ8aThtKSNzZF06j-CIE2ZuItne8FFnea1p1u38,6557
 huggingface_hub/inference/_generated/types/audio_classification.py,sha256=Jg3mzfGhCSH6CfvVvgJSiFpkz6v4nNA0G4LJXacEgNc,1573
 huggingface_hub/inference/_generated/types/audio_to_audio.py,sha256=2Ep4WkePL7oJwcp5nRJqApwviumGHbft9HhXE9XLHj4,891
@@ -103,12 +103,12 @@ huggingface_hub/inference/_mcp/constants.py,sha256=AnOp_oR5Vty0d5J3AynGGNK9i1I9K
 huggingface_hub/inference/_mcp/mcp_client.py,sha256=ndaTcZZPbU1ZTNUeB9-WdaOx7bHD3lsrXnKxCeiwpUg,15788
 huggingface_hub/inference/_mcp/types.py,sha256=ic8VSR9JY1d-vPWsBVXYtXtIU669-HbGQ3m12Szs7BQ,815
 huggingface_hub/inference/_mcp/utils.py,sha256=VsRWl0fuSZDS0zNT9n7FOMSlzA0UBbP8p8xWKWDt2Pc,4093
-huggingface_hub/inference/_providers/__init__.py,sha256=
-huggingface_hub/inference/_providers/_common.py,sha256=
+huggingface_hub/inference/_providers/__init__.py,sha256=j3kMOwM1nopVGZeYAbw78sv0yvQiwB2Nzpm9pQqqnSg,8594
+huggingface_hub/inference/_providers/_common.py,sha256=mUL5T9v3iqM1d7cYIh0qEyVC8w7UliQz_LEni2srekI,11350
 huggingface_hub/inference/_providers/black_forest_labs.py,sha256=wO7qgRyNyrIKlZtvL3vJEbS4-D19kfoXZk6PDh1dTis,2842
 huggingface_hub/inference/_providers/cerebras.py,sha256=QOJ-1U-os7uE7p6eUnn_P_APq-yQhx28be7c3Tq2EuA,210
 huggingface_hub/inference/_providers/cohere.py,sha256=O3tC-qIUL91mx_mE8bOHCtDWcQuKOUauhUoXSUBUCZ8,1253
-huggingface_hub/inference/_providers/fal_ai.py,sha256=
+huggingface_hub/inference/_providers/fal_ai.py,sha256=Z3-MPddtsfjWT1Zmvni0m8LPBSFjOa_wyv8xV9FNtoA,9860
 huggingface_hub/inference/_providers/featherless_ai.py,sha256=QxBz-32O4PztxixrIjrfKuTOzvfqyUi-cVsw0Hf_zlY,1382
 huggingface_hub/inference/_providers/fireworks_ai.py,sha256=Id226ITfPkOcFMFzly3MW9l-dZl9l4qizL4JEHWkBFk,1215
 huggingface_hub/inference/_providers/groq.py,sha256=JTk2JV4ZOlaohho7zLAFQtk92kGVsPmLJ1hmzcwsqvQ,315
@@ -120,6 +120,7 @@ huggingface_hub/inference/_providers/nscale.py,sha256=qWUsWinQmUbNUqehyKn34tVoWe
 huggingface_hub/inference/_providers/openai.py,sha256=GCVYeNdjWIgpQQ7E_Xv8IebmdhTi0S6WfFosz3nLtps,1089
 huggingface_hub/inference/_providers/replicate.py,sha256=BuLb1x4nUlH5SfazBwvMiFwwcs-OS99U87m3QWdx2is,3810
 huggingface_hub/inference/_providers/sambanova.py,sha256=Unt3H3jr_kgI9vzRjmmW1DFyoEuPkKCcgIIloiOj3j8,2037
+huggingface_hub/inference/_providers/scaleway.py,sha256=Jy81kXWbXCHBpx6xmyzdEfXGSyhUfjKOLHuDSvhHWGo,1209
 huggingface_hub/inference/_providers/together.py,sha256=KHF19CS3qXS7G1-CwcMiD8Z5wzPKEKi4F2DzqAthbBE,3439
 huggingface_hub/serialization/__init__.py,sha256=kn-Fa-m4FzMnN8lNsF-SwFcfzug4CucexybGKyvZ8S0,1041
 huggingface_hub/serialization/_base.py,sha256=Df3GwGR9NzeK_SD75prXLucJAzPiNPgHbgXSw-_LTk8,8126
@@ -135,7 +136,7 @@ huggingface_hub/utils/_cache_manager.py,sha256=osqV4gM5Mx293Kw1POuJ5uKIcoIG57gY0
 huggingface_hub/utils/_chunk_utils.py,sha256=kRCaj5228_vKcyLWspd8Xq01f17Jz6ds5Sr9ed5d_RU,2130
 huggingface_hub/utils/_datetime.py,sha256=kCS5jaKV25kOncX1xujbXsz5iDLcjLcLw85semGNzxQ,2770
 huggingface_hub/utils/_deprecation.py,sha256=HZhRGGUX_QMKBBBwHHlffLtmCSK01TOpeXHefZbPfwI,4872
-huggingface_hub/utils/_dotenv.py,sha256=
+huggingface_hub/utils/_dotenv.py,sha256=RzHqC8HgzVxE-N4DFBcnemvX0NHmXcV0My2ASK0U1OQ,2017
 huggingface_hub/utils/_experimental.py,sha256=3-c8irbn9sJr2CwWbzhGkIrdXKg8_x7BifhHFy32ei8,2470
 huggingface_hub/utils/_fixes.py,sha256=xQV1QkUn2WpLqLjtXNiyn9gh-454K6AF-Q3kwkYAQD8,4437
 huggingface_hub/utils/_git_credential.py,sha256=SDdsiREr1TcAR2Ze2TB0E5cYzVJgvDZrs60od9lAsMc,4596
@@ -145,7 +146,7 @@ huggingface_hub/utils/_http.py,sha256=her7UZ0KRo9WYDArpqVFyEXTusOGUECj5HNS8Eahqm
 huggingface_hub/utils/_lfs.py,sha256=EC0Oz6Wiwl8foRNkUOzrETXzAWlbgpnpxo5a410ovFY,3957
 huggingface_hub/utils/_pagination.py,sha256=EX5tRasSuQDaKbXuGYbInBK2odnSWNHgzw2tSgqeBRI,1906
 huggingface_hub/utils/_paths.py,sha256=w1ZhFmmD5ykWjp_hAvhjtOoa2ZUcOXJrF4a6O3QpAWo,5042
-huggingface_hub/utils/_runtime.py,sha256=
+huggingface_hub/utils/_runtime.py,sha256=L7SOYezdxKcwd4DovAY0UGY3qt27toXO-QjceIDwExk,11634
 huggingface_hub/utils/_safetensors.py,sha256=GW3nyv7xQcuwObKYeYoT9VhURVzG1DZTbKBKho8Bbos,4458
 huggingface_hub/utils/_subprocess.py,sha256=u9FFUDE7TrzQTiuEzlUnHx7S2P57GbYRV8u16GJwrFw,4625
 huggingface_hub/utils/_telemetry.py,sha256=54LXeIJU5pEGghPAh06gqNAR-UoxOjVLvKqAQscwqZs,4890
@@ -158,9 +159,9 @@ huggingface_hub/utils/insecure_hashlib.py,sha256=iAaepavFZ5Dhfa5n8KozRfQprKmvcjS
 huggingface_hub/utils/logging.py,sha256=0A8fF1yh3L9Ka_bCDX2ml4U5Ht0tY8Dr3JcbRvWFuwo,4909
 huggingface_hub/utils/sha.py,sha256=OFnNGCba0sNcT2gUwaVCJnldxlltrHHe0DS_PCpV3C4,2134
 huggingface_hub/utils/tqdm.py,sha256=xAKcyfnNHsZ7L09WuEM5Ew5-MDhiahLACbbN2zMmcLs,10671
-huggingface_hub-0.34.3.dist-info/LICENSE,sha256=
-huggingface_hub-0.34.3.dist-info/METADATA,sha256=
-huggingface_hub-0.34.3.dist-info/WHEEL,sha256=
-huggingface_hub-0.34.3.dist-info/entry_points.txt,sha256=
-huggingface_hub-0.34.3.dist-info/top_level.txt,sha256=
-huggingface_hub-0.34.3.dist-info/RECORD,,
+huggingface_hub-0.34.5.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+huggingface_hub-0.34.5.dist-info/METADATA,sha256=67CHYP_fg-DqvhmXT82RONWfD5P-YOKwqc8INQsDWSY,14699
+huggingface_hub-0.34.5.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+huggingface_hub-0.34.5.dist-info/entry_points.txt,sha256=HIzLhjwPTO7U_ncpW4AkmzAuaadr1ajmYagW5mdb5TM,217
+huggingface_hub-0.34.5.dist-info/top_level.txt,sha256=8KzlQJAY4miUvjAssOAJodqKOw3harNzuiwGQ9qLSSk,16
+huggingface_hub-0.34.5.dist-info/RECORD,,
{huggingface_hub-0.34.3.dist-info → huggingface_hub-0.34.5.dist-info}/LICENSE
File without changes
{huggingface_hub-0.34.3.dist-info → huggingface_hub-0.34.5.dist-info}/WHEEL
File without changes
{huggingface_hub-0.34.3.dist-info → huggingface_hub-0.34.5.dist-info}/entry_points.txt
File without changes
{huggingface_hub-0.34.3.dist-info → huggingface_hub-0.34.5.dist-info}/top_level.txt
File without changes