huggingface-hub 0.31.0rc0__py3-none-any.whl → 1.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.
- huggingface_hub/__init__.py +145 -46
- huggingface_hub/_commit_api.py +168 -119
- huggingface_hub/_commit_scheduler.py +15 -15
- huggingface_hub/_inference_endpoints.py +15 -12
- huggingface_hub/_jobs_api.py +301 -0
- huggingface_hub/_local_folder.py +18 -3
- huggingface_hub/_login.py +31 -63
- huggingface_hub/_oauth.py +460 -0
- huggingface_hub/_snapshot_download.py +239 -80
- huggingface_hub/_space_api.py +5 -5
- huggingface_hub/_tensorboard_logger.py +15 -19
- huggingface_hub/_upload_large_folder.py +172 -76
- huggingface_hub/_webhooks_payload.py +3 -3
- huggingface_hub/_webhooks_server.py +13 -25
- huggingface_hub/{commands → cli}/__init__.py +1 -15
- huggingface_hub/cli/_cli_utils.py +173 -0
- huggingface_hub/cli/auth.py +147 -0
- huggingface_hub/cli/cache.py +841 -0
- huggingface_hub/cli/download.py +189 -0
- huggingface_hub/cli/hf.py +60 -0
- huggingface_hub/cli/inference_endpoints.py +377 -0
- huggingface_hub/cli/jobs.py +772 -0
- huggingface_hub/cli/lfs.py +175 -0
- huggingface_hub/cli/repo.py +315 -0
- huggingface_hub/cli/repo_files.py +94 -0
- huggingface_hub/{commands/env.py → cli/system.py} +10 -13
- huggingface_hub/cli/upload.py +294 -0
- huggingface_hub/cli/upload_large_folder.py +117 -0
- huggingface_hub/community.py +20 -12
- huggingface_hub/constants.py +38 -53
- huggingface_hub/dataclasses.py +609 -0
- huggingface_hub/errors.py +80 -30
- huggingface_hub/fastai_utils.py +30 -41
- huggingface_hub/file_download.py +435 -351
- huggingface_hub/hf_api.py +2050 -1124
- huggingface_hub/hf_file_system.py +269 -152
- huggingface_hub/hub_mixin.py +43 -63
- huggingface_hub/inference/_client.py +347 -434
- huggingface_hub/inference/_common.py +133 -121
- huggingface_hub/inference/_generated/_async_client.py +397 -541
- huggingface_hub/inference/_generated/types/__init__.py +5 -1
- huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +3 -3
- huggingface_hub/inference/_generated/types/base.py +10 -7
- huggingface_hub/inference/_generated/types/chat_completion.py +59 -23
- huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
- huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
- huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
- huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
- huggingface_hub/inference/_generated/types/image_to_image.py +6 -2
- huggingface_hub/inference/_generated/types/image_to_video.py +60 -0
- huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
- huggingface_hub/inference/_generated/types/summarization.py +2 -2
- huggingface_hub/inference/_generated/types/table_question_answering.py +5 -5
- huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
- huggingface_hub/inference/_generated/types/text_generation.py +10 -10
- huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
- huggingface_hub/inference/_generated/types/token_classification.py +2 -2
- huggingface_hub/inference/_generated/types/translation.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
- huggingface_hub/inference/_mcp/__init__.py +0 -0
- huggingface_hub/inference/_mcp/_cli_hacks.py +88 -0
- huggingface_hub/inference/_mcp/agent.py +100 -0
- huggingface_hub/inference/_mcp/cli.py +247 -0
- huggingface_hub/inference/_mcp/constants.py +81 -0
- huggingface_hub/inference/_mcp/mcp_client.py +395 -0
- huggingface_hub/inference/_mcp/types.py +45 -0
- huggingface_hub/inference/_mcp/utils.py +128 -0
- huggingface_hub/inference/_providers/__init__.py +82 -7
- huggingface_hub/inference/_providers/_common.py +129 -27
- huggingface_hub/inference/_providers/black_forest_labs.py +6 -6
- huggingface_hub/inference/_providers/cerebras.py +1 -1
- huggingface_hub/inference/_providers/clarifai.py +13 -0
- huggingface_hub/inference/_providers/cohere.py +20 -3
- huggingface_hub/inference/_providers/fal_ai.py +183 -56
- huggingface_hub/inference/_providers/featherless_ai.py +38 -0
- huggingface_hub/inference/_providers/fireworks_ai.py +18 -0
- huggingface_hub/inference/_providers/groq.py +9 -0
- huggingface_hub/inference/_providers/hf_inference.py +69 -30
- huggingface_hub/inference/_providers/hyperbolic.py +4 -4
- huggingface_hub/inference/_providers/nebius.py +33 -5
- huggingface_hub/inference/_providers/novita.py +5 -5
- huggingface_hub/inference/_providers/nscale.py +44 -0
- huggingface_hub/inference/_providers/openai.py +3 -1
- huggingface_hub/inference/_providers/publicai.py +6 -0
- huggingface_hub/inference/_providers/replicate.py +31 -13
- huggingface_hub/inference/_providers/sambanova.py +18 -4
- huggingface_hub/inference/_providers/scaleway.py +28 -0
- huggingface_hub/inference/_providers/together.py +20 -5
- huggingface_hub/inference/_providers/wavespeed.py +138 -0
- huggingface_hub/inference/_providers/zai_org.py +17 -0
- huggingface_hub/lfs.py +33 -100
- huggingface_hub/repocard.py +34 -38
- huggingface_hub/repocard_data.py +57 -57
- huggingface_hub/serialization/__init__.py +0 -1
- huggingface_hub/serialization/_base.py +12 -15
- huggingface_hub/serialization/_dduf.py +8 -8
- huggingface_hub/serialization/_torch.py +69 -69
- huggingface_hub/utils/__init__.py +19 -8
- huggingface_hub/utils/_auth.py +7 -7
- huggingface_hub/utils/_cache_manager.py +92 -147
- huggingface_hub/utils/_chunk_utils.py +2 -3
- huggingface_hub/utils/_deprecation.py +1 -1
- huggingface_hub/utils/_dotenv.py +55 -0
- huggingface_hub/utils/_experimental.py +7 -5
- huggingface_hub/utils/_fixes.py +0 -10
- huggingface_hub/utils/_git_credential.py +5 -5
- huggingface_hub/utils/_headers.py +8 -30
- huggingface_hub/utils/_http.py +398 -239
- huggingface_hub/utils/_pagination.py +4 -4
- huggingface_hub/utils/_parsing.py +98 -0
- huggingface_hub/utils/_paths.py +5 -5
- huggingface_hub/utils/_runtime.py +61 -24
- huggingface_hub/utils/_safetensors.py +21 -21
- huggingface_hub/utils/_subprocess.py +9 -9
- huggingface_hub/utils/_telemetry.py +4 -4
- huggingface_hub/{commands/_cli_utils.py → utils/_terminal.py} +4 -4
- huggingface_hub/utils/_typing.py +25 -5
- huggingface_hub/utils/_validators.py +55 -74
- huggingface_hub/utils/_verification.py +167 -0
- huggingface_hub/utils/_xet.py +64 -17
- huggingface_hub/utils/_xet_progress_reporting.py +162 -0
- huggingface_hub/utils/insecure_hashlib.py +3 -5
- huggingface_hub/utils/logging.py +8 -11
- huggingface_hub/utils/tqdm.py +5 -4
- {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/METADATA +94 -85
- huggingface_hub-1.1.3.dist-info/RECORD +155 -0
- {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/WHEEL +1 -1
- huggingface_hub-1.1.3.dist-info/entry_points.txt +6 -0
- huggingface_hub/commands/delete_cache.py +0 -474
- huggingface_hub/commands/download.py +0 -200
- huggingface_hub/commands/huggingface_cli.py +0 -61
- huggingface_hub/commands/lfs.py +0 -200
- huggingface_hub/commands/repo_files.py +0 -128
- huggingface_hub/commands/scan_cache.py +0 -181
- huggingface_hub/commands/tag.py +0 -159
- huggingface_hub/commands/upload.py +0 -314
- huggingface_hub/commands/upload_large_folder.py +0 -129
- huggingface_hub/commands/user.py +0 -304
- huggingface_hub/commands/version.py +0 -37
- huggingface_hub/inference_api.py +0 -217
- huggingface_hub/keras_mixin.py +0 -500
- huggingface_hub/repository.py +0 -1477
- huggingface_hub/serialization/_tensorflow.py +0 -95
- huggingface_hub/utils/_hf_folder.py +0 -68
- huggingface_hub-0.31.0rc0.dist-info/RECORD +0 -135
- huggingface_hub-0.31.0rc0.dist-info/entry_points.txt +0 -6
- {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info/licenses}/LICENSE +0 -0
- {huggingface_hub-0.31.0rc0.dist-info → huggingface_hub-1.1.3.dist-info}/top_level.txt +0 -0
huggingface_hub/cli/jobs.py
@@ -0,0 +1,772 @@
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Contains commands to interact with jobs on the Hugging Face Hub.
+
+Usage:
+    # run a job
+    hf jobs run <image> <command>
+
+    # List running or completed jobs
+    hf jobs ps [-a] [-f key=value] [--format TEMPLATE]
+
+    # Stream logs from a job
+    hf jobs logs <job-id>
+
+    # Inspect detailed information about a job
+    hf jobs inspect <job-id>
+
+    # Cancel a running job
+    hf jobs cancel <job-id>
+
+    # Run a UV script
+    hf jobs uv run <script>
+
+    # Schedule a job
+    hf jobs scheduled run <schedule> <image> <command>
+
+    # List scheduled jobs
+    hf jobs scheduled ps [-a] [-f key=value] [--format TEMPLATE]
+
+    # Inspect a scheduled job
+    hf jobs scheduled inspect <scheduled_job_id>
+
+    # Suspend a scheduled job
+    hf jobs scheduled suspend <scheduled_job_id>
+
+    # Resume a scheduled job
+    hf jobs scheduled resume <scheduled_job_id>
+
+    # Delete a scheduled job
+    hf jobs scheduled delete <scheduled_job_id>
+
+"""
+
+import json
+import os
+import re
+from dataclasses import asdict
+from pathlib import Path
+from typing import Annotated, Dict, Optional, Union
+
+import typer
+
+from huggingface_hub import SpaceHardware, get_token
+from huggingface_hub.errors import HfHubHTTPError
+from huggingface_hub.utils import logging
+from huggingface_hub.utils._dotenv import load_dotenv
+
+from ._cli_utils import TokenOpt, get_hf_api, typer_factory
+
+
+logger = logging.get_logger(__name__)
+
+SUGGESTED_FLAVORS = [item.value for item in SpaceHardware if item.value != "zero-a10g"]
+
+# Common job-related options
+ImageArg = Annotated[
+    str,
+    typer.Argument(
+        help="The Docker image to use.",
+    ),
+]
+
+ImageOpt = Annotated[
+    Optional[str],
+    typer.Option(
+        help="Use a custom Docker image with `uv` installed.",
+    ),
+]
+
+FlavorOpt = Annotated[
+    Optional[SpaceHardware],
+    typer.Option(
+        help=f"Flavor for the hardware, as in HF Spaces. Defaults to `cpu-basic`. Possible values: {', '.join(SUGGESTED_FLAVORS)}.",
+    ),
+]
+
+EnvOpt = Annotated[
+    Optional[list[str]],
+    typer.Option(
+        "-e",
+        "--env",
+        help="Set environment variables. E.g. --env ENV=value",
+    ),
+]
+
+SecretsOpt = Annotated[
+    Optional[list[str]],
+    typer.Option(
+        "-s",
+        "--secrets",
+        help="Set secret environment variables. E.g. --secrets SECRET=value or `--secrets HF_TOKEN` to pass your Hugging Face token.",
+    ),
+]
+
+EnvFileOpt = Annotated[
+    Optional[str],
+    typer.Option(
+        "--env-file",
+        help="Read in a file of environment variables.",
+    ),
+]
+
+SecretsFileOpt = Annotated[
+    Optional[str],
+    typer.Option(
+        help="Read in a file of secret environment variables.",
+    ),
+]
+
+TimeoutOpt = Annotated[
+    Optional[str],
+    typer.Option(
+        help="Max duration: int/float with s (seconds, default), m (minutes), h (hours) or d (days).",
+    ),
+]
+
+DetachOpt = Annotated[
+    bool,
+    typer.Option(
+        "-d",
+        "--detach",
+        help="Run the Job in the background and print the Job ID.",
+    ),
+]
+
+NamespaceOpt = Annotated[
+    Optional[str],
+    typer.Option(
+        help="The namespace where the job will be running. Defaults to the current user's namespace.",
+    ),
+]
+
+WithOpt = Annotated[
+    Optional[list[str]],
+    typer.Option(
+        "--with",
+        help="Run with the given packages installed",
+    ),
+]
+
+PythonOpt = Annotated[
+    Optional[str],
+    typer.Option(
+        "-p",
+        "--python",
+        help="The Python interpreter to use for the run environment",
+    ),
+]
+
+SuspendOpt = Annotated[
+    Optional[bool],
+    typer.Option(
+        help="Suspend (pause) the scheduled Job",
+    ),
+]
+
+ConcurrencyOpt = Annotated[
+    Optional[bool],
+    typer.Option(
+        help="Allow multiple instances of this Job to run concurrently",
+    ),
+]
+
+ScheduleArg = Annotated[
+    str,
+    typer.Argument(
+        help="One of annually, yearly, monthly, weekly, daily, hourly, or a CRON schedule expression.",
+    ),
+]
+
+ScriptArg = Annotated[
+    str,
+    typer.Argument(
+        help="UV script to run (local file or URL)",
+    ),
+]
+
+ScriptArgsArg = Annotated[
+    Optional[list[str]],
+    typer.Argument(
+        help="Arguments for the script",
+    ),
+]
+
+CommandArg = Annotated[
+    list[str],
+    typer.Argument(
+        help="The command to run.",
+    ),
+]
+
+JobIdArg = Annotated[
+    str,
+    typer.Argument(
+        help="Job ID",
+    ),
+]
+
+ScheduledJobIdArg = Annotated[
+    str,
+    typer.Argument(
+        help="Scheduled Job ID",
+    ),
+]
+
+RepoOpt = Annotated[
+    Optional[str],
+    typer.Option(
+        help="Repository name for the script (creates ephemeral if not specified)",
+    ),
+]
+
+
+jobs_cli = typer_factory(help="Run and manage Jobs on the Hub.")
+
+
+@jobs_cli.command("run", help="Run a Job")
+def jobs_run(
+    image: ImageArg,
+    command: CommandArg,
+    env: EnvOpt = None,
+    secrets: SecretsOpt = None,
+    env_file: EnvFileOpt = None,
+    secrets_file: SecretsFileOpt = None,
+    flavor: FlavorOpt = None,
+    timeout: TimeoutOpt = None,
+    detach: DetachOpt = False,
+    namespace: NamespaceOpt = None,
+    token: TokenOpt = None,
+) -> None:
+    env_map: dict[str, Optional[str]] = {}
+    if env_file:
+        env_map.update(load_dotenv(Path(env_file).read_text(), environ=os.environ.copy()))
+    for env_value in env or []:
+        env_map.update(load_dotenv(env_value, environ=os.environ.copy()))
+
+    secrets_map: dict[str, Optional[str]] = {}
+    extended_environ = _get_extended_environ()
+    if secrets_file:
+        secrets_map.update(load_dotenv(Path(secrets_file).read_text(), environ=extended_environ))
+    for secret in secrets or []:
+        secrets_map.update(load_dotenv(secret, environ=extended_environ))
+
+    api = get_hf_api(token=token)
+    job = api.run_job(
+        image=image,
+        command=command,
+        env=env_map,
+        secrets=secrets_map,
+        flavor=flavor,
+        timeout=timeout,
+        namespace=namespace,
+    )
+    # Always print the job ID to the user
+    print(f"Job started with ID: {job.id}")
+    print(f"View at: {job.url}")
+
+    if detach:
+        return
+    # Now let's stream the logs
+    for log in api.fetch_job_logs(job_id=job.id):
+        print(log)
+
+
+@jobs_cli.command("logs", help="Fetch the logs of a Job")
+def jobs_logs(
+    job_id: JobIdArg,
+    namespace: NamespaceOpt = None,
+    token: TokenOpt = None,
+) -> None:
+    api = get_hf_api(token=token)
+    for log in api.fetch_job_logs(job_id=job_id, namespace=namespace):
+        print(log)
+
+
+def _matches_filters(job_properties: dict[str, str], filters: dict[str, str]) -> bool:
+    """Check if scheduled job matches all specified filters."""
+    for key, pattern in filters.items():
+        # Check if property exists
+        if key not in job_properties:
+            return False
+        # Support pattern matching with wildcards
+        if "*" in pattern or "?" in pattern:
+            # Convert glob pattern to regex
+            regex_pattern = pattern.replace("*", ".*").replace("?", ".")
+            if not re.search(f"^{regex_pattern}$", job_properties[key], re.IGNORECASE):
+                return False
+        # Simple substring matching
+        elif pattern.lower() not in job_properties[key].lower():
+            return False
+    return True
+
+
+def _print_output(rows: list[list[Union[str, int]]], headers: list[str], fmt: Optional[str]) -> None:
+    """Print output according to the chosen format."""
+    if fmt:
+        # Use custom template if provided
+        template = fmt
+        for row in rows:
+            line = template
+            for i, field in enumerate(["id", "image", "command", "created", "status"]):
+                placeholder = f"{{{{.{field}}}}}"
+                if placeholder in line:
+                    line = line.replace(placeholder, str(row[i]))
+            print(line)
+    else:
+        # Default tabular format
+        print(_tabulate(rows, headers=headers))
+
+
+@jobs_cli.command("ps", help="List Jobs")
+def jobs_ps(
+    all: Annotated[
+        bool,
+        typer.Option(
+            "-a",
+            "--all",
+            help="Show all Jobs (default shows just running)",
+        ),
+    ] = False,
+    namespace: NamespaceOpt = None,
+    token: TokenOpt = None,
+    filter: Annotated[
+        Optional[list[str]],
+        typer.Option(
+            "-f",
+            "--filter",
+            help="Filter output based on conditions provided (format: key=value)",
+        ),
+    ] = None,
+    format: Annotated[
+        Optional[str],
+        typer.Option(
+            help="Format output using a custom template",
+        ),
+    ] = None,
+) -> None:
+    try:
+        api = get_hf_api(token=token)
+        # Fetch jobs data
+        jobs = api.list_jobs(namespace=namespace)
+        # Define table headers
+        table_headers = ["JOB ID", "IMAGE/SPACE", "COMMAND", "CREATED", "STATUS"]
+        rows: list[list[Union[str, int]]] = []
+
+        filters: dict[str, str] = {}
+        for f in filter or []:
+            if "=" in f:
+                key, value = f.split("=", 1)
+                filters[key.lower()] = value
+            else:
+                print(f"Warning: Ignoring invalid filter format '{f}'. Use key=value format.")
+        # Process jobs data
+        for job in jobs:
+            # Extract job data for filtering
+            status = job.status.stage if job.status else "UNKNOWN"
+            if not all and status not in ("RUNNING", "UPDATING"):
+                # Skip job if not all jobs should be shown and status doesn't match criteria
+                continue
+            # Extract job data for output
+            job_id = job.id
+
+            # Extract image or space information
+            image_or_space = job.docker_image or "N/A"
+
+            # Extract and format command
+            cmd = job.command or []
+            command_str = " ".join(cmd) if cmd else "N/A"
+
+            # Extract creation time
+            created_at = job.created_at.strftime("%Y-%m-%d %H:%M:%S") if job.created_at else "N/A"
+
+            # Create a dict with all job properties for filtering
+            props = {"id": job_id, "image": image_or_space, "status": status.lower(), "command": command_str}
+            if not _matches_filters(props, filters):
+                continue
+
+            # Create row
+            rows.append([job_id, image_or_space, command_str, created_at, status])
+
+        # Handle empty results
+        if not rows:
+            filters_msg = (
+                f" matching filters: {', '.join([f'{k}={v}' for k, v in filters.items()])}" if filters else ""
+            )
+            print(f"No jobs found{filters_msg}")
+            return
+        # Apply custom format if provided or use default tabular format
+        _print_output(rows, table_headers, format)
+
+    except HfHubHTTPError as e:
+        print(f"Error fetching jobs data: {e}")
+    except (KeyError, ValueError, TypeError) as e:
+        print(f"Error processing jobs data: {e}")
+    except Exception as e:
+        print(f"Unexpected error - {type(e).__name__}: {e}")
+
+
+@jobs_cli.command("inspect", help="Display detailed information on one or more Jobs")
+def jobs_inspect(
+    job_ids: Annotated[
+        list[str],
+        typer.Argument(
+            help="The jobs to inspect",
+        ),
+    ],
+    namespace: NamespaceOpt = None,
+    token: TokenOpt = None,
+) -> None:
+    api = get_hf_api(token=token)
+    jobs = [api.inspect_job(job_id=job_id, namespace=namespace) for job_id in job_ids]
+    print(json.dumps([asdict(job) for job in jobs], indent=4, default=str))
+
+
+@jobs_cli.command("cancel", help="Cancel a Job")
+def jobs_cancel(
+    job_id: JobIdArg,
+    namespace: NamespaceOpt = None,
+    token: TokenOpt = None,
+) -> None:
+    api = get_hf_api(token=token)
+    api.cancel_job(job_id=job_id, namespace=namespace)
+
+
+uv_app = typer_factory(help="Run UV scripts (Python with inline dependencies) on HF infrastructure")
+jobs_cli.add_typer(uv_app, name="uv")
+
+
+@uv_app.command("run", help="Run a UV script (local file or URL) on HF infrastructure")
+def jobs_uv_run(
+    script: ScriptArg,
+    script_args: ScriptArgsArg = None,
+    image: ImageOpt = None,
+    repo: RepoOpt = None,
+    flavor: FlavorOpt = None,
+    env: EnvOpt = None,
+    secrets: SecretsOpt = None,
+    env_file: EnvFileOpt = None,
+    secrets_file: SecretsFileOpt = None,
+    timeout: TimeoutOpt = None,
+    detach: DetachOpt = False,
+    namespace: NamespaceOpt = None,
+    token: TokenOpt = None,
+    with_: WithOpt = None,
+    python: PythonOpt = None,
+) -> None:
+    env_map: dict[str, Optional[str]] = {}
+    if env_file:
+        env_map.update(load_dotenv(Path(env_file).read_text(), environ=os.environ.copy()))
+    for env_value in env or []:
+        env_map.update(load_dotenv(env_value, environ=os.environ.copy()))
+    secrets_map: dict[str, Optional[str]] = {}
+    extended_environ = _get_extended_environ()
+    if secrets_file:
+        secrets_map.update(load_dotenv(Path(secrets_file).read_text(), environ=extended_environ))
+    for secret in secrets or []:
+        secrets_map.update(load_dotenv(secret, environ=extended_environ))
+
+    api = get_hf_api(token=token)
+    job = api.run_uv_job(
+        script=script,
+        script_args=script_args or [],
+        dependencies=with_,
+        python=python,
+        image=image,
+        env=env_map,
+        secrets=secrets_map,
+        flavor=flavor,  # type: ignore[arg-type]
+        timeout=timeout,
+        namespace=namespace,
+        _repo=repo,
+    )
+    # Always print the job ID to the user
+    print(f"Job started with ID: {job.id}")
+    print(f"View at: {job.url}")
+    if detach:
+        return
+    # Now let's stream the logs
+    for log in api.fetch_job_logs(job_id=job.id):
+        print(log)
+
+
+scheduled_app = typer_factory(help="Create and manage scheduled Jobs on the Hub.")
+jobs_cli.add_typer(scheduled_app, name="scheduled")
+
+
+@scheduled_app.command("run", help="Schedule a Job")
+def scheduled_run(
+    schedule: ScheduleArg,
+    image: ImageArg,
+    command: CommandArg,
+    suspend: SuspendOpt = None,
+    concurrency: ConcurrencyOpt = None,
+    env: EnvOpt = None,
+    secrets: SecretsOpt = None,
+    env_file: EnvFileOpt = None,
+    secrets_file: SecretsFileOpt = None,
+    flavor: FlavorOpt = None,
+    timeout: TimeoutOpt = None,
+    namespace: NamespaceOpt = None,
+    token: TokenOpt = None,
+) -> None:
+    env_map: dict[str, Optional[str]] = {}
+    if env_file:
+        env_map.update(load_dotenv(Path(env_file).read_text(), environ=os.environ.copy()))
+    for env_value in env or []:
+        env_map.update(load_dotenv(env_value, environ=os.environ.copy()))
+    secrets_map: dict[str, Optional[str]] = {}
+    extended_environ = _get_extended_environ()
+    if secrets_file:
+        secrets_map.update(load_dotenv(Path(secrets_file).read_text(), environ=extended_environ))
+    for secret in secrets or []:
+        secrets_map.update(load_dotenv(secret, environ=extended_environ))
+
+    api = get_hf_api(token=token)
+    scheduled_job = api.create_scheduled_job(
+        image=image,
+        command=command,
+        schedule=schedule,
+        suspend=suspend,
+        concurrency=concurrency,
+        env=env_map,
+        secrets=secrets_map,
+        flavor=flavor,
+        timeout=timeout,
+        namespace=namespace,
+    )
+    print(f"Scheduled Job created with ID: {scheduled_job.id}")
+
+
+@scheduled_app.command("ps", help="List scheduled Jobs")
+def scheduled_ps(
+    all: Annotated[
+        bool,
+        typer.Option(
+            "-a",
+            "--all",
+            help="Show all scheduled Jobs (default hides suspended)",
+        ),
+    ] = False,
+    namespace: NamespaceOpt = None,
+    token: TokenOpt = None,
+    filter: Annotated[
+        Optional[list[str]],
+        typer.Option(
+            "-f",
+            "--filter",
+            help="Filter output based on conditions provided (format: key=value)",
+        ),
+    ] = None,
+    format: Annotated[
+        Optional[str],
+        typer.Option(
+            "--format",
+            help="Format output using a custom template",
+        ),
+    ] = None,
+) -> None:
+    try:
+        api = get_hf_api(token=token)
+        scheduled_jobs = api.list_scheduled_jobs(namespace=namespace)
+        table_headers = ["ID", "SCHEDULE", "IMAGE/SPACE", "COMMAND", "LAST RUN", "NEXT RUN", "SUSPEND"]
+        rows: list[list[Union[str, int]]] = []
+        filters: dict[str, str] = {}
+        for f in filter or []:
+            if "=" in f:
+                key, value = f.split("=", 1)
+                filters[key.lower()] = value
+            else:
+                print(f"Warning: Ignoring invalid filter format '{f}'. Use key=value format.")
+
+        for scheduled_job in scheduled_jobs:
+            suspend = scheduled_job.suspend or False
+            if not all and suspend:
+                continue
+            sj_id = scheduled_job.id
+            schedule = scheduled_job.schedule or "N/A"
+            image_or_space = scheduled_job.job_spec.docker_image or "N/A"
+            cmd = scheduled_job.job_spec.command or []
+            command_str = " ".join(cmd) if cmd else "N/A"
+            last_job_at = (
+                scheduled_job.status.last_job.at.strftime("%Y-%m-%d %H:%M:%S")
+                if scheduled_job.status.last_job
+                else "N/A"
+            )
+            next_job_run_at = (
+                scheduled_job.status.next_job_run_at.strftime("%Y-%m-%d %H:%M:%S")
+                if scheduled_job.status.next_job_run_at
+                else "N/A"
+            )
+            props = {"id": sj_id, "image": image_or_space, "suspend": str(suspend), "command": command_str}
+            if not _matches_filters(props, filters):
+                continue
+            rows.append([sj_id, schedule, image_or_space, command_str, last_job_at, next_job_run_at, suspend])
+
+        if not rows:
+            filters_msg = (
+                f" matching filters: {', '.join([f'{k}={v}' for k, v in filters.items()])}" if filters else ""
+            )
+            print(f"No scheduled jobs found{filters_msg}")
+            return
+        _print_output(rows, table_headers, format)
+
+    except HfHubHTTPError as e:
+        print(f"Error fetching scheduled jobs data: {e}")
+    except (KeyError, ValueError, TypeError) as e:
+        print(f"Error processing scheduled jobs data: {e}")
+    except Exception as e:
+        print(f"Unexpected error - {type(e).__name__}: {e}")
+
+
+@scheduled_app.command("inspect", help="Display detailed information on one or more scheduled Jobs")
+def scheduled_inspect(
+    scheduled_job_ids: Annotated[
+        list[str],
+        typer.Argument(
+            help="The scheduled jobs to inspect",
+        ),
+    ],
+    namespace: NamespaceOpt = None,
+    token: TokenOpt = None,
+) -> None:
+    api = get_hf_api(token=token)
+    scheduled_jobs = [
+        api.inspect_scheduled_job(scheduled_job_id=scheduled_job_id, namespace=namespace)
+        for scheduled_job_id in scheduled_job_ids
+    ]
+    print(json.dumps([asdict(scheduled_job) for scheduled_job in scheduled_jobs], indent=4, default=str))
+
+
+@scheduled_app.command("delete", help="Delete a scheduled Job")
+def scheduled_delete(
+    scheduled_job_id: ScheduledJobIdArg,
+    namespace: NamespaceOpt = None,
+    token: TokenOpt = None,
+) -> None:
+    api = get_hf_api(token=token)
+    api.delete_scheduled_job(scheduled_job_id=scheduled_job_id, namespace=namespace)
+
+
+@scheduled_app.command("suspend", help="Suspend (pause) a scheduled Job")
+def scheduled_suspend(
+    scheduled_job_id: ScheduledJobIdArg,
+    namespace: NamespaceOpt = None,
+    token: TokenOpt = None,
+) -> None:
+    api = get_hf_api(token=token)
+    api.suspend_scheduled_job(scheduled_job_id=scheduled_job_id, namespace=namespace)
+
+
+@scheduled_app.command("resume", help="Resume (unpause) a scheduled Job")
+def scheduled_resume(
+    scheduled_job_id: ScheduledJobIdArg,
+    namespace: NamespaceOpt = None,
+    token: TokenOpt = None,
+) -> None:
+    api = get_hf_api(token=token)
+    api.resume_scheduled_job(scheduled_job_id=scheduled_job_id, namespace=namespace)
+
+
+scheduled_uv_app = typer_factory(help="Schedule UV scripts on HF infrastructure")
+scheduled_app.add_typer(scheduled_uv_app, name="uv")
+
+
+@scheduled_uv_app.command("run", help="Run a UV script (local file or URL) on HF infrastructure")
+def scheduled_uv_run(
+    schedule: ScheduleArg,
+    script: ScriptArg,
+    script_args: ScriptArgsArg = None,
+    suspend: SuspendOpt = None,
+    concurrency: ConcurrencyOpt = None,
+    image: ImageOpt = None,
+    repo: RepoOpt = None,
+    flavor: FlavorOpt = None,
+    env: EnvOpt = None,
+    secrets: SecretsOpt = None,
+    env_file: EnvFileOpt = None,
+    secrets_file: SecretsFileOpt = None,
+    timeout: TimeoutOpt = None,
+    namespace: NamespaceOpt = None,
+    token: TokenOpt = None,
+    with_: WithOpt = None,
+    python: PythonOpt = None,
+) -> None:
+    env_map: dict[str, Optional[str]] = {}
+    if env_file:
+        env_map.update(load_dotenv(Path(env_file).read_text(), environ=os.environ.copy()))
+    for env_value in env or []:
+        env_map.update(load_dotenv(env_value, environ=os.environ.copy()))
+    secrets_map: dict[str, Optional[str]] = {}
+    extended_environ = _get_extended_environ()
+    if secrets_file:
+        secrets_map.update(load_dotenv(Path(secrets_file).read_text(), environ=extended_environ))
+    for secret in secrets or []:
+        secrets_map.update(load_dotenv(secret, environ=extended_environ))
+
+    api = get_hf_api(token=token)
+    job = api.create_scheduled_uv_job(
+        script=script,
+        script_args=script_args or [],
+        schedule=schedule,
+        suspend=suspend,
+        concurrency=concurrency,
+        dependencies=with_,
+        python=python,
+        image=image,
+        env=env_map,
+        secrets=secrets_map,
+        flavor=flavor,  # type: ignore[arg-type]
+        timeout=timeout,
+        namespace=namespace,
+        _repo=repo,
+    )
+    print(f"Scheduled Job created with ID: {job.id}")
+
+
+### UTILS
+
+
+def _tabulate(rows: list[list[Union[str, int]]], headers: list[str]) -> str:
+    """
+    Inspired by:
+
+    - stackoverflow.com/a/8356620/593036
+    - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
+    """
+    col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
+    terminal_width = max(os.get_terminal_size().columns, len(headers) * 12)
+    while len(headers) + sum(col_widths) > terminal_width:
+        col_to_minimize = col_widths.index(max(col_widths))
+        col_widths[col_to_minimize] //= 2
+        if len(headers) + sum(col_widths) <= terminal_width:
+            col_widths[col_to_minimize] = terminal_width - sum(col_widths) - len(headers) + col_widths[col_to_minimize]
+    row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
+    lines = []
+    lines.append(row_format.format(*headers))
+    lines.append(row_format.format(*["-" * w for w in col_widths]))
+    for row in rows:
+        row_format_args = [
+            str(x)[: col_width - 3] + "..." if len(str(x)) > col_width else str(x)
+            for x, col_width in zip(row, col_widths)
+        ]
+        lines.append(row_format.format(*row_format_args))
+    return "\n".join(lines)
+
+
+def _get_extended_environ() -> Dict[str, str]:
+    extended_environ = os.environ.copy()
+    if (token := get_token()) is not None:
+        extended_environ["HF_TOKEN"] = token
+    return extended_environ
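
The new `hf jobs run` command is a thin CLI layer over the Jobs API added in `huggingface_hub/_jobs_api.py`. Below is a minimal sketch of the equivalent programmatic path, assuming `get_hf_api` returns a standard `HfApi` instance and that `run_job` / `fetch_job_logs` accept exactly the keyword arguments the CLI passes above; the image and command here are illustrative, not from the diff:

```python
from huggingface_hub import HfApi

api = HfApi()  # token resolved from HF_TOKEN or the local login cache

# Roughly what `hf jobs run python:3.12 python -c "print('hello')"` does:
job = api.run_job(
    image="python:3.12",                          # illustrative image
    command=["python", "-c", "print('hello')"],   # illustrative command
    env={},                                       # populated from -e/--env and --env-file
    secrets={},                                   # populated from -s/--secrets and --secrets-file
    flavor=None,                                  # hardware flavor; defaults to cpu-basic
    timeout=None,
    namespace=None,                               # defaults to the current user's namespace
)
print(f"Job started with ID: {job.id}")

# Roughly what `hf jobs logs <job-id>` does:
for log in api.fetch_job_logs(job_id=job.id):
    print(log)
```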
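
The `--format` option of `ps` accepts Docker-style `{{.field}}` templates rather than a full template engine: `_print_output` simply substitutes the five known placeholders per row. A small self-contained sketch of that substitution, with made-up row values:

```python
# Equivalent of: hf jobs ps --format "{{.id}}: {{.status}}"
row = ["job-abc123", "python:3.12", "python train.py", "2025-01-01 12:00:00", "RUNNING"]
line = "{{.id}}: {{.status}}"
for i, field in enumerate(["id", "image", "command", "created", "status"]):
    placeholder = f"{{{{.{field}}}}}"  # renders as {{.id}}, {{.image}}, ...
    if placeholder in line:
        line = line.replace(placeholder, str(row[i]))
print(line)  # job-abc123: RUNNING
```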