huggingface-hub 0.36.0rc0__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of huggingface-hub might be problematic.
- huggingface_hub/__init__.py +33 -45
- huggingface_hub/_commit_api.py +39 -43
- huggingface_hub/_commit_scheduler.py +11 -8
- huggingface_hub/_inference_endpoints.py +8 -8
- huggingface_hub/_jobs_api.py +20 -20
- huggingface_hub/_login.py +17 -43
- huggingface_hub/_oauth.py +8 -8
- huggingface_hub/_snapshot_download.py +135 -50
- huggingface_hub/_space_api.py +4 -4
- huggingface_hub/_tensorboard_logger.py +5 -5
- huggingface_hub/_upload_large_folder.py +18 -32
- huggingface_hub/_webhooks_payload.py +3 -3
- huggingface_hub/_webhooks_server.py +2 -2
- huggingface_hub/cli/__init__.py +0 -14
- huggingface_hub/cli/_cli_utils.py +143 -39
- huggingface_hub/cli/auth.py +105 -171
- huggingface_hub/cli/cache.py +594 -361
- huggingface_hub/cli/download.py +120 -112
- huggingface_hub/cli/hf.py +38 -41
- huggingface_hub/cli/jobs.py +689 -1017
- huggingface_hub/cli/lfs.py +120 -143
- huggingface_hub/cli/repo.py +282 -216
- huggingface_hub/cli/repo_files.py +50 -84
- huggingface_hub/cli/system.py +6 -25
- huggingface_hub/cli/upload.py +198 -220
- huggingface_hub/cli/upload_large_folder.py +91 -106
- huggingface_hub/community.py +5 -5
- huggingface_hub/constants.py +17 -52
- huggingface_hub/dataclasses.py +135 -21
- huggingface_hub/errors.py +47 -30
- huggingface_hub/fastai_utils.py +8 -9
- huggingface_hub/file_download.py +351 -303
- huggingface_hub/hf_api.py +398 -570
- huggingface_hub/hf_file_system.py +101 -66
- huggingface_hub/hub_mixin.py +32 -54
- huggingface_hub/inference/_client.py +177 -162
- huggingface_hub/inference/_common.py +38 -54
- huggingface_hub/inference/_generated/_async_client.py +218 -258
- huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +3 -3
- huggingface_hub/inference/_generated/types/base.py +10 -7
- huggingface_hub/inference/_generated/types/chat_completion.py +16 -16
- huggingface_hub/inference/_generated/types/depth_estimation.py +2 -2
- huggingface_hub/inference/_generated/types/document_question_answering.py +2 -2
- huggingface_hub/inference/_generated/types/feature_extraction.py +2 -2
- huggingface_hub/inference/_generated/types/fill_mask.py +2 -2
- huggingface_hub/inference/_generated/types/sentence_similarity.py +3 -3
- huggingface_hub/inference/_generated/types/summarization.py +2 -2
- huggingface_hub/inference/_generated/types/table_question_answering.py +4 -4
- huggingface_hub/inference/_generated/types/text2text_generation.py +2 -2
- huggingface_hub/inference/_generated/types/text_generation.py +10 -10
- huggingface_hub/inference/_generated/types/text_to_video.py +2 -2
- huggingface_hub/inference/_generated/types/token_classification.py +2 -2
- huggingface_hub/inference/_generated/types/translation.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +2 -2
- huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +1 -3
- huggingface_hub/inference/_mcp/agent.py +3 -3
- huggingface_hub/inference/_mcp/constants.py +1 -2
- huggingface_hub/inference/_mcp/mcp_client.py +33 -22
- huggingface_hub/inference/_mcp/types.py +10 -10
- huggingface_hub/inference/_mcp/utils.py +4 -4
- huggingface_hub/inference/_providers/__init__.py +12 -4
- huggingface_hub/inference/_providers/_common.py +62 -24
- huggingface_hub/inference/_providers/black_forest_labs.py +6 -6
- huggingface_hub/inference/_providers/cohere.py +3 -3
- huggingface_hub/inference/_providers/fal_ai.py +25 -25
- huggingface_hub/inference/_providers/featherless_ai.py +4 -4
- huggingface_hub/inference/_providers/fireworks_ai.py +3 -3
- huggingface_hub/inference/_providers/hf_inference.py +13 -13
- huggingface_hub/inference/_providers/hyperbolic.py +4 -4
- huggingface_hub/inference/_providers/nebius.py +10 -10
- huggingface_hub/inference/_providers/novita.py +5 -5
- huggingface_hub/inference/_providers/nscale.py +4 -4
- huggingface_hub/inference/_providers/replicate.py +15 -15
- huggingface_hub/inference/_providers/sambanova.py +6 -6
- huggingface_hub/inference/_providers/together.py +7 -7
- huggingface_hub/lfs.py +21 -94
- huggingface_hub/repocard.py +15 -16
- huggingface_hub/repocard_data.py +57 -57
- huggingface_hub/serialization/__init__.py +0 -1
- huggingface_hub/serialization/_base.py +9 -9
- huggingface_hub/serialization/_dduf.py +7 -7
- huggingface_hub/serialization/_torch.py +28 -28
- huggingface_hub/utils/__init__.py +11 -6
- huggingface_hub/utils/_auth.py +5 -5
- huggingface_hub/utils/_cache_manager.py +49 -74
- huggingface_hub/utils/_deprecation.py +1 -1
- huggingface_hub/utils/_dotenv.py +3 -3
- huggingface_hub/utils/_fixes.py +0 -10
- huggingface_hub/utils/_git_credential.py +3 -3
- huggingface_hub/utils/_headers.py +7 -29
- huggingface_hub/utils/_http.py +371 -208
- huggingface_hub/utils/_pagination.py +4 -4
- huggingface_hub/utils/_parsing.py +98 -0
- huggingface_hub/utils/_paths.py +5 -5
- huggingface_hub/utils/_runtime.py +59 -23
- huggingface_hub/utils/_safetensors.py +21 -21
- huggingface_hub/utils/_subprocess.py +9 -9
- huggingface_hub/utils/_telemetry.py +3 -3
- huggingface_hub/{commands/_cli_utils.py → utils/_terminal.py} +4 -9
- huggingface_hub/utils/_typing.py +3 -3
- huggingface_hub/utils/_validators.py +53 -72
- huggingface_hub/utils/_xet.py +16 -16
- huggingface_hub/utils/_xet_progress_reporting.py +1 -1
- huggingface_hub/utils/insecure_hashlib.py +3 -9
- huggingface_hub/utils/tqdm.py +3 -3
- {huggingface_hub-0.36.0rc0.dist-info → huggingface_hub-1.0.0.dist-info}/METADATA +16 -35
- huggingface_hub-1.0.0.dist-info/RECORD +152 -0
- {huggingface_hub-0.36.0rc0.dist-info → huggingface_hub-1.0.0.dist-info}/entry_points.txt +0 -1
- huggingface_hub/commands/__init__.py +0 -27
- huggingface_hub/commands/delete_cache.py +0 -476
- huggingface_hub/commands/download.py +0 -204
- huggingface_hub/commands/env.py +0 -39
- huggingface_hub/commands/huggingface_cli.py +0 -65
- huggingface_hub/commands/lfs.py +0 -200
- huggingface_hub/commands/repo.py +0 -151
- huggingface_hub/commands/repo_files.py +0 -132
- huggingface_hub/commands/scan_cache.py +0 -183
- huggingface_hub/commands/tag.py +0 -161
- huggingface_hub/commands/upload.py +0 -318
- huggingface_hub/commands/upload_large_folder.py +0 -131
- huggingface_hub/commands/user.py +0 -208
- huggingface_hub/commands/version.py +0 -40
- huggingface_hub/inference_api.py +0 -217
- huggingface_hub/keras_mixin.py +0 -497
- huggingface_hub/repository.py +0 -1471
- huggingface_hub/serialization/_tensorflow.py +0 -92
- huggingface_hub/utils/_hf_folder.py +0 -68
- huggingface_hub-0.36.0rc0.dist-info/RECORD +0 -170
- {huggingface_hub-0.36.0rc0.dist-info → huggingface_hub-1.0.0.dist-info}/LICENSE +0 -0
- {huggingface_hub-0.36.0rc0.dist-info → huggingface_hub-1.0.0.dist-info}/WHEEL +0 -0
- {huggingface_hub-0.36.0rc0.dist-info → huggingface_hub-1.0.0.dist-info}/top_level.txt +0 -0
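
The headline change in this release is the CLI migration: the argparse-based huggingface_hub/commands/* modules are deleted and replaced by typer-based huggingface_hub/cli/* modules. The jobs.py diff below shows the pattern used throughout: shared flags are declared once as Annotated type aliases and reused as plain function parameters across commands. A minimal self-contained sketch of that pattern, assuming stand-ins for the typer_factory and TokenOpt helpers that the real code imports from cli/_cli_utils.py:

from typing import Annotated, Optional

import typer


# Stand-in for cli/_cli_utils.typer_factory: builds a Typer app with common settings.
def typer_factory(help: str) -> typer.Typer:
    return typer.Typer(help=help, no_args_is_help=True)


# Reusable option alias, declared once and shared by every command.
TokenOpt = Annotated[
    Optional[str],
    typer.Option(help="A User Access Token generated from https://huggingface.co/settings/tokens"),
]

jobs_cli = typer_factory(help="Run and manage Jobs on the Hub.")


@jobs_cli.command("cancel", help="Cancel a Job")
def jobs_cancel(
    job_id: Annotated[str, typer.Argument(help="Job ID")],
    token: TokenOpt = None,
) -> None:
    # Demo body only; the real command calls HfApi.cancel_job().
    print(f"would cancel {job_id} (token set: {token is not None})")


if __name__ == "__main__":
    jobs_cli()

Declaring options as reusable Annotated aliases is what lets jobs run, uv run, and the scheduled subcommands in the diff below share identical --namespace/--token/--env behavior without re-registering each flag on every subparser, as the deleted argparse classes had to.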
huggingface_hub/cli/jobs.py
CHANGED
|
@@ -28,1073 +28,745 @@ Usage:
|
|
|
28
28
|
|
|
29
29
|
# Cancel a running job
|
|
30
30
|
hf jobs cancel <job-id>
|
|
31
|
+
|
|
32
|
+
# Run a UV script
|
|
33
|
+
hf jobs uv run <script>
|
|
34
|
+
|
|
35
|
+
# Schedule a job
|
|
36
|
+
hf jobs scheduled run <schedule> <image> <command>
|
|
37
|
+
|
|
38
|
+
# List scheduled jobs
|
|
39
|
+
hf jobs scheduled ps [-a] [-f key=value] [--format TEMPLATE]
|
|
40
|
+
|
|
41
|
+
# Inspect a scheduled job
|
|
42
|
+
hf jobs scheduled inspect <scheduled_job_id>
|
|
43
|
+
|
|
44
|
+
# Suspend a scheduled job
|
|
45
|
+
hf jobs scheduled suspend <scheduled_job_id>
|
|
46
|
+
|
|
47
|
+
# Resume a scheduled job
|
|
48
|
+
hf jobs scheduled resume <scheduled_job_id>
|
|
49
|
+
|
|
50
|
+
# Delete a scheduled job
|
|
51
|
+
hf jobs scheduled delete <scheduled_job_id>
|
|
52
|
+
|
|
31
53
|
"""
|
|
32
54
|
|
|
33
55
|
import json
|
|
34
56
|
import os
|
|
35
57
|
import re
|
|
36
|
-
from argparse import Namespace, _SubParsersAction
|
|
37
58
|
from dataclasses import asdict
|
|
38
59
|
from pathlib import Path
|
|
39
|
-
from typing import
|
|
60
|
+
from typing import Annotated, Dict, Optional, Union
|
|
40
61
|
|
|
41
|
-
import
|
|
62
|
+
import typer
|
|
42
63
|
|
|
43
|
-
from huggingface_hub import
|
|
64
|
+
from huggingface_hub import SpaceHardware, get_token
|
|
65
|
+
from huggingface_hub.errors import HfHubHTTPError
|
|
44
66
|
from huggingface_hub.utils import logging
|
|
45
67
|
from huggingface_hub.utils._dotenv import load_dotenv
|
|
46
68
|
|
|
47
|
-
from . import
|
|
69
|
+
from ._cli_utils import TokenOpt, get_hf_api, typer_factory
|
|
48
70
|
|
|
49
71
|
|
|
50
72
|
logger = logging.get_logger(__name__)
|
|
51
73
|
|
|
52
74
|
SUGGESTED_FLAVORS = [item.value for item in SpaceHardware if item.value != "zero-a10g"]
|
|
53
75
|
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
76
|
+
# Common job-related options
|
|
77
|
+
ImageArg = Annotated[
|
|
78
|
+
str,
|
|
79
|
+
typer.Argument(
|
|
80
|
+
help="The Docker image to use.",
|
|
81
|
+
),
|
|
82
|
+
]
|
|
83
|
+
|
|
84
|
+
ImageOpt = Annotated[
|
|
85
|
+
Optional[str],
|
|
86
|
+
typer.Option(
|
|
87
|
+
help="Use a custom Docker image with `uv` installed.",
|
|
88
|
+
),
|
|
89
|
+
]
|
|
90
|
+
|
|
91
|
+
FlavorOpt = Annotated[
|
|
92
|
+
Optional[SpaceHardware],
|
|
93
|
+
typer.Option(
|
|
94
|
+
help=f"Flavor for the hardware, as in HF Spaces. Defaults to `cpu-basic`. Possible values: {', '.join(SUGGESTED_FLAVORS)}.",
|
|
95
|
+
),
|
|
96
|
+
]
|
|
97
|
+
|
|
98
|
+
EnvOpt = Annotated[
|
|
99
|
+
Optional[list[str]],
|
|
100
|
+
typer.Option(
|
|
101
|
+
"-e",
|
|
102
|
+
"--env",
|
|
103
|
+
help="Set environment variables. E.g. --env ENV=value",
|
|
104
|
+
),
|
|
105
|
+
]
|
|
106
|
+
|
|
107
|
+
SecretsOpt = Annotated[
|
|
108
|
+
Optional[list[str]],
|
|
109
|
+
typer.Option(
|
|
110
|
+
"-s",
|
|
111
|
+
"--secrets",
|
|
112
|
+
help="Set secret environment variables. E.g. --secrets SECRET=value or `--secrets HF_TOKEN` to pass your Hugging Face token.",
|
|
113
|
+
),
|
|
114
|
+
]
|
|
115
|
+
|
|
116
|
+
EnvFileOpt = Annotated[
|
|
117
|
+
Optional[str],
|
|
118
|
+
typer.Option(
|
|
119
|
+
"--env-file",
|
|
120
|
+
help="Read in a file of environment variables.",
|
|
121
|
+
),
|
|
122
|
+
]
|
|
123
|
+
|
|
124
|
+
SecretsFileOpt = Annotated[
|
|
125
|
+
Optional[str],
|
|
126
|
+
typer.Option(
|
|
127
|
+
help="Read in a file of secret environment variables.",
|
|
128
|
+
),
|
|
129
|
+
]
|
|
130
|
+
|
|
131
|
+
TimeoutOpt = Annotated[
|
|
132
|
+
Optional[str],
|
|
133
|
+
typer.Option(
|
|
134
|
+
help="Max duration: int/float with s (seconds, default), m (minutes), h (hours) or d (days).",
|
|
135
|
+
),
|
|
136
|
+
]
|
|
137
|
+
|
|
138
|
+
DetachOpt = Annotated[
|
|
139
|
+
bool,
|
|
140
|
+
typer.Option(
|
|
141
|
+
"-d",
|
|
142
|
+
"--detach",
|
|
143
|
+
help="Run the Job in the background and print the Job ID.",
|
|
144
|
+
),
|
|
145
|
+
]
|
|
146
|
+
|
|
147
|
+
NamespaceOpt = Annotated[
|
|
148
|
+
Optional[str],
|
|
149
|
+
typer.Option(
|
|
150
|
+
help="The namespace where the job will be running. Defaults to the current user's namespace.",
|
|
151
|
+
),
|
|
152
|
+
]
|
|
153
|
+
|
|
154
|
+
WithOpt = Annotated[
|
|
155
|
+
Optional[list[str]],
|
|
156
|
+
typer.Option(
|
|
157
|
+
"--with",
|
|
158
|
+
help="Run with the given packages installed",
|
|
159
|
+
),
|
|
160
|
+
]
|
|
161
|
+
|
|
162
|
+
PythonOpt = Annotated[
|
|
163
|
+
Optional[str],
|
|
164
|
+
typer.Option(
|
|
165
|
+
"-p",
|
|
166
|
+
"--python",
|
|
167
|
+
help="The Python interpreter to use for the run environment",
|
|
168
|
+
),
|
|
169
|
+
]
|
|
170
|
+
|
|
171
|
+
SuspendOpt = Annotated[
|
|
172
|
+
Optional[bool],
|
|
173
|
+
typer.Option(
|
|
174
|
+
help="Suspend (pause) the scheduled Job",
|
|
175
|
+
),
|
|
176
|
+
]
|
|
177
|
+
|
|
178
|
+
ConcurrencyOpt = Annotated[
|
|
179
|
+
Optional[bool],
|
|
180
|
+
typer.Option(
|
|
181
|
+
help="Allow multiple instances of this Job to run concurrently",
|
|
182
|
+
),
|
|
183
|
+
]
|
|
184
|
+
|
|
185
|
+
ScheduleArg = Annotated[
|
|
186
|
+
str,
|
|
187
|
+
typer.Argument(
|
|
188
|
+
help="One of annually, yearly, monthly, weekly, daily, hourly, or a CRON schedule expression.",
|
|
189
|
+
),
|
|
190
|
+
]
|
|
191
|
+
|
|
192
|
+
ScriptArg = Annotated[
|
|
193
|
+
str,
|
|
194
|
+
typer.Argument(
|
|
195
|
+
help="UV script to run (local file or URL)",
|
|
196
|
+
),
|
|
197
|
+
]
|
|
198
|
+
|
|
199
|
+
ScriptArgsArg = Annotated[
|
|
200
|
+
Optional[list[str]],
|
|
201
|
+
typer.Argument(
|
|
202
|
+
help="Arguments for the script",
|
|
203
|
+
),
|
|
204
|
+
]
|
|
205
|
+
|
|
206
|
+
CommandArg = Annotated[
|
|
207
|
+
list[str],
|
|
208
|
+
typer.Argument(
|
|
209
|
+
help="The command to run.",
|
|
210
|
+
),
|
|
211
|
+
]
|
|
212
|
+
|
|
213
|
+
JobIdArg = Annotated[
|
|
214
|
+
str,
|
|
215
|
+
typer.Argument(
|
|
216
|
+
help="Job ID",
|
|
217
|
+
),
|
|
218
|
+
]
|
|
219
|
+
|
|
220
|
+
ScheduledJobIdArg = Annotated[
|
|
221
|
+
str,
|
|
222
|
+
typer.Argument(
|
|
223
|
+
help="Scheduled Job ID",
|
|
224
|
+
),
|
|
225
|
+
]
|
|
226
|
+
|
|
227
|
+
RepoOpt = Annotated[
|
|
228
|
+
Optional[str],
|
|
229
|
+
typer.Option(
|
|
230
|
+
help="Repository name for the script (creates ephemeral if not specified)",
|
|
231
|
+
),
|
|
232
|
+
]
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
jobs_cli = typer_factory(help="Run and manage Jobs on the Hub.")
|
|
236
|
+
|
|
237
|
+
|
|
238
|
+
@jobs_cli.command("run", help="Run a Job")
|
|
239
|
+
def jobs_run(
|
|
240
|
+
image: ImageArg,
|
|
241
|
+
command: CommandArg,
|
|
242
|
+
env: EnvOpt = None,
|
|
243
|
+
secrets: SecretsOpt = None,
|
|
244
|
+
env_file: EnvFileOpt = None,
|
|
245
|
+
secrets_file: SecretsFileOpt = None,
|
|
246
|
+
flavor: FlavorOpt = None,
|
|
247
|
+
timeout: TimeoutOpt = None,
|
|
248
|
+
detach: DetachOpt = False,
|
|
249
|
+
namespace: NamespaceOpt = None,
|
|
250
|
+
token: TokenOpt = None,
|
|
251
|
+
) -> None:
|
|
252
|
+
env_map: dict[str, Optional[str]] = {}
|
|
253
|
+
if env_file:
|
|
254
|
+
env_map.update(load_dotenv(Path(env_file).read_text(), environ=os.environ.copy()))
|
|
255
|
+
for env_value in env or []:
|
|
256
|
+
env_map.update(load_dotenv(env_value, environ=os.environ.copy()))
|
|
257
|
+
|
|
258
|
+
secrets_map: dict[str, Optional[str]] = {}
|
|
259
|
+
extended_environ = _get_extended_environ()
|
|
260
|
+
if secrets_file:
|
|
261
|
+
secrets_map.update(load_dotenv(Path(secrets_file).read_text(), environ=extended_environ))
|
|
262
|
+
for secret in secrets or []:
|
|
263
|
+
secrets_map.update(load_dotenv(secret, environ=extended_environ))
|
|
264
|
+
|
|
265
|
+
api = get_hf_api(token=token)
|
|
266
|
+
job = api.run_job(
|
|
267
|
+
image=image,
|
|
268
|
+
command=command,
|
|
269
|
+
env=env_map,
|
|
270
|
+
secrets=secrets_map,
|
|
271
|
+
flavor=flavor,
|
|
272
|
+
timeout=timeout,
|
|
273
|
+
namespace=namespace,
|
|
274
|
+
)
|
|
275
|
+
# Always print the job ID to the user
|
|
276
|
+
print(f"Job started with ID: {job.id}")
|
|
277
|
+
print(f"View at: {job.url}")
|
|
278
|
+
|
|
279
|
+
if detach:
|
|
280
|
+
return
|
|
281
|
+
# Now let's stream the logs
|
|
282
|
+
for log in api.fetch_job_logs(job_id=job.id):
|
|
283
|
+
print(log)
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
@jobs_cli.command("logs", help="Fetch the logs of a Job")
|
|
287
|
+
def jobs_logs(
|
|
288
|
+
job_id: JobIdArg,
|
|
289
|
+
namespace: NamespaceOpt = None,
|
|
290
|
+
token: TokenOpt = None,
|
|
291
|
+
) -> None:
|
|
292
|
+
api = get_hf_api(token=token)
|
|
293
|
+
for log in api.fetch_job_logs(job_id=job_id, namespace=namespace):
|
|
294
|
+
print(log)
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
def _matches_filters(job_properties: dict[str, str], filters: dict[str, str]) -> bool:
|
|
298
|
+
"""Check if scheduled job matches all specified filters."""
|
|
299
|
+
for key, pattern in filters.items():
|
|
300
|
+
# Check if property exists
|
|
301
|
+
if key not in job_properties:
|
|
302
|
+
return False
|
|
303
|
+
# Support pattern matching with wildcards
|
|
304
|
+
if "*" in pattern or "?" in pattern:
|
|
305
|
+
# Convert glob pattern to regex
|
|
306
|
+
regex_pattern = pattern.replace("*", ".*").replace("?", ".")
|
|
307
|
+
if not re.search(f"^{regex_pattern}$", job_properties[key], re.IGNORECASE):
|
|
308
|
+
return False
|
|
309
|
+
# Simple substring matching
|
|
310
|
+
elif pattern.lower() not in job_properties[key].lower():
|
|
311
|
+
return False
|
|
312
|
+
return True
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
def _print_output(rows: list[list[Union[str, int]]], headers: list[str], fmt: Optional[str]) -> None:
|
|
316
|
+
"""Print output according to the chosen format."""
|
|
317
|
+
if fmt:
|
|
318
|
+
# Use custom template if provided
|
|
319
|
+
template = fmt
|
|
320
|
+
for row in rows:
|
|
321
|
+
line = template
|
|
322
|
+
for i, field in enumerate(["id", "image", "command", "created", "status"]):
|
|
323
|
+
placeholder = f"{{{{.{field}}}}}"
|
|
324
|
+
if placeholder in line:
|
|
325
|
+
line = line.replace(placeholder, str(row[i]))
|
|
326
|
+
print(line)
|
|
327
|
+
else:
|
|
328
|
+
# Default tabular format
|
|
329
|
+
print(_tabulate(rows, headers=headers))
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
@jobs_cli.command("ps", help="List Jobs")
|
|
333
|
+
def jobs_ps(
|
|
334
|
+
all: Annotated[
|
|
335
|
+
bool,
|
|
336
|
+
typer.Option(
|
|
221
337
|
"-a",
|
|
222
338
|
"--all",
|
|
223
|
-
action="store_true",
|
|
224
339
|
help="Show all Jobs (default shows just running)",
|
|
225
|
-
)
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
"--token",
|
|
233
|
-
type=str,
|
|
234
|
-
help="A User Access Token generated from https://huggingface.co/settings/tokens",
|
|
235
|
-
)
|
|
236
|
-
# Add Docker-style filtering argument
|
|
237
|
-
run_parser.add_argument(
|
|
340
|
+
),
|
|
341
|
+
] = False,
|
|
342
|
+
namespace: NamespaceOpt = None,
|
|
343
|
+
token: TokenOpt = None,
|
|
344
|
+
filter: Annotated[
|
|
345
|
+
Optional[list[str]],
|
|
346
|
+
typer.Option(
|
|
238
347
|
"-f",
|
|
239
348
|
"--filter",
|
|
240
|
-
action="append",
|
|
241
|
-
default=[],
|
|
242
349
|
help="Filter output based on conditions provided (format: key=value)",
|
|
243
|
-
)
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
350
|
+
),
|
|
351
|
+
] = None,
|
|
352
|
+
format: Annotated[
|
|
353
|
+
Optional[str],
|
|
354
|
+
typer.Option(
|
|
248
355
|
help="Format output using a custom template",
|
|
249
|
-
)
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
356
|
+
),
|
|
357
|
+
] = None,
|
|
358
|
+
) -> None:
|
|
359
|
+
try:
|
|
360
|
+
api = get_hf_api(token=token)
|
|
361
|
+
# Fetch jobs data
|
|
362
|
+
jobs = api.list_jobs(namespace=namespace)
|
|
363
|
+
# Define table headers
|
|
364
|
+
table_headers = ["JOB ID", "IMAGE/SPACE", "COMMAND", "CREATED", "STATUS"]
|
|
365
|
+
rows: list[list[Union[str, int]]] = []
|
|
366
|
+
|
|
367
|
+
filters: dict[str, str] = {}
|
|
368
|
+
for f in filter or []:
|
|
261
369
|
if "=" in f:
|
|
262
370
|
key, value = f.split("=", 1)
|
|
263
|
-
|
|
371
|
+
filters[key.lower()] = value
|
|
264
372
|
else:
|
|
265
373
|
print(f"Warning: Ignoring invalid filter format '{f}'. Use key=value format.")
|
|
374
|
+
# Process jobs data
|
|
375
|
+
for job in jobs:
|
|
376
|
+
# Extract job data for filtering
|
|
377
|
+
status = job.status.stage if job.status else "UNKNOWN"
|
|
378
|
+
if not all and status not in ("RUNNING", "UPDATING"):
|
|
379
|
+
# Skip job if not all jobs should be shown and status doesn't match criteria
|
|
380
|
+
continue
|
|
381
|
+
# Extract job data for output
|
|
382
|
+
job_id = job.id
|
|
266
383
|
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
Fetch and display job information for the current user.
|
|
270
|
-
Uses Docker-style filtering with -f/--filter flag and key=value pairs.
|
|
271
|
-
"""
|
|
272
|
-
try:
|
|
273
|
-
api = HfApi(token=self.token)
|
|
274
|
-
|
|
275
|
-
# Fetch jobs data
|
|
276
|
-
jobs = api.list_jobs(namespace=self.namespace)
|
|
277
|
-
|
|
278
|
-
# Define table headers
|
|
279
|
-
table_headers = ["JOB ID", "IMAGE/SPACE", "COMMAND", "CREATED", "STATUS"]
|
|
384
|
+
# Extract image or space information
|
|
385
|
+
image_or_space = job.docker_image or "N/A"
|
|
280
386
|
|
|
281
|
-
#
|
|
282
|
-
|
|
387
|
+
# Extract and format command
|
|
388
|
+
cmd = job.command or []
|
|
389
|
+
command_str = " ".join(cmd) if cmd else "N/A"
|
|
283
390
|
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
status = job.status.stage if job.status else "UNKNOWN"
|
|
391
|
+
# Extract creation time
|
|
392
|
+
created_at = job.created_at.strftime("%Y-%m-%d %H:%M:%S") if job.created_at else "N/A"
|
|
287
393
|
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
# Extract job ID
|
|
293
|
-
job_id = job.id
|
|
294
|
-
|
|
295
|
-
# Extract image or space information
|
|
296
|
-
image_or_space = job.docker_image or "N/A"
|
|
297
|
-
|
|
298
|
-
# Extract and format command
|
|
299
|
-
command = job.command or []
|
|
300
|
-
command_str = " ".join(command) if command else "N/A"
|
|
301
|
-
|
|
302
|
-
# Extract creation time
|
|
303
|
-
created_at = job.created_at.strftime("%Y-%m-%d %H:%M:%S") if job.created_at else "N/A"
|
|
304
|
-
|
|
305
|
-
# Create a dict with all job properties for filtering
|
|
306
|
-
job_properties = {
|
|
307
|
-
"id": job_id,
|
|
308
|
-
"image": image_or_space,
|
|
309
|
-
"status": status.lower(),
|
|
310
|
-
"command": command_str,
|
|
311
|
-
}
|
|
312
|
-
|
|
313
|
-
# Check if job matches all filters
|
|
314
|
-
if not self._matches_filters(job_properties):
|
|
315
|
-
continue
|
|
316
|
-
|
|
317
|
-
# Create row
|
|
318
|
-
rows.append([job_id, image_or_space, command_str, created_at, status])
|
|
319
|
-
|
|
320
|
-
# Handle empty results
|
|
321
|
-
if not rows:
|
|
322
|
-
filters_msg = ""
|
|
323
|
-
if self.filters:
|
|
324
|
-
filters_msg = f" matching filters: {', '.join([f'{k}={v}' for k, v in self.filters.items()])}"
|
|
325
|
-
|
|
326
|
-
print(f"No jobs found{filters_msg}")
|
|
327
|
-
return
|
|
328
|
-
|
|
329
|
-
# Apply custom format if provided or use default tabular format
|
|
330
|
-
self._print_output(rows, table_headers)
|
|
331
|
-
|
|
332
|
-
except requests.RequestException as e:
|
|
333
|
-
print(f"Error fetching jobs data: {e}")
|
|
334
|
-
except (KeyError, ValueError, TypeError) as e:
|
|
335
|
-
print(f"Error processing jobs data: {e}")
|
|
336
|
-
except Exception as e:
|
|
337
|
-
print(f"Unexpected error - {type(e).__name__}: {e}")
|
|
338
|
-
|
|
339
|
-
def _matches_filters(self, job_properties: Dict[str, str]) -> bool:
|
|
340
|
-
"""Check if job matches all specified filters."""
|
|
341
|
-
for key, pattern in self.filters.items():
|
|
342
|
-
# Check if property exists
|
|
343
|
-
if key not in job_properties:
|
|
344
|
-
return False
|
|
394
|
+
# Create a dict with all job properties for filtering
|
|
395
|
+
props = {"id": job_id, "image": image_or_space, "status": status.lower(), "command": command_str}
|
|
396
|
+
if not _matches_filters(props, filters):
|
|
397
|
+
continue
|
|
345
398
|
|
|
346
|
-
#
|
|
347
|
-
|
|
348
|
-
# Convert glob pattern to regex
|
|
349
|
-
regex_pattern = pattern.replace("*", ".*").replace("?", ".")
|
|
350
|
-
if not re.search(f"^{regex_pattern}$", job_properties[key], re.IGNORECASE):
|
|
351
|
-
return False
|
|
352
|
-
# Simple substring matching
|
|
353
|
-
elif pattern.lower() not in job_properties[key].lower():
|
|
354
|
-
return False
|
|
399
|
+
# Create row
|
|
400
|
+
rows.append([job_id, image_or_space, command_str, created_at, status])
|
|
355
401
|
|
|
356
|
-
|
|
357
|
-
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
if self.format:
|
|
361
|
-
# Custom template formatting (simplified)
|
|
362
|
-
template = self.format
|
|
363
|
-
for row in rows:
|
|
364
|
-
line = template
|
|
365
|
-
for i, field in enumerate(["id", "image", "command", "created", "status"]):
|
|
366
|
-
placeholder = f"{{{{.{field}}}}}"
|
|
367
|
-
if placeholder in line:
|
|
368
|
-
line = line.replace(placeholder, str(row[i]))
|
|
369
|
-
print(line)
|
|
370
|
-
else:
|
|
371
|
-
# Default tabular format
|
|
372
|
-
print(
|
|
373
|
-
_tabulate(
|
|
374
|
-
rows,
|
|
375
|
-
headers=headers,
|
|
376
|
-
)
|
|
402
|
+
# Handle empty results
|
|
403
|
+
if not rows:
|
|
404
|
+
filters_msg = (
|
|
405
|
+
f" matching filters: {', '.join([f'{k}={v}' for k, v in filters.items()])}" if filters else ""
|
|
377
406
|
)
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
class InspectCommand(BaseHuggingfaceCLICommand):
|
|
381
|
-
@staticmethod
|
|
382
|
-
def register_subcommand(parser: _SubParsersAction) -> None:
|
|
383
|
-
run_parser = parser.add_parser("inspect", help="Display detailed information on one or more Jobs")
|
|
384
|
-
run_parser.add_argument(
|
|
385
|
-
"--namespace",
|
|
386
|
-
type=str,
|
|
387
|
-
help="The namespace where the job is running. Defaults to the current user's namespace.",
|
|
388
|
-
)
|
|
389
|
-
run_parser.add_argument(
|
|
390
|
-
"--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens"
|
|
391
|
-
)
|
|
392
|
-
run_parser.add_argument("job_ids", nargs="...", help="The jobs to inspect")
|
|
393
|
-
run_parser.set_defaults(func=InspectCommand)
|
|
394
|
-
|
|
395
|
-
def __init__(self, args: Namespace) -> None:
|
|
396
|
-
self.namespace: Optional[str] = args.namespace
|
|
397
|
-
self.token: Optional[str] = args.token
|
|
398
|
-
self.job_ids: List[str] = args.job_ids
|
|
399
|
-
|
|
400
|
-
def run(self) -> None:
|
|
401
|
-
api = HfApi(token=self.token)
|
|
402
|
-
jobs = [api.inspect_job(job_id=job_id, namespace=self.namespace) for job_id in self.job_ids]
|
|
403
|
-
print(json.dumps([asdict(job) for job in jobs], indent=4, default=str))
|
|
404
|
-
|
|
405
|
-
|
|
406
|
-
class CancelCommand(BaseHuggingfaceCLICommand):
|
|
407
|
-
@staticmethod
|
|
408
|
-
def register_subcommand(parser: _SubParsersAction) -> None:
|
|
409
|
-
run_parser = parser.add_parser("cancel", help="Cancel a Job")
|
|
410
|
-
run_parser.add_argument("job_id", type=str, help="Job ID")
|
|
411
|
-
run_parser.add_argument(
|
|
412
|
-
"--namespace",
|
|
413
|
-
type=str,
|
|
414
|
-
help="The namespace where the job is running. Defaults to the current user's namespace.",
|
|
415
|
-
)
|
|
416
|
-
run_parser.add_argument(
|
|
417
|
-
"--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens"
|
|
418
|
-
)
|
|
419
|
-
run_parser.set_defaults(func=CancelCommand)
|
|
420
|
-
|
|
421
|
-
def __init__(self, args: Namespace) -> None:
|
|
422
|
-
self.job_id: str = args.job_id
|
|
423
|
-
self.namespace = args.namespace
|
|
424
|
-
self.token: Optional[str] = args.token
|
|
425
|
-
|
|
426
|
-
def run(self) -> None:
|
|
427
|
-
api = HfApi(token=self.token)
|
|
428
|
-
api.cancel_job(job_id=self.job_id, namespace=self.namespace)
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
class UvCommand(BaseHuggingfaceCLICommand):
|
|
432
|
-
"""Run UV scripts on Hugging Face infrastructure."""
|
|
433
|
-
|
|
434
|
-
@staticmethod
|
|
435
|
-
def register_subcommand(parser):
|
|
436
|
-
"""Register UV run subcommand."""
|
|
437
|
-
uv_parser = parser.add_parser(
|
|
438
|
-
"uv",
|
|
439
|
-
help="Run UV scripts (Python with inline dependencies) on HF infrastructure",
|
|
440
|
-
)
|
|
441
|
-
|
|
442
|
-
subparsers = uv_parser.add_subparsers(dest="uv_command", help="UV commands", required=True)
|
|
443
|
-
|
|
444
|
-
# Run command only
|
|
445
|
-
run_parser = subparsers.add_parser(
|
|
446
|
-
"run",
|
|
447
|
-
help="Run a UV script (local file or URL) on HF infrastructure",
|
|
448
|
-
)
|
|
449
|
-
run_parser.add_argument("script", help="UV script to run (local file or URL)")
|
|
450
|
-
run_parser.add_argument("script_args", nargs="...", help="Arguments for the script", default=[])
|
|
451
|
-
run_parser.add_argument("--image", type=str, help="Use a custom Docker image with `uv` installed.")
|
|
452
|
-
run_parser.add_argument(
|
|
453
|
-
"--repo",
|
|
454
|
-
help="Repository name for the script (creates ephemeral if not specified)",
|
|
455
|
-
)
|
|
456
|
-
run_parser.add_argument(
|
|
457
|
-
"--flavor",
|
|
458
|
-
type=str,
|
|
459
|
-
help=f"Flavor for the hardware, as in HF Spaces. Defaults to `cpu-basic`. Possible values: {', '.join(SUGGESTED_FLAVORS)}.",
|
|
460
|
-
)
|
|
461
|
-
run_parser.add_argument("-e", "--env", action="append", help="Environment variables")
|
|
462
|
-
run_parser.add_argument(
|
|
463
|
-
"-s",
|
|
464
|
-
"--secrets",
|
|
465
|
-
action="append",
|
|
466
|
-
help=(
|
|
467
|
-
"Set secret environment variables. E.g. --secrets SECRET=value "
|
|
468
|
-
"or `--secrets HF_TOKEN` to pass your Hugging Face token."
|
|
469
|
-
),
|
|
470
|
-
)
|
|
471
|
-
run_parser.add_argument("--env-file", type=str, help="Read in a file of environment variables.")
|
|
472
|
-
run_parser.add_argument(
|
|
473
|
-
"--secrets-file",
|
|
474
|
-
type=str,
|
|
475
|
-
help="Read in a file of secret environment variables.",
|
|
476
|
-
)
|
|
477
|
-
run_parser.add_argument("--timeout", type=str, help="Max duration (e.g., 30s, 5m, 1h)")
|
|
478
|
-
run_parser.add_argument("-d", "--detach", action="store_true", help="Run in background")
|
|
479
|
-
run_parser.add_argument(
|
|
480
|
-
"--namespace",
|
|
481
|
-
type=str,
|
|
482
|
-
help="The namespace where the Job will be created. Defaults to the current user's namespace.",
|
|
483
|
-
)
|
|
484
|
-
run_parser.add_argument("--token", type=str, help="HF token")
|
|
485
|
-
# UV options
|
|
486
|
-
run_parser.add_argument("--with", action="append", help="Run with the given packages installed", dest="with_")
|
|
487
|
-
run_parser.add_argument(
|
|
488
|
-
"-p", "--python", type=str, help="The Python interpreter to use for the run environment"
|
|
489
|
-
)
|
|
490
|
-
run_parser.set_defaults(func=UvCommand)
|
|
491
|
-
|
|
492
|
-
def __init__(self, args: Namespace) -> None:
|
|
493
|
-
"""Initialize the command with parsed arguments."""
|
|
494
|
-
self.script = args.script
|
|
495
|
-
self.script_args = args.script_args
|
|
496
|
-
self.dependencies = args.with_
|
|
497
|
-
self.python = args.python
|
|
498
|
-
self.image = args.image
|
|
499
|
-
self.env: dict[str, Optional[str]] = {}
|
|
500
|
-
if args.env_file:
|
|
501
|
-
self.env.update(load_dotenv(Path(args.env_file).read_text(), environ=os.environ.copy()))
|
|
502
|
-
for env_value in args.env or []:
|
|
503
|
-
self.env.update(load_dotenv(env_value, environ=os.environ.copy()))
|
|
504
|
-
self.secrets: dict[str, Optional[str]] = {}
|
|
505
|
-
extended_environ = _get_extended_environ()
|
|
506
|
-
if args.secrets_file:
|
|
507
|
-
self.secrets.update(load_dotenv(Path(args.secrets_file).read_text(), environ=extended_environ))
|
|
508
|
-
for secret in args.secrets or []:
|
|
509
|
-
self.secrets.update(load_dotenv(secret, environ=extended_environ))
|
|
510
|
-
self.flavor: Optional[SpaceHardware] = args.flavor
|
|
511
|
-
self.timeout: Optional[str] = args.timeout
|
|
512
|
-
self.detach: bool = args.detach
|
|
513
|
-
self.namespace: Optional[str] = args.namespace
|
|
514
|
-
self.token: Optional[str] = args.token
|
|
515
|
-
self._repo = args.repo
|
|
516
|
-
|
|
517
|
-
def run(self) -> None:
|
|
518
|
-
"""Execute UV command."""
|
|
519
|
-
logging.set_verbosity(logging.INFO)
|
|
520
|
-
api = HfApi(token=self.token)
|
|
521
|
-
job = api.run_uv_job(
|
|
522
|
-
script=self.script,
|
|
523
|
-
script_args=self.script_args,
|
|
524
|
-
dependencies=self.dependencies,
|
|
525
|
-
python=self.python,
|
|
526
|
-
image=self.image,
|
|
527
|
-
env=self.env,
|
|
528
|
-
secrets=self.secrets,
|
|
529
|
-
flavor=self.flavor,
|
|
530
|
-
timeout=self.timeout,
|
|
531
|
-
namespace=self.namespace,
|
|
532
|
-
_repo=self._repo,
|
|
533
|
-
)
|
|
534
|
-
|
|
535
|
-
# Always print the job ID to the user
|
|
536
|
-
print(f"Job started with ID: {job.id}")
|
|
537
|
-
print(f"View at: {job.url}")
|
|
538
|
-
|
|
539
|
-
if self.detach:
|
|
407
|
+
print(f"No jobs found{filters_msg}")
|
|
540
408
|
return
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
|
|
552
|
-
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
|
|
556
|
-
|
|
557
|
-
|
|
558
|
-
|
|
559
|
-
|
|
560
|
-
|
|
561
|
-
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
|
|
566
|
-
|
|
567
|
-
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
578
|
-
|
|
579
|
-
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
585
|
-
|
|
586
|
-
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
|
|
598
|
-
|
|
599
|
-
|
|
600
|
-
|
|
601
|
-
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
|
|
605
|
-
|
|
606
|
-
|
|
607
|
-
|
|
608
|
-
|
|
609
|
-
|
|
610
|
-
|
|
611
|
-
|
|
612
|
-
|
|
613
|
-
|
|
614
|
-
|
|
615
|
-
|
|
616
|
-
|
|
617
|
-
|
|
618
|
-
|
|
619
|
-
|
|
620
|
-
|
|
621
|
-
|
|
622
|
-
|
|
623
|
-
|
|
624
|
-
|
|
625
|
-
|
|
626
|
-
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
|
|
630
|
-
|
|
631
|
-
|
|
632
|
-
|
|
633
|
-
|
|
634
|
-
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
638
|
-
|
|
639
|
-
|
|
640
|
-
|
|
641
|
-
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
|
|
646
|
-
|
|
647
|
-
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
|
|
651
|
-
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
657
|
-
|
|
658
|
-
|
|
659
|
-
|
|
660
|
-
|
|
661
|
-
|
|
662
|
-
|
|
663
|
-
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
|
|
667
|
-
|
|
668
|
-
|
|
669
|
-
|
|
670
|
-
|
|
671
|
-
|
|
672
|
-
|
|
673
|
-
|
|
674
|
-
|
|
675
|
-
|
|
676
|
-
|
|
409
|
+
# Apply custom format if provided or use default tabular format
|
|
410
|
+
_print_output(rows, table_headers, format)
|
|
411
|
+
|
|
412
|
+
except HfHubHTTPError as e:
|
|
413
|
+
print(f"Error fetching jobs data: {e}")
|
|
414
|
+
except (KeyError, ValueError, TypeError) as e:
|
|
415
|
+
print(f"Error processing jobs data: {e}")
|
|
416
|
+
except Exception as e:
|
|
417
|
+
print(f"Unexpected error - {type(e).__name__}: {e}")
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
@jobs_cli.command("inspect", help="Display detailed information on one or more Jobs")
|
|
421
|
+
def jobs_inspect(
|
|
422
|
+
job_ids: Annotated[
|
|
423
|
+
list[str],
|
|
424
|
+
typer.Argument(
|
|
425
|
+
help="The jobs to inspect",
|
|
426
|
+
),
|
|
427
|
+
],
|
|
428
|
+
namespace: NamespaceOpt = None,
|
|
429
|
+
token: TokenOpt = None,
|
|
430
|
+
) -> None:
|
|
431
|
+
api = get_hf_api(token=token)
|
|
432
|
+
jobs = [api.inspect_job(job_id=job_id, namespace=namespace) for job_id in job_ids]
|
|
433
|
+
print(json.dumps([asdict(job) for job in jobs], indent=4, default=str))
|
|
434
|
+
|
|
435
|
+
|
|
436
|
+
@jobs_cli.command("cancel", help="Cancel a Job")
|
|
437
|
+
def jobs_cancel(
|
|
438
|
+
job_id: JobIdArg,
|
|
439
|
+
namespace: NamespaceOpt = None,
|
|
440
|
+
token: TokenOpt = None,
|
|
441
|
+
) -> None:
|
|
442
|
+
api = get_hf_api(token=token)
|
|
443
|
+
api.cancel_job(job_id=job_id, namespace=namespace)
|
|
444
|
+
|
|
445
|
+
|
|
446
|
+
uv_app = typer_factory(help="Run UV scripts (Python with inline dependencies) on HF infrastructure")
|
|
447
|
+
jobs_cli.add_typer(uv_app, name="uv")
|
|
448
|
+
|
|
449
|
+
|
|
450
|
+
@uv_app.command("run", help="Run a UV script (local file or URL) on HF infrastructure")
|
|
451
|
+
def jobs_uv_run(
|
|
452
|
+
script: ScriptArg,
|
|
453
|
+
script_args: ScriptArgsArg = None,
|
|
454
|
+
image: ImageOpt = None,
|
|
455
|
+
repo: RepoOpt = None,
|
|
456
|
+
flavor: FlavorOpt = None,
|
|
457
|
+
env: EnvOpt = None,
|
|
458
|
+
secrets: SecretsOpt = None,
|
|
459
|
+
env_file: EnvFileOpt = None,
|
|
460
|
+
secrets_file: SecretsFileOpt = None,
|
|
461
|
+
timeout: TimeoutOpt = None,
|
|
462
|
+
detach: DetachOpt = False,
|
|
463
|
+
namespace: NamespaceOpt = None,
|
|
464
|
+
token: TokenOpt = None,
|
|
465
|
+
with_: WithOpt = None,
|
|
466
|
+
python: PythonOpt = None,
|
|
467
|
+
) -> None:
|
|
468
|
+
env_map: dict[str, Optional[str]] = {}
|
|
469
|
+
if env_file:
|
|
470
|
+
env_map.update(load_dotenv(Path(env_file).read_text(), environ=os.environ.copy()))
|
|
471
|
+
for env_value in env or []:
|
|
472
|
+
env_map.update(load_dotenv(env_value, environ=os.environ.copy()))
|
|
473
|
+
secrets_map: dict[str, Optional[str]] = {}
|
|
474
|
+
extended_environ = _get_extended_environ()
|
|
475
|
+
if secrets_file:
|
|
476
|
+
secrets_map.update(load_dotenv(Path(secrets_file).read_text(), environ=extended_environ))
|
|
477
|
+
for secret in secrets or []:
|
|
478
|
+
secrets_map.update(load_dotenv(secret, environ=extended_environ))
|
|
479
|
+
|
|
480
|
+
api = get_hf_api(token=token)
|
|
481
|
+
job = api.run_uv_job(
|
|
482
|
+
script=script,
|
|
483
|
+
script_args=script_args or [],
|
|
484
|
+
dependencies=with_,
|
|
485
|
+
python=python,
|
|
486
|
+
image=image,
|
|
487
|
+
env=env_map,
|
|
488
|
+
secrets=secrets_map,
|
|
489
|
+
flavor=flavor, # type: ignore[arg-type]
|
|
490
|
+
timeout=timeout,
|
|
491
|
+
namespace=namespace,
|
|
492
|
+
_repo=repo,
|
|
493
|
+
)
|
|
494
|
+
# Always print the job ID to the user
|
|
495
|
+
print(f"Job started with ID: {job.id}")
|
|
496
|
+
print(f"View at: {job.url}")
|
|
497
|
+
if detach:
|
|
498
|
+
return
|
|
499
|
+
# Now let's stream the logs
|
|
500
|
+
for log in api.fetch_job_logs(job_id=job.id):
|
|
501
|
+
print(log)
|
|
502
|
+
|
|
503
|
+
|
|
504
|
+
scheduled_app = typer_factory(help="Create and manage scheduled Jobs on the Hub.")
|
|
505
|
+
jobs_cli.add_typer(scheduled_app, name="scheduled")
|
|
506
|
+
|
|
507
|
+
|
|
508
|
+
@scheduled_app.command("run", help="Schedule a Job")
|
|
509
|
+
def scheduled_run(
|
|
510
|
+
schedule: ScheduleArg,
|
|
511
|
+
image: ImageArg,
|
|
512
|
+
command: CommandArg,
|
|
513
|
+
suspend: SuspendOpt = None,
|
|
514
|
+
concurrency: ConcurrencyOpt = None,
|
|
515
|
+
env: EnvOpt = None,
|
|
516
|
+
secrets: SecretsOpt = None,
|
|
517
|
+
env_file: EnvFileOpt = None,
|
|
518
|
+
secrets_file: SecretsFileOpt = None,
|
|
519
|
+
flavor: FlavorOpt = None,
|
|
520
|
+
timeout: TimeoutOpt = None,
|
|
521
|
+
namespace: NamespaceOpt = None,
|
|
522
|
+
token: TokenOpt = None,
|
|
523
|
+
) -> None:
|
|
524
|
+
env_map: dict[str, Optional[str]] = {}
|
|
525
|
+
if env_file:
|
|
526
|
+
env_map.update(load_dotenv(Path(env_file).read_text(), environ=os.environ.copy()))
|
|
527
|
+
for env_value in env or []:
|
|
528
|
+
env_map.update(load_dotenv(env_value, environ=os.environ.copy()))
|
|
529
|
+
secrets_map: dict[str, Optional[str]] = {}
|
|
530
|
+
extended_environ = _get_extended_environ()
|
|
531
|
+
if secrets_file:
|
|
532
|
+
secrets_map.update(load_dotenv(Path(secrets_file).read_text(), environ=extended_environ))
|
|
533
|
+
for secret in secrets or []:
|
|
534
|
+
secrets_map.update(load_dotenv(secret, environ=extended_environ))
|
|
535
|
+
|
|
536
|
+
api = get_hf_api(token=token)
|
|
537
|
+
scheduled_job = api.create_scheduled_job(
|
|
538
|
+
image=image,
|
|
539
|
+
command=command,
|
|
540
|
+
schedule=schedule,
|
|
541
|
+
suspend=suspend,
|
|
542
|
+
concurrency=concurrency,
|
|
543
|
+
env=env_map,
|
|
544
|
+
secrets=secrets_map,
|
|
545
|
+
flavor=flavor,
|
|
546
|
+
timeout=timeout,
|
|
547
|
+
namespace=namespace,
|
|
548
|
+
)
|
|
549
|
+
print(f"Scheduled Job created with ID: {scheduled_job.id}")
|
|
550
|
+
|
|
551
|
+
|
|
552
|
+
@scheduled_app.command("ps", help="List scheduled Jobs")
|
|
553
|
+
def scheduled_ps(
|
|
554
|
+
all: Annotated[
|
|
555
|
+
bool,
|
|
556
|
+
typer.Option(
|
|
677
557
|
"-a",
|
|
678
558
|
"--all",
|
|
679
|
-
action="store_true",
|
|
680
559
|
help="Show all scheduled Jobs (default hides suspended)",
|
|
681
|
-
)
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
|
|
685
|
-
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
"--token",
|
|
689
|
-
type=str,
|
|
690
|
-
help="A User Access Token generated from https://huggingface.co/settings/tokens",
|
|
691
|
-
)
|
|
692
|
-
# Add Docker-style filtering argument
|
|
693
|
-
run_parser.add_argument(
|
|
560
|
+
),
|
|
561
|
+
] = False,
|
|
562
|
+
namespace: NamespaceOpt = None,
|
|
563
|
+
token: TokenOpt = None,
|
|
564
|
+
filter: Annotated[
|
|
565
|
+
Optional[list[str]],
|
|
566
|
+
typer.Option(
|
|
694
567
|
"-f",
|
|
695
568
|
"--filter",
|
|
696
|
-
action="append",
|
|
697
|
-
default=[],
|
|
698
569
|
help="Filter output based on conditions provided (format: key=value)",
|
|
699
|
-
)
|
|
700
|
-
|
|
701
|
-
|
|
570
|
+
),
|
|
571
|
+
] = None,
|
|
572
|
+
format: Annotated[
|
|
573
|
+
Optional[str],
|
|
574
|
+
typer.Option(
|
|
702
575
|
"--format",
|
|
703
|
-
type=str,
|
|
704
576
|
help="Format output using a custom template",
|
|
705
|
-
)
|
|
706
|
-
|
|
707
|
-
|
|
708
|
-
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
|
|
714
|
-
|
|
715
|
-
# Parse filter arguments (key=value pairs)
|
|
716
|
-
for f in args.filter:
|
|
577
|
+
),
|
|
578
|
+
] = None,
|
|
579
|
+
) -> None:
|
|
580
|
+
try:
|
|
581
|
+
api = get_hf_api(token=token)
|
|
582
|
+
scheduled_jobs = api.list_scheduled_jobs(namespace=namespace)
|
|
583
|
+
table_headers = ["ID", "SCHEDULE", "IMAGE/SPACE", "COMMAND", "LAST RUN", "NEXT RUN", "SUSPEND"]
|
|
584
|
+
rows: list[list[Union[str, int]]] = []
|
|
585
|
+
filters: dict[str, str] = {}
|
|
586
|
+
for f in filter or []:
|
|
717
587
|
if "=" in f:
|
|
718
588
|
key, value = f.split("=", 1)
|
|
719
|
-
|
|
589
|
+
filters[key.lower()] = value
|
|
720
590
|
else:
|
|
721
591
|
print(f"Warning: Ignoring invalid filter format '{f}'. Use key=value format.")
|
|
722
592
|
|
|
723
|
-
|
|
724
|
-
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
|
|
731
|
-
|
|
732
|
-
|
|
733
|
-
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
"ID",
|
|
737
|
-
"SCHEDULE",
|
|
738
|
-
"IMAGE/SPACE",
|
|
739
|
-
"COMMAND",
|
|
740
|
-
"LAST RUN",
|
|
741
|
-
"NEXT RUN",
|
|
742
|
-
"SUSPEND",
|
|
743
|
-
]
|
|
744
|
-
|
|
745
|
-
# Process jobs data
|
|
746
|
-
rows = []
|
|
747
|
-
|
|
748
|
-
for scheduled_job in scheduled_jobs:
|
|
749
|
-
# Extract job data for filtering
|
|
750
|
-
suspend = scheduled_job.suspend
|
|
751
|
-
|
|
752
|
-
# Skip job if not all jobs should be shown and status doesn't match criteria
|
|
753
|
-
if not self.all and suspend:
|
|
754
|
-
continue
|
|
755
|
-
|
|
756
|
-
# Extract job ID
|
|
757
|
-
scheduled_job_id = scheduled_job.id
|
|
758
|
-
|
|
759
|
-
# Extract schedule
|
|
760
|
-
schedule = scheduled_job.schedule
|
|
761
|
-
|
|
762
|
-
# Extract image or space information
|
|
763
|
-
image_or_space = scheduled_job.job_spec.docker_image or "N/A"
|
|
764
|
-
|
|
765
|
-
# Extract and format command
|
|
766
|
-
command = scheduled_job.job_spec.command or []
|
|
767
|
-
command_str = " ".join(command) if command else "N/A"
|
|
768
|
-
|
|
769
|
-
# Extract status
|
|
770
|
-
last_job_at = (
|
|
771
|
-
scheduled_job.status.last_job.at.strftime("%Y-%m-%d %H:%M:%S")
|
|
772
|
-
if scheduled_job.status.last_job
|
|
773
|
-
else "N/A"
|
|
774
|
-
)
|
|
775
|
-
next_job_run_at = (
|
|
776
|
-
scheduled_job.status.next_job_run_at.strftime("%Y-%m-%d %H:%M:%S")
|
|
777
|
-
if scheduled_job.status.next_job_run_at
|
|
778
|
-
else "N/A"
|
|
779
|
-
)
|
|
780
|
-
|
|
781
|
-
# Create a dict with all job properties for filtering
|
|
782
|
-
job_properties = {
|
|
783
|
-
"id": scheduled_job_id,
|
|
784
|
-
"image": image_or_space,
|
|
785
|
-
"suspend": str(suspend),
|
|
786
|
-
"command": command_str,
|
|
787
|
-
}
|
|
788
|
-
|
|
789
|
-
# Check if job matches all filters
|
|
790
|
-
if not self._matches_filters(job_properties):
|
|
791
|
-
continue
|
|
792
|
-
|
|
793
|
-
# Create row
|
|
794
|
-
rows.append(
|
|
795
|
-
[
|
|
796
|
-
scheduled_job_id,
|
|
797
|
-
schedule,
|
|
798
|
-
image_or_space,
|
|
799
|
-
command_str,
|
|
800
|
-
last_job_at,
|
|
801
|
-
next_job_run_at,
|
|
802
|
-
suspend,
|
|
803
|
-
]
|
|
804
|
-
)
|
|
805
|
-
|
|
806
|
-
# Handle empty results
|
|
807
|
-
if not rows:
|
|
808
|
-
filters_msg = ""
|
|
809
|
-
if self.filters:
|
|
810
|
-
filters_msg = f" matching filters: {', '.join([f'{k}={v}' for k, v in self.filters.items()])}"
|
|
811
|
-
|
|
812
|
-
print(f"No scheduled jobs found{filters_msg}")
|
|
813
|
-
return
|
|
814
|
-
|
|
815
|
-
# Apply custom format if provided or use default tabular format
|
|
816
|
-
self._print_output(rows, table_headers)
|
|
817
|
-
|
|
818
|
-
except requests.RequestException as e:
|
|
819
|
-
print(f"Error fetching scheduled jobs data: {e}")
|
|
820
|
-
except (KeyError, ValueError, TypeError) as e:
|
|
821
|
-
print(f"Error processing scheduled jobs data: {e}")
|
|
822
|
-
except Exception as e:
|
|
823
|
-
print(f"Unexpected error - {type(e).__name__}: {e}")
|
|
824
|
-
|
|
825
|
-
def _matches_filters(self, job_properties: Dict[str, str]) -> bool:
|
|
826
|
-
"""Check if scheduled job matches all specified filters."""
|
|
827
|
-
for key, pattern in self.filters.items():
|
|
828
|
-
# Check if property exists
|
|
829
|
-
if key not in job_properties:
|
|
830
|
-
return False
|
|
831
|
-
|
|
832
|
-
# Support pattern matching with wildcards
|
|
833
|
-
if "*" in pattern or "?" in pattern:
|
|
834
|
-
# Convert glob pattern to regex
|
|
835
|
-
regex_pattern = pattern.replace("*", ".*").replace("?", ".")
|
|
836
|
-
if not re.search(f"^{regex_pattern}$", job_properties[key], re.IGNORECASE):
|
|
837
|
-
return False
|
|
838
|
-
# Simple substring matching
|
|
839
|
-
elif pattern.lower() not in job_properties[key].lower():
|
|
840
|
-
return False
|
|
841
|
-
|
|
842
|
-
return True
|
|
843
|
-
|
|
844
|
-
def _print_output(self, rows, headers):
|
|
845
|
-
"""Print output according to the chosen format."""
|
|
846
|
-
if self.format:
|
|
847
|
-
# Custom template formatting (simplified)
|
|
848
|
-
template = self.format
|
|
849
|
-
for row in rows:
|
|
850
|
-
line = template
|
|
851
|
-
for i, field in enumerate(
|
|
852
|
-
["id", "schedule", "image", "command", "last_job_at", "next_job_run_at", "suspend"]
|
|
853
|
-
):
|
|
854
|
-
placeholder = f"{{{{.{field}}}}}"
|
|
855
|
-
if placeholder in line:
|
|
856
|
-
line = line.replace(placeholder, str(row[i]))
|
|
857
|
-
print(line)
|
|
858
|
-
else:
|
|
859
|
-
# Default tabular format
|
|
860
|
-
print(
|
|
861
|
-
_tabulate(
|
|
862
|
-
rows,
|
|
863
|
-
headers=headers,
|
|
864
|
-
)
|
|
593
|
+
for scheduled_job in scheduled_jobs:
|
|
594
|
+
suspend = scheduled_job.suspend or False
|
|
595
|
+
if not all and suspend:
|
|
596
|
+
continue
|
|
597
|
+
sj_id = scheduled_job.id
|
|
598
|
+
schedule = scheduled_job.schedule or "N/A"
|
|
599
|
+
image_or_space = scheduled_job.job_spec.docker_image or "N/A"
|
|
600
|
+
cmd = scheduled_job.job_spec.command or []
|
|
601
|
+
command_str = " ".join(cmd) if cmd else "N/A"
|
|
602
|
+
last_job_at = (
|
|
603
|
+
scheduled_job.status.last_job.at.strftime("%Y-%m-%d %H:%M:%S")
|
|
604
|
+
if scheduled_job.status.last_job
|
|
605
|
+
else "N/A"
|
|
865
606
|
)
|
|
607
|
+
next_job_run_at = (
|
|
608
|
+
scheduled_job.status.next_job_run_at.strftime("%Y-%m-%d %H:%M:%S")
|
|
609
|
+
if scheduled_job.status.next_job_run_at
|
|
610
|
+
else "N/A"
|
|
611
|
+
)
|
|
612
|
+
props = {"id": sj_id, "image": image_or_space, "suspend": str(suspend), "command": command_str}
|
|
613
|
+
if not _matches_filters(props, filters):
|
|
614
|
+
continue
|
|
615
|
+
rows.append([sj_id, schedule, image_or_space, command_str, last_job_at, next_job_run_at, suspend])
|
|
616
|
+
|
|
617
|
+
if not rows:
|
|
618
|
+
filters_msg = (
|
|
619
|
+
f" matching filters: {', '.join([f'{k}={v}' for k, v in filters.items()])}" if filters else ""
|
|
620
|
+
)
|
|
621
|
+
print(f"No scheduled jobs found{filters_msg}")
|
|
622
|
+
return
|
|
623
|
+
_print_output(rows, table_headers, format)
|
|
624
|
+
|
|
625
|
+
except HfHubHTTPError as e:
|
|
626
|
+
print(f"Error fetching scheduled jobs data: {e}")
|
|
627
|
+
except (KeyError, ValueError, TypeError) as e:
|
|
628
|
+
print(f"Error processing scheduled jobs data: {e}")
|
|
629
|
+
except Exception as e:
|
|
630
|
+
print(f"Unexpected error - {type(e).__name__}: {e}")
|
|
631
|
+
|
|
632
|
+
|
|
633
|
+
@scheduled_app.command("inspect", help="Display detailed information on one or more scheduled Jobs")
|
|
634
|
+
def scheduled_inspect(
|
|
635
|
+
scheduled_job_ids: Annotated[
|
|
636
|
+
list[str],
|
|
637
|
+
typer.Argument(
|
|
638
|
+
help="The scheduled jobs to inspect",
|
|
639
|
+
),
|
|
640
|
+
],
|
|
641
|
+
namespace: NamespaceOpt = None,
|
|
642
|
+
token: TokenOpt = None,
|
|
643
|
+
) -> None:
|
|
644
|
+
api = get_hf_api(token=token)
|
|
645
|
+
scheduled_jobs = [
|
|
646
|
+
api.inspect_scheduled_job(scheduled_job_id=scheduled_job_id, namespace=namespace)
|
|
647
|
+
for scheduled_job_id in scheduled_job_ids
|
|
648
|
+
]
|
|
649
|
+
print(json.dumps([asdict(scheduled_job) for scheduled_job in scheduled_jobs], indent=4, default=str))
|
|
650
|
+
|
|
651
|
+
|
|
652
|
+
@scheduled_app.command("delete", help="Delete a scheduled Job")
|
|
653
|
+
def scheduled_delete(
|
|
654
|
+
scheduled_job_id: ScheduledJobIdArg,
|
|
655
|
+
namespace: NamespaceOpt = None,
|
|
656
|
+
token: TokenOpt = None,
|
|
657
|
+
) -> None:
|
|
658
|
+
api = get_hf_api(token=token)
|
|
659
|
+
api.delete_scheduled_job(scheduled_job_id=scheduled_job_id, namespace=namespace)
|
|
660
|
+
|
|
661
|
+
|
|
662
|
+
@scheduled_app.command("suspend", help="Suspend (pause) a scheduled Job")
|
|
663
|
+
def scheduled_suspend(
|
|
664
|
+
scheduled_job_id: ScheduledJobIdArg,
|
|
665
|
+
namespace: NamespaceOpt = None,
|
|
666
|
+
token: TokenOpt = None,
|
|
667
|
+
) -> None:
|
|
668
|
+
api = get_hf_api(token=token)
|
|
669
|
+
api.suspend_scheduled_job(scheduled_job_id=scheduled_job_id, namespace=namespace)
|
|
670
|
+
|
|
671
|
+
|
|
672
|
+
@scheduled_app.command("resume", help="Resume (unpause) a scheduled Job")
|
|
673
|
+
def scheduled_resume(
|
|
674
|
+
scheduled_job_id: ScheduledJobIdArg,
|
|
675
|
+
namespace: NamespaceOpt = None,
|
|
676
|
+
token: TokenOpt = None,
|
|
677
|
+
) -> None:
|
|
678
|
+
api = get_hf_api(token=token)
|
|
679
|
+
api.resume_scheduled_job(scheduled_job_id=scheduled_job_id, namespace=namespace)
|
|
680
|
+
|
|
681
|
+
|
|
682
|
+
scheduled_uv_app = typer_factory(help="Schedule UV scripts on HF infrastructure")
|
|
683
|
+
scheduled_app.add_typer(scheduled_uv_app, name="uv")
|
|
684
|
+
|
|
685
|
+
|
|
686
|
+
@scheduled_uv_app.command("run", help="Run a UV script (local file or URL) on HF infrastructure")
|
|
687
|
+
def scheduled_uv_run(
|
|
688
|
+
schedule: ScheduleArg,
|
|
689
|
+
script: ScriptArg,
|
|
690
|
+
script_args: ScriptArgsArg = None,
|
|
691
|
+
suspend: SuspendOpt = None,
|
|
692
|
+
concurrency: ConcurrencyOpt = None,
|
|
693
|
+
image: ImageOpt = None,
|
|
694
|
+
repo: RepoOpt = None,
|
|
695
|
+
flavor: FlavorOpt = None,
|
|
696
|
+
env: EnvOpt = None,
|
|
697
|
+
secrets: SecretsOpt = None,
|
|
698
|
+
env_file: EnvFileOpt = None,
|
|
699
|
+
secrets_file: SecretsFileOpt = None,
|
|
700
|
+
timeout: TimeoutOpt = None,
|
|
701
|
+
namespace: NamespaceOpt = None,
|
|
702
|
+
token: TokenOpt = None,
|
|
703
|
+
with_: WithOpt = None,
|
|
704
|
+
python: PythonOpt = None,
|
|
705
|
+
) -> None:
|
|
706
|
+
env_map: dict[str, Optional[str]] = {}
|
|
707
|
+
if env_file:
|
|
708
|
+
env_map.update(load_dotenv(Path(env_file).read_text(), environ=os.environ.copy()))
|
|
709
|
+
for env_value in env or []:
|
|
710
|
+
env_map.update(load_dotenv(env_value, environ=os.environ.copy()))
|
|
711
|
+
secrets_map: dict[str, Optional[str]] = {}
|
|
712
|
+
extended_environ = _get_extended_environ()
|
|
713
|
+
if secrets_file:
|
|
714
|
+
secrets_map.update(load_dotenv(Path(secrets_file).read_text(), environ=extended_environ))
|
|
715
|
+
for secret in secrets or []:
|
|
716
|
+
secrets_map.update(load_dotenv(secret, environ=extended_environ))
|
|
717
|
+
|
|
718
|
+
api = get_hf_api(token=token)
|
|
719
|
+
job = api.create_scheduled_uv_job(
|
|
720
|
+
script=script,
|
|
721
|
+
script_args=script_args or [],
|
|
722
|
+
schedule=schedule,
|
|
723
|
+
suspend=suspend,
|
|
724
|
+
concurrency=concurrency,
|
|
725
|
+
dependencies=with_,
|
|
726
|
+
python=python,
|
|
727
|
+
image=image,
|
|
728
|
+
env=env_map,
|
|
729
|
+
secrets=secrets_map,
|
|
730
|
+
flavor=flavor, # type: ignore[arg-type]
|
|
731
|
+
timeout=timeout,
|
|
732
|
+
namespace=namespace,
|
|
733
|
+
_repo=repo,
|
|
734
|
+
)
|
|
735
|
+
print(f"Scheduled Job created with ID: {job.id}")
|
|
736
|
+
|
|
737
|
+
|
|
738
|
+
### UTILS
|
|
739
|
+
|
|
740
|
+
|
|
741
|
+
def _tabulate(rows: list[list[Union[str, int]]], headers: list[str]) -> str:
|
|
742
|
+
"""
|
|
743
|
+
Inspired by:
|
|
866
744
|
|
|
867
|
-
|
|
868
|
-
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
|
|
874
|
-
|
|
875
|
-
|
|
876
|
-
|
|
877
|
-
|
|
878
|
-
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
self.token: Optional[str] = args.token
|
|
886
|
-
self.scheduled_job_ids: List[str] = args.scheduled_job_ids
|
|
887
|
-
|
|
888
|
-
def run(self) -> None:
|
|
889
|
-
api = HfApi(token=self.token)
|
|
890
|
-
scheduled_jobs = [
|
|
891
|
-
api.inspect_scheduled_job(scheduled_job_id=scheduled_job_id, namespace=self.namespace)
|
|
892
|
-
for scheduled_job_id in self.scheduled_job_ids
|
|
+    - stackoverflow.com/a/8356620/593036
+    - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
+    """
+    col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
+    terminal_width = max(os.get_terminal_size().columns, len(headers) * 12)
+    while len(headers) + sum(col_widths) > terminal_width:
+        col_to_minimize = col_widths.index(max(col_widths))
+        col_widths[col_to_minimize] //= 2
+        if len(headers) + sum(col_widths) <= terminal_width:
+            col_widths[col_to_minimize] = terminal_width - sum(col_widths) - len(headers) + col_widths[col_to_minimize]
+    row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
+    lines = []
+    lines.append(row_format.format(*headers))
+    lines.append(row_format.format(*["-" * w for w in col_widths]))
+    for row in rows:
+        row_format_args = [
+            str(x)[: col_width - 3] + "..." if len(str(x)) > col_width else str(x)
+            for x, col_width in zip(row, col_widths)
         ]
- [… 9 removed lines not captured in this diff view …]
-            "--namespace",
-            type=str,
-            help="The namespace where the scheduled job is. Defaults to the current user's namespace.",
-        )
-        run_parser.add_argument(
-            "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens"
-        )
-        run_parser.set_defaults(func=ScheduledDeleteCommand)
-
-    def __init__(self, args: Namespace) -> None:
-        self.scheduled_job_id: str = args.scheduled_job_id
-        self.namespace = args.namespace
-        self.token: Optional[str] = args.token
-
-    def run(self) -> None:
-        api = HfApi(token=self.token)
-        api.delete_scheduled_job(scheduled_job_id=self.scheduled_job_id, namespace=self.namespace)
-
-
-class ScheduledSuspendCommand(BaseHuggingfaceCLICommand):
-    @staticmethod
-    def register_subcommand(parser: _SubParsersAction) -> None:
-        run_parser = parser.add_parser("suspend", help="Suspend (pause) a scheduled Job")
-        run_parser.add_argument("scheduled_job_id", type=str, help="Scheduled Job ID")
-        run_parser.add_argument(
-            "--namespace",
-            type=str,
-            help="The namespace where the scheduled job is. Defaults to the current user's namespace.",
-        )
-        run_parser.add_argument(
-            "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens"
-        )
-        run_parser.set_defaults(func=ScheduledSuspendCommand)
-
-    def __init__(self, args: Namespace) -> None:
-        self.scheduled_job_id: str = args.scheduled_job_id
-        self.namespace = args.namespace
-        self.token: Optional[str] = args.token
-
-    def run(self) -> None:
-        api = HfApi(token=self.token)
-        api.suspend_scheduled_job(scheduled_job_id=self.scheduled_job_id, namespace=self.namespace)
-
-
-class ScheduledResumeCommand(BaseHuggingfaceCLICommand):
-    @staticmethod
-    def register_subcommand(parser: _SubParsersAction) -> None:
-        run_parser = parser.add_parser("resume", help="Resume (unpause) a scheduled Job")
-        run_parser.add_argument("scheduled_job_id", type=str, help="Scheduled Job ID")
-        run_parser.add_argument(
-            "--namespace",
-            type=str,
-            help="The namespace where the scheduled job is. Defaults to the current user's namespace.",
-        )
-        run_parser.add_argument(
-            "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens"
-        )
-        run_parser.set_defaults(func=ScheduledResumeCommand)
-
-    def __init__(self, args: Namespace) -> None:
-        self.scheduled_job_id: str = args.scheduled_job_id
-        self.namespace = args.namespace
-        self.token: Optional[str] = args.token
-
-    def run(self) -> None:
-        api = HfApi(token=self.token)
-        api.resume_scheduled_job(scheduled_job_id=self.scheduled_job_id, namespace=self.namespace)
-
-
-class ScheduledUvCommand(BaseHuggingfaceCLICommand):
-    """Schedule UV scripts on Hugging Face infrastructure."""
-
-    @staticmethod
-    def register_subcommand(parser):
-        """Register UV run subcommand."""
-        uv_parser = parser.add_parser(
-            "uv",
-            help="Schedule UV scripts (Python with inline dependencies) on HF infrastructure",
-        )
-
-        subparsers = uv_parser.add_subparsers(dest="uv_command", help="UV commands", required=True)
-
-        # Run command only
-        run_parser = subparsers.add_parser(
-            "run",
-            help="Run a UV script (local file or URL) on HF infrastructure",
-        )
-        run_parser.add_argument(
-            "schedule",
-            type=str,
-            help="One of annually, yearly, monthly, weekly, daily, hourly, or a CRON schedule expression.",
-        )
-        run_parser.add_argument("script", help="UV script to run (local file or URL)")
-        run_parser.add_argument("script_args", nargs="...", help="Arguments for the script", default=[])
-        run_parser.add_argument(
-            "--suspend",
-            action="store_true",
-            help="Suspend (pause) the scheduled Job",
-            default=None,
-        )
-        run_parser.add_argument(
-            "--concurrency",
-            action="store_true",
-            help="Allow multiple instances of this Job to run concurrently",
-            default=None,
-        )
-        run_parser.add_argument("--image", type=str, help="Use a custom Docker image with `uv` installed.")
-        run_parser.add_argument(
-            "--repo",
-            help="Repository name for the script (creates ephemeral if not specified)",
-        )
-        run_parser.add_argument(
-            "--flavor",
-            type=str,
-            help=f"Flavor for the hardware, as in HF Spaces. Defaults to `cpu-basic`. Possible values: {', '.join(SUGGESTED_FLAVORS)}.",
-        )
-        run_parser.add_argument("-e", "--env", action="append", help="Environment variables")
-        run_parser.add_argument(
-            "-s",
-            "--secrets",
-            action="append",
-            help=(
-                "Set secret environment variables. E.g. --secrets SECRET=value "
-                "or `--secrets HF_TOKEN` to pass your Hugging Face token."
-            ),
-        )
-        run_parser.add_argument("--env-file", type=str, help="Read in a file of environment variables.")
-        run_parser.add_argument(
-            "--secrets-file",
-            type=str,
-            help="Read in a file of secret environment variables.",
-        )
-        run_parser.add_argument("--timeout", type=str, help="Max duration (e.g., 30s, 5m, 1h)")
-        run_parser.add_argument("-d", "--detach", action="store_true", help="Run in background")
-        run_parser.add_argument(
-            "--namespace",
-            type=str,
-            help="The namespace where the Job will be created. Defaults to the current user's namespace.",
-        )
-        run_parser.add_argument("--token", type=str, help="HF token")
-        # UV options
-        run_parser.add_argument("--with", action="append", help="Run with the given packages installed", dest="with_")
-        run_parser.add_argument(
-            "-p", "--python", type=str, help="The Python interpreter to use for the run environment"
-        )
-        run_parser.set_defaults(func=ScheduledUvCommand)
-
-    def __init__(self, args: Namespace) -> None:
-        """Initialize the command with parsed arguments."""
-        self.schedule: str = args.schedule
-        self.script = args.script
-        self.script_args = args.script_args
-        self.suspend: Optional[bool] = args.suspend
-        self.concurrency: Optional[bool] = args.concurrency
-        self.dependencies = args.with_
-        self.python = args.python
-        self.image = args.image
-        self.env: dict[str, Optional[str]] = {}
-        if args.env_file:
-            self.env.update(load_dotenv(Path(args.env_file).read_text(), environ=os.environ.copy()))
-        for env_value in args.env or []:
-            self.env.update(load_dotenv(env_value, environ=os.environ.copy()))
-        self.secrets: dict[str, Optional[str]] = {}
-        extended_environ = _get_extended_environ()
-        if args.secrets_file:
-            self.secrets.update(load_dotenv(Path(args.secrets_file).read_text(), environ=extended_environ))
-        for secret in args.secrets or []:
-            self.secrets.update(load_dotenv(secret, environ=extended_environ))
-        self.flavor: Optional[SpaceHardware] = args.flavor
-        self.timeout: Optional[str] = args.timeout
-        self.detach: bool = args.detach
-        self.namespace: Optional[str] = args.namespace
-        self.token: Optional[str] = args.token
-        self._repo = args.repo
-
-    def run(self) -> None:
-        """Schedule UV command."""
-        logging.set_verbosity(logging.INFO)
-        api = HfApi(token=self.token)
-        job = api.create_scheduled_uv_job(
-            script=self.script,
-            script_args=self.script_args,
-            schedule=self.schedule,
-            suspend=self.suspend,
-            concurrency=self.concurrency,
-            dependencies=self.dependencies,
-            python=self.python,
-            image=self.image,
-            env=self.env,
-            secrets=self.secrets,
-            flavor=self.flavor,
-            timeout=self.timeout,
-            namespace=self.namespace,
-            _repo=self._repo,
-        )
-
-        # Always print the job ID to the user
-        print(f"Scheduled Job created with ID: {job.id}")
+        lines.append(row_format.format(*row_format_args))
+    return "\n".join(lines)
+
+
+def _get_extended_environ() -> Dict[str, str]:
+    extended_environ = os.environ.copy()
+    if (token := get_token()) is not None:
+        extended_environ["HF_TOKEN"] = token
+    return extended_environ
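`_get_extended_environ` is what makes a bare `--secrets HF_TOKEN` work: the token returned by `get_token()` (e.g. one saved via `hf auth login`) is injected into the mapping that secret names are resolved against, even when HF_TOKEN was never exported in the shell. A small illustration under that assumption:

```python
# Illustration only: a bare secret name resolves against the extended environ.
environ = _get_extended_environ()
if environ.get("HF_TOKEN"):
    print("HF_TOKEN can be forwarded to the scheduled job as a secret")
else:
    print("no stored token; `--secrets HF_TOKEN` would resolve to nothing")
```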