coiled 1.117.2.dev3__tar.gz → 1.129.3.dev13__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of coiled might be problematic. Click here for more details.
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/PKG-INFO +1 -1
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/batch.py +17 -1
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/capture_environment.py +45 -77
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/batch/run.py +145 -14
- coiled-1.129.3.dev13/coiled/cli/batch/util.py +28 -0
- coiled-1.129.3.dev13/coiled/cli/batch/wait.py +108 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/core.py +4 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/curl.py +7 -2
- coiled-1.129.3.dev13/coiled/cli/file.py +116 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/hello/hello.py +6 -5
- coiled-1.129.3.dev13/coiled/cli/mpi.py +252 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/notebook/notebook.py +10 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/run.py +65 -13
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/setup/aws.py +48 -12
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/setup/azure.py +50 -1
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/context.py +2 -2
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/credentials/google.py +1 -20
- coiled-1.129.3.dev13/coiled/filestore.py +458 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/plugins.py +3 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/pypi_conda_map.py +14 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/software_utils.py +140 -5
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/spans.py +2 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/types.py +18 -1
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/utils.py +101 -1
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/v2/cluster.py +25 -3
- coiled-1.129.3.dev13/coiled/v2/cluster_comms.py +72 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/v2/core.py +7 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/pyproject.toml +60 -65
- coiled-1.117.2.dev3/coiled/cli/batch/wait.py +0 -98
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/.gitignore +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/LICENSE +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/README.md +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/__init__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/__main__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/analytics.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/auth.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/__init__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/batch/__init__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/batch/list.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/batch/logs.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/batch/status.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/cluster/__init__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/cluster/azure_logs.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/cluster/better_logs.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/cluster/crud.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/cluster/get_address.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/cluster/list.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/cluster/logs.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/cluster/metrics.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/cluster/ssh.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/cluster/utils.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/config.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/diagnostics.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/env.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/hello/__init__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/hello/examples/__init__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/hello/examples/exit.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/hello/examples/hello_world.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/hello/examples/nyc_parquet.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/hello/examples/pytorch.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/hello/examples/xarray_nwm.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/hello/scripts/fill_ipython.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/hello/scripts/nyc_parquet.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/hello/scripts/pytorch.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/hello/scripts/xarray_nwm.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/hello/utils.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/login.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/notebook/__init__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/package_sync.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/prefect.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/prefect_serve.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/setup/__init__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/setup/amp.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/setup/entry.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/setup/gcp.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/setup/prometheus.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/setup/util.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/sync.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cli/utils.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/cluster.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/coiled.yaml +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/compatibility.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/config.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/core.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/credentials/__init__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/credentials/aws.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/errors.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/exceptions.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/extensions/__init__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/extensions/prefect/__init__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/extensions/prefect/runners.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/extensions/prefect/workers.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/function.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/prefect.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/scan.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/software.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/spark.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/v2/__init__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/v2/cwi_log_link.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/v2/states.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/v2/widgets/__init__.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/v2/widgets/interface.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/v2/widgets/rich.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/v2/widgets/util.py +0 -0
- {coiled-1.117.2.dev3 → coiled-1.129.3.dev13}/coiled/websockets.py +0 -0
|
@@ -17,6 +17,8 @@ def run(
|
|
|
17
17
|
workspace: str | None = None,
|
|
18
18
|
software: str | None = None,
|
|
19
19
|
container: str | None = None,
|
|
20
|
+
run_on_host: bool | None = None,
|
|
21
|
+
cluster_kwargs: dict | None = None,
|
|
20
22
|
env: list | dict | None = None,
|
|
21
23
|
secret_env: list | dict | None = None,
|
|
22
24
|
tag: list | dict | None = None,
|
|
@@ -46,7 +48,11 @@ def run(
|
|
|
46
48
|
package_sync_strict: bool = False,
|
|
47
49
|
package_sync_conda_extras: list | None = None,
|
|
48
50
|
package_sync_ignore: list[str] | None = None,
|
|
51
|
+
local_upload_path: str | None = None,
|
|
52
|
+
buffers_to_upload: list[dict] | None = None,
|
|
49
53
|
host_setup_script: str | None = None,
|
|
54
|
+
host_setup_script_content: str | None = None,
|
|
55
|
+
command_as_script: bool | None = None,
|
|
50
56
|
ignore_container_entrypoint: bool | None = None,
|
|
51
57
|
job_timeout: str | None = None,
|
|
52
58
|
logger=None,
|
|
@@ -61,8 +67,12 @@ def run(
|
|
|
61
67
|
takes a list of dictionaries, so you can specify multiple environment variables for each task.
|
|
62
68
|
For example, ``[{"FOO": 1, "BAR": 2}, {"FOO": 3, "BAR": 4}]`` will pass ``FOO=1 BAR=2`` to one task and
|
|
63
69
|
``FOO=3 BAR=4`` to another.
|
|
70
|
+
buffers_to_upload
|
|
71
|
+
takes a list of dictionaries, each should have path where file should be written on VM(s)
|
|
72
|
+
relative to working directory, and ``io.BytesIO`` which provides content of file,
|
|
73
|
+
for example ``[{"relative_path": "hello.txt", "buffer": io.BytesIO(b"hello")}]``.
|
|
64
74
|
"""
|
|
65
|
-
if isinstance(command, str):
|
|
75
|
+
if isinstance(command, str) and not command.startswith("#!") and not command_as_script:
|
|
66
76
|
command = shlex.split(command)
|
|
67
77
|
|
|
68
78
|
env = dict_to_key_val_list(env)
|
|
@@ -76,6 +86,8 @@ def run(
|
|
|
76
86
|
workspace=workspace,
|
|
77
87
|
software=software,
|
|
78
88
|
container=container,
|
|
89
|
+
run_on_host=run_on_host,
|
|
90
|
+
cluster_kwargs=cluster_kwargs,
|
|
79
91
|
env=env,
|
|
80
92
|
secret_env=secret_env,
|
|
81
93
|
tag=tag,
|
|
@@ -106,7 +118,11 @@ def run(
|
|
|
106
118
|
package_sync_strict=package_sync_strict,
|
|
107
119
|
package_sync_conda_extras=package_sync_conda_extras,
|
|
108
120
|
package_sync_ignore=package_sync_ignore,
|
|
121
|
+
local_upload_path=local_upload_path,
|
|
122
|
+
buffers_to_upload=buffers_to_upload,
|
|
109
123
|
host_setup_script=host_setup_script,
|
|
124
|
+
host_setup_script_content=host_setup_script_content,
|
|
125
|
+
command_as_script=command_as_script,
|
|
110
126
|
ignore_container_entrypoint=ignore_container_entrypoint,
|
|
111
127
|
job_timeout=job_timeout,
|
|
112
128
|
logger=logger,
|
|
@@ -1,6 +1,4 @@
|
|
|
1
|
-
import asyncio
|
|
2
1
|
import contextlib
|
|
3
|
-
import logging
|
|
4
2
|
import platform
|
|
5
3
|
import sys
|
|
6
4
|
import typing
|
|
@@ -69,49 +67,52 @@ async def approximate_packages(
|
|
|
69
67
|
architecture: ArchitectureTypesEnum = ArchitectureTypesEnum.X86_64,
|
|
70
68
|
pip_check_errors: Optional[Dict[str, List[str]]] = None,
|
|
71
69
|
gpu_enabled: bool = False,
|
|
70
|
+
use_uv_installer: bool = True,
|
|
72
71
|
) -> typing.List[ResolvedPackageInfo]:
|
|
73
72
|
user_conda_installed_python = next((p for p in packages if p["name"] == "python"), None)
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
if not user_conda_installed_pip:
|
|
79
|
-
# This means pip was installed by pip, or the system
|
|
80
|
-
# package manager
|
|
81
|
-
# Insert a conda version of pip to be installed first, it will
|
|
82
|
-
# then be used to install the users version of pip
|
|
83
|
-
pip = next(
|
|
84
|
-
(p for p in packages if p["name"] == "pip" and p["source"] == "pip"),
|
|
73
|
+
# Only add pip if we need it
|
|
74
|
+
if not use_uv_installer:
|
|
75
|
+
user_conda_installed_pip = next(
|
|
76
|
+
(i for i, p in enumerate(packages) if p["name"] == "pip" and p["source"] == "conda"),
|
|
85
77
|
None,
|
|
86
78
|
)
|
|
87
|
-
if not
|
|
88
|
-
#
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
"
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
79
|
+
if not user_conda_installed_pip:
|
|
80
|
+
# This means pip was installed by pip, or the system
|
|
81
|
+
# package manager
|
|
82
|
+
# Insert a conda version of pip to be installed first, it will
|
|
83
|
+
# then be used to install the users version of pip
|
|
84
|
+
pip = next(
|
|
85
|
+
(p for p in packages if p["name"] == "pip" and p["source"] == "pip"),
|
|
86
|
+
None,
|
|
87
|
+
)
|
|
88
|
+
if not pip:
|
|
89
|
+
# insert a modern version and hope it does not introduce conflicts
|
|
90
|
+
packages.append({
|
|
91
|
+
"name": "pip",
|
|
92
|
+
"path": None,
|
|
93
|
+
"source": "conda",
|
|
94
|
+
"channel_url": "https://conda.anaconda.org/conda-forge/",
|
|
95
|
+
"channel": "conda-forge",
|
|
96
|
+
"subdir": "noarch",
|
|
97
|
+
"conda_name": "pip",
|
|
98
|
+
"version": "22.3.1",
|
|
99
|
+
"wheel_target": None,
|
|
100
|
+
"requested": False,
|
|
101
|
+
})
|
|
102
|
+
else:
|
|
103
|
+
# insert the users pip version and hope it exists on conda-forge
|
|
104
|
+
packages.append({
|
|
105
|
+
"name": "pip",
|
|
106
|
+
"path": None,
|
|
107
|
+
"source": "conda",
|
|
108
|
+
"channel_url": "https://conda.anaconda.org/conda-forge/",
|
|
109
|
+
"channel": "conda-forge",
|
|
110
|
+
"subdir": "noarch",
|
|
111
|
+
"conda_name": "pip",
|
|
112
|
+
"version": pip["version"],
|
|
113
|
+
"wheel_target": None,
|
|
114
|
+
"requested": True,
|
|
115
|
+
})
|
|
115
116
|
coiled_selected_python = None
|
|
116
117
|
if not user_conda_installed_python:
|
|
117
118
|
# insert a special python package
|
|
@@ -208,6 +209,7 @@ async def create_environment_approximation(
|
|
|
208
209
|
progress: Optional[Progress] = None,
|
|
209
210
|
architecture: ArchitectureTypesEnum = ArchitectureTypesEnum.X86_64,
|
|
210
211
|
gpu_enabled: bool = False,
|
|
212
|
+
use_uv_installer: bool = True,
|
|
211
213
|
) -> typing.List[ResolvedPackageInfo]:
|
|
212
214
|
packages = await scan_prefix(progress=progress)
|
|
213
215
|
pip_check_errors = await check_pip_happy(progress)
|
|
@@ -237,6 +239,7 @@ async def create_environment_approximation(
|
|
|
237
239
|
architecture=architecture,
|
|
238
240
|
pip_check_errors=pip_check_errors,
|
|
239
241
|
gpu_enabled=gpu_enabled,
|
|
242
|
+
use_uv_installer=use_uv_installer,
|
|
240
243
|
)
|
|
241
244
|
return result
|
|
242
245
|
|
|
@@ -306,6 +309,7 @@ async def scan_and_create(
|
|
|
306
309
|
architecture=architecture,
|
|
307
310
|
gpu_enabled=gpu_enabled,
|
|
308
311
|
conda_extras=package_sync_conda_extras,
|
|
312
|
+
use_uv_installer=use_uv_installer,
|
|
309
313
|
)
|
|
310
314
|
|
|
311
315
|
if not package_sync_only:
|
|
@@ -427,39 +431,3 @@ If you use pip, venv, uv, pixi, etc. create a new environment and then:
|
|
|
427
431
|
|
|
428
432
|
See https://docs.coiled.io/user_guide/software/package_sync_best_practices.html
|
|
429
433
|
for more best practices. If that doesn't solve your issue, please contact support@coiled.io.""")
|
|
430
|
-
|
|
431
|
-
|
|
432
|
-
if __name__ == "__main__":
|
|
433
|
-
from logging import basicConfig
|
|
434
|
-
|
|
435
|
-
basicConfig(level=logging.INFO)
|
|
436
|
-
|
|
437
|
-
from rich.console import Console
|
|
438
|
-
from rich.table import Table
|
|
439
|
-
|
|
440
|
-
async def run():
|
|
441
|
-
async with CloudV2(asynchronous=True) as cloud:
|
|
442
|
-
return await create_environment_approximation(
|
|
443
|
-
cloud=cloud,
|
|
444
|
-
priorities={
|
|
445
|
-
("dask", "conda"): PackageLevelEnum.CRITICAL,
|
|
446
|
-
("twisted", "conda"): PackageLevelEnum.IGNORE,
|
|
447
|
-
("graphviz", "conda"): PackageLevelEnum.LOOSE,
|
|
448
|
-
("icu", "conda"): PackageLevelEnum.LOOSE,
|
|
449
|
-
},
|
|
450
|
-
)
|
|
451
|
-
|
|
452
|
-
result = asyncio.run(run())
|
|
453
|
-
|
|
454
|
-
table = Table(title="Packages")
|
|
455
|
-
keys = ("name", "source", "include", "client_version", "specifier", "error", "note")
|
|
456
|
-
|
|
457
|
-
for key in keys:
|
|
458
|
-
table.add_column(key)
|
|
459
|
-
|
|
460
|
-
for pkg in result:
|
|
461
|
-
row_values = [str(pkg.get(key, "")) for key in keys]
|
|
462
|
-
table.add_row(*row_values)
|
|
463
|
-
console = Console()
|
|
464
|
-
console.print(table)
|
|
465
|
-
console.print(table)
|
|
@@ -15,10 +15,13 @@ from rich.console import Console
|
|
|
15
15
|
from rich.panel import Panel
|
|
16
16
|
|
|
17
17
|
import coiled
|
|
18
|
+
from coiled.cli.batch.util import load_sidecar_spec
|
|
19
|
+
from coiled.cli.batch.wait import batch_job_wait
|
|
18
20
|
from coiled.cli.curl import sync_request
|
|
19
21
|
from coiled.cli.run import dict_from_key_val_list
|
|
20
22
|
from coiled.cli.utils import CONTEXT_SETTINGS, fix_path_for_upload
|
|
21
23
|
from coiled.credentials.aws import get_aws_local_session_token
|
|
24
|
+
from coiled.filestore import FilestoreManager, upload_to_filestore_with_ui
|
|
22
25
|
from coiled.utils import COILED_LOGGER_NAME, error_info_for_tracking, supress_logs
|
|
23
26
|
|
|
24
27
|
console = Console(width=80)
|
|
@@ -103,7 +106,7 @@ def handle_possible_implicit_file(implicit_file):
|
|
|
103
106
|
"remote_path": f"/scratch/{remote_rel_dir}{remote_base}",
|
|
104
107
|
"content": file_content,
|
|
105
108
|
}
|
|
106
|
-
elif any(implicit_file.endswith(t) for t in UPLOAD_FILE_TYPES):
|
|
109
|
+
elif any(implicit_file.endswith(t) and "$" not in implicit_file for t in UPLOAD_FILE_TYPES):
|
|
107
110
|
console.print(
|
|
108
111
|
f"[orange1]WARNING:[/orange1] {implicit_file} appears to be a filename, "
|
|
109
112
|
"but this file not found locally and will not be copied to VMs",
|
|
@@ -193,6 +196,7 @@ def get_kwargs_from_header(f: dict, click_params: list):
|
|
|
193
196
|
"default is to use the entrypoint (if any) set on the image."
|
|
194
197
|
),
|
|
195
198
|
)
|
|
199
|
+
@click.option("--run-on-host", default=None, help="Run code directly on host, not inside docker container.")
|
|
196
200
|
@click.option(
|
|
197
201
|
"--env",
|
|
198
202
|
"-e",
|
|
@@ -335,6 +339,50 @@ def get_kwargs_from_header(f: dict, click_params: list):
|
|
|
335
339
|
"into individual values. By default this is ',' for ``--map-over-values`` and newline for ``--map-over-file``."
|
|
336
340
|
),
|
|
337
341
|
)
|
|
342
|
+
@click.option("--wait", default=None, is_flag=True)
|
|
343
|
+
@click.option(
|
|
344
|
+
"--upload",
|
|
345
|
+
"local_upload_path",
|
|
346
|
+
default=None,
|
|
347
|
+
type=str,
|
|
348
|
+
help=(
|
|
349
|
+
"File or directory to upload to cloud storage and download onto the VM(s). "
|
|
350
|
+
"By default files will be copied into the working directory on VM where your batch script runs."
|
|
351
|
+
),
|
|
352
|
+
)
|
|
353
|
+
@click.option(
|
|
354
|
+
"--download",
|
|
355
|
+
"local_download_path",
|
|
356
|
+
default=None,
|
|
357
|
+
type=str,
|
|
358
|
+
help=(
|
|
359
|
+
"When used with ``--wait``, output files from job will be downloaded into this local directory "
|
|
360
|
+
"when job is complete. When used without ``--wait``, files won't be automatically downloaded, "
|
|
361
|
+
"but job will be configured to store result files in cloud storage for later download."
|
|
362
|
+
),
|
|
363
|
+
)
|
|
364
|
+
@click.option(
|
|
365
|
+
"--sync",
|
|
366
|
+
"local_sync_path",
|
|
367
|
+
default=None,
|
|
368
|
+
type=str,
|
|
369
|
+
help="Equivalent to specifying both ``--upload`` and ``--download`` with the same local directory.",
|
|
370
|
+
)
|
|
371
|
+
@click.option(
|
|
372
|
+
"--pipe-to-files",
|
|
373
|
+
default=None,
|
|
374
|
+
is_flag=True,
|
|
375
|
+
help=(
|
|
376
|
+
"Write stdout and stderr from each task to files which can be downloaded when job is complete. "
|
|
377
|
+
"This is in addition to sending stdout and stderr to logs, and is more convenient than logs for when "
|
|
378
|
+
"you want to use outputs from tasks as inputs to further processing)."
|
|
379
|
+
),
|
|
380
|
+
)
|
|
381
|
+
@click.option("--input-filestore", default=None, type=str, help="Name of input filestore")
|
|
382
|
+
@click.option("--output-filestore", default=None, type=str, help="Name of output filestore")
|
|
383
|
+
@click.option(
|
|
384
|
+
"--scheduler-sidecar-spec", default=None, type=str, help="Filename for scheduler sidecar spec (yaml or json)"
|
|
385
|
+
)
|
|
338
386
|
@click.option(
|
|
339
387
|
"--ntasks",
|
|
340
388
|
"--n-tasks",
|
|
@@ -425,7 +473,8 @@ def get_kwargs_from_header(f: dict, click_params: list):
|
|
|
425
473
|
"For example, you can specify '30 minutes' or '1 hour'. Default is no timeout."
|
|
426
474
|
),
|
|
427
475
|
)
|
|
428
|
-
@click.
|
|
476
|
+
@click.option("--dask-container", default=None, type=str)
|
|
477
|
+
@click.argument("command", nargs=-1, required=True)
|
|
429
478
|
def batch_run_cli(ctx, **kwargs):
|
|
430
479
|
"""
|
|
431
480
|
Submit a batch job to run on Coiled.
|
|
@@ -454,6 +503,14 @@ def batch_run_cli(ctx, **kwargs):
|
|
|
454
503
|
|
|
455
504
|
def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
|
|
456
505
|
command = kwargs["command"]
|
|
506
|
+
user_files = []
|
|
507
|
+
|
|
508
|
+
if isinstance(command, str) and (command.startswith("#!") or kwargs.get("command_as_script")):
|
|
509
|
+
user_files.append({
|
|
510
|
+
"path": "script",
|
|
511
|
+
"content": command,
|
|
512
|
+
})
|
|
513
|
+
command = ["script"]
|
|
457
514
|
|
|
458
515
|
# Handle command as string case (e.g. `coiled batch run "python myscript.py"`)
|
|
459
516
|
if len(command) == 1:
|
|
@@ -475,7 +532,6 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
|
|
|
475
532
|
# unescape escaped COILED env vars in command
|
|
476
533
|
command = [part.replace("\\$COILED", "$COILED") for part in command]
|
|
477
534
|
|
|
478
|
-
user_files = []
|
|
479
535
|
kwargs_from_header = None
|
|
480
536
|
|
|
481
537
|
# identify implicit files referenced in commands like "python foo.py" or "foo.sh"
|
|
@@ -666,8 +722,8 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
|
|
|
666
722
|
if user_files_from_content:
|
|
667
723
|
user_files.extend(user_files_from_content)
|
|
668
724
|
|
|
669
|
-
host_setup_content =
|
|
670
|
-
if kwargs["host_setup_script"]:
|
|
725
|
+
host_setup_content = kwargs.get("host_setup_script_content")
|
|
726
|
+
if not host_setup_content and kwargs["host_setup_script"]:
|
|
671
727
|
with open(kwargs["host_setup_script"]) as f:
|
|
672
728
|
host_setup_content = f.read()
|
|
673
729
|
|
|
@@ -707,6 +763,12 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
|
|
|
707
763
|
|
|
708
764
|
batch_job_container = f"{kwargs['container']}!" if kwargs["ignore_container_entrypoint"] else kwargs["container"]
|
|
709
765
|
|
|
766
|
+
scheduler_sidecars = load_sidecar_spec(kwargs.get("scheduler_sidecar_spec"))
|
|
767
|
+
|
|
768
|
+
dask_container = (
|
|
769
|
+
kwargs.get("dask_container") or dask.config.get("coiled.batch.dask-container", None) or "ghcr.io/dask/dask"
|
|
770
|
+
)
|
|
771
|
+
|
|
710
772
|
cluster_kwargs = {
|
|
711
773
|
"name": kwargs["name"],
|
|
712
774
|
"workspace": kwargs["workspace"],
|
|
@@ -719,7 +781,9 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
|
|
|
719
781
|
# if batch job is running in extra container, then we just need a pretty minimal dask container
|
|
720
782
|
# so for now switch the default in that case to basic dask container
|
|
721
783
|
# TODO would it be better to use a pre-built senv with our `cloud-env-run` container instead?
|
|
722
|
-
"container":
|
|
784
|
+
"container": dask_container
|
|
785
|
+
if (kwargs["container"] or kwargs.get("run_on_host")) and not kwargs["software"]
|
|
786
|
+
else None,
|
|
723
787
|
"region": kwargs["region"],
|
|
724
788
|
"scheduler_options": {
|
|
725
789
|
"idle_timeout": "520 weeks", # TODO allow job timeout?
|
|
@@ -739,6 +803,8 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
|
|
|
739
803
|
"package_sync_conda_extras": kwargs.get("package_sync_conda_extras"),
|
|
740
804
|
"package_sync_ignore": kwargs.get("package_sync_ignore"),
|
|
741
805
|
"allow_cross_zone": True if kwargs["allow_cross_zone"] is None else kwargs["allow_cross_zone"],
|
|
806
|
+
"scheduler_sidecars": scheduler_sidecars,
|
|
807
|
+
**(kwargs.get("cluster_kwargs") or {}),
|
|
742
808
|
}
|
|
743
809
|
|
|
744
810
|
# when task will run on scheduler, give it the same VM specs as worker node
|
|
@@ -758,7 +824,7 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
|
|
|
758
824
|
|
|
759
825
|
# Create a job
|
|
760
826
|
job_spec = {
|
|
761
|
-
"user_command":
|
|
827
|
+
"user_command": coiled.utils.join_command_parts(command),
|
|
762
828
|
"user_files": user_files,
|
|
763
829
|
**job_array_kwargs,
|
|
764
830
|
"scheduler_task_array": scheduler_task_ids,
|
|
@@ -771,8 +837,10 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
|
|
|
771
837
|
# Avoid possibly breaking prefect batch jobs
|
|
772
838
|
# https://github.com/coiled/platform/pull/8655#pullrequestreview-2826448869
|
|
773
839
|
"workdir": None if "flow-run-id" in tags else "/scratch/batch",
|
|
840
|
+
"pipe_to_files": bool(kwargs.get("pipe_to_files")),
|
|
774
841
|
"host_setup": host_setup_content,
|
|
775
842
|
"job_timeout_seconds": parse_timedelta(kwargs["job_timeout"]) if kwargs["job_timeout"] else None,
|
|
843
|
+
"run_in_container": not kwargs.get("run_on_host"),
|
|
776
844
|
}
|
|
777
845
|
|
|
778
846
|
with coiled.Cloud(workspace=kwargs["workspace"]) as cloud:
|
|
@@ -796,6 +864,48 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
|
|
|
796
864
|
|
|
797
865
|
job_id = response["id"]
|
|
798
866
|
|
|
867
|
+
filestores_to_attach = []
|
|
868
|
+
|
|
869
|
+
for sidecar in scheduler_sidecars:
|
|
870
|
+
for attachment in sidecar.get("filestores") or []:
|
|
871
|
+
filestores_to_attach.append({"worker": False, "input": True, "output": True, **attachment})
|
|
872
|
+
|
|
873
|
+
# only create and attach filestores if using upload or indicate desire to store results
|
|
874
|
+
if (
|
|
875
|
+
kwargs.get("local_upload_path")
|
|
876
|
+
or kwargs.get("local_sync_path")
|
|
877
|
+
or kwargs.get("local_download_path")
|
|
878
|
+
or kwargs.get("pipe_to_files")
|
|
879
|
+
or kwargs.get("input_filestore")
|
|
880
|
+
or kwargs.get("output_filestore")
|
|
881
|
+
or kwargs.get("buffers_to_upload")
|
|
882
|
+
):
|
|
883
|
+
fs_base_name = kwargs["name"] or f"batch-job-{job_id}"
|
|
884
|
+
|
|
885
|
+
in_fs_name = kwargs.get("input_filestore") or f"{fs_base_name}-input"
|
|
886
|
+
out_fs_name = kwargs.get("output_filestore") or f"{fs_base_name}-output"
|
|
887
|
+
|
|
888
|
+
filestores = FilestoreManager.get_or_create_filestores(
|
|
889
|
+
names=[in_fs_name, out_fs_name],
|
|
890
|
+
workspace=job_spec["workspace"],
|
|
891
|
+
region=kwargs["region"],
|
|
892
|
+
)
|
|
893
|
+
|
|
894
|
+
in_fs = filestores[0]
|
|
895
|
+
out_fs = filestores[1]
|
|
896
|
+
|
|
897
|
+
filestores_to_attach.extend([
|
|
898
|
+
{"id": in_fs["id"], "input": True, "path": "/scratch/batch/", "primary": True},
|
|
899
|
+
{"id": out_fs["id"], "output": True, "path": "/scratch/batch/", "primary": True},
|
|
900
|
+
])
|
|
901
|
+
|
|
902
|
+
if kwargs.get("local_upload_path") or kwargs.get("local_sync_path") or kwargs.get("buffers_to_upload"):
|
|
903
|
+
upload_to_filestore_with_ui(
|
|
904
|
+
fs=in_fs,
|
|
905
|
+
local_dir=kwargs.get("local_upload_path") or kwargs.get("local_sync_path"),
|
|
906
|
+
file_buffers=kwargs.get("buffers_to_upload"),
|
|
907
|
+
)
|
|
908
|
+
|
|
799
909
|
# Run the job on a cluster
|
|
800
910
|
with supress_logs([COILED_LOGGER_NAME], level=logging.WARNING):
|
|
801
911
|
cluster = coiled.Cluster(
|
|
@@ -804,9 +914,16 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
|
|
|
804
914
|
**cluster_kwargs,
|
|
805
915
|
)
|
|
806
916
|
|
|
917
|
+
# TODO support for attaching as part of create request
|
|
918
|
+
if filestores_to_attach:
|
|
919
|
+
FilestoreManager.attach_filestores_to_cluster(
|
|
920
|
+
cluster_id=cluster.cluster_id,
|
|
921
|
+
attachments=filestores_to_attach,
|
|
922
|
+
)
|
|
923
|
+
|
|
807
924
|
if logger:
|
|
808
925
|
message = f"""
|
|
809
|
-
Command: {
|
|
926
|
+
Command: {coiled.utils.join_command_parts(command)}
|
|
810
927
|
Cluster ID: {cluster.cluster_id}
|
|
811
928
|
URL: {cluster.details_url}
|
|
812
929
|
Tasks: {n_tasks}
|
|
@@ -822,17 +939,31 @@ Tasks: {n_tasks}
|
|
|
822
939
|
status_command = f"{status_command} --workspace {kwargs['workspace']}"
|
|
823
940
|
else:
|
|
824
941
|
status_command = f"coiled.batch.status({cluster.cluster_id})"
|
|
825
|
-
|
|
826
|
-
|
|
827
|
-
[bold]Cluster ID[/]: [bright_blue]{cluster.cluster_id}[/]
|
|
828
|
-
[bold]URL[/]: [link][bright_blue]{cluster.details_url}[/bright_blue][/link]
|
|
829
|
-
[bold]Tasks[/]: [bright_blue]{n_tasks}[/]
|
|
942
|
+
track_status_message = (
|
|
943
|
+
f"""
|
|
830
944
|
|
|
831
945
|
To track progress run:
|
|
832
946
|
|
|
833
947
|
[green]{status_command}[/]
|
|
834
|
-
|
|
948
|
+
"""
|
|
949
|
+
if not kwargs.get("wait")
|
|
950
|
+
else ""
|
|
951
|
+
)
|
|
952
|
+
|
|
953
|
+
message = f"""
|
|
954
|
+
[bold]Command[/]: [bright_blue]{coiled.utils.join_command_parts(command)}[/]
|
|
955
|
+
[bold]Cluster ID[/]: [bright_blue]{cluster.cluster_id}[/]
|
|
956
|
+
[bold]URL[/]: [link][bright_blue]{cluster.details_url}[/bright_blue][/link]
|
|
957
|
+
[bold]Tasks[/]: [bright_blue]{n_tasks}[/]
|
|
958
|
+
{track_status_message}{extra_message}"""
|
|
835
959
|
|
|
836
960
|
console.print(Panel(message, title="Coiled Batch"))
|
|
837
961
|
|
|
962
|
+
if kwargs.get("wait") and cluster.cluster_id:
|
|
963
|
+
batch_job_wait(
|
|
964
|
+
cluster_id=cluster.cluster_id,
|
|
965
|
+
workspace=job_spec["workspace"],
|
|
966
|
+
download=kwargs.get("local_download_path") or kwargs.get("local_sync_path"),
|
|
967
|
+
)
|
|
968
|
+
|
|
838
969
|
return {"cluster_id": cluster.cluster_id, "cluster_name": cluster.name, "job_id": job_id}
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
|
|
5
|
+
import yaml
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def load_sidecar_spec(spec_path: str | None):
|
|
9
|
+
scheduler_sidecars = []
|
|
10
|
+
if spec_path:
|
|
11
|
+
with open(spec_path) as f:
|
|
12
|
+
if spec_path.endswith((".yaml", ".yml")):
|
|
13
|
+
sidecar_spec = yaml.safe_load(f)
|
|
14
|
+
elif spec_path.endswith(".json"):
|
|
15
|
+
sidecar_spec = json.load(f)
|
|
16
|
+
else:
|
|
17
|
+
raise ValueError(f"Unknown format for {spec_path}, json or yaml expected.")
|
|
18
|
+
|
|
19
|
+
# support either list-like or dict-like
|
|
20
|
+
if isinstance(sidecar_spec, list):
|
|
21
|
+
scheduler_sidecars = sidecar_spec
|
|
22
|
+
if isinstance(sidecar_spec, dict):
|
|
23
|
+
scheduler_sidecars = [{"name": key, **val} for key, val in sidecar_spec.items()]
|
|
24
|
+
|
|
25
|
+
for sidecar in scheduler_sidecars:
|
|
26
|
+
# allow `image` as the key, to match docker compose spec
|
|
27
|
+
sidecar["container"] = sidecar.get("container") or sidecar.get("image")
|
|
28
|
+
return scheduler_sidecars
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import itertools
|
|
4
|
+
import time
|
|
5
|
+
from collections import Counter
|
|
6
|
+
|
|
7
|
+
import click
|
|
8
|
+
from rich.align import Align
|
|
9
|
+
from rich.console import Console, Group
|
|
10
|
+
from rich.progress import BarColumn, TextColumn
|
|
11
|
+
from rich.status import Status
|
|
12
|
+
|
|
13
|
+
import coiled
|
|
14
|
+
|
|
15
|
+
from ..cluster.utils import find_cluster
|
|
16
|
+
from ..utils import CONTEXT_SETTINGS
|
|
17
|
+
|
|
18
|
+
console = Console(width=80)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument("cluster", default="", required=False)
@click.option(
    "--workspace",
    default=None,
    help="Coiled workspace (uses default workspace if not specified).",
)
@click.option(
    "--download",
    default=None,
)
def batch_wait_cli(
    cluster: str,
    workspace: str | None,
    download: str | None,
):
    """Monitor the progress of a Coiled Batch job."""
    with coiled.Cloud(workspace=workspace) as cloud:
        # Resolve the (possibly empty) cluster argument to a concrete cluster ID.
        cluster_id = find_cluster(cloud, cluster)["id"]
        # Bail out early when the cluster has no batch jobs to monitor.
        if not coiled.batch.status(cluster=cluster_id, workspace=workspace):
            print(f"No batch jobs for cluster {cluster_id}")
            return
        batch_job_wait(cluster_id=cluster_id, workspace=workspace, download=download)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def batch_job_wait(cluster_id: int, workspace: str | None, download: str | None):
    """Poll a batch cluster until all tasks complete, showing live progress bars.

    Polls ``coiled.batch.status`` every 2 seconds and renders Pending /
    Processing / Done / Error bars. A task with a non-zero ``exit_code`` is
    counted as an error regardless of its reported state. Stops when every
    task is done/errored or the cluster itself enters the ``error`` state.

    Parameters
    ----------
    cluster_id:
        ID of the cluster running the batch job(s).
    workspace:
        Coiled workspace; the default workspace is used when None.
    download:
        Local path to download primary output filestores into after the job
        finishes; skipped when None or when the cluster errored.
    """
    cluster_errored = False
    with coiled.utils.SimpleRichProgressPanel(
        TextColumn("[progress.description]{task.description}"),
        BarColumn(complete_style="progress.remaining"),
        TextColumn("[progress.percentage]{task.completed}"),
        console=console,
        batch_title="",
    ) as progress:
        progress_tasks = {
            "pending": progress.add_task("[yellow]Pending"),
            "running": progress.add_task("[green]Processing"),
            "done": progress.add_task("[blue]Done"),
            "error": progress.add_task("[red]Error"),
        }
        done = False
        while not done:
            jobs = coiled.batch.status(cluster=cluster_id, workspace=workspace)
            if jobs:
                # Guard against an empty poll result; format_batch_title reads jobs[0].
                progress.batch_title = format_batch_title(jobs)  # type: ignore
            tasks = list(itertools.chain.from_iterable(job["tasks"] for job in jobs))
            # A non-zero exit code counts as an error, whatever the reported state.
            states = [task["state"] if not task["exit_code"] else "error" for task in tasks]
            counts = Counter(states)
            for state, progress_task in progress_tasks.items():
                # Show tasks in the "assigned" state in the "Processing" bar along
                # with "running" tasks. (Previously the mapping was applied to the
                # bar keys — which never include "assigned" — so assigned tasks
                # were not counted in any bar.)
                completed = counts[state] + (counts["assigned"] if state == "running" else 0)
                progress.update(progress_task, total=len(tasks), completed=completed, refresh=True)

            all_tasks_completed = (counts["done"] + counts["error"]) == len(tasks)
            cluster_errored = bool(jobs) and jobs[0]["cluster_state"] == "error"
            done = all_tasks_completed or cluster_errored
            if not done:
                time.sleep(2)

    if download and not cluster_errored:
        attachments = coiled.filestore.wait_until_complete(cluster_id)

        if not attachments:
            print("No filestores found for this job")
            return

        for attachment in attachments:
            # Only the primary output filestore(s) are downloaded locally.
            if attachment["output"] and attachment["primary"]:
                coiled.filestore.download_from_filestore_with_ui(
                    fs=attachment["filestore"],
                    into=download,
                )

        coiled.filestore.clear_filestores_with_ui([a["filestore"] for a in attachments if a["primary"]])
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def format_batch_title(jobs):
    """Build the renderable shown as the progress-panel title.

    Reads the cluster id, cluster state, and user command from the first job
    and returns a centered rich ``Group`` with a spinner status line and the
    command being run.
    """
    first = jobs[0]
    status_line = Status(
        f"Monitoring jobs for cluster {first['cluster_id']} ([bold]{first['cluster_state']}[/bold])",
        spinner="dots",
    )
    command_line = f"[bold]Command:[/bold] [green]{first['user_command']}[/green]"
    return Group(Align.center(status_line), Align.center(command_line))
|
|
@@ -7,8 +7,10 @@ from .config import config
|
|
|
7
7
|
from .curl import curl
|
|
8
8
|
from .diagnostics import diagnostics
|
|
9
9
|
from .env import env
|
|
10
|
+
from .file import file_group
|
|
10
11
|
from .hello import hello
|
|
11
12
|
from .login import login
|
|
13
|
+
from .mpi import mpi_group
|
|
12
14
|
from .notebook import notebook_group
|
|
13
15
|
from .package_sync import package_sync
|
|
14
16
|
from .prefect import prefect
|
|
@@ -40,3 +42,5 @@ cli.add_command(batch_group)
|
|
|
40
42
|
# Register top-level CLI subcommands; the optional second argument overrides
# the user-facing command name (e.g. `hello` is also exposed as `quickstart`).
cli.add_command(better_logs, "logs")
cli.add_command(hello)
cli.add_command(hello, "quickstart")
cli.add_command(file_group)
cli.add_command(mpi_group, "mpi")
|