coiled 1.111.4.dev5__tar.gz → 1.128.3.dev21__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/PKG-INFO +2 -3
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/batch.py +19 -1
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/capture_environment.py +48 -75
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/batch/run.py +174 -21
- coiled-1.128.3.dev21/coiled/cli/batch/util.py +28 -0
- coiled-1.128.3.dev21/coiled/cli/batch/wait.py +108 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/core.py +4 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/curl.py +7 -2
- coiled-1.128.3.dev21/coiled/cli/file.py +116 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/hello/hello.py +6 -5
- coiled-1.128.3.dev21/coiled/cli/mpi.py +252 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/notebook/notebook.py +33 -2
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/package_sync.py +2 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/run.py +94 -26
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/setup/aws.py +48 -12
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/context.py +2 -2
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/credentials/google.py +1 -20
- coiled-1.128.3.dev21/coiled/filestore.py +458 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/plugins.py +3 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/pypi_conda_map.py +14 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/scan.py +113 -83
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/software_utils.py +158 -15
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/spans.py +2 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/types.py +22 -1
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/utils.py +115 -7
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/v2/cluster.py +90 -38
- coiled-1.128.3.dev21/coiled/v2/cluster_comms.py +72 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/v2/core.py +31 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/pyproject.toml +60 -66
- coiled-1.111.4.dev5/coiled/cli/batch/wait.py +0 -98
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/.gitignore +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/LICENSE +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/README.md +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/__init__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/__main__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/analytics.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/auth.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/__init__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/batch/__init__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/batch/list.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/batch/logs.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/batch/status.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/cluster/__init__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/cluster/azure_logs.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/cluster/better_logs.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/cluster/crud.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/cluster/get_address.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/cluster/list.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/cluster/logs.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/cluster/metrics.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/cluster/ssh.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/cluster/utils.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/config.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/diagnostics.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/env.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/hello/__init__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/hello/examples/__init__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/hello/examples/exit.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/hello/examples/hello_world.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/hello/examples/nyc_parquet.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/hello/examples/pytorch.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/hello/examples/xarray_nwm.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/hello/scripts/fill_ipython.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/hello/scripts/nyc_parquet.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/hello/scripts/pytorch.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/hello/scripts/xarray_nwm.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/hello/utils.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/login.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/notebook/__init__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/prefect.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/prefect_serve.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/setup/__init__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/setup/amp.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/setup/azure.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/setup/entry.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/setup/gcp.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/setup/prometheus.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/setup/util.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/sync.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cli/utils.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/cluster.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/coiled.yaml +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/compatibility.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/config.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/core.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/credentials/__init__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/credentials/aws.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/errors.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/exceptions.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/extensions/__init__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/extensions/prefect/__init__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/extensions/prefect/runners.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/extensions/prefect/workers.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/function.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/prefect.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/software.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/spark.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/v2/__init__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/v2/cwi_log_link.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/v2/states.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/v2/widgets/__init__.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/v2/widgets/interface.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/v2/widgets/rich.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/v2/widgets/util.py +0 -0
- {coiled-1.111.4.dev5 → coiled-1.128.3.dev21}/coiled/websockets.py +0 -0
--- coiled-1.111.4.dev5/PKG-INFO
+++ coiled-1.128.3.dev21/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: coiled
-Version: 1.111.4.dev5
+Version: 1.128.3.dev21
 Summary: Python client for coiled.io dask clusters
 Project-URL: Homepage, https://coiled.io
 Maintainer-email: Coiled <info@coiled.io>
@@ -25,10 +25,9 @@ Requires-Dist: jsondiff
 Requires-Dist: packaging
 Requires-Dist: paramiko>=2.4
 Requires-Dist: pip-requirements-parser
-Requires-Dist: pip>=
+Requires-Dist: pip>=20.2
 Requires-Dist: prometheus-client
 Requires-Dist: rich>=11.2.0
-Requires-Dist: setuptools>=49.3.0
 Requires-Dist: toml
 Requires-Dist: typing-extensions
 Requires-Dist: wheel
--- coiled-1.111.4.dev5/coiled/batch.py
+++ coiled-1.128.3.dev21/coiled/batch.py
@@ -17,6 +17,8 @@ def run(
     workspace: str | None = None,
     software: str | None = None,
     container: str | None = None,
+    run_on_host: bool | None = None,
+    cluster_kwargs: dict | None = None,
     env: list | dict | None = None,
     secret_env: list | dict | None = None,
     tag: list | dict | None = None,
@@ -45,7 +47,12 @@ def run(
     forward_aws_credentials: bool | None = None,
     package_sync_strict: bool = False,
     package_sync_conda_extras: list | None = None,
+    package_sync_ignore: list[str] | None = None,
+    local_upload_path: str | None = None,
+    buffers_to_upload: list[dict] | None = None,
     host_setup_script: str | None = None,
+    host_setup_script_content: str | None = None,
+    command_as_script: bool | None = None,
     ignore_container_entrypoint: bool | None = None,
     job_timeout: str | None = None,
     logger=None,
@@ -60,8 +67,12 @@ def run(
         takes a list of dictionaries, so you can specify multiple environment variables for each task.
         For example, ``[{"FOO": 1, "BAR": 2}, {"FOO": 3, "BAR": 4}]`` will pass ``FOO=1 BAR=2`` to one task and
         ``FOO=3 BAR=4`` to another.
+    buffers_to_upload
+        takes a list of dictionaries, each should have path where file should be written on VM(s)
+        relative to working directory, and ``io.BytesIO`` which provides content of file,
+        for example ``[{"relative_path": "hello.txt", "buffer": io.BytesIO(b"hello")}]``.
     """
-    if isinstance(command, str):
+    if isinstance(command, str) and not command.startswith("#!") and not command_as_script:
        command = shlex.split(command)

    env = dict_to_key_val_list(env)
@@ -75,6 +86,8 @@ def run(
        workspace=workspace,
        software=software,
        container=container,
+       run_on_host=run_on_host,
+       cluster_kwargs=cluster_kwargs,
        env=env,
        secret_env=secret_env,
        tag=tag,
@@ -104,7 +117,12 @@ def run(
        forward_aws_credentials=forward_aws_credentials,
        package_sync_strict=package_sync_strict,
        package_sync_conda_extras=package_sync_conda_extras,
+       package_sync_ignore=package_sync_ignore,
+       local_upload_path=local_upload_path,
+       buffers_to_upload=buffers_to_upload,
        host_setup_script=host_setup_script,
+       host_setup_script_content=host_setup_script_content,
+       command_as_script=command_as_script,
        ignore_container_entrypoint=ignore_container_entrypoint,
        job_timeout=job_timeout,
        logger=logger,
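For reference, a minimal sketch of how a caller might exercise the new `coiled.batch.run` parameters above. The command, workspace, and region values are hypothetical; the in-memory file follows the ``buffers_to_upload`` format documented in the new docstring:

    import io

    import coiled.batch

    # Hedged sketch (not from the diff): hypothetical command/workspace/region;
    # parameter names are the ones added in this release.
    coiled.batch.run(
        command="python job.py",
        workspace="my-team",
        run_on_host=True,                     # new: run directly on the VM, no container
        package_sync_ignore=["matplotlib"],   # new: exclude a package from the synced env
        buffers_to_upload=[
            # format documented in the new docstring above
            {"relative_path": "config.json", "buffer": io.BytesIO(b"{}")},
        ],
        cluster_kwargs={"region": "us-east-2"},  # new: forwarded to the cluster
    )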
--- coiled-1.111.4.dev5/coiled/capture_environment.py
+++ coiled-1.128.3.dev21/coiled/capture_environment.py
@@ -1,6 +1,4 @@
-import asyncio
 import contextlib
-import logging
 import platform
 import sys
 import typing
@@ -55,6 +53,7 @@ async def default_python() -> PackageInfo:
         "conda_name": "python",
         "version": python_version,
         "wheel_target": None,
+        "requested": True,
     }


@@ -68,47 +67,52 @@ async def approximate_packages(
     architecture: ArchitectureTypesEnum = ArchitectureTypesEnum.X86_64,
     pip_check_errors: Optional[Dict[str, List[str]]] = None,
     gpu_enabled: bool = False,
+    use_uv_installer: bool = True,
 ) -> typing.List[ResolvedPackageInfo]:
     user_conda_installed_python = next((p for p in packages if p["name"] == "python"), None)
-    ...
-    if not user_conda_installed_pip:
-        # This means pip was installed by pip, or the system
-        # package manager
-        # Insert a conda version of pip to be installed first, it will
-        # then be used to install the users version of pip
-        pip = next(
-            (p for p in packages if p["name"] == "pip" and p["source"] == "pip"),
+    # Only add pip if we need it
+    if not use_uv_installer:
+        user_conda_installed_pip = next(
+            (i for i, p in enumerate(packages) if p["name"] == "pip" and p["source"] == "conda"),
             None,
         )
-        if not
-        #
-        ...
+        if not user_conda_installed_pip:
+            # This means pip was installed by pip, or the system
+            # package manager
+            # Insert a conda version of pip to be installed first, it will
+            # then be used to install the users version of pip
+            pip = next(
+                (p for p in packages if p["name"] == "pip" and p["source"] == "pip"),
+                None,
+            )
+            if not pip:
+                # insert a modern version and hope it does not introduce conflicts
+                packages.append({
+                    "name": "pip",
+                    "path": None,
+                    "source": "conda",
+                    "channel_url": "https://conda.anaconda.org/conda-forge/",
+                    "channel": "conda-forge",
+                    "subdir": "noarch",
+                    "conda_name": "pip",
+                    "version": "22.3.1",
+                    "wheel_target": None,
+                    "requested": False,
+                })
+            else:
+                # insert the users pip version and hope it exists on conda-forge
+                packages.append({
+                    "name": "pip",
+                    "path": None,
+                    "source": "conda",
+                    "channel_url": "https://conda.anaconda.org/conda-forge/",
+                    "channel": "conda-forge",
+                    "subdir": "noarch",
+                    "conda_name": "pip",
+                    "version": pip["version"],
+                    "wheel_target": None,
+                    "requested": True,
+                })
     coiled_selected_python = None
     if not user_conda_installed_python:
         # insert a special python package
@@ -151,6 +155,7 @@ async def approximate_packages(
             "conda_name": pkg["conda_name"],
             "version": pkg["version"],
             "wheel_target": pkg["wheel_target"],
+            "requested": pkg["requested"],
         }
         # Send all packages to backend to help with debugging
         for pkg in packages + local_python_code + local_python_wheel_packages + ignored_packages
@@ -204,6 +209,7 @@ async def create_environment_approximation(
     progress: Optional[Progress] = None,
     architecture: ArchitectureTypesEnum = ArchitectureTypesEnum.X86_64,
     gpu_enabled: bool = False,
+    use_uv_installer: bool = True,
 ) -> typing.List[ResolvedPackageInfo]:
     packages = await scan_prefix(progress=progress)
     pip_check_errors = await check_pip_happy(progress)
@@ -220,6 +226,7 @@ async def create_environment_approximation(
             "conda_name": conda_extra,
             "version": "",
             "wheel_target": None,
+            "requested": True,
         }
         for conda_extra in (conda_extras or [])
     ]
@@ -232,6 +239,7 @@ async def create_environment_approximation(
         architecture=architecture,
         pip_check_errors=pip_check_errors,
         gpu_enabled=gpu_enabled,
+        use_uv_installer=use_uv_installer,
     )
     return result

@@ -301,6 +309,7 @@ async def scan_and_create(
         architecture=architecture,
         gpu_enabled=gpu_enabled,
         conda_extras=package_sync_conda_extras,
+        use_uv_installer=use_uv_installer,
    )

    if not package_sync_only:
@@ -422,39 +431,3 @@ If you use pip, venv, uv, pixi, etc. create a new environment and then:

 See https://docs.coiled.io/user_guide/software/package_sync_best_practices.html
 for more best practices. If that doesn't solve your issue, please contact support@coiled.io.""")
-
-
-if __name__ == "__main__":
-    from logging import basicConfig
-
-    basicConfig(level=logging.INFO)
-
-    from rich.console import Console
-    from rich.table import Table
-
-    async def run():
-        async with CloudV2(asynchronous=True) as cloud:
-            return await create_environment_approximation(
-                cloud=cloud,
-                priorities={
-                    ("dask", "conda"): PackageLevelEnum.CRITICAL,
-                    ("twisted", "conda"): PackageLevelEnum.IGNORE,
-                    ("graphviz", "conda"): PackageLevelEnum.LOOSE,
-                    ("icu", "conda"): PackageLevelEnum.LOOSE,
-                },
-            )
-
-    result = asyncio.run(run())
-
-    table = Table(title="Packages")
-    keys = ("name", "source", "include", "client_version", "specifier", "error", "note")
-
-    for key in keys:
-        table.add_column(key)
-
-    for pkg in result:
-        row_values = [str(pkg.get(key, "")) for key in keys]
-        table.add_row(*row_values)
-    console = Console()
-    console.print(table)
-    console.print(table)
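The thrust of the `use_uv_installer` changes above: when packages are installed with uv (the new default), no conda-forge `pip` needs to be injected into the captured environment; the old pip-bootstrapping path only runs when uv is disabled. A distilled, self-contained sketch of that decision (package records simplified from the diff; the opt-out value is hypothetical):

    # Simplified PackageInfo-style records, shaped like those in the diff.
    packages = [
        {"name": "python", "source": "conda", "version": "3.11.9"},
        {"name": "pip", "source": "pip", "version": "24.0"},
    ]

    use_uv_installer = False  # hypothetical caller opting out of uv

    if not use_uv_installer:
        # Legacy path: a conda-forge pip must exist so it can install the
        # user's pip packages; otherwise one is appended (requested=False,
        # since the user never asked for it).
        has_conda_pip = any(p["name"] == "pip" and p["source"] == "conda" for p in packages)
        print("inject conda pip:", not has_conda_pip)  # -> True here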
--- coiled-1.111.4.dev5/coiled/cli/batch/run.py
+++ coiled-1.128.3.dev21/coiled/cli/batch/run.py
@@ -1,6 +1,8 @@
 from __future__ import annotations

 import datetime
+import gzip
+import json
 import logging
 import os
 import re
@@ -8,15 +10,18 @@ import shlex

 import click
 import dask.config
-from dask.utils import format_time, parse_timedelta
+from dask.utils import format_bytes, format_time, parse_timedelta
 from rich.console import Console
 from rich.panel import Panel

 import coiled
+from coiled.cli.batch.util import load_sidecar_spec
+from coiled.cli.batch.wait import batch_job_wait
 from coiled.cli.curl import sync_request
 from coiled.cli.run import dict_from_key_val_list
 from coiled.cli.utils import CONTEXT_SETTINGS, fix_path_for_upload
 from coiled.credentials.aws import get_aws_local_session_token
+from coiled.filestore import FilestoreManager, upload_to_filestore_with_ui
 from coiled.utils import COILED_LOGGER_NAME, error_info_for_tracking, supress_logs

 console = Console(width=80)
@@ -101,7 +106,7 @@ def handle_possible_implicit_file(implicit_file):
             "remote_path": f"/scratch/{remote_rel_dir}{remote_base}",
             "content": file_content,
         }
-    elif any(implicit_file.endswith(t) for t in UPLOAD_FILE_TYPES):
+    elif any(implicit_file.endswith(t) and "$" not in implicit_file for t in UPLOAD_FILE_TYPES):
         console.print(
             f"[orange1]WARNING:[/orange1] {implicit_file} appears to be a filename, "
             "but this file not found locally and will not be copied to VMs",
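The new `"$" not in implicit_file` guard stops command arguments containing shell variables (e.g. ``out_$TASK.json``) from triggering the missing-file warning. A small illustration, assuming ``UPLOAD_FILE_TYPES`` includes these suffixes (the real constant's contents are not shown in the diff):

    # Assumed stand-in for the module's UPLOAD_FILE_TYPES constant.
    UPLOAD_FILE_TYPES = (".py", ".sh", ".json")

    def warn_candidate(arg: str) -> bool:
        # Mirrors the new predicate: file-like suffix, but no shell variables.
        return any(arg.endswith(t) and "$" not in arg for t in UPLOAD_FILE_TYPES)

    print(warn_candidate("missing.json"))       # True  -> warning still fires
    print(warn_candidate("out_$TASK_ID.json"))  # False -> no spurious warning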
@@ -191,6 +196,7 @@ def get_kwargs_from_header(f: dict, click_params: list):
         "default is to use the entrypoint (if any) set on the image."
     ),
 )
+@click.option("--run-on-host", default=None, help="Run code directly on host, not inside docker container.")
 @click.option(
     "--env",
     "-e",
@@ -265,7 +271,11 @@ def get_kwargs_from_header(f: dict, click_params: list):
     "--spot-policy",
     default=None,
     type=click.Choice(["on-demand", "spot", "spot_with_fallback"]),
-    help=
+    help=(
+        "Default is on-demand; allows using spot VMs, or spot VMs as available "
+        "with on-demand as a fallback. Only applies to workers (scheduler VM is "
+        "always on-demand)."
+    ),
 )
 @click.option(
     "--allow-cross-zone/--no-cross-zone",
@@ -329,6 +339,50 @@ def get_kwargs_from_header(f: dict, click_params: list):
         "into individual values. By default this is ',' for ``--map-over-values`` and newline for ``--map-over-file``."
     ),
 )
+@click.option("--wait", default=None, is_flag=True)
+@click.option(
+    "--upload",
+    "local_upload_path",
+    default=None,
+    type=str,
+    help=(
+        "File or directory to upload to cloud storage and download onto the VM(s). "
+        "By default files will be copied into the working directory on VM where your batch script runs."
+    ),
+)
+@click.option(
+    "--download",
+    "local_download_path",
+    default=None,
+    type=str,
+    help=(
+        "When used with ``--wait``, output files from job will be downloaded into this local directory "
+        "when job is complete. When used without ``--wait``, files won't be automatically downloaded, "
+        "but job will be configured to store result files in cloud storage for later download."
+    ),
+)
+@click.option(
+    "--sync",
+    "local_sync_path",
+    default=None,
+    type=str,
+    help="Equivalent to specifying both ``--upload`` and ``--download`` with the same local directory.",
+)
+@click.option(
+    "--pipe-to-files",
+    default=None,
+    is_flag=True,
+    help=(
+        "Write stdout and stderr from each task to files which can be downloaded when job is complete. "
+        "This is in addition to sending stdout and stderr to logs, and is more convenient than logs for when "
+        "you want to use outputs from tasks as inputs to further processing)."
+    ),
+)
+@click.option("--input-filestore", default=None, type=str, help="Name of input filestore")
+@click.option("--output-filestore", default=None, type=str, help="Name of output filestore")
+@click.option(
+    "--scheduler-sidecar-spec", default=None, type=str, help="Filename for scheduler sidecar spec (yaml or json)"
+)
 @click.option(
     "--ntasks",
     "--n-tasks",
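Taken together, the new flags support a round-trip workflow: upload local inputs, block until the job finishes, and pull results (plus piped stdout/stderr) back down. Illustratively, with the flags defined above and a hypothetical script name:

    coiled batch run --sync ./job-dir --wait --pipe-to-files my_script.sh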
@@ -396,6 +450,15 @@ def get_kwargs_from_header(f: dict, click_params: list):
         "environment that are not in your local environment."
     ),
 )
+@click.option(
+    "--package-sync-ignore",
+    default=None,
+    multiple=True,
+    help=(
+        "A list of package names to exclude from the environment. Note their dependencies may still be installed,"
+        "or they may be installed by another package that depends on them!"
+    ),
+)
 @click.option(
     "--host-setup-script",
     default=None,
@@ -410,7 +473,8 @@ def get_kwargs_from_header(f: dict, click_params: list):
         "For example, you can specify '30 minutes' or '1 hour'. Default is no timeout."
     ),
 )
-@click.
+@click.option("--dask-container", default=None, type=str)
+@click.argument("command", nargs=-1, required=True)
 def batch_run_cli(ctx, **kwargs):
     """
     Submit a batch job to run on Coiled.
@@ -439,6 +503,14 @@ def batch_run_cli(ctx, **kwargs):

 def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
     command = kwargs["command"]
+    user_files = []
+
+    if isinstance(command, str) and (command.startswith("#!") or kwargs.get("command_as_script")):
+        user_files.append({
+            "path": "script",
+            "content": command,
+        })
+        command = ["script"]

     # Handle command as string case (e.g. `coiled batch run "python myscript.py"`)
     if len(command) == 1:
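The new block in `_batch_run` means a command string starting with a shebang (or submitted with ``command_as_script``) is treated as a script body rather than a command line: it ships as a user file named ``script`` and the command becomes that file. A minimal sketch of the effect, mirroring the diff:

    command = "#!/bin/bash\necho hello\n"

    user_files = []
    if isinstance(command, str) and command.startswith("#!"):
        # The whole string travels as a file instead of being shlex-split.
        user_files.append({"path": "script", "content": command})
        command = ["script"]

    print(command)     # ['script']
    print(user_files)  # [{'path': 'script', 'content': '#!/bin/bash\necho hello\n'}]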
@@ -460,7 +532,6 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
     # unescape escaped COILED env vars in command
     command = [part.replace("\\$COILED", "$COILED") for part in command]

-    user_files = []
     kwargs_from_header = None

     # identify implicit files referenced in commands like "python foo.py" or "foo.sh"
@@ -651,8 +722,8 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
     if user_files_from_content:
         user_files.extend(user_files_from_content)

-    host_setup_content =
-    if kwargs["host_setup_script"]:
+    host_setup_content = kwargs.get("host_setup_script_content")
+    if not host_setup_content and kwargs["host_setup_script"]:
         with open(kwargs["host_setup_script"]) as f:
             host_setup_content = f.read()

@@ -692,6 +763,12 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:

     batch_job_container = f"{kwargs['container']}!" if kwargs["ignore_container_entrypoint"] else kwargs["container"]

+    scheduler_sidecars = load_sidecar_spec(kwargs.get("scheduler_sidecar_spec"))
+
+    dask_container = (
+        kwargs.get("dask_container") or dask.config.get("coiled.batch.dask-container", None) or "ghcr.io/dask/dask"
+    )
+
     cluster_kwargs = {
         "name": kwargs["name"],
         "workspace": kwargs["workspace"],
@@ -704,7 +781,9 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
         # if batch job is running in extra container, then we just need a pretty minimal dask container
         # so for now switch the default in that case to basic dask container
         # TODO would it be better to use a pre-built senv with our `cloud-env-run` container instead?
-        "container":
+        "container": dask_container
+        if (kwargs["container"] or kwargs.get("run_on_host")) and not kwargs["software"]
+        else None,
         "region": kwargs["region"],
         "scheduler_options": {
             "idle_timeout": "520 weeks",  # TODO allow job timeout?
@@ -720,9 +799,12 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
         "tags": {**tags, **{"coiled-cluster-type": "batch"}},
         "allow_ssh_from": kwargs["allow_ssh_from"],
         # "mount_bucket": mount_bucket,
-        "package_sync_strict": kwargs
-        "package_sync_conda_extras": kwargs
+        "package_sync_strict": kwargs.get("package_sync_strict"),
+        "package_sync_conda_extras": kwargs.get("package_sync_conda_extras"),
+        "package_sync_ignore": kwargs.get("package_sync_ignore"),
         "allow_cross_zone": True if kwargs["allow_cross_zone"] is None else kwargs["allow_cross_zone"],
+        "scheduler_sidecars": scheduler_sidecars,
+        **(kwargs.get("cluster_kwargs") or {}),
     }

     # when task will run on scheduler, give it the same VM specs as worker node
|
|
|
742
824
|
|
|
743
825
|
# Create a job
|
|
744
826
|
job_spec = {
|
|
745
|
-
"user_command":
|
|
827
|
+
"user_command": coiled.utils.join_command_parts(command),
|
|
746
828
|
"user_files": user_files,
|
|
747
829
|
**job_array_kwargs,
|
|
748
830
|
"scheduler_task_array": scheduler_task_ids,
|
|
@@ -755,25 +837,75 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
         # Avoid possibly breaking prefect batch jobs
         # https://github.com/coiled/platform/pull/8655#pullrequestreview-2826448869
         "workdir": None if "flow-run-id" in tags else "/scratch/batch",
+        "pipe_to_files": bool(kwargs.get("pipe_to_files")),
         "host_setup": host_setup_content,
         "job_timeout_seconds": parse_timedelta(kwargs["job_timeout"]) if kwargs["job_timeout"] else None,
+        "run_in_container": not kwargs.get("run_on_host"),
     }

     with coiled.Cloud(workspace=kwargs["workspace"]) as cloud:
         job_spec["workspace"] = cloud.default_workspace

-
+        compressed_data = gzip.compress(json.dumps(job_spec).encode())
+        if len(compressed_data) > 2_400_000:
+            raise ValueError(
+                f"Cannot submit job because data is too large "
+                f"({format_bytes(len(compressed_data))} is over 2.4 MiB limit)"
+            )
+
+        url = f"{cloud.server}/api/v2/jobs/compressed"
         response = sync_request(
             cloud=cloud,
             url=url,
             method="post",
-            data=
-            json=True,
+            data=compressed_data,
             json_output=True,
         )

         job_id = response["id"]

+        filestores_to_attach = []
+
+        for sidecar in scheduler_sidecars:
+            for attachment in sidecar.get("filestores") or []:
+                filestores_to_attach.append({"worker": False, "input": True, "output": True, **attachment})
+
+        # only create and attach filestores if using upload or indicate desire to store results
+        if (
+            kwargs.get("local_upload_path")
+            or kwargs.get("local_sync_path")
+            or kwargs.get("local_download_path")
+            or kwargs.get("pipe_to_files")
+            or kwargs.get("input_filestore")
+            or kwargs.get("output_filestore")
+            or kwargs.get("buffers_to_upload")
+        ):
+            fs_base_name = kwargs["name"] or f"batch-job-{job_id}"
+
+            in_fs_name = kwargs.get("input_filestore") or f"{fs_base_name}-input"
+            out_fs_name = kwargs.get("output_filestore") or f"{fs_base_name}-output"
+
+            filestores = FilestoreManager.get_or_create_filestores(
+                names=[in_fs_name, out_fs_name],
+                workspace=job_spec["workspace"],
+                region=kwargs["region"],
+            )
+
+            in_fs = filestores[0]
+            out_fs = filestores[1]
+
+            filestores_to_attach.extend([
+                {"id": in_fs["id"], "input": True, "path": "/scratch/batch/", "primary": True},
+                {"id": out_fs["id"], "output": True, "path": "/scratch/batch/", "primary": True},
+            ])
+
+            if kwargs.get("local_upload_path") or kwargs.get("local_sync_path") or kwargs.get("buffers_to_upload"):
+                upload_to_filestore_with_ui(
+                    fs=in_fs,
+                    local_dir=kwargs.get("local_upload_path") or kwargs.get("local_sync_path"),
+                    file_buffers=kwargs.get("buffers_to_upload"),
+                )
+
         # Run the job on a cluster
         with supress_logs([COILED_LOGGER_NAME], level=logging.WARNING):
             cluster = coiled.Cluster(
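The submission path now gzips the JSON job spec and enforces a client-side size cap before posting to the new ``/api/v2/jobs/compressed`` endpoint. The guard in isolation, using only the stdlib plus ``dask.utils.format_bytes`` (the toy spec below is hypothetical):

    import gzip
    import json

    from dask.utils import format_bytes

    # Toy job spec; real specs carry the command, user files, task array, etc.
    job_spec = {"user_command": "python train.py", "user_files": []}

    compressed_data = gzip.compress(json.dumps(job_spec).encode())
    if len(compressed_data) > 2_400_000:  # same cap as in the diff
        raise ValueError(f"too large: {format_bytes(len(compressed_data))}")
    print(format_bytes(len(compressed_data)))  # well under the limit here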
@@ -782,9 +914,16 @@ def _batch_run(default_kwargs, logger=None, from_cli=False, **kwargs) -> dict:
                 **cluster_kwargs,
             )

+        # TODO support for attaching as part of create request
+        if filestores_to_attach:
+            FilestoreManager.attach_filestores_to_cluster(
+                cluster_id=cluster.cluster_id,
+                attachments=filestores_to_attach,
+            )
+
         if logger:
             message = f"""
-Command: {
+Command: {coiled.utils.join_command_parts(command)}
 Cluster ID: {cluster.cluster_id}
 URL: {cluster.details_url}
 Tasks: {n_tasks}
@@ -800,17 +939,31 @@ Tasks: {n_tasks}
             status_command = f"{status_command} --workspace {kwargs['workspace']}"
         else:
             status_command = f"coiled.batch.status({cluster.cluster_id})"
-
-
-[bold]Cluster ID[/]: [bright_blue]{cluster.cluster_id}[/]
-[bold]URL[/]: [link][bright_blue]{cluster.details_url}[/bright_blue][/link]
-[bold]Tasks[/]: [bright_blue]{n_tasks}[/]
+        track_status_message = (
+            f"""

 To track progress run:

 [green]{status_command}[/]
-
+"""
+            if not kwargs.get("wait")
+            else ""
+        )
+
+        message = f"""
+[bold]Command[/]: [bright_blue]{coiled.utils.join_command_parts(command)}[/]
+[bold]Cluster ID[/]: [bright_blue]{cluster.cluster_id}[/]
+[bold]URL[/]: [link][bright_blue]{cluster.details_url}[/bright_blue][/link]
+[bold]Tasks[/]: [bright_blue]{n_tasks}[/]
+{track_status_message}{extra_message}"""

         console.print(Panel(message, title="Coiled Batch"))

+        if kwargs.get("wait") and cluster.cluster_id:
+            batch_job_wait(
+                cluster_id=cluster.cluster_id,
+                workspace=job_spec["workspace"],
+                download=kwargs.get("local_download_path") or kwargs.get("local_sync_path"),
+            )
+
         return {"cluster_id": cluster.cluster_id, "cluster_name": cluster.name, "job_id": job_id}
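With ``--wait``, the CLI now blocks on job completion via the new `coiled.cli.batch.wait.batch_job_wait` helper and, if a download path was given, pulls output files afterwards. Its call shape, inferred from the call site above (argument values hypothetical):

    from coiled.cli.batch.wait import batch_job_wait  # new module in this release

    # Hypothetical id/workspace/path; mirrors how _batch_run invokes the helper.
    batch_job_wait(
        cluster_id=123456,       # the batch job's cluster
        workspace="my-team",     # workspace the job was submitted to
        download="./results",    # local directory for output files
    )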
--- /dev/null
+++ coiled-1.128.3.dev21/coiled/cli/batch/util.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+import json
+
+import yaml
+
+
+def load_sidecar_spec(spec_path: str | None):
+    scheduler_sidecars = []
+    if spec_path:
+        with open(spec_path) as f:
+            if spec_path.endswith((".yaml", ".yml")):
+                sidecar_spec = yaml.safe_load(f)
+            elif spec_path.endswith(".json"):
+                sidecar_spec = json.load(f)
+            else:
+                raise ValueError(f"Unknown format for {spec_path}, json or yaml expected.")
+
+        # support either list-like or dict-like
+        if isinstance(sidecar_spec, list):
+            scheduler_sidecars = sidecar_spec
+        if isinstance(sidecar_spec, dict):
+            scheduler_sidecars = [{"name": key, **val} for key, val in sidecar_spec.items()]
+
+        for sidecar in scheduler_sidecars:
+            # allow `image` as the key, to match docker compose spec
+            sidecar["container"] = sidecar.get("container") or sidecar.get("image")
+    return scheduler_sidecars
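To make the normalization concrete, a hedged example of feeding `load_sidecar_spec` a dict-form spec (service names as keys, docker-compose style); the container images here are hypothetical:

    import json
    import os
    import tempfile

    from coiled.cli.batch.util import load_sidecar_spec

    spec = {
        "redis": {"image": "redis:7"},               # `image` is aliased to `container`
        "metrics": {"container": "prom/prometheus"},
    }

    # Write the spec to a temp .json file, as the CLI would with a real file.
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
        json.dump(spec, f)

    sidecars = load_sidecar_spec(f.name)
    os.unlink(f.name)
    print(sidecars)
    # [{'name': 'redis', 'image': 'redis:7', 'container': 'redis:7'},
    #  {'name': 'metrics', 'container': 'prom/prometheus'}]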