modal 0.62.115__py3-none-any.whl → 0.72.13__py3-none-any.whl
This diff represents the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registry.
- modal/__init__.py +13 -9
- modal/__main__.py +41 -3
- modal/_clustered_functions.py +80 -0
- modal/_clustered_functions.pyi +22 -0
- modal/_container_entrypoint.py +402 -398
- modal/_ipython.py +3 -13
- modal/_location.py +17 -10
- modal/_output.py +243 -99
- modal/_pty.py +2 -2
- modal/_resolver.py +55 -60
- modal/_resources.py +26 -7
- modal/_runtime/__init__.py +1 -0
- modal/_runtime/asgi.py +519 -0
- modal/_runtime/container_io_manager.py +1025 -0
- modal/{execution_context.py → _runtime/execution_context.py} +11 -2
- modal/_runtime/telemetry.py +169 -0
- modal/_runtime/user_code_imports.py +356 -0
- modal/_serialization.py +123 -6
- modal/_traceback.py +47 -187
- modal/_tunnel.py +50 -14
- modal/_tunnel.pyi +19 -36
- modal/_utils/app_utils.py +3 -17
- modal/_utils/async_utils.py +386 -104
- modal/_utils/blob_utils.py +157 -186
- modal/_utils/bytes_io_segment_payload.py +97 -0
- modal/_utils/deprecation.py +89 -0
- modal/_utils/docker_utils.py +98 -0
- modal/_utils/function_utils.py +299 -98
- modal/_utils/grpc_testing.py +47 -34
- modal/_utils/grpc_utils.py +54 -21
- modal/_utils/hash_utils.py +51 -10
- modal/_utils/http_utils.py +39 -9
- modal/_utils/logger.py +2 -1
- modal/_utils/mount_utils.py +34 -16
- modal/_utils/name_utils.py +58 -0
- modal/_utils/package_utils.py +14 -1
- modal/_utils/pattern_utils.py +205 -0
- modal/_utils/rand_pb_testing.py +3 -3
- modal/_utils/shell_utils.py +15 -49
- modal/_vendor/a2wsgi_wsgi.py +62 -72
- modal/_vendor/cloudpickle.py +1 -1
- modal/_watcher.py +12 -10
- modal/app.py +561 -323
- modal/app.pyi +474 -262
- modal/call_graph.py +7 -6
- modal/cli/_download.py +22 -6
- modal/cli/_traceback.py +200 -0
- modal/cli/app.py +203 -42
- modal/cli/config.py +12 -5
- modal/cli/container.py +61 -13
- modal/cli/dict.py +128 -0
- modal/cli/entry_point.py +26 -13
- modal/cli/environment.py +40 -9
- modal/cli/import_refs.py +21 -48
- modal/cli/launch.py +28 -14
- modal/cli/network_file_system.py +57 -21
- modal/cli/profile.py +1 -1
- modal/cli/programs/run_jupyter.py +34 -9
- modal/cli/programs/vscode.py +58 -8
- modal/cli/queues.py +131 -0
- modal/cli/run.py +199 -96
- modal/cli/secret.py +5 -4
- modal/cli/token.py +7 -2
- modal/cli/utils.py +74 -8
- modal/cli/volume.py +97 -56
- modal/client.py +248 -144
- modal/client.pyi +156 -124
- modal/cloud_bucket_mount.py +43 -30
- modal/cloud_bucket_mount.pyi +32 -25
- modal/cls.py +528 -141
- modal/cls.pyi +189 -145
- modal/config.py +32 -15
- modal/container_process.py +177 -0
- modal/container_process.pyi +82 -0
- modal/dict.py +50 -54
- modal/dict.pyi +120 -164
- modal/environments.py +106 -5
- modal/environments.pyi +77 -25
- modal/exception.py +30 -43
- modal/experimental.py +62 -2
- modal/file_io.py +537 -0
- modal/file_io.pyi +235 -0
- modal/file_pattern_matcher.py +196 -0
- modal/functions.py +846 -428
- modal/functions.pyi +446 -387
- modal/gpu.py +57 -44
- modal/image.py +943 -417
- modal/image.pyi +584 -245
- modal/io_streams.py +434 -0
- modal/io_streams.pyi +122 -0
- modal/mount.py +223 -90
- modal/mount.pyi +241 -243
- modal/network_file_system.py +85 -86
- modal/network_file_system.pyi +151 -110
- modal/object.py +66 -36
- modal/object.pyi +166 -143
- modal/output.py +63 -0
- modal/parallel_map.py +73 -47
- modal/parallel_map.pyi +51 -63
- modal/partial_function.py +272 -107
- modal/partial_function.pyi +219 -120
- modal/proxy.py +15 -12
- modal/proxy.pyi +3 -8
- modal/queue.py +96 -72
- modal/queue.pyi +210 -135
- modal/requirements/2024.04.txt +2 -1
- modal/requirements/2024.10.txt +16 -0
- modal/requirements/README.md +21 -0
- modal/requirements/base-images.json +22 -0
- modal/retries.py +45 -4
- modal/runner.py +325 -203
- modal/runner.pyi +124 -110
- modal/running_app.py +27 -4
- modal/sandbox.py +509 -231
- modal/sandbox.pyi +396 -169
- modal/schedule.py +2 -2
- modal/scheduler_placement.py +20 -3
- modal/secret.py +41 -25
- modal/secret.pyi +62 -42
- modal/serving.py +39 -49
- modal/serving.pyi +37 -43
- modal/stream_type.py +15 -0
- modal/token_flow.py +5 -3
- modal/token_flow.pyi +37 -32
- modal/volume.py +123 -137
- modal/volume.pyi +228 -221
- {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/METADATA +5 -5
- modal-0.72.13.dist-info/RECORD +174 -0
- {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/top_level.txt +0 -1
- modal_docs/gen_reference_docs.py +3 -1
- modal_docs/mdmd/mdmd.py +0 -1
- modal_docs/mdmd/signatures.py +1 -2
- modal_global_objects/images/base_images.py +28 -0
- modal_global_objects/mounts/python_standalone.py +2 -2
- modal_proto/__init__.py +1 -1
- modal_proto/api.proto +1231 -531
- modal_proto/api_grpc.py +750 -430
- modal_proto/api_pb2.py +2102 -1176
- modal_proto/api_pb2.pyi +8859 -0
- modal_proto/api_pb2_grpc.py +1329 -675
- modal_proto/api_pb2_grpc.pyi +1416 -0
- modal_proto/modal_api_grpc.py +149 -0
- modal_proto/modal_options_grpc.py +3 -0
- modal_proto/options_pb2.pyi +20 -0
- modal_proto/options_pb2_grpc.pyi +7 -0
- modal_proto/py.typed +0 -0
- modal_version/__init__.py +1 -1
- modal_version/_version_generated.py +2 -2
- modal/_asgi.py +0 -370
- modal/_container_exec.py +0 -128
- modal/_container_io_manager.py +0 -646
- modal/_container_io_manager.pyi +0 -412
- modal/_sandbox_shell.py +0 -49
- modal/app_utils.py +0 -20
- modal/app_utils.pyi +0 -17
- modal/execution_context.pyi +0 -37
- modal/shared_volume.py +0 -23
- modal/shared_volume.pyi +0 -24
- modal-0.62.115.dist-info/RECORD +0 -207
- modal_global_objects/images/conda.py +0 -15
- modal_global_objects/images/debian_slim.py +0 -15
- modal_global_objects/images/micromamba.py +0 -15
- test/__init__.py +0 -1
- test/aio_test.py +0 -12
- test/async_utils_test.py +0 -279
- test/blob_test.py +0 -67
- test/cli_imports_test.py +0 -149
- test/cli_test.py +0 -674
- test/client_test.py +0 -203
- test/cloud_bucket_mount_test.py +0 -22
- test/cls_test.py +0 -636
- test/config_test.py +0 -149
- test/conftest.py +0 -1485
- test/container_app_test.py +0 -50
- test/container_test.py +0 -1405
- test/cpu_test.py +0 -23
- test/decorator_test.py +0 -85
- test/deprecation_test.py +0 -34
- test/dict_test.py +0 -51
- test/e2e_test.py +0 -68
- test/error_test.py +0 -7
- test/function_serialization_test.py +0 -32
- test/function_test.py +0 -791
- test/function_utils_test.py +0 -101
- test/gpu_test.py +0 -159
- test/grpc_utils_test.py +0 -82
- test/helpers.py +0 -47
- test/image_test.py +0 -814
- test/live_reload_test.py +0 -80
- test/lookup_test.py +0 -70
- test/mdmd_test.py +0 -329
- test/mount_test.py +0 -162
- test/mounted_files_test.py +0 -327
- test/network_file_system_test.py +0 -188
- test/notebook_test.py +0 -66
- test/object_test.py +0 -41
- test/package_utils_test.py +0 -25
- test/queue_test.py +0 -115
- test/resolver_test.py +0 -59
- test/retries_test.py +0 -67
- test/runner_test.py +0 -85
- test/sandbox_test.py +0 -191
- test/schedule_test.py +0 -15
- test/scheduler_placement_test.py +0 -57
- test/secret_test.py +0 -89
- test/serialization_test.py +0 -50
- test/stub_composition_test.py +0 -10
- test/stub_test.py +0 -361
- test/test_asgi_wrapper.py +0 -234
- test/token_flow_test.py +0 -18
- test/traceback_test.py +0 -135
- test/tunnel_test.py +0 -29
- test/utils_test.py +0 -88
- test/version_test.py +0 -14
- test/volume_test.py +0 -397
- test/watcher_test.py +0 -58
- test/webhook_test.py +0 -145
- {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/LICENSE +0 -0
- {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/WHEEL +0 -0
- {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/entry_points.txt +0 -0
modal/sandbox.py
CHANGED
@@ -1,13 +1,17 @@
 # Copyright Modal Labs 2022
 import asyncio
 import os
-from
+from collections.abc import AsyncGenerator, Sequence
+from typing import TYPE_CHECKING, AsyncIterator, Literal, Optional, Union, overload
+
+if TYPE_CHECKING:
+    import _typeshed
 
 from google.protobuf.message import Message
-from grpclib
+from grpclib import GRPCError, Status
 
+from modal._tunnel import Tunnel
 from modal.cloud_bucket_mount import _CloudBucketMount, cloud_bucket_mounts_to_proto
-from modal.exception import InvalidError, SandboxTerminatedError, SandboxTimeoutError
 from modal.volume import _Volume
 from modal_proto import api_pb2
 
@@ -15,209 +19,30 @@ from ._location import parse_cloud_provider
 from ._resolver import Resolver
 from ._resources import convert_fn_config_to_resources_config
 from ._utils.async_utils import synchronize_api
-from ._utils.
-from ._utils.
+from ._utils.deprecation import deprecation_error
+from ._utils.grpc_utils import retry_transient_errors
+from ._utils.mount_utils import validate_network_file_systems, validate_volumes
 from .client import _Client
 from .config import config
+from .container_process import _ContainerProcess
+from .exception import ExecutionError, InvalidError, SandboxTerminatedError, SandboxTimeoutError
+from .file_io import FileWatchEvent, FileWatchEventType, _FileIO
 from .gpu import GPU_T
 from .image import _Image
+from .io_streams import StreamReader, StreamWriter, _StreamReader, _StreamWriter
 from .mount import _Mount
 from .network_file_system import _NetworkFileSystem, network_file_system_mount_protos
-from .object import _Object
+from .object import _get_environment_name, _Object
+from .proxy import _Proxy
 from .scheduler_placement import SchedulerPlacement
 from .secret import _Secret
+from .stream_type import StreamType
 
+_default_image: _Image = _Image.debian_slim()
 
-class _LogsReader:
-    """Provides an interface to buffer and fetch logs from a sandbox stream (`stdout` or `stderr`).
-
-    As an asynchronous iterable, the object supports the async for statement.
-
-    **Usage**
-
-    ```python
-    @app.function()
-    async def my_fn():
-        sandbox = app.spawn_sandbox(
-            "bash",
-            "-c",
-            "while true; do echo foo; sleep 1; done"
-        )
-        async for message in sandbox.stdout:
-            print(f"Message: {message}")
-    ```
-    """
-
-    def __init__(self, file_descriptor: int, sandbox_id: str, client: _Client) -> None:
-        """mdmd:hidden"""
-
-        self._file_descriptor = file_descriptor
-        self._sandbox_id = sandbox_id
-        self._client = client
-        self._stream = None
-        self._last_log_batch_entry_id = ""
-        # Whether the reader received an EOF. Once EOF is True, it returns
-        # an empty string for any subsequent reads (including async for)
-        self.eof = False
-
-    async def read(self) -> str:
-        """Fetch and return contents of the entire stream. If EOF was received,
-        return an empty string.
-
-        **Usage**
-
-        ```python
-        sandbox = app.app.spawn_sandbox("echo", "hello")
-        sandbox.wait()
-
-        print(sandbox.stdout.read())
-        ```
-
-        """
-        data = ""
-        # TODO: maybe combine this with get_app_logs_loop
-        async for message in self._get_logs():
-            if message is None:
-                break
-            data += message.data
-
-        return data
-
-    async def _get_logs(self) -> AsyncIterator[Optional[api_pb2.TaskLogs]]:
-        """mdmd:hidden
-        Streams sandbox logs from the server to the reader.
-
-        When the stream receives an EOF, it yields None. Once an EOF is received,
-        subsequent invocations will not yield logs.
-        """
-        if self.eof:
-            yield None
-            return
-
-        completed = False
-
-        retries_remaining = 10
-        while not completed:
-            req = api_pb2.SandboxGetLogsRequest(
-                sandbox_id=self._sandbox_id,
-                file_descriptor=self._file_descriptor,
-                timeout=55,
-                last_entry_id=self._last_log_batch_entry_id,
-            )
-            try:
-                async for log_batch in unary_stream(self._client.stub.SandboxGetLogs, req):
-                    self._last_log_batch_entry_id = log_batch.entry_id
-
-                    for message in log_batch.items:
-                        yield message
-                    if log_batch.eof:
-                        self.eof = True
-                        completed = True
-                        yield None
-                        break
-            except (GRPCError, StreamTerminatedError) as exc:
-                if retries_remaining > 0:
-                    retries_remaining -= 1
-                    if isinstance(exc, GRPCError):
-                        if exc.status in RETRYABLE_GRPC_STATUS_CODES:
-                            await asyncio.sleep(1.0)
-                            continue
-                    elif isinstance(exc, StreamTerminatedError):
-                        continue
-                raise
-
-    def __aiter__(self):
-        """mdmd:hidden"""
-        self._stream = self._get_logs()
-        return self
-
-    async def __anext__(self):
-        """mdmd:hidden"""
-        value = await self._stream.__anext__()
-
-        # The stream yields None if it receives an EOF batch.
-        if value is None:
-            raise StopAsyncIteration
-
-        return value.data
-
-
-MAX_BUFFER_SIZE = 128 * 1024
 
-
-
-    """Provides an interface to buffer and write logs to a sandbox stream (`stdin`)."""
-
-    def __init__(self, sandbox_id: str, client: _Client):
-        self._index = 1
-        self._sandbox_id = sandbox_id
-        self._client = client
-        self._is_closed = False
-        self._buffer = bytearray()
-
-    def get_next_index(self):
-        """mdmd:hidden"""
-        index = self._index
-        self._index += 1
-        return index
-
-    def write(self, data: Union[bytes, bytearray, memoryview]):
-        """
-        Writes data to stream's internal buffer, but does not drain/flush the write.
-
-        This method needs to be used along with the `drain()` method which flushes the buffer.
-
-        **Usage**
-
-        ```python
-        @app.local_entrypoint()
-        def main():
-            sandbox = app.spawn_sandbox(
-                "bash",
-                "-c",
-                "while read line; do echo $line; done",
-            )
-            sandbox.stdin.write(b"foo\\n")
-            sandbox.stdin.write(b"bar\\n")
-            sandbox.stdin.write_eof()
-
-            sandbox.stdin.drain()
-            sandbox.wait()
-        ```
-        """
-        if self._is_closed:
-            raise EOFError("Stdin is closed. Cannot write to it.")
-        if isinstance(data, (bytes, bytearray, memoryview)):
-            if len(self._buffer) + len(data) > MAX_BUFFER_SIZE:
-                raise BufferError("Buffer size exceed limit. Call drain to clear the buffer.")
-            self._buffer.extend(data)
-        else:
-            raise TypeError(f"data argument must be a bytes-like object, not {type(data).__name__}")
-
-    def write_eof(self):
-        """
-        Closes the write end of the stream after the buffered write data is drained.
-        If the sandbox process was blocked on input, it will become unblocked after `write_eof()`.
-
-        This method needs to be used along with the `drain()` method which flushes the EOF to the process.
-        """
-        self._is_closed = True
-
-    async def drain(self):
-        """
-        Flushes the write buffer and EOF to the running Sandbox process.
-        """
-        data = bytes(self._buffer)
-        self._buffer.clear()
-        index = self.get_next_index()
-        await retry_transient_errors(
-            self._client.stub.SandboxStdinWrite,
-            api_pb2.SandboxStdinWriteRequest(sandbox_id=self._sandbox_id, index=index, eof=self._is_closed, input=data),
-        )
-
-
-LogsReader = synchronize_api(_LogsReader)
-StreamWriter = synchronize_api(_StreamWriter)
+if TYPE_CHECKING:
+    import modal.app
 
 
 class _Sandbox(_Object, type_prefix="sb"):
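This hunk removes `_LogsReader`, `_StreamWriter`, and their `synchronize_api` wrappers from `modal/sandbox.py`; their replacements live in the new `modal/io_streams.py` (note the `StreamReader`/`StreamWriter` imports added above, and the deprecation shim at the end of this file). A minimal migration sketch, assuming a 0.72.x client; the app name is illustrative:

```python
import modal

# 0.62.x: streams were modal.sandbox.LogsReader / StreamWriter objects, and
# sandboxes came from app.spawn_sandbox(...). 0.72.x: the handles come from
# modal.io_streams, and sandboxes are created against an App.
app = modal.App.lookup("sandbox-app", create_if_missing=True)
sb = modal.Sandbox.create("bash", "-c", "echo foo; echo bar", app=app)

for line in sb.stdout:  # sb.stdout is now a modal.io_streams.StreamReader
    print(line, end="")
sb.wait()
```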
@@ -228,9 +53,11 @@ class _Sandbox(_Object, type_prefix="sb"):
     """
 
     _result: Optional[api_pb2.GenericResult]
-    _stdout:
-    _stderr:
+    _stdout: _StreamReader[str]
+    _stderr: _StreamReader[str]
     _stdin: _StreamWriter
+    _task_id: Optional[str] = None
+    _tunnels: Optional[dict[int, Tunnel]] = None
 
     @staticmethod
     def _new(
@@ -242,14 +69,17 @@ class _Sandbox(_Object, type_prefix="sb"):
         workdir: Optional[str] = None,
         gpu: GPU_T = None,
         cloud: Optional[str] = None,
+        region: Optional[Union[str, Sequence[str]]] = None,
         cpu: Optional[float] = None,
-        memory: Optional[Union[int,
-        network_file_systems:
+        memory: Optional[Union[int, tuple[int, int]]] = None,
+        network_file_systems: dict[Union[str, os.PathLike], _NetworkFileSystem] = {},
         block_network: bool = False,
-
-
+        cidr_allowlist: Optional[Sequence[str]] = None,
+        volumes: dict[Union[str, os.PathLike], Union[_Volume, _CloudBucketMount]] = {},
         pty_info: Optional[api_pb2.PTYInfo] = None,
-
+        encrypted_ports: Sequence[int] = [],
+        unencrypted_ports: Sequence[int] = [],
+        proxy: Optional[_Proxy] = None,
         _experimental_scheduler_placement: Optional[SchedulerPlacement] = None,
     ) -> "_Sandbox":
         """mdmd:hidden"""
@@ -257,17 +87,30 @@ class _Sandbox(_Object, type_prefix="sb"):
         if len(entrypoint_args) == 0:
             raise InvalidError("entrypoint_args must not be empty")
 
-
-
-
+        validated_network_file_systems = validate_network_file_systems(network_file_systems)
+
+        scheduler_placement: Optional[SchedulerPlacement] = _experimental_scheduler_placement
+        if region:
+            if scheduler_placement:
+                raise InvalidError("`region` and `_experimental_scheduler_placement` cannot be used together")
+            scheduler_placement = SchedulerPlacement(region=region)
+
+        if isinstance(gpu, list):
+            raise InvalidError(
+                "Sandboxes do not support configuring a list of GPUs. "
+                "Specify a single GPU configuration, e.g. gpu='a10g'"
+            )
+
+        if workdir is not None and not workdir.startswith("/"):
+            raise InvalidError(f"workdir must be an absolute path, got: {workdir}")
 
         # Validate volumes
         validated_volumes = validate_volumes(volumes)
         cloud_bucket_mounts = [(k, v) for k, v in validated_volumes if isinstance(v, _CloudBucketMount)]
         validated_volumes = [(k, v) for k, v in validated_volumes if isinstance(v, _Volume)]
 
-        def _deps() ->
-            deps:
+        def _deps() -> list[_Object]:
+            deps: list[_Object] = [image] + list(mounts) + list(secrets)
             for _, vol in validated_network_file_systems:
                 deps.append(vol)
             for _, vol in validated_volumes:
@@ -275,6 +118,8 @@ class _Sandbox(_Object, type_prefix="sb"):
             for _, cloud_bucket_mount in cloud_bucket_mounts:
                 if cloud_bucket_mount.secret:
                     deps.append(cloud_bucket_mount.secret)
+            if proxy:
+                deps.append(proxy)
             return deps
 
         async def _load(self: _Sandbox, resolver: Resolver, _existing_object_id: Optional[str]):
@@ -283,33 +128,60 @@ class _Sandbox(_Object, type_prefix="sb"):
                 api_pb2.VolumeMount(
                     mount_path=path,
                     volume_id=volume.object_id,
-                    allow_background_commits=
+                    allow_background_commits=True,
                 )
                 for path, volume in validated_volumes
             ]
 
+            open_ports = [api_pb2.PortSpec(port=port, unencrypted=False) for port in encrypted_ports]
+            open_ports.extend([api_pb2.PortSpec(port=port, unencrypted=True) for port in unencrypted_ports])
+
+            if block_network:
+                # If the network is blocked, cidr_allowlist is invalid as we don't allow any network access.
+                if cidr_allowlist is not None:
+                    raise InvalidError("`cidr_allowlist` cannot be used when `block_network` is enabled")
+                network_access = api_pb2.NetworkAccess(
+                    network_access_type=api_pb2.NetworkAccess.NetworkAccessType.BLOCKED,
+                )
+            elif cidr_allowlist is None:
+                # If the allowlist is empty, we allow all network access.
+                network_access = api_pb2.NetworkAccess(
+                    network_access_type=api_pb2.NetworkAccess.NetworkAccessType.OPEN,
+                )
+            else:
+                network_access = api_pb2.NetworkAccess(
+                    network_access_type=api_pb2.NetworkAccess.NetworkAccessType.ALLOWLIST,
+                    allowed_cidrs=cidr_allowlist,
+                )
+
+            ephemeral_disk = None  # Ephemeral disk requests not supported on Sandboxes.
             definition = api_pb2.Sandbox(
                 entrypoint_args=entrypoint_args,
                 image_id=image.object_id,
-                mount_ids=[mount.object_id for mount in mounts],
+                mount_ids=[mount.object_id for mount in mounts] + [mount.object_id for mount in image._mount_layers],
                 secret_ids=[secret.object_id for secret in secrets],
                 timeout_secs=timeout,
                 workdir=workdir,
-                resources=convert_fn_config_to_resources_config(
+                resources=convert_fn_config_to_resources_config(
+                    cpu=cpu, memory=memory, gpu=gpu, ephemeral_disk=ephemeral_disk
+                ),
                 cloud_provider=parse_cloud_provider(cloud) if cloud else None,
                 nfs_mounts=network_file_system_mount_protos(validated_network_file_systems, False),
                 runtime_debug=config.get("function_runtime_debug"),
-                block_network=block_network,
                 cloud_bucket_mounts=cloud_bucket_mounts_to_proto(cloud_bucket_mounts),
                 volume_mounts=volume_mounts,
                 pty_info=pty_info,
-
-
-
-
+                scheduler_placement=scheduler_placement.proto if scheduler_placement else None,
+                worker_id=config.get("worker_id"),
+                open_ports=api_pb2.PortSpecs(ports=open_ports),
+                network_access=network_access,
+                proxy_id=(proxy.object_id if proxy else None),
             )
 
-
+            # Note - `resolver.app_id` will be `None` for app-less sandboxes
+            create_req = api_pb2.SandboxCreateRequest(
+                app_id=resolver.app_id, definition=definition, environment_name=resolver.environment_name
+            )
             create_resp = await retry_transient_errors(resolver.client.stub.SandboxCreate, create_req)
 
             sandbox_id = create_resp.sandbox_id
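The `NetworkAccess` branch added above replaces the old boolean `block_network` proto field with three modes: BLOCKED when `block_network=True` (an allowlist is rejected with `InvalidError`), OPEN when `cidr_allowlist` is `None`, and ALLOWLIST otherwise. A sketch of how the public parameters (see `Sandbox.create` in the next hunk) hit each branch; the commands and CIDR are illustrative:

```python
# cidr_allowlist=None (default) -> NetworkAccessType.OPEN
open_sb = modal.Sandbox.create("curl", "https://example.com", app=app)

# A CIDR list -> NetworkAccessType.ALLOWLIST with allowed_cidrs set
scoped_sb = modal.Sandbox.create(
    "curl", "http://10.1.2.3/", app=app, cidr_allowlist=["10.0.0.0/8"]
)

# block_network=True -> NetworkAccessType.BLOCKED; passing cidr_allowlist
# alongside it raises InvalidError per the validation above.
sealed_sb = modal.Sandbox.create("python", "-c", "print('hi')", app=app, block_network=True)
```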
@@ -317,15 +189,127 @@ class _Sandbox(_Object, type_prefix="sb"):
 
         return _Sandbox._from_loader(_load, "Sandbox()", deps=_deps)
 
+    @staticmethod
+    async def create(
+        *entrypoint_args: str,
+        app: Optional["modal.app._App"] = None,  # Optionally associate the sandbox with an app
+        environment_name: Optional[str] = None,  # Optionally override the default environment
+        image: Optional[_Image] = None,  # The image to run as the container for the sandbox.
+        mounts: Sequence[_Mount] = (),  # Mounts to attach to the sandbox.
+        secrets: Sequence[_Secret] = (),  # Environment variables to inject into the sandbox.
+        network_file_systems: dict[Union[str, os.PathLike], _NetworkFileSystem] = {},
+        timeout: Optional[int] = None,  # Maximum execution time of the sandbox in seconds.
+        workdir: Optional[str] = None,  # Working directory of the sandbox.
+        gpu: GPU_T = None,
+        cloud: Optional[str] = None,
+        region: Optional[Union[str, Sequence[str]]] = None,  # Region or regions to run the sandbox on.
+        # Specify, in fractional CPU cores, how many CPU cores to request.
+        # Or, pass (request, limit) to additionally specify a hard limit in fractional CPU cores.
+        # CPU throttling will prevent a container from exceeding its specified limit.
+        cpu: Optional[Union[float, tuple[float, float]]] = None,
+        # Specify, in MiB, a memory request which is the minimum memory required.
+        # Or, pass (request, limit) to additionally specify a hard limit in MiB.
+        memory: Optional[Union[int, tuple[int, int]]] = None,
+        block_network: bool = False,  # Whether to block network access
+        # List of CIDRs the sandbox is allowed to access. If None, all CIDRs are allowed.
+        cidr_allowlist: Optional[Sequence[str]] = None,
+        volumes: dict[
+            Union[str, os.PathLike], Union[_Volume, _CloudBucketMount]
+        ] = {},  # Mount points for Modal Volumes and CloudBucketMounts
+        pty_info: Optional[api_pb2.PTYInfo] = None,
+        # List of ports to tunnel into the sandbox. Encrypted ports are tunneled with TLS.
+        encrypted_ports: Sequence[int] = [],
+        # List of ports to tunnel into the sandbox without encryption.
+        unencrypted_ports: Sequence[int] = [],
+        # Reference to a Modal Proxy to use in front of this Sandbox.
+        proxy: Optional[_Proxy] = None,
+        _experimental_scheduler_placement: Optional[
+            SchedulerPlacement
+        ] = None,  # Experimental controls over fine-grained scheduling (alpha).
+        client: Optional[_Client] = None,
+    ) -> "_Sandbox":
+        from .app import _App
+
+        environment_name = _get_environment_name(environment_name)
+
+        # If there are no entrypoint args, we'll sleep forever so that the sandbox will stay
+        # alive long enough for the user to interact with it.
+        if len(entrypoint_args) == 0:
+            max_sleep_time = 60 * 60 * 24 * 2  # 2 days is plenty since workers roll every 24h
+            entrypoint_args = ("sleep", str(max_sleep_time))
+
+        # TODO(erikbern): Get rid of the `_new` method and create an already-hydrated object
+        obj = _Sandbox._new(
+            entrypoint_args,
+            image=image or _default_image,
+            mounts=mounts,
+            secrets=secrets,
+            timeout=timeout,
+            workdir=workdir,
+            gpu=gpu,
+            cloud=cloud,
+            region=region,
+            cpu=cpu,
+            memory=memory,
+            network_file_systems=network_file_systems,
+            block_network=block_network,
+            cidr_allowlist=cidr_allowlist,
+            volumes=volumes,
+            pty_info=pty_info,
+            encrypted_ports=encrypted_ports,
+            unencrypted_ports=unencrypted_ports,
+            proxy=proxy,
+            _experimental_scheduler_placement=_experimental_scheduler_placement,
+        )
+
+        app_id: Optional[str] = None
+        app_client: Optional[_Client] = None
+
+        if app is not None:
+            if app.app_id is None:
+                raise ValueError(
+                    "App has not been initialized yet. To create an App lazily, use `App.lookup`: \n"
+                    "app = modal.App.lookup('my-app', create_if_missing=True)\n"
+                    "modal.Sandbox.create('echo', 'hi', app=app)\n"
+                    "In order to initialize an existing `App` object, refer to our docs: https://modal.com/docs/guide/apps"
+                )
+
+            app_id = app.app_id
+            app_client = app._client
+        elif (container_app := _App._get_container_app()) is not None:
+            app_id = container_app.app_id
+            app_client = container_app._client
+        else:
+            arglist = ", ".join(repr(s) for s in entrypoint_args)
+            deprecation_error(
+                (2024, 9, 14),
+                "Creating a `Sandbox` without an `App` is deprecated.\n\n"
+                "You may pass in an `App` object, or reference one by name with `App.lookup`:\n\n"
+                "```\n"
+                "app = modal.App.lookup('sandbox-app', create_if_missing=True)\n"
+                f"sb = modal.Sandbox.create({arglist}, app=app)\n"
+                "```",
+            )
+
+        client = client or app_client or await _Client.from_env()
+
+        resolver = Resolver(client, environment_name=environment_name, app_id=app_id)
+        await resolver.load(obj)
+        return obj
+
     def _hydrate_metadata(self, handle_metadata: Optional[Message]):
-        self._stdout =
-
-
+        self._stdout: _StreamReader[str] = StreamReader[str](
+            api_pb2.FILE_DESCRIPTOR_STDOUT, self.object_id, "sandbox", self._client, by_line=True
+        )
+        self._stderr: _StreamReader[str] = StreamReader[str](
+            api_pb2.FILE_DESCRIPTOR_STDERR, self.object_id, "sandbox", self._client, by_line=True
+        )
+        self._stdin = StreamWriter(self.object_id, "sandbox", self._client)
         self._result = None
 
     @staticmethod
     async def from_id(sandbox_id: str, client: Optional[_Client] = None) -> "_Sandbox":
-        """Construct a Sandbox from an id and look up the
+        """Construct a Sandbox from an id and look up the Sandbox result.
 
         The ID of a Sandbox object can be accessed using `.object_id`.
         """
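`Sandbox.create` resolves its `App` in three ways: an explicit `app=` argument, the ambient container app when already running inside Modal, or (deprecated since 2024-09-14) no app at all. Together with `from_id`, handles can be passed between processes; a sketch, with the ID value shown only for illustration:

```python
import modal

app = modal.App.lookup("sandbox-app", create_if_missing=True)
sb = modal.Sandbox.create("sleep", "infinity", app=app)
sandbox_id = sb.object_id  # e.g. "sb-..." — shareable across processes

# Elsewhere: rehydrate the handle, check status, and shut it down.
same_sb = modal.Sandbox.from_id(sandbox_id)
print(same_sb.poll())  # None while the sandbox is still running
same_sb.terminate()
```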
@@ -336,14 +320,57 @@ class _Sandbox(_Object, type_prefix="sb"):
         resp = await retry_transient_errors(client.stub.SandboxWait, req)
 
         obj = _Sandbox._new_hydrated(sandbox_id, client, None)
-
+
+        if resp.result.status:
+            obj._result = resp.result
 
         return obj
 
+    async def set_tags(self, tags: dict[str, str], *, client: Optional[_Client] = None):
+        """Set tags (key-value pairs) on the Sandbox. Tags can be used to filter results in `Sandbox.list`."""
+        environment_name = _get_environment_name()
+        if client is None:
+            client = await _Client.from_env()
+
+        tags_list = [api_pb2.SandboxTag(tag_name=name, tag_value=value) for name, value in tags.items()]
+
+        req = api_pb2.SandboxTagsSetRequest(
+            environment_name=environment_name,
+            sandbox_id=self.object_id,
+            tags=tags_list,
+        )
+        try:
+            await retry_transient_errors(client.stub.SandboxTagsSet, req)
+        except GRPCError as exc:
+            raise InvalidError(exc.message) if exc.status == Status.INVALID_ARGUMENT else exc
+
+    async def snapshot_filesystem(self, timeout: int = 55) -> _Image:
+        """Snapshot the filesystem of the Sandbox.
+
+        Returns an [`Image`](https://modal.com/docs/reference/modal.Image) object which
+        can be used to spawn a new Sandbox with the same filesystem.
+        """
+        req = api_pb2.SandboxSnapshotFsRequest(sandbox_id=self.object_id, timeout=timeout)
+        resp = await retry_transient_errors(self._client.stub.SandboxSnapshotFs, req)
+
+        if resp.result.status != api_pb2.GenericResult.GENERIC_STATUS_SUCCESS:
+            raise ExecutionError(resp.result.exception)
+
+        image_id = resp.image_id
+        metadata = resp.image_metadata
+
+        async def _load(self: _Image, resolver: Resolver, existing_object_id: Optional[str]):
+            self._hydrate(image_id, resolver.client, metadata)
+
+        rep = "Image()"
+        image = _Image._from_loader(_load, rep)
+
+        return image
+
     # Live handle methods
 
     async def wait(self, raise_on_termination: bool = True):
-        """Wait for the
+        """Wait for the Sandbox to finish running."""
 
         while True:
             req = api_pb2.SandboxWaitRequest(sandbox_id=self.object_id, timeout=50)
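`set_tags` and `snapshot_filesystem` are both new surface area in this release. A sketch that tags a sandbox and reuses its filesystem as the image for a second one; the package and tag values are illustrative:

```python
sb = modal.Sandbox.create("pip", "install", "requests", app=app)
sb.set_tags({"team": "data", "purpose": "warmup"})
sb.wait()

# Persist the resulting filesystem as an Image and boot a new Sandbox from it.
image = sb.snapshot_filesystem()
sb2 = modal.Sandbox.create("python", "-c", "import requests", app=app, image=image)
```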
@@ -357,10 +384,37 @@ class _Sandbox(_Object, type_prefix="sb"):
                 raise SandboxTerminatedError()
             break
 
+    async def tunnels(self, timeout: int = 50) -> dict[int, Tunnel]:
+        """Get tunnel metadata for the sandbox.
+
+        Raises `SandboxTimeoutError` if the tunnels are not available after the timeout.
+
+        Returns a dictionary of `Tunnel` objects which are keyed by the container port.
+
+        NOTE: Previous to client v0.64.152, this returned a list of `TunnelData` objects.
+        """
+
+        if self._tunnels:
+            return self._tunnels
+
+        req = api_pb2.SandboxGetTunnelsRequest(sandbox_id=self.object_id, timeout=timeout)
+        resp = await retry_transient_errors(self._client.stub.SandboxGetTunnels, req)
+
+        # If we couldn't get the tunnels in time, report the timeout.
+        if resp.result.status == api_pb2.GenericResult.GENERIC_STATUS_TIMEOUT:
+            raise SandboxTimeoutError()
+
+        # Otherwise, we got the tunnels and can report the result.
+        self._tunnels = {
+            t.container_port: Tunnel(t.host, t.port, t.unencrypted_host, t.unencrypted_port) for t in resp.tunnels
+        }
+
+        return self._tunnels
+
     async def terminate(self):
-        """Terminate
+        """Terminate Sandbox execution.
 
-        This is a no-op if the
+        This is a no-op if the Sandbox has already finished running."""
 
         await retry_transient_errors(
             self._client.stub.SandboxTerminate, api_pb2.SandboxTerminateRequest(sandbox_id=self.object_id)
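`tunnels()` pairs with the `encrypted_ports`/`unencrypted_ports` parameters added earlier: each requested port comes back as a `Tunnel` keyed by container port (a dict since v0.64.152, per the docstring above). A sketch, assuming a process listening on port 8000:

```python
sb = modal.Sandbox.create(
    "python", "-m", "http.server", "8000",
    app=app,
    encrypted_ports=[8000],  # tunneled with TLS
)
tunnel = sb.tunnels()[8000]  # dict[int, Tunnel], keyed by container port
print(tunnel.host, tunnel.port)  # public endpoint for the tunneled port
```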
@@ -368,9 +422,9 @@ class _Sandbox(_Object, type_prefix="sb"):
         await self.wait(raise_on_termination=False)
 
     async def poll(self) -> Optional[int]:
-        """Check if the
+        """Check if the Sandbox has finished running.
 
-        Returns `None` if the
+        Returns `None` if the Sandbox is still running, else returns the exit code.
         """
 
         req = api_pb2.SandboxWaitRequest(sandbox_id=self.object_id, timeout=0)
@@ -381,21 +435,186 @@ class _Sandbox(_Object, type_prefix="sb"):
 
         return self.returncode
 
+    async def _get_task_id(self):
+        while not self._task_id:
+            resp = await self._client.stub.SandboxGetTaskId(api_pb2.SandboxGetTaskIdRequest(sandbox_id=self.object_id))
+            self._task_id = resp.task_id
+            if not self._task_id:
+                await asyncio.sleep(0.5)
+        return self._task_id
+
+    @overload
+    async def exec(
+        self,
+        *cmds: str,
+        pty_info: Optional[api_pb2.PTYInfo] = None,
+        stdout: StreamType = StreamType.PIPE,
+        stderr: StreamType = StreamType.PIPE,
+        timeout: Optional[int] = None,
+        workdir: Optional[str] = None,
+        secrets: Sequence[_Secret] = (),
+        text: Literal[True] = True,
+        bufsize: Literal[-1, 1] = -1,
+        _pty_info: Optional[api_pb2.PTYInfo] = None,
+    ) -> _ContainerProcess[str]:
+        ...
+
+    @overload
+    async def exec(
+        self,
+        *cmds: str,
+        pty_info: Optional[api_pb2.PTYInfo] = None,
+        stdout: StreamType = StreamType.PIPE,
+        stderr: StreamType = StreamType.PIPE,
+        timeout: Optional[int] = None,
+        workdir: Optional[str] = None,
+        secrets: Sequence[_Secret] = (),
+        text: Literal[False] = False,
+        bufsize: Literal[-1, 1] = -1,
+        _pty_info: Optional[api_pb2.PTYInfo] = None,
+    ) -> _ContainerProcess[bytes]:
+        ...
+
+    async def exec(
+        self,
+        *cmds: str,
+        pty_info: Optional[api_pb2.PTYInfo] = None,  # Deprecated: internal use only
+        stdout: StreamType = StreamType.PIPE,
+        stderr: StreamType = StreamType.PIPE,
+        timeout: Optional[int] = None,
+        workdir: Optional[str] = None,
+        secrets: Sequence[_Secret] = (),
+        # Encode output as text.
+        text: bool = True,
+        # Control line-buffered output.
+        # -1 means unbuffered, 1 means line-buffered (only available if `text=True`).
+        bufsize: Literal[-1, 1] = -1,
+        # Internal option to set terminal size and metadata
+        _pty_info: Optional[api_pb2.PTYInfo] = None,
+    ):
+        """Execute a command in the Sandbox and return
+        a [`ContainerProcess`](/docs/reference/modal.ContainerProcess#modalcontainer_process) handle.
+
+        **Usage**
+
+        ```python
+        app = modal.App.lookup("my-app", create_if_missing=True)
+
+        sandbox = modal.Sandbox.create("sleep", "infinity", app=app)
+
+        process = sandbox.exec("bash", "-c", "for i in $(seq 1 10); do echo foo $i; sleep 0.5; done")
+
+        for line in process.stdout:
+            print(line)
+        ```
+        """
+
+        if workdir is not None and not workdir.startswith("/"):
+            raise InvalidError(f"workdir must be an absolute path, got: {workdir}")
+
+        # Force secret resolution so we can pass the secret IDs to the backend.
+        for secret in secrets:
+            await secret.resolve(client=self._client)
+
+        task_id = await self._get_task_id()
+        req = api_pb2.ContainerExecRequest(
+            task_id=task_id,
+            command=cmds,
+            pty_info=_pty_info or pty_info,
+            runtime_debug=config.get("function_runtime_debug"),
+            timeout_secs=timeout or 0,
+            workdir=workdir,
+            secret_ids=[secret.object_id for secret in secrets],
+        )
+        resp = await retry_transient_errors(self._client.stub.ContainerExec, req)
+        by_line = bufsize == 1
+        return _ContainerProcess(resp.exec_id, self._client, stdout=stdout, stderr=stderr, text=text, by_line=by_line)
+
+    @overload
+    async def open(
+        self,
+        path: str,
+        mode: "_typeshed.OpenTextMode",
+    ) -> _FileIO[str]:
+        ...
+
+    @overload
+    async def open(
+        self,
+        path: str,
+        mode: "_typeshed.OpenBinaryMode",
+    ) -> _FileIO[bytes]:
+        ...
+
+    async def open(
+        self,
+        path: str,
+        mode: Union["_typeshed.OpenTextMode", "_typeshed.OpenBinaryMode"] = "r",
+    ):
+        """Open a file in the Sandbox and return
+        a [`FileIO`](/docs/reference/modal.FileIO#modalfile_io) handle.
+
+        **Usage**
+
+        ```python notest
+        sb = modal.Sandbox.create(app=sb_app)
+        f = sb.open("/test.txt", "w")
+        f.write("hello")
+        f.close()
+        ```
+        """
+        task_id = await self._get_task_id()
+        return await _FileIO.create(path, mode, self._client, task_id)
+
+    async def ls(self, path: str) -> list[str]:
+        """List the contents of a directory in the Sandbox."""
+        task_id = await self._get_task_id()
+        return await _FileIO.ls(path, self._client, task_id)
+
+    async def mkdir(self, path: str, parents: bool = False) -> None:
+        """Create a new directory in the Sandbox."""
+        task_id = await self._get_task_id()
+        return await _FileIO.mkdir(path, self._client, task_id, parents)
+
+    async def rm(self, path: str, recursive: bool = False) -> None:
+        """Remove a file or directory in the Sandbox."""
+        task_id = await self._get_task_id()
+        return await _FileIO.rm(path, self._client, task_id, recursive)
+
+    async def watch(
+        self,
+        path: str,
+        filter: Optional[list[FileWatchEventType]] = None,
+        recursive: Optional[bool] = None,
+        timeout: Optional[int] = None,
+    ) -> AsyncIterator[FileWatchEvent]:
+        task_id = await self._get_task_id()
+        async for event in _FileIO.watch(path, self._client, task_id, filter, recursive, timeout):
+            yield event
+
     @property
-    def stdout(self) ->
-        """
+    def stdout(self) -> _StreamReader[str]:
+        """
+        [`StreamReader`](/docs/reference/modal.io_streams#modalio_streamsstreamreader) for
+        the sandbox's stdout stream.
+        """
 
         return self._stdout
 
     @property
-    def stderr(self) ->
-        """`
+    def stderr(self) -> _StreamReader[str]:
+        """[`StreamReader`](/docs/reference/modal.io_streams#modalio_streamsstreamreader) for
+        the sandbox's stderr stream.
+        """
 
         return self._stderr
 
     @property
     def stdin(self) -> _StreamWriter:
-        """
+        """
+        [`StreamWriter`](/docs/reference/modal.io_streams#modalio_streamsstreamwriter) for
+        the sandbox's stdin stream.
+        """
 
         return self._stdin
 
@@ -414,5 +633,64 @@ class _Sandbox(_Object, type_prefix="sb"):
         else:
             return self._result.exitcode
 
+    @staticmethod
+    async def list(
+        *, app_id: Optional[str] = None, tags: Optional[dict[str, str]] = None, client: Optional[_Client] = None
+    ) -> AsyncGenerator["_Sandbox", None]:
+        """List all sandboxes for the current environment or app ID (if specified). If tags are specified, only
+        sandboxes that have at least those tags are returned. Returns an iterator over `Sandbox` objects."""
+        before_timestamp = None
+        environment_name = _get_environment_name()
+        if client is None:
+            client = await _Client.from_env()
+
+        tags_list = [api_pb2.SandboxTag(tag_name=name, tag_value=value) for name, value in tags.items()] if tags else []
+
+        while True:
+            req = api_pb2.SandboxListRequest(
+                app_id=app_id,
+                before_timestamp=before_timestamp,
+                environment_name=environment_name,
+                include_finished=False,
+                tags=tags_list,
+            )
+
+            # Fetches a batch of sandboxes.
+            try:
+                resp = await retry_transient_errors(client.stub.SandboxList, req)
+            except GRPCError as exc:
+                raise InvalidError(exc.message) if exc.status == Status.INVALID_ARGUMENT else exc
+
+            if not resp.sandboxes:
+                return
+
+            for sandbox_info in resp.sandboxes:
+                obj = _Sandbox._new_hydrated(sandbox_info.id, client, None)
+                obj._result = sandbox_info.task_info.result
+                yield obj
+
+            # Fetch the next batch starting from the end of the current one.
+            before_timestamp = resp.sandboxes[-1].created_at
+
 
 Sandbox = synchronize_api(_Sandbox)
+
+
+def __getattr__(name):
+    if name == "LogsReader":
+        deprecation_error(
+            (2024, 8, 12),
+            "`modal.sandbox.LogsReader` is deprecated. Please import `modal.io_streams.StreamReader` instead.",
+        )
+        from .io_streams import StreamReader
+
+        return StreamReader
+    elif name == "StreamWriter":
+        deprecation_error(
+            (2024, 8, 12),
+            "`modal.sandbox.StreamWriter` is deprecated. Please import `modal.io_streams.StreamWriter` instead.",
+        )
+        from .io_streams import StreamWriter
+
+        return StreamWriter
+    raise AttributeError(f"module {__name__} has no attribute {name}")