modal 1.1.5.dev66__py3-none-any.whl → 1.3.1.dev8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This version of modal has been flagged as potentially problematic.
- modal/__init__.py +4 -4
- modal/__main__.py +4 -29
- modal/_billing.py +84 -0
- modal/_clustered_functions.py +1 -3
- modal/_container_entrypoint.py +33 -208
- modal/_functions.py +171 -138
- modal/_grpc_client.py +191 -0
- modal/_ipython.py +16 -6
- modal/_load_context.py +106 -0
- modal/_object.py +72 -21
- modal/_output.py +12 -14
- modal/_partial_function.py +31 -4
- modal/_resolver.py +44 -57
- modal/_runtime/container_io_manager.py +30 -28
- modal/_runtime/container_io_manager.pyi +42 -44
- modal/_runtime/gpu_memory_snapshot.py +9 -7
- modal/_runtime/user_code_event_loop.py +80 -0
- modal/_runtime/user_code_imports.py +236 -10
- modal/_serialization.py +2 -1
- modal/_traceback.py +4 -13
- modal/_tunnel.py +16 -11
- modal/_tunnel.pyi +25 -3
- modal/_utils/async_utils.py +337 -10
- modal/_utils/auth_token_manager.py +1 -4
- modal/_utils/blob_utils.py +29 -22
- modal/_utils/function_utils.py +20 -21
- modal/_utils/grpc_testing.py +6 -3
- modal/_utils/grpc_utils.py +223 -64
- modal/_utils/mount_utils.py +26 -1
- modal/_utils/name_utils.py +2 -3
- modal/_utils/package_utils.py +0 -1
- modal/_utils/rand_pb_testing.py +8 -1
- modal/_utils/task_command_router_client.py +524 -0
- modal/_vendor/cloudpickle.py +144 -48
- modal/app.py +285 -105
- modal/app.pyi +216 -53
- modal/billing.py +5 -0
- modal/builder/2025.06.txt +6 -3
- modal/builder/PREVIEW.txt +2 -1
- modal/builder/base-images.json +4 -2
- modal/cli/_download.py +19 -3
- modal/cli/cluster.py +4 -2
- modal/cli/config.py +3 -1
- modal/cli/container.py +5 -4
- modal/cli/dict.py +5 -2
- modal/cli/entry_point.py +26 -2
- modal/cli/environment.py +2 -16
- modal/cli/launch.py +1 -76
- modal/cli/network_file_system.py +5 -20
- modal/cli/programs/run_jupyter.py +1 -1
- modal/cli/programs/vscode.py +1 -1
- modal/cli/queues.py +5 -4
- modal/cli/run.py +24 -204
- modal/cli/secret.py +1 -2
- modal/cli/shell.py +375 -0
- modal/cli/utils.py +1 -13
- modal/cli/volume.py +11 -17
- modal/client.py +16 -125
- modal/client.pyi +94 -144
- modal/cloud_bucket_mount.py +3 -1
- modal/cloud_bucket_mount.pyi +4 -0
- modal/cls.py +101 -64
- modal/cls.pyi +9 -8
- modal/config.py +21 -1
- modal/container_process.py +288 -12
- modal/container_process.pyi +99 -38
- modal/dict.py +72 -33
- modal/dict.pyi +88 -57
- modal/environments.py +16 -8
- modal/environments.pyi +6 -2
- modal/exception.py +154 -16
- modal/experimental/__init__.py +24 -53
- modal/experimental/flash.py +161 -74
- modal/experimental/flash.pyi +97 -49
- modal/file_io.py +50 -92
- modal/file_io.pyi +117 -89
- modal/functions.pyi +70 -87
- modal/image.py +82 -47
- modal/image.pyi +51 -30
- modal/io_streams.py +500 -149
- modal/io_streams.pyi +279 -189
- modal/mount.py +60 -46
- modal/mount.pyi +41 -17
- modal/network_file_system.py +19 -11
- modal/network_file_system.pyi +72 -39
- modal/object.pyi +114 -22
- modal/parallel_map.py +42 -44
- modal/parallel_map.pyi +9 -17
- modal/partial_function.pyi +4 -2
- modal/proxy.py +14 -6
- modal/proxy.pyi +10 -2
- modal/queue.py +45 -38
- modal/queue.pyi +88 -52
- modal/runner.py +96 -96
- modal/runner.pyi +44 -27
- modal/sandbox.py +225 -107
- modal/sandbox.pyi +226 -60
- modal/secret.py +58 -56
- modal/secret.pyi +28 -13
- modal/serving.py +7 -11
- modal/serving.pyi +7 -8
- modal/snapshot.py +29 -15
- modal/snapshot.pyi +18 -10
- modal/token_flow.py +1 -1
- modal/token_flow.pyi +4 -6
- modal/volume.py +102 -55
- modal/volume.pyi +125 -66
- {modal-1.1.5.dev66.dist-info → modal-1.3.1.dev8.dist-info}/METADATA +10 -9
- modal-1.3.1.dev8.dist-info/RECORD +189 -0
- modal_proto/api.proto +141 -70
- modal_proto/api_grpc.py +42 -26
- modal_proto/api_pb2.py +1123 -1103
- modal_proto/api_pb2.pyi +331 -83
- modal_proto/api_pb2_grpc.py +80 -48
- modal_proto/api_pb2_grpc.pyi +26 -18
- modal_proto/modal_api_grpc.py +175 -174
- modal_proto/task_command_router.proto +164 -0
- modal_proto/task_command_router_grpc.py +138 -0
- modal_proto/task_command_router_pb2.py +180 -0
- modal_proto/{sandbox_router_pb2.pyi → task_command_router_pb2.pyi} +148 -57
- modal_proto/task_command_router_pb2_grpc.py +272 -0
- modal_proto/task_command_router_pb2_grpc.pyi +100 -0
- modal_version/__init__.py +1 -1
- modal_version/__main__.py +1 -1
- modal/cli/programs/launch_instance_ssh.py +0 -94
- modal/cli/programs/run_marimo.py +0 -95
- modal-1.1.5.dev66.dist-info/RECORD +0 -191
- modal_proto/modal_options_grpc.py +0 -3
- modal_proto/options.proto +0 -19
- modal_proto/options_grpc.py +0 -3
- modal_proto/options_pb2.py +0 -35
- modal_proto/options_pb2.pyi +0 -20
- modal_proto/options_pb2_grpc.py +0 -4
- modal_proto/options_pb2_grpc.pyi +0 -7
- modal_proto/sandbox_router.proto +0 -125
- modal_proto/sandbox_router_grpc.py +0 -89
- modal_proto/sandbox_router_pb2.py +0 -128
- modal_proto/sandbox_router_pb2_grpc.py +0 -169
- modal_proto/sandbox_router_pb2_grpc.pyi +0 -63
- {modal-1.1.5.dev66.dist-info → modal-1.3.1.dev8.dist-info}/WHEEL +0 -0
- {modal-1.1.5.dev66.dist-info → modal-1.3.1.dev8.dist-info}/entry_points.txt +0 -0
- {modal-1.1.5.dev66.dist-info → modal-1.3.1.dev8.dist-info}/licenses/LICENSE +0 -0
- {modal-1.1.5.dev66.dist-info → modal-1.3.1.dev8.dist-info}/top_level.txt +0 -0
modal/sandbox.py
CHANGED
@@ -1,10 +1,13 @@
 # Copyright Modal Labs 2022
 import asyncio
+import builtins
 import json
 import os
 import time
+import uuid
 from collections.abc import AsyncGenerator, Collection, Sequence
 from dataclasses import dataclass
+from pathlib import PurePosixPath
 from typing import TYPE_CHECKING, Any, AsyncIterator, Literal, Optional, Union, overload

 from ._pty import get_pty_info
@@ -14,32 +17,31 @@ if TYPE_CHECKING:
     import _typeshed

 from google.protobuf.message import Message
-from grpclib import GRPCError, Status

 from modal._tunnel import Tunnel
 from modal.cloud_bucket_mount import _CloudBucketMount, cloud_bucket_mounts_to_proto
 from modal.mount import _Mount
 from modal.volume import _Volume
-from modal_proto import api_pb2
+from modal_proto import api_pb2, task_command_router_pb2 as sr_pb2

+from ._load_context import LoadContext
 from ._object import _get_environment_name, _Object
 from ._resolver import Resolver
 from ._resources import convert_fn_config_to_resources_config
 from ._utils.async_utils import TaskContext, synchronize_api
 from ._utils.deprecation import deprecation_warning
-from ._utils.grpc_utils import retry_transient_errors
 from ._utils.mount_utils import validate_network_file_systems, validate_volumes
-from ._utils.name_utils import
+from ._utils.name_utils import check_object_name
+from ._utils.task_command_router_client import TaskCommandRouterClient
 from .client import _Client
 from .container_process import _ContainerProcess
-from .exception import
+from .exception import ExecutionError, InvalidError, SandboxTerminatedError, SandboxTimeoutError
 from .file_io import FileWatchEvent, FileWatchEventType, _FileIO
 from .gpu import GPU_T
 from .image import _Image
 from .io_streams import StreamReader, StreamWriter, _StreamReader, _StreamWriter
 from .network_file_system import _NetworkFileSystem, network_file_system_mount_protos
 from .proxy import _Proxy
-from .scheduler_placement import SchedulerPlacement
 from .secret import _Secret
 from .snapshot import _SandboxSnapshot
 from .stream_type import StreamType
@@ -78,16 +80,6 @@ def _validate_exec_args(args: Sequence[str]) -> None:
         )


-def _warn_if_invalid_name(name: str) -> None:
-    if not is_valid_object_name(name):
-        deprecation_warning(
-            (2025, 9, 3),
-            f"Sandbox name '{name}' will be considered invalid in a future release."
-            "\n\nNames may contain only alphanumeric characters, dashes, periods, and underscores,"
-            " must be shorter than 64 characters, and cannot conflict with App ID strings.",
-        )
-
-
 class DefaultSandboxNameOverride(str):
     """A singleton class that represents the default sandbox name override.

@@ -121,9 +113,10 @@ class _Sandbox(_Object, type_prefix="sb"):
     _stdout: _StreamReader[str]
     _stderr: _StreamReader[str]
     _stdin: _StreamWriter
-    _task_id: Optional[str]
-    _tunnels: Optional[dict[int, Tunnel]]
-    _enable_snapshot: bool
+    _task_id: Optional[str]
+    _tunnels: Optional[dict[int, Tunnel]]
+    _enable_snapshot: bool
+    _command_router_client: Optional[TaskCommandRouterClient]

     @staticmethod
     def _default_pty_info() -> api_pb2.PTYInfo:
@@ -155,7 +148,6 @@ class _Sandbox(_Object, type_prefix="sb"):
         unencrypted_ports: Sequence[int] = [],
         proxy: Optional[_Proxy] = None,
         experimental_options: Optional[dict[str, bool]] = None,
-        _experimental_scheduler_placement: Optional[SchedulerPlacement] = None,
         enable_snapshot: bool = False,
         verbose: bool = False,
     ) -> "_Sandbox":
@@ -163,12 +155,6 @@ class _Sandbox(_Object, type_prefix="sb"):

         validated_network_file_systems = validate_network_file_systems(network_file_systems)

-        scheduler_placement: Optional[SchedulerPlacement] = _experimental_scheduler_placement
-        if region:
-            if scheduler_placement:
-                raise InvalidError("`region` and `_experimental_scheduler_placement` cannot be used together")
-            scheduler_placement = SchedulerPlacement(region=region)
-
         if isinstance(gpu, list):
             raise InvalidError(
                 "Sandboxes do not support configuring a list of GPUs. "
@@ -183,6 +169,11 @@ class _Sandbox(_Object, type_prefix="sb"):
         cloud_bucket_mounts = [(k, v) for k, v in validated_volumes if isinstance(v, _CloudBucketMount)]
         validated_volumes = [(k, v) for k, v in validated_volumes if isinstance(v, _Volume)]

+        scheduler_placement: Optional[api_pb2.SchedulerPlacement] = None
+        if region:
+            regions = [region] if isinstance(region, str) else (list(region) if region else None)
+            scheduler_placement = api_pb2.SchedulerPlacement(regions=regions)
+
         if pty:
             pty_info = _Sandbox._default_pty_info()

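The hunks above drop the `_experimental_scheduler_placement` parameter and build an `api_pb2.SchedulerPlacement` directly from the public `region` argument. A minimal caller-side sketch under those changes; it assumes a configured Modal token, and the app name and region string are illustrative placeholders, not values taken from this diff:

```python
# Sketch only: region placement now goes through the public `region` argument.
import modal

app = modal.App.lookup("sandbox-region-demo", create_if_missing=True)

sb = modal.Sandbox.create(
    "sleep", "60",
    app=app,
    region="us-east",  # replaces the removed _experimental_scheduler_placement parameter
)
print(sb.object_id)
sb.terminate()
```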
@@ -199,7 +190,9 @@ class _Sandbox(_Object, type_prefix="sb"):
             deps.append(proxy)
             return deps

-        async def _load(
+        async def _load(
+            self: _Sandbox, resolver: Resolver, load_context: LoadContext, _existing_object_id: Optional[str]
+        ):
             # Relies on dicts being ordered (true as of Python 3.6).
             volume_mounts = [
                 api_pb2.VolumeMount(
@@ -257,7 +250,7 @@ class _Sandbox(_Object, type_prefix="sb"):
                 cloud_bucket_mounts=cloud_bucket_mounts_to_proto(cloud_bucket_mounts),
                 volume_mounts=volume_mounts,
                 pty_info=pty_info,
-                scheduler_placement=scheduler_placement
+                scheduler_placement=scheduler_placement,
                 worker_id=config.get("worker_id"),
                 open_ports=api_pb2.PortSpecs(ports=open_ports),
                 network_access=network_access,
@@ -268,18 +261,12 @@ class _Sandbox(_Object, type_prefix="sb"):
                 experimental_options=experimental_options,
             )

-            create_req = api_pb2.SandboxCreateRequest(app_id=
-
-                create_resp = await retry_transient_errors(resolver.client.stub.SandboxCreate, create_req)
-            except GRPCError as exc:
-                if exc.status == Status.ALREADY_EXISTS:
-                    raise AlreadyExistsError(exc.message)
-                raise exc
-
+            create_req = api_pb2.SandboxCreateRequest(app_id=load_context.app_id, definition=definition)
+            create_resp = await load_context.client.stub.SandboxCreate(create_req)
             sandbox_id = create_resp.sandbox_id
-            self._hydrate(sandbox_id,
+            self._hydrate(sandbox_id, load_context.client, None)

-        return _Sandbox._from_loader(_load, "Sandbox()", deps=_deps)
+        return _Sandbox._from_loader(_load, "Sandbox()", deps=_deps, load_context_overrides=LoadContext.empty())

     @staticmethod
     async def create(
@@ -325,9 +312,6 @@ class _Sandbox(_Object, type_prefix="sb"):
         experimental_options: Optional[dict[str, bool]] = None,
         # Enable memory snapshots.
         _experimental_enable_snapshot: bool = False,
-        _experimental_scheduler_placement: Optional[
-            SchedulerPlacement
-        ] = None,  # Experimental controls over fine-grained scheduling (alpha).
         client: Optional[_Client] = None,
         environment_name: Optional[str] = None,  # *DEPRECATED* Optionally override the default environment
         pty_info: Optional[api_pb2.PTYInfo] = None,  # *DEPRECATED* Use `pty` instead. `pty` will override `pty_info`.
@@ -389,7 +373,6 @@ class _Sandbox(_Object, type_prefix="sb"):
             proxy=proxy,
             experimental_options=experimental_options,
             _experimental_enable_snapshot=_experimental_enable_snapshot,
-            _experimental_scheduler_placement=_experimental_scheduler_placement,
             client=client,
             verbose=verbose,
             pty_info=pty_info,
@@ -423,7 +406,6 @@ class _Sandbox(_Object, type_prefix="sb"):
         proxy: Optional[_Proxy] = None,
         experimental_options: Optional[dict[str, bool]] = None,
         _experimental_enable_snapshot: bool = False,
-        _experimental_scheduler_placement: Optional[SchedulerPlacement] = None,
         client: Optional[_Client] = None,
         verbose: bool = False,
         pty_info: Optional[api_pb2.PTYInfo] = None,
@@ -438,7 +420,7 @@ class _Sandbox(_Object, type_prefix="sb"):

         _validate_exec_args(args)
         if name is not None:
-
+            check_object_name(name, "Sandbox")

         if block_network and (encrypted_ports or h2_ports or unencrypted_ports):
             raise InvalidError("Cannot specify open ports when `block_network` is enabled")
@@ -473,7 +455,6 @@ class _Sandbox(_Object, type_prefix="sb"):
            unencrypted_ports=unencrypted_ports,
            proxy=proxy,
            experimental_options=experimental_options,
-           _experimental_scheduler_placement=_experimental_scheduler_placement,
            enable_snapshot=_experimental_enable_snapshot,
            verbose=verbose,
         )
@@ -494,6 +475,7 @@ class _Sandbox(_Object, type_prefix="sb"):
             app_id = app.app_id
             app_client = app._client
         elif (container_app := _App._get_container_app()) is not None:
+            # implicit app/client provided by running in a modal Function
             app_id = container_app.app_id
             app_client = container_app._client
         else:
@@ -506,21 +488,26 @@ class _Sandbox(_Object, type_prefix="sb"):
                 "```",
             )

-        client = client or app_client
+        client = client or app_client

-        resolver = Resolver(
-
+        resolver = Resolver()
+        load_context = LoadContext(client=client, app_id=app_id)
+        await resolver.load(obj, load_context)
         return obj

     def _hydrate_metadata(self, handle_metadata: Optional[Message]):
-        self._stdout
+        self._stdout = StreamReader(
             api_pb2.FILE_DESCRIPTOR_STDOUT, self.object_id, "sandbox", self._client, by_line=True
         )
-        self._stderr
+        self._stderr = StreamReader(
             api_pb2.FILE_DESCRIPTOR_STDERR, self.object_id, "sandbox", self._client, by_line=True
         )
         self._stdin = StreamWriter(self.object_id, "sandbox", self._client)
         self._result = None
+        self._task_id = None
+        self._tunnels = None
+        self._enable_snapshot = False
+        self._command_router_client = None

     @staticmethod
     async def from_name(
@@ -540,7 +527,7 @@ class _Sandbox(_Object, type_prefix="sb"):
         env_name = _get_environment_name(environment_name)

         req = api_pb2.SandboxGetFromNameRequest(sandbox_name=name, app_name=app_name, environment_name=env_name)
-        resp = await
+        resp = await client.stub.SandboxGetFromName(req)
         return _Sandbox._new_hydrated(resp.sandbox_id, client, None)

     @staticmethod
@@ -553,7 +540,7 @@ class _Sandbox(_Object, type_prefix="sb"):
             client = await _Client.from_env()

         req = api_pb2.SandboxWaitRequest(sandbox_id=sandbox_id, timeout=0)
-        resp = await
+        resp = await client.stub.SandboxWait(req)

         obj = _Sandbox._new_hydrated(sandbox_id, client, None)

@@ -565,10 +552,7 @@ class _Sandbox(_Object, type_prefix="sb"):
     async def get_tags(self) -> dict[str, str]:
         """Fetches any tags (key-value pairs) currently attached to this Sandbox from the server."""
         req = api_pb2.SandboxTagsGetRequest(sandbox_id=self.object_id)
-
-            resp = await retry_transient_errors(self._client.stub.SandboxTagsGet, req)
-        except GRPCError as exc:
-            raise InvalidError(exc.message) if exc.status == Status.INVALID_ARGUMENT else exc
+        resp = await self._client.stub.SandboxTagsGet(req)

         return {tag.tag_name: tag.tag_value for tag in resp.tags}

@@ -589,10 +573,7 @@ class _Sandbox(_Object, type_prefix="sb"):
             sandbox_id=self.object_id,
             tags=tags_list,
         )
-
-            await retry_transient_errors(self._client.stub.SandboxTagsSet, req)
-        except GRPCError as exc:
-            raise InvalidError(exc.message) if exc.status == Status.INVALID_ARGUMENT else exc
+        await self._client.stub.SandboxTagsSet(req)

     async def snapshot_filesystem(self, timeout: int = 55) -> _Image:
         """Snapshot the filesystem of the Sandbox.
@@ -602,7 +583,7 @@ class _Sandbox(_Object, type_prefix="sb"):
         """
         await self._get_task_id()  # Ensure the sandbox has started
         req = api_pb2.SandboxSnapshotFsRequest(sandbox_id=self.object_id, timeout=timeout)
-        resp = await
+        resp = await self._client.stub.SandboxSnapshotFs(req)

         if resp.result.status != api_pb2.GenericResult.GENERIC_STATUS_SUCCESS:
             raise ExecutionError(resp.result.exception)
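The tag and snapshot hunks above swap `retry_transient_errors(...)`/`GRPCError` handling for direct stub calls; the public methods are otherwise unchanged. A hedged usage sketch (app name is illustrative, and `set_tags` is assumed from the `SandboxTagsSet` request built above):

```python
# Sketch only: exercises the unchanged public surface of the methods whose
# internals the hunks above rewrite; assumes a configured Modal token.
import modal

app = modal.App.lookup("sandbox-snapshot-demo", create_if_missing=True)
sb = modal.Sandbox.create("sleep", "300", app=app)

sb.set_tags({"team": "ml"})       # assumed public wrapper around SandboxTagsSet
print(sb.get_tags())              # -> {"team": "ml"}

sb.exec("bash", "-c", "echo hello > /tmp/marker").wait()
image = sb.snapshot_filesystem()  # returns a modal.Image of the current filesystem
print(image.object_id)

sb.terminate()
```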
@@ -610,16 +591,60 @@ class _Sandbox(_Object, type_prefix="sb"):
         image_id = resp.image_id
         metadata = resp.image_metadata

-        async def _load(self: _Image, resolver: Resolver, existing_object_id: Optional[str]):
+        async def _load(self: _Image, resolver: Resolver, load_context: LoadContext, existing_object_id: Optional[str]):
             # no need to hydrate again since we do it eagerly below
             pass

         rep = "Image()"
-
+        # TODO: use ._new_hydrated instead
+        image = _Image._from_loader(_load, rep, hydrate_lazily=True, load_context_overrides=LoadContext.empty())
         image._hydrate(image_id, self._client, metadata)  # hydrating eagerly since we have all of the data

         return image

+    async def _experimental_mount_image(self, path: Union[PurePosixPath, str], image: Optional[_Image]):
+        """Mount an Image at a path in the Sandbox filesystem."""
+
+        image_id = None
+
+        if image:
+            if not image._object_id:
+                # FIXME
+                raise InvalidError("Image has not been built.")
+            image_id = image._object_id
+        else:
+            image_id = ""  # empty string indicates mount an empty dir
+
+        task_id = await self._get_task_id()
+        if (command_router_client := await self._get_command_router_client(task_id)) is None:
+            raise InvalidError("Mounting directories requires direct Sandbox control - please contact Modal support.")
+
+        posix_path = PurePosixPath(path)
+        if not posix_path.is_absolute():
+            raise InvalidError(f"Mount path must be absolute; got: {posix_path}")
+        path_bytes = posix_path.as_posix().encode("utf8")
+
+        req = sr_pb2.TaskMountDirectoryRequest(task_id=task_id, path=path_bytes, image_id=image_id)
+        await command_router_client.mount_image(req)
+
+    async def _experimental_snapshot_directory(self, path: Union[PurePosixPath, str]) -> _Image:
+        """Snapshot local changes to a previously mounted Image, creating a new Image."""
+
+        task_id = await self._get_task_id()
+        if (command_router_client := await self._get_command_router_client(task_id)) is None:
+            raise InvalidError(
+                "Snapshotting directories requires direct Sandbox control - please contact Modal support."
+            )
+
+        posix_path = PurePosixPath(path)
+        if not posix_path.is_absolute():
+            raise InvalidError(f"Snapshot path must be absolute; got: {posix_path}")
+        path_bytes = posix_path.as_posix().encode("utf8")
+
+        req = sr_pb2.TaskSnapshotDirectoryRequest(task_id=task_id, path=path_bytes)
+        res = await command_router_client.snapshot_directory(req)
+        return _Image._new_hydrated(res.image_id, self._client, None)
+
     # Live handle methods

     async def wait(self, raise_on_termination: bool = True):
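`_experimental_mount_image` and `_experimental_snapshot_directory` are new private, experimental methods that require the task command router ("direct Sandbox control") and otherwise raise `InvalidError`. A hedged sketch of how they might be exercised; availability and the mount path are assumptions based only on the hunk above:

```python
# Sketch only: private experimental APIs from the hunk above; they raise
# InvalidError unless a TaskCommandRouterClient can be initialized for the task.
import modal

app = modal.App.lookup("sandbox-mount-demo", create_if_missing=True)
sb = modal.Sandbox.create("sleep", "300", app=app)

# Mount an empty directory at an absolute path (image=None -> empty dir).
sb._experimental_mount_image("/mnt/scratch", image=None)

# Write something under the mounted path inside the Sandbox.
sb.exec("bash", "-c", "echo data > /mnt/scratch/out.txt").wait()

# Capture the mounted directory's contents as a new, eagerly hydrated Image.
scratch_image = sb._experimental_snapshot_directory("/mnt/scratch")
print(scratch_image.object_id)

sb.terminate()
```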
@@ -627,7 +652,7 @@ class _Sandbox(_Object, type_prefix="sb"):

         while True:
             req = api_pb2.SandboxWaitRequest(sandbox_id=self.object_id, timeout=10)
-            resp = await
+            resp = await self._client.stub.SandboxWait(req)
             if resp.result.status:
                 logger.debug(f"Sandbox {self.object_id} wait completed with status {resp.result.status}")
                 self._result = resp.result
@@ -653,7 +678,7 @@ class _Sandbox(_Object, type_prefix="sb"):
             return self._tunnels

         req = api_pb2.SandboxGetTunnelsRequest(sandbox_id=self.object_id, timeout=timeout)
-        resp = await
+        resp = await self._client.stub.SandboxGetTunnels(req)

         # If we couldn't get the tunnels in time, report the timeout.
         if resp.result.status == api_pb2.GenericResult.GENERIC_STATUS_TIMEOUT:
@@ -669,10 +694,11 @@ class _Sandbox(_Object, type_prefix="sb"):
     async def create_connect_token(
         self, user_metadata: Optional[Union[str, dict[str, Any]]] = None
     ) -> SandboxConnectCredentials:
-        """
+        """
+        [Alpha] Create a token for making HTTP connections to the Sandbox.

         Also accepts an optional user_metadata string or dict to associate with the token. This metadata
-        will be added to the headers by the proxy when forwarding requests to the
+        will be added to the headers by the proxy when forwarding requests to the Sandbox."""
         if user_metadata is not None and isinstance(user_metadata, dict):
             try:
                 user_metadata = json.dumps(user_metadata)
@@ -680,7 +706,7 @@ class _Sandbox(_Object, type_prefix="sb"):
                 raise InvalidError(f"Failed to serialize user_metadata: {e}")

         req = api_pb2.SandboxCreateConnectTokenRequest(sandbox_id=self.object_id, user_metadata=user_metadata)
-        resp = await
+        resp = await self._client.stub.SandboxCreateConnectToken(req)
         return SandboxConnectCredentials(resp.url, resp.token)

     async def reload_volumes(self) -> None:
@@ -689,8 +715,7 @@ class _Sandbox(_Object, type_prefix="sb"):
         Added in v1.1.0.
         """
         task_id = await self._get_task_id()
-        await
-            self._client.stub.ContainerReloadVolumes,
+        await self._client.stub.ContainerReloadVolumes(
             api_pb2.ContainerReloadVolumesRequest(
                 task_id=task_id,
             ),
@@ -701,9 +726,7 @@ class _Sandbox(_Object, type_prefix="sb"):

         This is a no-op if the Sandbox has already finished running."""

-        await
-            self._client.stub.SandboxTerminate, api_pb2.SandboxTerminateRequest(sandbox_id=self.object_id)
-        )
+        await self._client.stub.SandboxTerminate(api_pb2.SandboxTerminateRequest(sandbox_id=self.object_id))

     async def poll(self) -> Optional[int]:
         """Check if the Sandbox has finished running.
@@ -712,7 +735,7 @@ class _Sandbox(_Object, type_prefix="sb"):
         """

         req = api_pb2.SandboxWaitRequest(sandbox_id=self.object_id, timeout=0)
-        resp = await
+        resp = await self._client.stub.SandboxWait(req)

         if resp.result.status:
             self._result = resp.result
@@ -721,14 +744,19 @@ class _Sandbox(_Object, type_prefix="sb"):

     async def _get_task_id(self) -> str:
         while not self._task_id:
-            resp = await
-                self._client.stub.SandboxGetTaskId, api_pb2.SandboxGetTaskIdRequest(sandbox_id=self.object_id)
-            )
+            resp = await self._client.stub.SandboxGetTaskId(api_pb2.SandboxGetTaskIdRequest(sandbox_id=self.object_id))
             self._task_id = resp.task_id
             if not self._task_id:
                 await asyncio.sleep(0.5)
         return self._task_id

+    async def _get_command_router_client(self, task_id: str) -> Optional[TaskCommandRouterClient]:
+        if self._command_router_client is None:
+            # Attempt to initialize a router client. Returns None if the new exec path not enabled
+            # for this sandbox.
+            self._command_router_client = await TaskCommandRouterClient.try_init(self._client, task_id)
+        return self._command_router_client
+
     @overload
     async def exec(
         self,
@@ -790,13 +818,8 @@ class _Sandbox(_Object, type_prefix="sb"):

         **Usage**

-        ```python
-
-
-        sandbox = modal.Sandbox.create("sleep", "infinity", app=app)
-
-        process = sandbox.exec("bash", "-c", "for i in $(seq 1 10); do echo foo $i; sleep 0.5; done")
-
+        ```python fixture:sandbox
+        process = sandbox.exec("bash", "-c", "for i in $(seq 1 3); do echo foo $i; sleep 0.1; done")
         for line in process.stdout:
             print(line)
         ```
@@ -854,21 +877,57 @@ class _Sandbox(_Object, type_prefix="sb"):
             await TaskContext.gather(*secret_coros)

         task_id = await self._get_task_id()
+        kwargs = {
+            "task_id": task_id,
+            "pty_info": pty_info,
+            "stdout": stdout,
+            "stderr": stderr,
+            "timeout": timeout,
+            "workdir": workdir,
+            "secret_ids": [secret.object_id for secret in secrets],
+            "text": text,
+            "bufsize": bufsize,
+            "runtime_debug": config.get("function_runtime_debug"),
+        }
+        # NB: This must come after the task ID is set, since the sandbox must be
+        # scheduled before we can create a router client.
+        if (command_router_client := await self._get_command_router_client(task_id)) is not None:
+            kwargs["command_router_client"] = command_router_client
+            return await self._exec_through_command_router(*args, **kwargs)
+        else:
+            return await self._exec_through_server(*args, **kwargs)
+
+    async def _exec_through_server(
+        self,
+        *args: str,
+        task_id: str,
+        pty_info: Optional[api_pb2.PTYInfo] = None,
+        stdout: StreamType = StreamType.PIPE,
+        stderr: StreamType = StreamType.PIPE,
+        timeout: Optional[int] = None,
+        workdir: Optional[str] = None,
+        secret_ids: Optional[Collection[str]] = None,
+        text: bool = True,
+        bufsize: Literal[-1, 1] = -1,
+        runtime_debug: bool = False,
+    ) -> Union[_ContainerProcess[bytes], _ContainerProcess[str]]:
+        """Execute a command through the Modal server."""
         req = api_pb2.ContainerExecRequest(
             task_id=task_id,
             command=args,
             pty_info=pty_info,
-            runtime_debug=
+            runtime_debug=runtime_debug,
             timeout_secs=timeout or 0,
             workdir=workdir,
-            secret_ids=
+            secret_ids=secret_ids,
         )
-        resp = await
+        resp = await self._client.stub.ContainerExec(req)
         by_line = bufsize == 1
         exec_deadline = time.monotonic() + int(timeout) + CONTAINER_EXEC_TIMEOUT_BUFFER if timeout else None
         logger.debug(f"Created ContainerProcess for exec_id {resp.exec_id} on Sandbox {self.object_id}")
         return _ContainerProcess(
             resp.exec_id,
+            task_id,
             self._client,
             stdout=stdout,
             stderr=stderr,
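`exec()` now assembles one kwargs dict and dispatches transparently: it uses `_exec_through_command_router` when a `TaskCommandRouterClient` can be initialized for the task, and falls back to `_exec_through_server` otherwise, so callers are unaffected. A hedged sketch of the unchanged caller-side API (names are placeholders):

```python
# Sketch only: the server/command-router split is internal; callers use exec()
# exactly as before. Assumes a configured Modal token.
import modal
from modal.stream_type import StreamType

app = modal.App.lookup("sandbox-exec-demo", create_if_missing=True)
sb = modal.Sandbox.create("sleep", "300", app=app)

p = sb.exec(
    "bash", "-c", "for i in $(seq 1 3); do echo foo $i; sleep 0.1; done",
    stdout=StreamType.PIPE,     # buffered; read below via p.stdout
    stderr=StreamType.DEVNULL,  # discard stderr
    timeout=30,
)
for line in p.stdout:
    print(line, end="")
p.wait()
print("exit code:", p.returncode)

sb.terminate()
```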
@@ -877,26 +936,95 @@ class _Sandbox(_Object, type_prefix="sb"):
             by_line=by_line,
         )

+    async def _exec_through_command_router(
+        self,
+        *args: str,
+        task_id: str,
+        command_router_client: TaskCommandRouterClient,
+        pty_info: Optional[api_pb2.PTYInfo] = None,
+        stdout: StreamType = StreamType.PIPE,
+        stderr: StreamType = StreamType.PIPE,
+        timeout: Optional[int] = None,
+        workdir: Optional[str] = None,
+        secret_ids: Optional[Collection[str]] = None,
+        text: bool = True,
+        bufsize: Literal[-1, 1] = -1,
+        runtime_debug: bool = False,
+    ) -> Union[_ContainerProcess[bytes], _ContainerProcess[str]]:
+        """Execute a command through a task command router running on the Modal worker."""
+
+        # Generate a random process ID to use as a combination of idempotency key/process identifier.
+        process_id = str(uuid.uuid4())
+        if stdout == StreamType.PIPE:
+            stdout_config = sr_pb2.TaskExecStdoutConfig.TASK_EXEC_STDOUT_CONFIG_PIPE
+        elif stdout == StreamType.DEVNULL:
+            stdout_config = sr_pb2.TaskExecStdoutConfig.TASK_EXEC_STDOUT_CONFIG_DEVNULL
+        elif stdout == StreamType.STDOUT:
+            # Stream stdout to the client so that it can be printed locally in the reader.
+            stdout_config = sr_pb2.TaskExecStdoutConfig.TASK_EXEC_STDOUT_CONFIG_PIPE
+        else:
+            raise ValueError("Unsupported StreamType for stdout")
+
+        if stderr == StreamType.PIPE:
+            stderr_config = sr_pb2.TaskExecStderrConfig.TASK_EXEC_STDERR_CONFIG_PIPE
+        elif stderr == StreamType.DEVNULL:
+            stderr_config = sr_pb2.TaskExecStderrConfig.TASK_EXEC_STDERR_CONFIG_DEVNULL
+        elif stderr == StreamType.STDOUT:
+            # Stream stderr to the client so that it can be printed locally in the reader.
+            stderr_config = sr_pb2.TaskExecStderrConfig.TASK_EXEC_STDERR_CONFIG_PIPE
+        else:
+            raise ValueError("Unsupported StreamType for stderr")
+
+        # Start the process.
+        start_req = sr_pb2.TaskExecStartRequest(
+            task_id=task_id,
+            exec_id=process_id,
+            command_args=args,
+            stdout_config=stdout_config,
+            stderr_config=stderr_config,
+            timeout_secs=timeout,
+            workdir=workdir,
+            secret_ids=secret_ids,
+            pty_info=pty_info,
+            runtime_debug=runtime_debug,
+        )
+        _ = await command_router_client.exec_start(start_req)
+
+        return _ContainerProcess(
+            process_id,
+            task_id,
+            self._client,
+            command_router_client=command_router_client,
+            stdout=stdout,
+            stderr=stderr,
+            text=text,
+            by_line=bufsize == 1,
+            exec_deadline=time.monotonic() + int(timeout) if timeout else None,
+        )
+
     async def _experimental_snapshot(self) -> _SandboxSnapshot:
         await self._get_task_id()
         snap_req = api_pb2.SandboxSnapshotRequest(sandbox_id=self.object_id)
-        snap_resp = await
+        snap_resp = await self._client.stub.SandboxSnapshot(snap_req)

         snapshot_id = snap_resp.snapshot_id

         # wait for the snapshot to succeed. this is implemented as a second idempotent rpc
         # because the snapshot itself may take a while to complete.
         wait_req = api_pb2.SandboxSnapshotWaitRequest(snapshot_id=snapshot_id, timeout=55.0)
-        wait_resp = await
+        wait_resp = await self._client.stub.SandboxSnapshotWait(wait_req)
         if wait_resp.result.status != api_pb2.GenericResult.GENERIC_STATUS_SUCCESS:
             raise ExecutionError(wait_resp.result.exception)

-        async def _load(
+        async def _load(
+            self: _SandboxSnapshot, resolver: Resolver, load_context: LoadContext, existing_object_id: Optional[str]
+        ):
             # we eagerly hydrate the sandbox snapshot below
             pass

         rep = "SandboxSnapshot()"
-
+        # TODO: use ._new_hydrated instead
+        obj = _SandboxSnapshot._from_loader(_load, rep, hydrate_lazily=True, load_context_overrides=LoadContext.empty())
         obj._hydrate(snapshot_id, self._client, None)

         return obj
@@ -911,7 +1039,7 @@ class _Sandbox(_Object, type_prefix="sb"):
         client = client or await _Client.from_env()

         if name is not None and name != _DEFAULT_SANDBOX_NAME_OVERRIDE:
-
+            check_object_name(name, "Sandbox")

         if name is _DEFAULT_SANDBOX_NAME_OVERRIDE:
             restore_req = api_pb2.SandboxRestoreRequest(
@@ -929,21 +1057,14 @@ class _Sandbox(_Object, type_prefix="sb"):
                 sandbox_name_override=name,
                 sandbox_name_override_type=api_pb2.SandboxRestoreRequest.SANDBOX_NAME_OVERRIDE_TYPE_STRING,
             )
-
-            restore_resp: api_pb2.SandboxRestoreResponse = await retry_transient_errors(
-                client.stub.SandboxRestore, restore_req
-            )
-        except GRPCError as exc:
-            if exc.status == Status.ALREADY_EXISTS:
-                raise AlreadyExistsError(exc.message)
-            raise exc
+        restore_resp: api_pb2.SandboxRestoreResponse = await client.stub.SandboxRestore(restore_req)

         sandbox = await _Sandbox.from_id(restore_resp.sandbox_id, client)

         task_id_req = api_pb2.SandboxGetTaskIdRequest(
             sandbox_id=restore_resp.sandbox_id, wait_until_ready=True, timeout=55.0
         )
-        resp = await
+        resp = await client.stub.SandboxGetTaskId(task_id_req)
         if resp.task_result.status not in [
             api_pb2.GenericResult.GENERIC_STATUS_UNSPECIFIED,
             api_pb2.GenericResult.GENERIC_STATUS_SUCCESS,
@@ -986,7 +1107,7 @@ class _Sandbox(_Object, type_prefix="sb"):
         task_id = await self._get_task_id()
         return await _FileIO.create(path, mode, self._client, task_id)

-    async def ls(self, path: str) -> list[str]:
+    async def ls(self, path: str) -> builtins.list[str]:
         """[Alpha] List the contents of a directory in the Sandbox."""
         task_id = await self._get_task_id()
         return await _FileIO.ls(path, self._client, task_id)
@@ -1004,7 +1125,7 @@ class _Sandbox(_Object, type_prefix="sb"):
     async def watch(
         self,
         path: str,
-        filter: Optional[list[FileWatchEventType]] = None,
+        filter: Optional[builtins.list[FileWatchEventType]] = None,
         recursive: Optional[bool] = None,
         timeout: Optional[int] = None,
     ) -> AsyncIterator[FileWatchEvent]:
@@ -1077,10 +1198,7 @@ class _Sandbox(_Object, type_prefix="sb"):
         )

         # Fetches a batch of sandboxes.
-
-            resp = await retry_transient_errors(client.stub.SandboxList, req)
-        except GRPCError as exc:
-            raise InvalidError(exc.message) if exc.status == Status.INVALID_ARGUMENT else exc
+        resp = await client.stub.SandboxList(req)

         if not resp.sandboxes:
             return