modal 1.1.5.dev83__py3-none-any.whl → 1.3.1.dev8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this release is flagged as potentially problematic.
- modal/__init__.py +4 -4
- modal/__main__.py +4 -29
- modal/_billing.py +84 -0
- modal/_clustered_functions.py +1 -3
- modal/_container_entrypoint.py +33 -208
- modal/_functions.py +146 -121
- modal/_grpc_client.py +191 -0
- modal/_ipython.py +16 -6
- modal/_load_context.py +106 -0
- modal/_object.py +72 -21
- modal/_output.py +12 -14
- modal/_partial_function.py +31 -4
- modal/_resolver.py +44 -57
- modal/_runtime/container_io_manager.py +26 -28
- modal/_runtime/container_io_manager.pyi +42 -44
- modal/_runtime/gpu_memory_snapshot.py +9 -7
- modal/_runtime/user_code_event_loop.py +80 -0
- modal/_runtime/user_code_imports.py +236 -10
- modal/_serialization.py +2 -1
- modal/_traceback.py +4 -13
- modal/_tunnel.py +16 -11
- modal/_tunnel.pyi +25 -3
- modal/_utils/async_utils.py +337 -10
- modal/_utils/auth_token_manager.py +1 -4
- modal/_utils/blob_utils.py +29 -22
- modal/_utils/function_utils.py +20 -21
- modal/_utils/grpc_testing.py +6 -3
- modal/_utils/grpc_utils.py +223 -64
- modal/_utils/mount_utils.py +26 -1
- modal/_utils/package_utils.py +0 -1
- modal/_utils/rand_pb_testing.py +8 -1
- modal/_utils/task_command_router_client.py +524 -0
- modal/_vendor/cloudpickle.py +144 -48
- modal/app.py +215 -96
- modal/app.pyi +78 -37
- modal/billing.py +5 -0
- modal/builder/2025.06.txt +6 -3
- modal/builder/PREVIEW.txt +2 -1
- modal/builder/base-images.json +4 -2
- modal/cli/_download.py +19 -3
- modal/cli/cluster.py +4 -2
- modal/cli/config.py +3 -1
- modal/cli/container.py +5 -4
- modal/cli/dict.py +5 -2
- modal/cli/entry_point.py +26 -2
- modal/cli/environment.py +2 -16
- modal/cli/launch.py +1 -76
- modal/cli/network_file_system.py +5 -20
- modal/cli/queues.py +5 -4
- modal/cli/run.py +24 -204
- modal/cli/secret.py +1 -2
- modal/cli/shell.py +375 -0
- modal/cli/utils.py +1 -13
- modal/cli/volume.py +11 -17
- modal/client.py +16 -125
- modal/client.pyi +94 -144
- modal/cloud_bucket_mount.py +3 -1
- modal/cloud_bucket_mount.pyi +4 -0
- modal/cls.py +101 -64
- modal/cls.pyi +9 -8
- modal/config.py +21 -1
- modal/container_process.py +288 -12
- modal/container_process.pyi +99 -38
- modal/dict.py +72 -33
- modal/dict.pyi +88 -57
- modal/environments.py +16 -8
- modal/environments.pyi +6 -2
- modal/exception.py +154 -16
- modal/experimental/__init__.py +23 -5
- modal/experimental/flash.py +161 -74
- modal/experimental/flash.pyi +97 -49
- modal/file_io.py +50 -92
- modal/file_io.pyi +117 -89
- modal/functions.pyi +70 -87
- modal/image.py +73 -47
- modal/image.pyi +33 -30
- modal/io_streams.py +500 -149
- modal/io_streams.pyi +279 -189
- modal/mount.py +60 -45
- modal/mount.pyi +41 -17
- modal/network_file_system.py +19 -11
- modal/network_file_system.pyi +72 -39
- modal/object.pyi +114 -22
- modal/parallel_map.py +42 -44
- modal/parallel_map.pyi +9 -17
- modal/partial_function.pyi +4 -2
- modal/proxy.py +14 -6
- modal/proxy.pyi +10 -2
- modal/queue.py +45 -38
- modal/queue.pyi +88 -52
- modal/runner.py +96 -96
- modal/runner.pyi +44 -27
- modal/sandbox.py +225 -108
- modal/sandbox.pyi +226 -63
- modal/secret.py +58 -56
- modal/secret.pyi +28 -13
- modal/serving.py +7 -11
- modal/serving.pyi +7 -8
- modal/snapshot.py +29 -15
- modal/snapshot.pyi +18 -10
- modal/token_flow.py +1 -1
- modal/token_flow.pyi +4 -6
- modal/volume.py +102 -55
- modal/volume.pyi +125 -66
- {modal-1.1.5.dev83.dist-info → modal-1.3.1.dev8.dist-info}/METADATA +10 -9
- modal-1.3.1.dev8.dist-info/RECORD +189 -0
- modal_proto/api.proto +86 -30
- modal_proto/api_grpc.py +10 -25
- modal_proto/api_pb2.py +1080 -1047
- modal_proto/api_pb2.pyi +253 -79
- modal_proto/api_pb2_grpc.py +14 -48
- modal_proto/api_pb2_grpc.pyi +6 -18
- modal_proto/modal_api_grpc.py +175 -176
- modal_proto/{sandbox_router.proto → task_command_router.proto} +62 -45
- modal_proto/task_command_router_grpc.py +138 -0
- modal_proto/task_command_router_pb2.py +180 -0
- modal_proto/{sandbox_router_pb2.pyi → task_command_router_pb2.pyi} +110 -63
- modal_proto/task_command_router_pb2_grpc.py +272 -0
- modal_proto/task_command_router_pb2_grpc.pyi +100 -0
- modal_version/__init__.py +1 -1
- modal_version/__main__.py +1 -1
- modal/cli/programs/launch_instance_ssh.py +0 -94
- modal/cli/programs/run_marimo.py +0 -95
- modal-1.1.5.dev83.dist-info/RECORD +0 -191
- modal_proto/modal_options_grpc.py +0 -3
- modal_proto/options.proto +0 -19
- modal_proto/options_grpc.py +0 -3
- modal_proto/options_pb2.py +0 -35
- modal_proto/options_pb2.pyi +0 -20
- modal_proto/options_pb2_grpc.py +0 -4
- modal_proto/options_pb2_grpc.pyi +0 -7
- modal_proto/sandbox_router_grpc.py +0 -105
- modal_proto/sandbox_router_pb2.py +0 -148
- modal_proto/sandbox_router_pb2_grpc.py +0 -203
- modal_proto/sandbox_router_pb2_grpc.pyi +0 -75
- {modal-1.1.5.dev83.dist-info → modal-1.3.1.dev8.dist-info}/WHEEL +0 -0
- {modal-1.1.5.dev83.dist-info → modal-1.3.1.dev8.dist-info}/entry_points.txt +0 -0
- {modal-1.1.5.dev83.dist-info → modal-1.3.1.dev8.dist-info}/licenses/LICENSE +0 -0
- {modal-1.1.5.dev83.dist-info → modal-1.3.1.dev8.dist-info}/top_level.txt +0 -0
modal/experimental/flash.pyi
CHANGED
@@ -11,6 +11,9 @@ class _FlashManager:
         port: int,
         process: typing.Optional[subprocess.Popen] = None,
         health_check_url: typing.Optional[str] = None,
+        startup_timeout: int = 30,
+        exit_grace_period: int = 0,
+        h2_enabled: bool = False,
     ):
         """Initialize self. See help(type(self)) for accurate signature."""
         ...
@@ -28,8 +31,6 @@ class _FlashManager:
     async def stop(self): ...
     async def close(self): ...

-SUPERSELF = typing.TypeVar("SUPERSELF", covariant=True)
-
 class FlashManager:
     def __init__(
         self,
@@ -37,9 +38,12 @@ class FlashManager:
         port: int,
         process: typing.Optional[subprocess.Popen] = None,
         health_check_url: typing.Optional[str] = None,
+        startup_timeout: int = 30,
+        exit_grace_period: int = 0,
+        h2_enabled: bool = False,
     ): ...

-    class __is_port_connection_healthy_spec(typing_extensions.Protocol[SUPERSELF]):
+    class __is_port_connection_healthy_spec(typing_extensions.Protocol):
         def __call__(
             self, /, process: typing.Optional[subprocess.Popen], timeout: float = 0.5
         ) -> tuple[bool, typing.Optional[Exception]]: ...
@@ -47,15 +51,15 @@ class FlashManager:
             self, /, process: typing.Optional[subprocess.Popen], timeout: float = 0.5
         ) -> tuple[bool, typing.Optional[Exception]]: ...

-    is_port_connection_healthy: __is_port_connection_healthy_spec[typing_extensions.Self]
+    is_port_connection_healthy: __is_port_connection_healthy_spec

-    class ___start_spec(typing_extensions.Protocol[SUPERSELF]):
+    class ___start_spec(typing_extensions.Protocol):
         def __call__(self, /): ...
         async def aio(self, /): ...

-    _start: ___start_spec[typing_extensions.Self]
+    _start: ___start_spec

-    class ___drain_container_spec(typing_extensions.Protocol[SUPERSELF]):
+    class ___drain_container_spec(typing_extensions.Protocol):
         def __call__(self, /):
             """Background task that checks if we've encountered too many failures and drains the container if so."""
             ...
@@ -64,27 +68,27 @@ class FlashManager:
             """Background task that checks if we've encountered too many failures and drains the container if so."""
             ...

-    _drain_container: ___drain_container_spec[typing_extensions.Self]
+    _drain_container: ___drain_container_spec

-    class ___run_heartbeat_spec(typing_extensions.Protocol[SUPERSELF]):
+    class ___run_heartbeat_spec(typing_extensions.Protocol):
         def __call__(self, /, host: str, port: int): ...
         async def aio(self, /, host: str, port: int): ...

-    _run_heartbeat: ___run_heartbeat_spec[typing_extensions.Self]
+    _run_heartbeat: ___run_heartbeat_spec

     def get_container_url(self): ...

-    class __stop_spec(typing_extensions.Protocol[SUPERSELF]):
+    class __stop_spec(typing_extensions.Protocol):
         def __call__(self, /): ...
         async def aio(self, /): ...

-    stop: __stop_spec[typing_extensions.Self]
+    stop: __stop_spec

-    class __close_spec(typing_extensions.Protocol[SUPERSELF]):
+    class __close_spec(typing_extensions.Protocol):
         def __call__(self, /): ...
         async def aio(self, /): ...

-    close: __close_spec[typing_extensions.Self]
+    close: __close_spec

 class __flash_forward_spec(typing_extensions.Protocol):
     def __call__(
@@ -93,6 +97,9 @@ class __flash_forward_spec(typing_extensions.Protocol):
         port: int,
         process: typing.Optional[subprocess.Popen] = None,
         health_check_url: typing.Optional[str] = None,
+        startup_timeout: int = 30,
+        exit_grace_period: int = 0,
+        h2_enabled: bool = False,
     ) -> FlashManager:
         """Forward a port to the Modal Flash service, exposing that port as a stable web endpoint.
         This is a highly experimental method that can break or be removed at any time without warning.
@@ -106,6 +113,9 @@ class __flash_forward_spec(typing_extensions.Protocol):
         port: int,
         process: typing.Optional[subprocess.Popen] = None,
         health_check_url: typing.Optional[str] = None,
+        startup_timeout: int = 30,
+        exit_grace_period: int = 0,
+        h2_enabled: bool = False,
     ) -> FlashManager:
         """Forward a port to the Modal Flash service, exposing that port as a stable web endpoint.
         This is a highly experimental method that can break or be removed at any time without warning.
@@ -139,7 +149,7 @@ class _FlashPrometheusAutoscaler:
     async def start(self): ...
     async def _run_autoscaler_loop(self): ...
     async def _compute_target_containers(self, current_replicas: int) -> int:
-        """Gets
+        """Gets metrics from container to autoscale up or down."""
         ...

     def _calculate_desired_replicas(
@@ -154,13 +164,10 @@ class _FlashPrometheusAutoscaler:
         ...

     async def _get_scaling_info(self, containers) -> tuple[float, int]:
-        """Get metrics using
+        """Get metrics using container exposed metrics endpoints."""
         ...

     async def _get_metrics(self, url: str) -> typing.Optional[dict[str, list[typing.Any]]]: ...
-    async def _get_container_metrics(
-        self, container_id: str
-    ) -> typing.Optional[modal_proto.api_pb2.TaskGetAutoscalingMetricsResponse]: ...
     async def _get_all_containers(self): ...
     async def _set_target_slots(self, target_slots: int): ...
     def _make_scaling_decision(
@@ -212,28 +219,28 @@ class FlashPrometheusAutoscaler:
         autoscaling_interval_seconds: int,
     ): ...

-    class __start_spec(typing_extensions.Protocol[SUPERSELF]):
+    class __start_spec(typing_extensions.Protocol):
         def __call__(self, /): ...
         async def aio(self, /): ...

-    start: __start_spec[typing_extensions.Self]
+    start: __start_spec

-    class ___run_autoscaler_loop_spec(typing_extensions.Protocol[SUPERSELF]):
+    class ___run_autoscaler_loop_spec(typing_extensions.Protocol):
         def __call__(self, /): ...
         async def aio(self, /): ...

-    _run_autoscaler_loop: ___run_autoscaler_loop_spec[typing_extensions.Self]
+    _run_autoscaler_loop: ___run_autoscaler_loop_spec

-    class ___compute_target_containers_spec(typing_extensions.Protocol[SUPERSELF]):
+    class ___compute_target_containers_spec(typing_extensions.Protocol):
         def __call__(self, /, current_replicas: int) -> int:
-            """Gets
+            """Gets metrics from container to autoscale up or down."""
             ...

         async def aio(self, /, current_replicas: int) -> int:
-            """Gets
+            """Gets metrics from container to autoscale up or down."""
             ...

-    _compute_target_containers: ___compute_target_containers_spec[typing_extensions.Self]
+    _compute_target_containers: ___compute_target_containers_spec

     def _calculate_desired_replicas(
         self,
@@ -246,44 +253,34 @@ class FlashPrometheusAutoscaler:
         """Calculate the desired number of replicas to autoscale to."""
         ...

-    class ___get_scaling_info_spec(typing_extensions.Protocol[SUPERSELF]):
+    class ___get_scaling_info_spec(typing_extensions.Protocol):
         def __call__(self, /, containers) -> tuple[float, int]:
-            """Get metrics using
+            """Get metrics using container exposed metrics endpoints."""
             ...

         async def aio(self, /, containers) -> tuple[float, int]:
-            """Get metrics using
+            """Get metrics using container exposed metrics endpoints."""
             ...

-    _get_scaling_info: ___get_scaling_info_spec[typing_extensions.Self]
+    _get_scaling_info: ___get_scaling_info_spec

-    class ___get_metrics_spec(typing_extensions.Protocol[SUPERSELF]):
+    class ___get_metrics_spec(typing_extensions.Protocol):
         def __call__(self, /, url: str) -> typing.Optional[dict[str, list[typing.Any]]]: ...
         async def aio(self, /, url: str) -> typing.Optional[dict[str, list[typing.Any]]]: ...

-    _get_metrics: ___get_metrics_spec[typing_extensions.Self]
-
-    class ___get_container_metrics_spec(typing_extensions.Protocol[SUPERSELF]):
-        def __call__(
-            self, /, container_id: str
-        ) -> typing.Optional[modal_proto.api_pb2.TaskGetAutoscalingMetricsResponse]: ...
-        async def aio(
-            self, /, container_id: str
-        ) -> typing.Optional[modal_proto.api_pb2.TaskGetAutoscalingMetricsResponse]: ...
-
-    _get_container_metrics: ___get_container_metrics_spec[typing_extensions.Self]
+    _get_metrics: ___get_metrics_spec

-    class ___get_all_containers_spec(typing_extensions.Protocol[SUPERSELF]):
+    class ___get_all_containers_spec(typing_extensions.Protocol):
         def __call__(self, /): ...
         async def aio(self, /): ...

-    _get_all_containers: ___get_all_containers_spec[typing_extensions.Self]
+    _get_all_containers: ___get_all_containers_spec

-    class ___set_target_slots_spec(typing_extensions.Protocol[SUPERSELF]):
+    class ___set_target_slots_spec(typing_extensions.Protocol):
         def __call__(self, /, target_slots: int): ...
         async def aio(self, /, target_slots: int): ...

-    _set_target_slots: ___set_target_slots_spec[typing_extensions.Self]
+    _set_target_slots: ___set_target_slots_spec

     def _make_scaling_decision(
         self,
@@ -313,11 +310,11 @@ class FlashPrometheusAutoscaler:
         """
         ...

-    class __stop_spec(typing_extensions.Protocol[SUPERSELF]):
+    class __stop_spec(typing_extensions.Protocol):
         def __call__(self, /): ...
         async def aio(self, /): ...

-    stop: __stop_spec[typing_extensions.Self]
+    stop: __stop_spec

 class __flash_prometheus_autoscaler_spec(typing_extensions.Protocol):
     def __call__(
@@ -392,3 +389,54 @@ class __flash_get_containers_spec(typing_extensions.Protocol):
         ...

 flash_get_containers: __flash_get_containers_spec
+
+def _http_server(
+    port: typing.Optional[int] = None,
+    *,
+    proxy_regions: list[str] = [],
+    startup_timeout: int = 30,
+    exit_grace_period: typing.Optional[int] = None,
+    h2_enabled: bool = False,
+):
+    """Decorator for Flash-enabled HTTP servers on Modal classes.
+
+    Args:
+        port: The local port to forward to the HTTP server.
+        proxy_regions: The regions to proxy the HTTP server to.
+        startup_timeout: The maximum time to wait for the HTTP server to start.
+        exit_grace_period: The time to wait for the HTTP server to exit gracefully.
+    """
+    ...
+
+def http_server(
+    port: typing.Optional[int] = None,
+    *,
+    proxy_regions: list[str] = [],
+    startup_timeout: int = 30,
+    exit_grace_period: typing.Optional[int] = None,
+    h2_enabled: bool = False,
+):
+    """Decorator for Flash-enabled HTTP servers on Modal classes.
+
+    Args:
+        port: The local port to forward to the HTTP server.
+        proxy_regions: The regions to proxy the HTTP server to.
+        startup_timeout: The maximum time to wait for the HTTP server to start.
+        exit_grace_period: The time to wait for the HTTP server to exit gracefully.
+    """
+    ...
+
+class _FlashContainerEntry:
+    """A class that manages the lifecycle of Flash manager for Flash containers.
+
+    It is intentional that stop() runs before exit handlers and close().
+    This ensures the container is deregistered first, preventing new requests from being routed to it
+    while exit handlers execute and the exit grace period elapses, before finally closing the tunnel.
+    """
+    def __init__(self, http_config: modal_proto.api_pb2.HTTPConfig):
+        """Initialize self. See help(type(self)) for accurate signature."""
+        ...
+
+    def enter(self): ...
+    def stop(self): ...
+    def close(self): ...
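The stub changes above add three keyword arguments to the experimental flash_forward call (startup_timeout, exit_grace_period, h2_enabled) and introduce http_server decorators plus a _FlashContainerEntry lifecycle helper. Below is a minimal usage sketch, not an official example: it assumes flash_forward is importable from modal.experimental (the module this stub describes), and the server command and port are purely illustrative. Parameter comments restate the stub's own docstrings; h2_enabled is undocumented in the stub.

    import subprocess

    from modal.experimental import flash_forward  # assumed import path for this stub

    # Start a local HTTP server inside the container (illustrative command and port).
    proc = subprocess.Popen(["python", "-m", "http.server", "8000"])

    # New keyword arguments in the 1.3.1.dev8 stub, with the defaults it declares.
    manager = flash_forward(
        8000,
        process=proc,
        startup_timeout=30,    # maximum time to wait for the server to start
        exit_grace_period=0,   # time to wait for the server to exit gracefully
        h2_enabled=False,      # HTTP/2 toggle; semantics not documented in the stub
    )
    print(manager.get_container_url())

Per the stub's own docstring, this is a highly experimental surface that can break or be removed at any time without warning.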
modal/file_io.py
CHANGED
@@ -10,17 +10,16 @@ if TYPE_CHECKING:

 import json

-from grpclib.exceptions import
+from grpclib.exceptions import StreamTerminatedError

 from modal._utils.async_utils import TaskContext
-from modal._utils.grpc_utils import retry_transient_errors
 from modal.exception import ClientClosed
 from modal_proto import api_pb2

 from ._utils.async_utils import synchronize_api
-from ._utils.
+from ._utils.deprecation import deprecation_error
 from .client import _Client
-from .exception import FilesystemExecutionError,
+from .exception import FilesystemExecutionError, InternalError, ServiceError

 WRITE_CHUNK_SIZE = 16 * 1024 * 1024  # 16 MiB
 WRITE_FILE_SIZE_LIMIT = 1024 * 1024 * 1024  # 1 GiB
@@ -47,57 +46,17 @@ T = TypeVar("T", str, bytes)


 async def _delete_bytes(file: "_FileIO", start: Optional[int] = None, end: Optional[int] = None) -> None:
-    """
-
-    `start` and `end` are byte offsets. `start` is inclusive, `end` is exclusive.
-    If either is None, the start or end of the file is used, respectively.
+    """mdmd:hidden
+    This method has been removed.
     """
-
-    file._check_closed()
-    if start is not None and end is not None:
-        if start >= end:
-            raise ValueError("start must be less than end")
-    resp = await retry_transient_errors(
-        file._client.stub.ContainerFilesystemExec,
-        api_pb2.ContainerFilesystemExecRequest(
-            file_delete_bytes_request=api_pb2.ContainerFileDeleteBytesRequest(
-                file_descriptor=file._file_descriptor,
-                start_inclusive=start,
-                end_exclusive=end,
-            ),
-            task_id=file._task_id,
-        ),
-    )
-    await file._wait(resp.exec_id)
+    deprecation_error((2025, 12, 3), "delete_bytes has been removed.")


 async def _replace_bytes(file: "_FileIO", data: bytes, start: Optional[int] = None, end: Optional[int] = None) -> None:
-    """
-
-
-    `start` and `end` are byte offsets. `start` is inclusive, `end` is exclusive.
-    If either is None, the start or end of the file is used, respectively.
+    """mdmd:hidden
+    This method has been removed.
     """
-
-    file._check_closed()
-    if start is not None and end is not None:
-        if start >= end:
-            raise InvalidError("start must be less than end")
-    if len(data) > WRITE_CHUNK_SIZE:
-        raise InvalidError("Write request payload exceeds 16 MiB limit")
-    resp = await retry_transient_errors(
-        file._client.stub.ContainerFilesystemExec,
-        api_pb2.ContainerFilesystemExecRequest(
-            file_write_replace_bytes_request=api_pb2.ContainerFileWriteReplaceBytesRequest(
-                file_descriptor=file._file_descriptor,
-                data=data,
-                start_inclusive=start,
-                end_exclusive=end,
-            ),
-            task_id=file._task_id,
-        ),
-    )
-    await file._wait(resp.exec_id)
+    deprecation_error((2025, 12, 3), "replace_bytes has been removed.")


 class FileWatchEventType(enum.Enum):
@@ -206,13 +165,12 @@ class _FileIO(Generic[T]):
                     completed = True
                     break

-            except (
+            except (ServiceError, InternalError, StreamTerminatedError, ClientClosed) as exc:
                 if retries_remaining > 0:
                     retries_remaining -= 1
-                    if isinstance(exc,
-
-
-                        continue
+                    if isinstance(exc, (ServiceError, InternalError)):
+                        await asyncio.sleep(1.0)
+                        continue
                     elif isinstance(exc, StreamTerminatedError):
                         continue
                     elif isinstance(exc, ClientClosed):
@@ -230,7 +188,7 @@ class _FileIO(Generic[T]):

     async def _wait(self, exec_id: str) -> bytes:
         # The logic here is similar to how output is read from `exec`
-
+        output_buffer = io.BytesIO()
         completed = False
         retries_remaining = 10
         while not completed:
@@ -241,18 +199,17 @@ class _FileIO(Generic[T]):
                         break
                     if isinstance(data, Exception):
                         raise data
-
-            except (
+                    output_buffer.write(data)
+            except (ServiceError, InternalError, StreamTerminatedError) as exc:
                 if retries_remaining > 0:
                     retries_remaining -= 1
-                    if isinstance(exc,
-
-
-                        continue
+                    if isinstance(exc, (ServiceError, InternalError)):
+                        await asyncio.sleep(1.0)
+                        continue
                     elif isinstance(exc, StreamTerminatedError):
                         continue
                 raise
-        return
+        return output_buffer.getvalue()

     def _validate_type(self, data: Union[bytes, str]) -> None:
         if self._binary and isinstance(data, str):
@@ -261,8 +218,7 @@ class _FileIO(Generic[T]):
             raise TypeError("Expected str when in text mode")

     async def _open_file(self, path: str, mode: str) -> None:
-        resp = await
-            self._client.stub.ContainerFilesystemExec,
+        resp = await self._client.stub.ContainerFilesystemExec(
             api_pb2.ContainerFilesystemExecRequest(
                 file_open_request=api_pb2.ContainerFileOpenRequest(path=path, mode=mode),
                 task_id=self._task_id,
@@ -285,8 +241,7 @@ class _FileIO(Generic[T]):
         return self

     async def _make_read_request(self, n: Optional[int]) -> bytes:
-        resp = await
-            self._client.stub.ContainerFilesystemExec,
+        resp = await self._client.stub.ContainerFilesystemExec(
             api_pb2.ContainerFilesystemExecRequest(
                 file_read_request=api_pb2.ContainerFileReadRequest(file_descriptor=self._file_descriptor, n=n),
                 task_id=self._task_id,
@@ -309,8 +264,7 @@ class _FileIO(Generic[T]):
         """Read a single line from the current position."""
         self._check_closed()
         self._check_readable()
-        resp = await
-            self._client.stub.ContainerFilesystemExec,
+        resp = await self._client.stub.ContainerFilesystemExec(
             api_pb2.ContainerFilesystemExecRequest(
                 file_read_line_request=api_pb2.ContainerFileReadLineRequest(file_descriptor=self._file_descriptor),
                 task_id=self._task_id,
@@ -351,8 +305,7 @@ class _FileIO(Generic[T]):
             raise ValueError("Write request payload exceeds 1 GiB limit")
         for i in range(0, len(data), WRITE_CHUNK_SIZE):
             chunk = data[i : i + WRITE_CHUNK_SIZE]
-            resp = await
-                self._client.stub.ContainerFilesystemExec,
+            resp = await self._client.stub.ContainerFilesystemExec(
                 api_pb2.ContainerFilesystemExecRequest(
                     file_write_request=api_pb2.ContainerFileWriteRequest(
                         file_descriptor=self._file_descriptor,
@@ -367,8 +320,7 @@ class _FileIO(Generic[T]):
         """Flush the buffer to disk."""
         self._check_closed()
         self._check_writable()
-        resp = await
-            self._client.stub.ContainerFilesystemExec,
+        resp = await self._client.stub.ContainerFilesystemExec(
             api_pb2.ContainerFilesystemExecRequest(
                 file_flush_request=api_pb2.ContainerFileFlushRequest(file_descriptor=self._file_descriptor),
                 task_id=self._task_id,
@@ -393,8 +345,7 @@ class _FileIO(Generic[T]):
         (relative to the current position) and 2 (relative to the file's end).
         """
         self._check_closed()
-        resp = await
-            self._client.stub.ContainerFilesystemExec,
+        resp = await self._client.stub.ContainerFilesystemExec(
             api_pb2.ContainerFilesystemExecRequest(
                 file_seek_request=api_pb2.ContainerFileSeekRequest(
                     file_descriptor=self._file_descriptor,
@@ -410,8 +361,7 @@ class _FileIO(Generic[T]):
     async def ls(cls, path: str, client: _Client, task_id: str) -> list[str]:
         """List the contents of the provided directory."""
         self = _FileIO(client, task_id)
-        resp = await
-            self._client.stub.ContainerFilesystemExec,
+        resp = await self._client.stub.ContainerFilesystemExec(
             api_pb2.ContainerFilesystemExecRequest(
                 file_ls_request=api_pb2.ContainerFileLsRequest(path=path),
                 task_id=task_id,
@@ -427,8 +377,7 @@ class _FileIO(Generic[T]):
     async def mkdir(cls, path: str, client: _Client, task_id: str, parents: bool = False) -> None:
         """Create a new directory."""
         self = _FileIO(client, task_id)
-        resp = await
-            self._client.stub.ContainerFilesystemExec,
+        resp = await self._client.stub.ContainerFilesystemExec(
             api_pb2.ContainerFilesystemExecRequest(
                 file_mkdir_request=api_pb2.ContainerFileMkdirRequest(path=path, make_parents=parents),
                 task_id=self._task_id,
@@ -440,8 +389,7 @@ class _FileIO(Generic[T]):
     async def rm(cls, path: str, client: _Client, task_id: str, recursive: bool = False) -> None:
         """Remove a file or directory in the Sandbox."""
         self = _FileIO(client, task_id)
-        resp = await
-            self._client.stub.ContainerFilesystemExec,
+        resp = await self._client.stub.ContainerFilesystemExec(
             api_pb2.ContainerFilesystemExecRequest(
                 file_rm_request=api_pb2.ContainerFileRmRequest(path=path, recursive=recursive),
                 task_id=self._task_id,
@@ -460,8 +408,7 @@ class _FileIO(Generic[T]):
         timeout: Optional[int] = None,
     ) -> AsyncIterator[FileWatchEvent]:
         self = _FileIO(client, task_id)
-        resp = await
-            self._client.stub.ContainerFilesystemExec,
+        resp = await self._client.stub.ContainerFilesystemExec(
             api_pb2.ContainerFilesystemExecRequest(
                 file_watch_request=api_pb2.ContainerFileWatchRequest(
                     path=path,
@@ -471,10 +418,22 @@ class _FileIO(Generic[T]):
                 task_id=self._task_id,
             ),
         )
+
+        def end_of_event(item: bytes, buffer: io.BytesIO, boundary_token: bytes) -> bool:
+            if not item.endswith(b"\n"):
+                return False
+            boundary_token_size = len(boundary_token)
+            if buffer.tell() < boundary_token_size:
+                return False
+            buffer.seek(-boundary_token_size, io.SEEK_END)
+            if buffer.read(boundary_token_size) == boundary_token:
+                return True
+            return False
+
         async with TaskContext() as tc:
            tc.create_task(self._consume_watch_output(resp.exec_id))

-
+            item_buffer = io.BytesIO()
             while True:
                 if len(self._watch_output_buffer) > 0:
                     item = self._watch_output_buffer.pop(0)
@@ -482,12 +441,12 @@ class _FileIO(Generic[T]):
                         break
                     if isinstance(item, Exception):
                         raise item
-
-
-                    #
-                    if
+                    item_buffer.write(item)
+                    assert isinstance(item, bytes)
+                    # Single events may span multiple messages so we need to check for a special event boundary token
+                    if end_of_event(item, item_buffer, boundary_token=b"\n\n"):
                         try:
-                            event_json = json.loads(
+                            event_json = json.loads(item_buffer.getvalue().strip().decode())
                             event = FileWatchEvent(
                                 type=FileWatchEventType(event_json["event_type"]),
                                 paths=event_json["paths"],
@@ -497,14 +456,13 @@ class _FileIO(Generic[T]):
                         except (json.JSONDecodeError, KeyError, ValueError):
                             # skip invalid events
                             pass
-
+                        item_buffer = io.BytesIO()
                 else:
                     await asyncio.sleep(0.1)

     async def _close(self) -> None:
         # Buffer is flushed by the runner on close
-        resp = await
-            self._client.stub.ContainerFilesystemExec,
+        resp = await self._client.stub.ContainerFilesystemExec(
             api_pb2.ContainerFilesystemExecRequest(
                 file_close_request=api_pb2.ContainerFileCloseRequest(file_descriptor=self._file_descriptor),
                 task_id=self._task_id,