modal 1.0.4.dev12__py3-none-any.whl → 1.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. modal/_clustered_functions.pyi +13 -3
  2. modal/_functions.py +84 -46
  3. modal/_partial_function.py +1 -1
  4. modal/_runtime/container_io_manager.pyi +222 -40
  5. modal/_runtime/execution_context.pyi +60 -6
  6. modal/_serialization.py +25 -2
  7. modal/_tunnel.pyi +380 -12
  8. modal/_utils/async_utils.py +1 -1
  9. modal/_utils/blob_utils.py +56 -19
  10. modal/_utils/function_utils.py +33 -7
  11. modal/_utils/grpc_utils.py +11 -4
  12. modal/app.py +5 -5
  13. modal/app.pyi +658 -48
  14. modal/cli/run.py +2 -1
  15. modal/client.pyi +224 -36
  16. modal/cloud_bucket_mount.pyi +192 -4
  17. modal/cls.py +7 -7
  18. modal/cls.pyi +442 -35
  19. modal/container_process.pyi +103 -14
  20. modal/dict.py +4 -4
  21. modal/dict.pyi +453 -51
  22. modal/environments.pyi +41 -9
  23. modal/exception.py +6 -2
  24. modal/experimental/__init__.py +90 -0
  25. modal/experimental/ipython.py +11 -7
  26. modal/file_io.pyi +236 -45
  27. modal/functions.pyi +573 -65
  28. modal/gpu.py +1 -1
  29. modal/image.py +1 -1
  30. modal/image.pyi +1256 -74
  31. modal/io_streams.py +8 -4
  32. modal/io_streams.pyi +348 -38
  33. modal/mount.pyi +261 -31
  34. modal/network_file_system.py +3 -3
  35. modal/network_file_system.pyi +307 -26
  36. modal/object.pyi +48 -9
  37. modal/parallel_map.py +93 -19
  38. modal/parallel_map.pyi +160 -15
  39. modal/partial_function.pyi +255 -14
  40. modal/proxy.py +1 -1
  41. modal/proxy.pyi +28 -3
  42. modal/queue.py +4 -4
  43. modal/queue.pyi +447 -30
  44. modal/runner.pyi +160 -22
  45. modal/sandbox.py +8 -7
  46. modal/sandbox.pyi +310 -50
  47. modal/schedule.py +1 -1
  48. modal/secret.py +2 -2
  49. modal/secret.pyi +164 -15
  50. modal/snapshot.pyi +25 -4
  51. modal/token_flow.pyi +28 -8
  52. modal/volume.py +41 -4
  53. modal/volume.pyi +693 -59
  54. {modal-1.0.4.dev12.dist-info → modal-1.0.5.dist-info}/METADATA +3 -3
  55. {modal-1.0.4.dev12.dist-info → modal-1.0.5.dist-info}/RECORD +67 -67
  56. modal_proto/api.proto +56 -0
  57. modal_proto/api_grpc.py +48 -0
  58. modal_proto/api_pb2.py +874 -780
  59. modal_proto/api_pb2.pyi +194 -8
  60. modal_proto/api_pb2_grpc.py +100 -0
  61. modal_proto/api_pb2_grpc.pyi +32 -0
  62. modal_proto/modal_api_grpc.py +3 -0
  63. modal_version/__init__.py +1 -1
  64. {modal-1.0.4.dev12.dist-info → modal-1.0.5.dist-info}/WHEEL +0 -0
  65. {modal-1.0.4.dev12.dist-info → modal-1.0.5.dist-info}/entry_points.txt +0 -0
  66. {modal-1.0.4.dev12.dist-info → modal-1.0.5.dist-info}/licenses/LICENSE +0 -0
  67. {modal-1.0.4.dev12.dist-info → modal-1.0.5.dist-info}/top_level.txt +0 -0
modal/_tunnel.pyi CHANGED
@@ -4,34 +4,402 @@ import typing
 import typing_extensions
 
 class Tunnel:
+    """A port forwarded from within a running Modal container. Created by `modal.forward()`.
+
+    **Important:** This is an experimental API which may change in the future.
+    """
+
     host: str
     port: int
     unencrypted_host: str
     unencrypted_port: int
 
     @property
-    def url(self) -> str: ...
+    def url(self) -> str:
+        """Get the public HTTPS URL of the forwarded port."""
+        ...
+
     @property
-    def tls_socket(self) -> tuple[str, int]: ...
+    def tls_socket(self) -> tuple[str, int]:
+        """Get the public TLS socket as a (host, port) tuple."""
+        ...
+
     @property
-    def tcp_socket(self) -> tuple[str, int]: ...
-    def __init__(self, host: str, port: int, unencrypted_host: str, unencrypted_port: int) -> None: ...
-    def __repr__(self): ...
-    def __eq__(self, other): ...
-    def __setattr__(self, name, value): ...
-    def __delattr__(self, name): ...
-    def __hash__(self): ...
+    def tcp_socket(self) -> tuple[str, int]:
+        """Get the public TCP socket as a (host, port) tuple."""
+        ...
+
+    def __init__(self, host: str, port: int, unencrypted_host: str, unencrypted_port: int) -> None:
+        """Initialize self. See help(type(self)) for accurate signature."""
+        ...
+
+    def __repr__(self):
+        """Return repr(self)."""
+        ...
+
+    def __eq__(self, other):
+        """Return self==value."""
+        ...
+
+    def __setattr__(self, name, value):
+        """Implement setattr(self, name, value)."""
+        ...
+
+    def __delattr__(self, name):
+        """Implement delattr(self, name)."""
+        ...
+
+    def __hash__(self):
+        """Return hash(self)."""
+        ...
 
 def _forward(
     port: int, *, unencrypted: bool = False, client: typing.Optional[modal.client._Client] = None
-) -> typing.AsyncContextManager[Tunnel]: ...
+) -> typing.AsyncContextManager[Tunnel]:
+    '''Expose a port publicly from inside a running Modal container, with TLS.
+
+    If `unencrypted` is set, this also exposes the TCP socket without encryption on a random port
+    number. This can be used to SSH into a container (see example below). Note that it is on the public Internet, so
+    make sure you are using a secure protocol over TCP.
+
+    **Important:** This is an experimental API which may change in the future.
+
+    **Usage:**
+
+    ```python notest
+    import modal
+    from flask import Flask
+
+    app = modal.App(image=modal.Image.debian_slim().pip_install("Flask"))
+    flask_app = Flask(__name__)
+
+
+    @flask_app.route("/")
+    def hello_world():
+        return "Hello, World!"
+
+
+    @app.function()
+    def run_app():
+        # Start a web server inside the container at port 8000. `modal.forward(8000)` lets us
+        # expose that port to the world at a random HTTPS URL.
+        with modal.forward(8000) as tunnel:
+            print("Server listening at", tunnel.url)
+            flask_app.run("0.0.0.0", 8000)
+
+        # When the context manager exits, the port is no longer exposed.
+    ```
+
+    **Raw TCP usage:**
+
+    ```python
+    import socket
+    import threading
+
+    import modal
+
+
+    def run_echo_server(port: int):
+        """Run a TCP echo server listening on the given port."""
+        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        sock.bind(("0.0.0.0", port))
+        sock.listen(1)
+
+        while True:
+            conn, addr = sock.accept()
+            print("Connection from:", addr)
+
+            # Start a new thread to handle the connection
+            def handle(conn):
+                with conn:
+                    while True:
+                        data = conn.recv(1024)
+                        if not data:
+                            break
+                        conn.sendall(data)
+
+            threading.Thread(target=handle, args=(conn,)).start()
+
+
+    app = modal.App()
+
+
+    @app.function()
+    def tcp_tunnel():
+        # This exposes port 8000 to public Internet traffic over TCP.
+        with modal.forward(8000, unencrypted=True) as tunnel:
+            # You can connect to this TCP socket from outside the container, for example, using `nc`:
+            # nc <HOST> <PORT>
+            print("TCP tunnel listening at:", tunnel.tcp_socket)
+            run_echo_server(8000)
+    ```
+
+    **SSH example:**
+    This assumes you have a rsa keypair in `~/.ssh/id_rsa{.pub}`, this is a bare-bones example
+    letting you SSH into a Modal container.
+
+    ```python
+    import subprocess
+    import time
+
+    import modal
+
+    app = modal.App()
+    image = (
+        modal.Image.debian_slim()
+        .apt_install("openssh-server")
+        .run_commands("mkdir /run/sshd")
+        .add_local_file("~/.ssh/id_rsa.pub", "/root/.ssh/authorized_keys", copy=True)
+    )
+
+
+    @app.function(image=image, timeout=3600)
+    def some_function():
+        subprocess.Popen(["/usr/sbin/sshd", "-D", "-e"])
+        with modal.forward(port=22, unencrypted=True) as tunnel:
+            hostname, port = tunnel.tcp_socket
+            connection_cmd = f'ssh -p {port} root@{hostname}'
+            print(f"ssh into container using: {connection_cmd}")
+            time.sleep(3600) # keep alive for 1 hour or until killed
+    ```
+
+    If you intend to use this more generally, a suggestion is to put the subprocess and port
+    forwarding code in an `@enter` lifecycle method of an @app.cls, to only make a single
+    ssh server and port for each container (and not one for each input to the function).
+    '''
+    ...
 
 class __forward_spec(typing_extensions.Protocol):
     def __call__(
         self, /, port: int, *, unencrypted: bool = False, client: typing.Optional[modal.client.Client] = None
-    ) -> synchronicity.combined_types.AsyncAndBlockingContextManager[Tunnel]: ...
+    ) -> synchronicity.combined_types.AsyncAndBlockingContextManager[Tunnel]:
+        '''Expose a port publicly from inside a running Modal container, with TLS.
+
+        If `unencrypted` is set, this also exposes the TCP socket without encryption on a random port
+        number. This can be used to SSH into a container (see example below). Note that it is on the public Internet, so
+        make sure you are using a secure protocol over TCP.
+
+        **Important:** This is an experimental API which may change in the future.
+
+        **Usage:**
+
+        ```python notest
+        import modal
+        from flask import Flask
+
+        app = modal.App(image=modal.Image.debian_slim().pip_install("Flask"))
+        flask_app = Flask(__name__)
+
+
+        @flask_app.route("/")
+        def hello_world():
+            return "Hello, World!"
+
+
+        @app.function()
+        def run_app():
+            # Start a web server inside the container at port 8000. `modal.forward(8000)` lets us
+            # expose that port to the world at a random HTTPS URL.
+            with modal.forward(8000) as tunnel:
+                print("Server listening at", tunnel.url)
+                flask_app.run("0.0.0.0", 8000)
+
+            # When the context manager exits, the port is no longer exposed.
+        ```
+
+        **Raw TCP usage:**
+
+        ```python
+        import socket
+        import threading
+
+        import modal
+
+
+        def run_echo_server(port: int):
+            """Run a TCP echo server listening on the given port."""
+            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            sock.bind(("0.0.0.0", port))
+            sock.listen(1)
+
+            while True:
+                conn, addr = sock.accept()
+                print("Connection from:", addr)
+
+                # Start a new thread to handle the connection
+                def handle(conn):
+                    with conn:
+                        while True:
+                            data = conn.recv(1024)
+                            if not data:
+                                break
+                            conn.sendall(data)
+
+                threading.Thread(target=handle, args=(conn,)).start()
+
+
+        app = modal.App()
+
+
+        @app.function()
+        def tcp_tunnel():
+            # This exposes port 8000 to public Internet traffic over TCP.
+            with modal.forward(8000, unencrypted=True) as tunnel:
+                # You can connect to this TCP socket from outside the container, for example, using `nc`:
+                # nc <HOST> <PORT>
+                print("TCP tunnel listening at:", tunnel.tcp_socket)
+                run_echo_server(8000)
+        ```
+
+        **SSH example:**
+        This assumes you have a rsa keypair in `~/.ssh/id_rsa{.pub}`, this is a bare-bones example
+        letting you SSH into a Modal container.
+
+        ```python
+        import subprocess
+        import time
+
+        import modal
+
+        app = modal.App()
+        image = (
+            modal.Image.debian_slim()
+            .apt_install("openssh-server")
+            .run_commands("mkdir /run/sshd")
+            .add_local_file("~/.ssh/id_rsa.pub", "/root/.ssh/authorized_keys", copy=True)
+        )
+
+
+        @app.function(image=image, timeout=3600)
+        def some_function():
+            subprocess.Popen(["/usr/sbin/sshd", "-D", "-e"])
+            with modal.forward(port=22, unencrypted=True) as tunnel:
+                hostname, port = tunnel.tcp_socket
+                connection_cmd = f'ssh -p {port} root@{hostname}'
+                print(f"ssh into container using: {connection_cmd}")
+                time.sleep(3600) # keep alive for 1 hour or until killed
+        ```
+
+        If you intend to use this more generally, a suggestion is to put the subprocess and port
+        forwarding code in an `@enter` lifecycle method of an @app.cls, to only make a single
+        ssh server and port for each container (and not one for each input to the function).
+        '''
+        ...
+
     def aio(
         self, /, port: int, *, unencrypted: bool = False, client: typing.Optional[modal.client.Client] = None
-    ) -> typing.AsyncContextManager[Tunnel]: ...
+    ) -> typing.AsyncContextManager[Tunnel]:
+        '''Expose a port publicly from inside a running Modal container, with TLS.
+
+        If `unencrypted` is set, this also exposes the TCP socket without encryption on a random port
+        number. This can be used to SSH into a container (see example below). Note that it is on the public Internet, so
+        make sure you are using a secure protocol over TCP.
+
+        **Important:** This is an experimental API which may change in the future.
+
+        **Usage:**
+
+        ```python notest
+        import modal
+        from flask import Flask
+
+        app = modal.App(image=modal.Image.debian_slim().pip_install("Flask"))
+        flask_app = Flask(__name__)
+
+
+        @flask_app.route("/")
+        def hello_world():
+            return "Hello, World!"
+
+
+        @app.function()
+        def run_app():
+            # Start a web server inside the container at port 8000. `modal.forward(8000)` lets us
+            # expose that port to the world at a random HTTPS URL.
+            with modal.forward(8000) as tunnel:
+                print("Server listening at", tunnel.url)
+                flask_app.run("0.0.0.0", 8000)
+
+            # When the context manager exits, the port is no longer exposed.
+        ```
+
+        **Raw TCP usage:**
+
+        ```python
+        import socket
+        import threading
+
+        import modal
+
+
+        def run_echo_server(port: int):
+            """Run a TCP echo server listening on the given port."""
+            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            sock.bind(("0.0.0.0", port))
+            sock.listen(1)
+
+            while True:
+                conn, addr = sock.accept()
+                print("Connection from:", addr)
+
+                # Start a new thread to handle the connection
+                def handle(conn):
+                    with conn:
+                        while True:
+                            data = conn.recv(1024)
+                            if not data:
+                                break
+                            conn.sendall(data)
+
+                threading.Thread(target=handle, args=(conn,)).start()
+
+
+        app = modal.App()
+
+
+        @app.function()
+        def tcp_tunnel():
+            # This exposes port 8000 to public Internet traffic over TCP.
+            with modal.forward(8000, unencrypted=True) as tunnel:
+                # You can connect to this TCP socket from outside the container, for example, using `nc`:
+                # nc <HOST> <PORT>
+                print("TCP tunnel listening at:", tunnel.tcp_socket)
+                run_echo_server(8000)
+        ```
+
+        **SSH example:**
+        This assumes you have a rsa keypair in `~/.ssh/id_rsa{.pub}`, this is a bare-bones example
+        letting you SSH into a Modal container.
+
+        ```python
+        import subprocess
+        import time
+
+        import modal
+
+        app = modal.App()
+        image = (
+            modal.Image.debian_slim()
+            .apt_install("openssh-server")
+            .run_commands("mkdir /run/sshd")
+            .add_local_file("~/.ssh/id_rsa.pub", "/root/.ssh/authorized_keys", copy=True)
+        )
+
+
+        @app.function(image=image, timeout=3600)
+        def some_function():
+            subprocess.Popen(["/usr/sbin/sshd", "-D", "-e"])
+            with modal.forward(port=22, unencrypted=True) as tunnel:
+                hostname, port = tunnel.tcp_socket
+                connection_cmd = f'ssh -p {port} root@{hostname}'
+                print(f"ssh into container using: {connection_cmd}")
+                time.sleep(3600) # keep alive for 1 hour or until killed
+        ```
+
+        If you intend to use this more generally, a suggestion is to put the subprocess and port
+        forwarding code in an `@enter` lifecycle method of an @app.cls, to only make a single
+        ssh server and port for each container (and not one for each input to the function).
+        '''
+        ...
 
 forward: __forward_spec
modal/_utils/async_utils.py CHANGED
@@ -396,7 +396,7 @@ class _WarnIfGeneratorIsNotConsumed:
         return await self.gen.aclose()
 
 
-synchronize_api(_WarnIfGeneratorIsNotConsumed)
+_BlockingWarnIfGeneratorIsNotConsumed = synchronize_api(_WarnIfGeneratorIsNotConsumed)
 
 
 class _WarnIfNonWrappedGeneratorIsNotConsumed(_WarnIfGeneratorIsNotConsumed):
modal/_utils/blob_utils.py CHANGED
@@ -4,6 +4,7 @@ import dataclasses
 import hashlib
 import os
 import platform
+import random
 import time
 from collections.abc import AsyncIterator
 from contextlib import AbstractContextManager, contextmanager
@@ -37,12 +38,15 @@ if TYPE_CHECKING:
 # Max size for function inputs and outputs.
 MAX_OBJECT_SIZE_BYTES = 2 * 1024 * 1024 # 2 MiB
 
+# Max size for async function inputs and outputs.
+MAX_ASYNC_OBJECT_SIZE_BYTES = 8 * 1024 # 8 KiB
+
 # If a file is LARGE_FILE_LIMIT bytes or larger, it's uploaded to blob store (s3) instead of going through grpc
 # It will also make sure to chunk the hash calculation to avoid reading the entire file into memory
 LARGE_FILE_LIMIT = 4 * 1024 * 1024 # 4 MiB
 
 # Max parallelism during map calls
-BLOB_MAX_PARALLELISM = 10
+BLOB_MAX_PARALLELISM = 20
 
 # read ~16MiB chunks by default
 DEFAULT_SEGMENT_CHUNK_SIZE = 2**24
@@ -55,6 +59,8 @@ MULTIPART_UPLOAD_THRESHOLD = 1024**3
 # For block based storage like volumefs2: the size of a block
 BLOCK_SIZE: int = 8 * 1024 * 1024
 
+HEALTHY_R2_UPLOAD_PERCENTAGE = 0.95
+
 
 @retry(n_attempts=5, base_delay=0.5, timeout=None)
 async def _upload_to_s3_url(
@@ -182,6 +188,22 @@ def get_content_length(data: BinaryIO) -> int:
     return content_length - pos
 
 
+async def _blob_upload_with_fallback(items, blob_ids, callback):
+    for idx, (item, blob_id) in enumerate(zip(items, blob_ids)):
+        # We want to default to R2 95% of the time and S3 5% of the time.
+        # To ensure the failure path is continuously exercised.
+        if idx == 0 and len(items) > 1 and random.random() > HEALTHY_R2_UPLOAD_PERCENTAGE:
+            continue
+        try:
+            await callback(item)
+            return blob_id
+        except Exception as _:
+            # Ignore all errors except the last one, since we're out of fallback options.
+            if idx == len(items) - 1:
+                raise
+    raise ExecutionError("Failed to upload blob")
+
+
 async def _blob_upload(
     upload_hashes: UploadHashes, data: Union[bytes, BinaryIO], stub, progress_report_cb: Optional[Callable] = None
 ) -> str:
@@ -197,17 +219,23 @@
     )
     resp = await retry_transient_errors(stub.BlobCreate, req)
 
-    blob_id = resp.blob_id
+    if resp.WhichOneof("upload_types_oneof") == "multiparts":
+
+        async def upload_multipart_upload(part):
+            return await perform_multipart_upload(
+                data,
+                content_length=content_length,
+                max_part_size=part.part_length,
+                part_urls=part.upload_urls,
+                completion_url=part.completion_url,
+                upload_chunk_size=DEFAULT_SEGMENT_CHUNK_SIZE,
+                progress_report_cb=progress_report_cb,
+            )
 
-    if resp.WhichOneof("upload_type_oneof") == "multipart":
-        await perform_multipart_upload(
-            data,
-            content_length=content_length,
-            max_part_size=resp.multipart.part_length,
-            part_urls=resp.multipart.upload_urls,
-            completion_url=resp.multipart.completion_url,
-            upload_chunk_size=DEFAULT_SEGMENT_CHUNK_SIZE,
-            progress_report_cb=progress_report_cb,
+        blob_id = await _blob_upload_with_fallback(
+            resp.multiparts.items,
+            resp.blob_ids,
+            upload_multipart_upload,
         )
     else:
         from .bytes_io_segment_payload import BytesIOSegmentPayload
@@ -215,11 +243,19 @@ async def _blob_upload(
         payload = BytesIOSegmentPayload(
             data, segment_start=0, segment_length=content_length, progress_report_cb=progress_report_cb
         )
-        await _upload_to_s3_url(
-            resp.upload_url,
-            payload,
-            # for single part uploads, we use server side md5 checksums
-            content_md5_b64=upload_hashes.md5_base64,
+
+        async def upload_to_s3_url(url):
+            return await _upload_to_s3_url(
+                url,
+                payload,
+                # for single part uploads, we use server side md5 checksums
+                content_md5_b64=upload_hashes.md5_base64,
+            )
+
+        blob_id = await _blob_upload_with_fallback(
+            resp.upload_urls.items,
+            resp.blob_ids,
+            upload_to_s3_url,
        )
 
     if progress_report_cb:
@@ -380,8 +416,10 @@ def get_file_upload_spec_from_fileobj(fp: BinaryIO, mount_filename: PurePosixPat
         mode,
     )
 
+
 _FileUploadSource2 = Callable[[], ContextManager[BinaryIO]]
 
+
 @dataclasses.dataclass
 class FileUploadSpec2:
     source: _FileUploadSource2
@@ -393,7 +431,6 @@ class FileUploadSpec2:
     mode: int # file permission bits (last 12 bits of st_mode)
     size: int
 
-
     @staticmethod
     async def from_path(
         filename: Path,
@@ -416,7 +453,6 @@
            hash_semaphore,
        )
 
-
     @staticmethod
     async def from_fileobj(
         source_fp: Union[BinaryIO, BytesIO],
@@ -426,6 +462,7 @@
    ) -> "FileUploadSpec2":
        try:
            fileno = source_fp.fileno()
+
            def source():
                new_fd = os.dup(fileno)
                fp = os.fdopen(new_fd, "rb")
@@ -436,6 +473,7 @@
            # `.fileno()` not available; assume BytesIO-like type
            source_fp = cast(BytesIO, source_fp)
            buffer = source_fp.getbuffer()
+
            def source():
                return BytesIO(buffer)
 
@@ -447,7 +485,6 @@ class FileUploadSpec2:
            hash_semaphore,
        )
 
-
     @staticmethod
     async def _create(
         source: _FileUploadSource2,
modal/_utils/function_utils.py CHANGED
@@ -10,7 +10,6 @@ from typing import Any, Callable, Literal, Optional
 
 from grpclib import GRPCError
 from grpclib.exceptions import StreamTerminatedError
-from synchronicity.exceptions import UserCodeException
 
 import modal_proto
 from modal_proto import api_pb2
@@ -33,7 +32,7 @@ from ..exception import (
     RemoteError,
 )
 from ..mount import ROOT_DIR, _is_modal_path, _Mount
-from .blob_utils import MAX_OBJECT_SIZE_BYTES, blob_download, blob_upload
+from .blob_utils import MAX_ASYNC_OBJECT_SIZE_BYTES, MAX_OBJECT_SIZE_BYTES, blob_download, blob_upload
 from .grpc_utils import RETRYABLE_GRPC_STATUS_CODES
 
 
@@ -497,8 +496,9 @@ async def _process_result(result: api_pb2.GenericResult, data_format: int, stub,
                    append_modal_tb(exc, tb_dict, line_cache)
                except Exception:
                    pass
-               uc_exc = UserCodeException(exc_with_hints(exc))
-               raise uc_exc
+
+               raise exc_with_hints(exc)
+
        raise RemoteError(result.exception)
 
    try:
@@ -511,8 +511,27 @@ async def _process_result(result: api_pb2.GenericResult, data_format: int, stub,
        ) from deser_exc
 
 
+def should_upload(
+    num_bytes: int,
+    function_call_invocation_type: Optional["api_pb2.FunctionCallInvocationType.ValueType"],
+) -> bool:
+    """
+    Determine if the input should be uploaded to blob storage.
+    """
+    return num_bytes > MAX_OBJECT_SIZE_BYTES or (
+        function_call_invocation_type == api_pb2.FUNCTION_CALL_INVOCATION_TYPE_ASYNC
+        and num_bytes > MAX_ASYNC_OBJECT_SIZE_BYTES
+    )
+
+
 async def _create_input(
-    args, kwargs, stub: ModalClientModal, *, idx: Optional[int] = None, method_name: Optional[str] = None
+    args,
+    kwargs,
+    stub: ModalClientModal,
+    *,
+    idx: Optional[int] = None,
+    method_name: Optional[str] = None,
+    function_call_invocation_type: Optional["api_pb2.FunctionCallInvocationType.ValueType"] = None,
 ) -> api_pb2.FunctionPutInputsItem:
     """Serialize function arguments and create a FunctionInput protobuf,
     uploading to blob storage if needed.
@@ -524,9 +543,8 @@ async def _create_input(
 
     args_serialized = serialize((args, kwargs))
 
-    if len(args_serialized) > MAX_OBJECT_SIZE_BYTES:
+    if should_upload(len(args_serialized), function_call_invocation_type):
         args_blob_id = await blob_upload(args_serialized, stub)
-
         return api_pb2.FunctionPutInputsItem(
             input=api_pb2.FunctionInput(
                 args_blob_id=args_blob_id,
@@ -603,6 +621,14 @@ class FunctionCreationStatus:
                    f"Custom domain for {self.tag} => [magenta underline]{custom_domain.url}[/magenta underline]"
                )
 
+        elif self.response.function.flash_service_urls:
+            for flash_service_url in self.response.function.flash_service_urls:
+                flash_service_url_status_row = self.resolver.add_status_row()
+                flash_service_url_status_row.finish(
+                    f"Created flash service endpoint for {self.tag} => "
+                    f"[magenta underline]{flash_service_url}[/magenta underline]"
+                )
+
         else:
             for warning in self.response.server_warnings:
                 self.status_row.warning(warning)