modal 1.0.6.dev9__py3-none-any.whl → 1.0.6.dev15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

modal/_container_entrypoint.py CHANGED
@@ -15,7 +15,6 @@ if telemetry_socket:
     instrument_imports(telemetry_socket)
 
 import asyncio
-import concurrent.futures
 import inspect
 import queue
 import signal
@@ -49,7 +48,6 @@ from ._runtime.container_io_manager import (
     ContainerIOManager,
     IOContext,
     UserException,
-    _ContainerIOManager,
 )
 
 if TYPE_CHECKING:
@@ -198,21 +196,16 @@ def call_function(
 
         # Send up to this many outputs at a time.
         generator_queue: asyncio.Queue[Any] = await container_io_manager._queue_create.aio(1024)
-        generator_output_task = asyncio.create_task(
-            container_io_manager.generator_output_task.aio(
-                function_call_ids[0],
-                io_context.finalized_function.data_format,
-                generator_queue,
-            )
-        )
-
-        item_count = 0
-        async for value in res:
-            await container_io_manager._queue_put.aio(generator_queue, value)
-            item_count += 1
+        async with container_io_manager.generator_output_sender(
+            function_call_ids[0],
+            io_context.finalized_function.data_format,
+            generator_queue,
+        ):
+            item_count = 0
+            async for value in res:
+                await container_io_manager._queue_put.aio(generator_queue, value)
+                item_count += 1
 
-        await container_io_manager._queue_put.aio(generator_queue, _ContainerIOManager._GENERATOR_STOP_SENTINEL)
-        await generator_output_task  # Wait to finish sending generator outputs.
         message = api_pb2.GeneratorDone(items_total=item_count)
         await container_io_manager.push_outputs.aio(
             io_context,
@@ -249,20 +242,17 @@ def call_function(
 
         # Send up to this many outputs at a time.
         generator_queue: asyncio.Queue[Any] = container_io_manager._queue_create(1024)
-        generator_output_task: concurrent.futures.Future = container_io_manager.generator_output_task(  # type: ignore
+
+        with container_io_manager.generator_output_sender(
             function_call_ids[0],
             io_context.finalized_function.data_format,
             generator_queue,
-            _future=True,  # type: ignore  # Synchronicity magic to return a future.
-        )
-
-        item_count = 0
-        for value in res:
-            container_io_manager._queue_put(generator_queue, value)
-            item_count += 1
+        ):
+            item_count = 0
+            for value in res:
+                container_io_manager._queue_put(generator_queue, value)
+                item_count += 1
 
-        container_io_manager._queue_put(generator_queue, _ContainerIOManager._GENERATOR_STOP_SENTINEL)
-        generator_output_task.result()  # Wait to finish sending generator outputs.
         message = api_pb2.GeneratorDone(items_total=item_count)
         container_io_manager.push_outputs(io_context, started_at, message, api_pb2.DATA_FORMAT_GENERATOR_DONE)
     else:
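In both the async and blocking paths, the hand-rolled task-plus-sentinel handshake moves behind a context manager, so the background sender is stopped and awaited even if the user's generator raises mid-iteration. A minimal sketch of the lifecycle (simplified names; not the actual Modal implementation, and `drain` is an assumed coroutine function):

```python
import asyncio
from contextlib import asynccontextmanager

@asynccontextmanager
async def sender(queue: asyncio.Queue, drain):
    # `drain` consumes `queue` until it sees the sentinel object
    stop = object()
    task = asyncio.create_task(drain(queue, stop))
    try:
        yield
    finally:
        # runs on success *and* on error in the caller's loop, so the
        # background task is always told to stop and then awaited
        await queue.put(stop)
        await task
```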
modal/_functions.py CHANGED
@@ -40,6 +40,7 @@ from ._utils.async_utils import (
     synchronizer,
     warn_if_generator_is_not_consumed,
 )
+from ._utils.blob_utils import MAX_OBJECT_SIZE_BYTES
 from ._utils.deprecation import deprecation_warning, warn_if_passing_namespace
 from ._utils.function_utils import (
     ATTEMPT_TIMEOUT_GRACE_PERIOD,
@@ -145,6 +146,7 @@ class _Invocation:
             args,
             kwargs,
             stub,
+            max_object_size_bytes=function._max_object_size_bytes,
             method_name=function._use_method_name,
             function_call_invocation_type=function_call_invocation_type,
         )
@@ -386,7 +388,13 @@ class _InputPlaneInvocation:
         function_id = function.object_id
         control_plane_stub = client.stub
         # Note: Blob upload is done on the control plane stub, not the input plane stub!
-        input_item = await _create_input(args, kwargs, control_plane_stub, method_name=function._use_method_name)
+        input_item = await _create_input(
+            args,
+            kwargs,
+            control_plane_stub,
+            max_object_size_bytes=function._max_object_size_bytes,
+            method_name=function._use_method_name,
+        )
 
         request = api_pb2.AttemptStartRequest(
             function_id=function_id,
@@ -443,8 +451,10 @@ class _InputPlaneInvocation:
                 self.attempt_token = retry_response.attempt_token
                 continue
 
+            control_plane_stub = self.client.stub
+            # Note: Blob download is done on the control plane stub, not the input plane stub!
             return await _process_result(
-                await_response.output.result, await_response.output.data_format, self.stub, self.client
+                await_response.output.result, await_response.output.data_format, control_plane_stub, self.client
             )
 
 
@@ -1414,6 +1424,15 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
         self._definition_id = metadata.definition_id
         self._input_plane_url = metadata.input_plane_url
         self._input_plane_region = metadata.input_plane_region
+        # The server may pass back a larger max object size for some input plane users. This applies to input plane
+        # users only - anyone using the control plane will get the standard limit.
+        # There are some cases like FunctionPrecreate where this value is not set at all. We expect that this field
+        # will eventually be hydrated with the correct value, but just to be defensive, if the field is not set we use
+        # MAX_OBJECT_SIZE_BYTES, otherwise it would get set to 0. Accidentally using 0 would cause us to blob upload
+        # everything, so let's avoid that.
+        self._max_object_size_bytes = (
+            metadata.max_object_size_bytes if metadata.HasField("max_object_size_bytes") else MAX_OBJECT_SIZE_BYTES
+        )
 
     def _get_metadata(self):
         # Overridden concrete implementation of base class method
@@ -1430,6 +1449,7 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
             function_schema=self._metadata.function_schema if self._metadata else None,
             input_plane_url=self._input_plane_url,
             input_plane_region=self._input_plane_region,
+            max_object_size_bytes=self._max_object_size_bytes,
         )
 
     def _check_no_web_url(self, fn_name: str):
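The `HasField` guard matters because proto3 scalar fields read as `0` when absent, and a limit of `0` would force every input through blob upload. A standalone sketch of the fallback logic (the constant value is an assumption, not Modal's real limit):

```python
DEFAULT_MAX_OBJECT_SIZE_BYTES = 2 * 1024 * 1024  # hypothetical stand-in for MAX_OBJECT_SIZE_BYTES

def effective_max_object_size(metadata) -> int:
    # HasField distinguishes "field not set" from an explicit value; without it,
    # an unset proto3 field would silently read as 0
    if metadata.HasField("max_object_size_bytes"):
        return metadata.max_object_size_bytes
    return DEFAULT_MAX_OBJECT_SIZE_BYTES
```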
modal/_runtime/container_io_manager.py CHANGED
@@ -290,7 +290,6 @@ class _ContainerIOManager:
 
     _client: _Client
 
-    _GENERATOR_STOP_SENTINEL: ClassVar[Sentinel] = Sentinel()
     _singleton: ClassVar[Optional["_ContainerIOManager"]] = None
 
     def _init(self, container_args: api_pb2.ContainerArguments, client: _Client):
@@ -508,33 +507,47 @@ class _ContainerIOManager:
         req = api_pb2.FunctionCallPutDataRequest(function_call_id=function_call_id, data_chunks=data_chunks)
         await retry_transient_errors(self._client.stub.FunctionCallPutDataOut, req)
 
-    async def generator_output_task(self, function_call_id: str, data_format: int, message_rx: asyncio.Queue) -> None:
-        """Task that feeds generator outputs into a function call's `data_out` stream."""
-        index = 1
-        received_sentinel = False
-        while not received_sentinel:
-            message = await message_rx.get()
-            if message is self._GENERATOR_STOP_SENTINEL:
-                break
-            # ASGI 'http.response.start' and 'http.response.body' msgs are observed to be separated by 1ms.
-            # If we don't sleep here for 1ms we end up with an extra call to .put_data_out().
-            if index == 1:
-                await asyncio.sleep(0.001)
-            serialized_messages = [serialize_data_format(message, data_format)]
-            total_size = len(serialized_messages[0]) + 512
-            while total_size < 16 * 1024 * 1024:  # 16 MiB, maximum size in a single message
-                try:
-                    message = message_rx.get_nowait()
-                except asyncio.QueueEmpty:
-                    break
-                if message is self._GENERATOR_STOP_SENTINEL:
-                    received_sentinel = True
-                    break
-                else:
-                    serialized_messages.append(serialize_data_format(message, data_format))
-                    total_size += len(serialized_messages[-1]) + 512  # 512 bytes for estimated framing overhead
-            await self.put_data_out(function_call_id, index, data_format, serialized_messages)
-            index += len(serialized_messages)
+    @asynccontextmanager
+    async def generator_output_sender(
+        self, function_call_id: str, data_format: int, message_rx: asyncio.Queue
+    ) -> AsyncGenerator[None, None]:
+        """Runs background task that feeds generator outputs into a function call's `data_out` stream."""
+        GENERATOR_STOP_SENTINEL = Sentinel()
+
+        async def generator_output_task():
+            index = 1
+            received_sentinel = False
+            while not received_sentinel:
+                message = await message_rx.get()
+                if message is GENERATOR_STOP_SENTINEL:
+                    break
+                # ASGI 'http.response.start' and 'http.response.body' msgs are observed to be separated by 1ms.
+                # If we don't sleep here for 1ms we end up with an extra call to .put_data_out().
+                if index == 1:
+                    await asyncio.sleep(0.001)
+                serialized_messages = [serialize_data_format(message, data_format)]
+                total_size = len(serialized_messages[0]) + 512
+                while total_size < 16 * 1024 * 1024:  # 16 MiB, maximum size in a single message
+                    try:
+                        message = message_rx.get_nowait()
+                    except asyncio.QueueEmpty:
+                        break
+                    if message is GENERATOR_STOP_SENTINEL:
+                        received_sentinel = True
+                        break
+                    else:
+                        serialized_messages.append(serialize_data_format(message, data_format))
+                        total_size += len(serialized_messages[-1]) + 512  # 512 bytes for estimated framing overhead
+                await self.put_data_out(function_call_id, index, data_format, serialized_messages)
+                index += len(serialized_messages)
+
+        task = asyncio.create_task(generator_output_task())
+        try:
+            yield
+        finally:
+            # gracefully stop the task after all current inputs have been sent
+            await message_rx.put(GENERATOR_STOP_SENTINEL)
+            await task
 
     async def _queue_create(self, size: int) -> asyncio.Queue:
         """Create a queue, on the synchronicity event loop (needed on Python 3.8 and 3.9)."""
modal/_runtime/container_io_manager.pyi CHANGED
@@ -106,7 +106,6 @@ class _ContainerIOManager:
     _is_interactivity_enabled: bool
     _fetching_inputs: bool
     _client: modal.client._Client
-    _GENERATOR_STOP_SENTINEL: typing.ClassVar[Sentinel]
     _singleton: typing.ClassVar[typing.Optional[_ContainerIOManager]]
 
     def _init(self, container_args: modal_proto.api_pb2.ContainerArguments, client: modal.client._Client): ...
@@ -148,10 +147,10 @@ class _ContainerIOManager:
         """
         ...
 
-    async def generator_output_task(
+    def generator_output_sender(
         self, function_call_id: str, data_format: int, message_rx: asyncio.queues.Queue
-    ) -> None:
-        """Task that feeds generator outputs into a function call's `data_out` stream."""
+    ) -> typing.AsyncContextManager[None]:
+        """Runs background task that feeds generator outputs into a function call's `data_out` stream."""
         ...
 
     async def _queue_create(self, size: int) -> asyncio.queues.Queue:
@@ -268,7 +267,6 @@ class ContainerIOManager:
     _is_interactivity_enabled: bool
     _fetching_inputs: bool
     _client: modal.client.Client
-    _GENERATOR_STOP_SENTINEL: typing.ClassVar[Sentinel]
     _singleton: typing.ClassVar[typing.Optional[ContainerIOManager]]
 
     def __init__(self, /, *args, **kwargs):
@@ -367,16 +365,20 @@ class ContainerIOManager:
 
     put_data_out: __put_data_out_spec[typing_extensions.Self]
 
-    class __generator_output_task_spec(typing_extensions.Protocol[SUPERSELF]):
-        def __call__(self, /, function_call_id: str, data_format: int, message_rx: asyncio.queues.Queue) -> None:
-            """Task that feeds generator outputs into a function call's `data_out` stream."""
+    class __generator_output_sender_spec(typing_extensions.Protocol[SUPERSELF]):
+        def __call__(
+            self, /, function_call_id: str, data_format: int, message_rx: asyncio.queues.Queue
+        ) -> synchronicity.combined_types.AsyncAndBlockingContextManager[None]:
+            """Runs background task that feeds generator outputs into a function call's `data_out` stream."""
             ...
 
-        async def aio(self, /, function_call_id: str, data_format: int, message_rx: asyncio.queues.Queue) -> None:
-            """Task that feeds generator outputs into a function call's `data_out` stream."""
+        def aio(
+            self, /, function_call_id: str, data_format: int, message_rx: asyncio.queues.Queue
+        ) -> typing.AsyncContextManager[None]:
+            """Runs background task that feeds generator outputs into a function call's `data_out` stream."""
             ...
 
-    generator_output_task: __generator_output_task_spec[typing_extensions.Self]
+    generator_output_sender: __generator_output_sender_spec[typing_extensions.Self]
 
     class ___queue_create_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self, /, size: int) -> asyncio.queues.Queue:
modal/_utils/function_utils.py CHANGED
@@ -34,7 +34,6 @@ from ..exception import (
 from ..mount import ROOT_DIR, _is_modal_path, _Mount
 from .blob_utils import (
     MAX_ASYNC_OBJECT_SIZE_BYTES,
-    MAX_OBJECT_SIZE_BYTES,
     blob_download,
     blob_upload_with_r2_failure_info,
 )
@@ -518,12 +517,13 @@ async def _process_result(result: api_pb2.GenericResult, data_format: int, stub,
 
 def should_upload(
     num_bytes: int,
+    max_object_size_bytes: int,
     function_call_invocation_type: Optional["api_pb2.FunctionCallInvocationType.ValueType"],
 ) -> bool:
     """
     Determine if the input should be uploaded to blob storage.
     """
-    return num_bytes > MAX_OBJECT_SIZE_BYTES or (
+    return num_bytes > max_object_size_bytes or (
         function_call_invocation_type == api_pb2.FUNCTION_CALL_INVOCATION_TYPE_ASYNC
         and num_bytes > MAX_ASYNC_OBJECT_SIZE_BYTES
     )
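The new parameter threads the (possibly server-raised) limit into this decision instead of a module-level constant. A self-contained check of the two thresholds, with made-up constants standing in for the real values in `modal._utils.blob_utils` and the proto enum:

```python
MAX_ASYNC_OBJECT_SIZE_BYTES = 8_000      # hypothetical async threshold
FUNCTION_CALL_INVOCATION_TYPE_ASYNC = 1  # stand-in for the proto enum value

def should_upload(num_bytes, max_object_size_bytes, function_call_invocation_type):
    return num_bytes > max_object_size_bytes or (
        function_call_invocation_type == FUNCTION_CALL_INVOCATION_TYPE_ASYNC
        and num_bytes > MAX_ASYNC_OBJECT_SIZE_BYTES
    )

# sync call: only the per-input limit matters
assert not should_upload(10_000, 1_000_000, None)
# async call: the smaller async threshold also forces a blob upload
assert should_upload(10_000, 1_000_000, FUNCTION_CALL_INVOCATION_TYPE_ASYNC)
```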
@@ -534,6 +534,7 @@ async def _create_input(
     kwargs,
     stub: ModalClientModal,
     *,
+    max_object_size_bytes: int,
     idx: Optional[int] = None,
     method_name: Optional[str] = None,
     function_call_invocation_type: Optional["api_pb2.FunctionCallInvocationType.ValueType"] = None,
@@ -548,7 +549,7 @@ async def _create_input(
 
     args_serialized = serialize((args, kwargs))
 
-    if should_upload(len(args_serialized), function_call_invocation_type):
+    if should_upload(len(args_serialized), max_object_size_bytes, function_call_invocation_type):
         args_blob_id, r2_failed, r2_latency_ms = await blob_upload_with_r2_failure_info(args_serialized, stub)
         return api_pb2.FunctionPutInputsItem(
             input=api_pb2.FunctionInput(
modal/client.pyi CHANGED
@@ -31,7 +31,7 @@ class _Client:
         server_url: str,
         client_type: int,
         credentials: typing.Optional[tuple[str, str]],
-        version: str = "1.0.6.dev9",
+        version: str = "1.0.6.dev15",
     ):
         """mdmd:hidden
         The Modal client object is not intended to be instantiated directly by users.
@@ -160,7 +160,7 @@ class Client:
         server_url: str,
         client_type: int,
         credentials: typing.Optional[tuple[str, str]],
-        version: str = "1.0.6.dev9",
+        version: str = "1.0.6.dev15",
     ):
         """mdmd:hidden
         The Modal client object is not intended to be instantiated directly by users.
modal/functions.pyi CHANGED
@@ -428,7 +428,7 @@ class Function(
 
     _call_generator: ___call_generator_spec[typing_extensions.Self]
 
-    class __remote_spec(typing_extensions.Protocol[P_INNER, ReturnType_INNER, SUPERSELF]):
+    class __remote_spec(typing_extensions.Protocol[ReturnType_INNER, P_INNER, SUPERSELF]):
         def __call__(self, /, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> ReturnType_INNER:
             """Calls the function remotely, executing it with the given arguments and returning the execution's result."""
             ...
@@ -437,7 +437,7 @@ class Function(
         """Calls the function remotely, executing it with the given arguments and returning the execution's result."""
         ...
 
-    remote: __remote_spec[modal._functions.P, modal._functions.ReturnType, typing_extensions.Self]
+    remote: __remote_spec[modal._functions.ReturnType, modal._functions.P, typing_extensions.Self]
 
     class __remote_gen_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self, /, *args, **kwargs) -> typing.Generator[typing.Any, None, None]:
@@ -464,7 +464,7 @@ class Function(
         """
         ...
 
-    class ___experimental_spawn_spec(typing_extensions.Protocol[P_INNER, ReturnType_INNER, SUPERSELF]):
+    class ___experimental_spawn_spec(typing_extensions.Protocol[ReturnType_INNER, P_INNER, SUPERSELF]):
         def __call__(self, /, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]:
             """[Experimental] Calls the function with the given arguments, without waiting for the results.
 
@@ -488,7 +488,7 @@ class Function(
         ...
 
     _experimental_spawn: ___experimental_spawn_spec[
-        modal._functions.P, modal._functions.ReturnType, typing_extensions.Self
+        modal._functions.ReturnType, modal._functions.P, typing_extensions.Self
    ]
 
     class ___spawn_map_inner_spec(typing_extensions.Protocol[P_INNER, SUPERSELF]):
@@ -497,7 +497,7 @@ class Function(
 
     _spawn_map_inner: ___spawn_map_inner_spec[modal._functions.P, typing_extensions.Self]
 
-    class __spawn_spec(typing_extensions.Protocol[P_INNER, ReturnType_INNER, SUPERSELF]):
+    class __spawn_spec(typing_extensions.Protocol[ReturnType_INNER, P_INNER, SUPERSELF]):
         def __call__(self, /, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]:
             """Calls the function with the given arguments, without waiting for the results.
 
@@ -518,7 +518,7 @@ class Function(
         """
         ...
 
-    spawn: __spawn_spec[modal._functions.P, modal._functions.ReturnType, typing_extensions.Self]
+    spawn: __spawn_spec[modal._functions.ReturnType, modal._functions.P, typing_extensions.Self]
 
     def get_raw_f(self) -> collections.abc.Callable[..., typing.Any]:
         """Return the inner Python object wrapped by this Modal Function."""
modal/image.py CHANGED
@@ -1222,6 +1222,114 @@ class _Image(_Object, type_prefix="im"):
         gpu_config=parse_gpu_config(gpu),
     )
 
+    def uv_pip_install(
+        self,
+        *packages: Union[str, list[str]],  # A list of Python packages, eg. ["numpy", "matplotlib>=3.5.0"]
+        requirements: Optional[list[str]] = None,  # Passes -r (--requirements) to uv pip install
+        find_links: Optional[str] = None,  # Passes -f (--find-links) to uv pip install
+        index_url: Optional[str] = None,  # Passes -i (--index-url) to uv pip install
+        extra_index_url: Optional[str] = None,  # Passes --extra-index-url to uv pip install
+        pre: bool = False,  # Allow pre-releases using uv pip install --prerelease allow
+        extra_options: str = "",  # Additional options to pass to pip install, e.g. "--no-build-isolation"
+        force_build: bool = False,  # Ignore cached builds, similar to 'docker build --no-cache'
+        uv_version: Optional[str] = None,  # uv version to use
+        secrets: Sequence[_Secret] = [],
+        gpu: GPU_T = None,
+    ) -> "_Image":
+        """Install a list of Python packages using uv pip install.
+
+        **Examples**
+
+        Simple installation:
+        ```python
+        image = modal.Image.debian_slim().uv_pip_install("torch==2.7.1", "numpy")
+        ```
+
+        This method assumes that:
+        - Python is on the `$PATH` and dependencies are installed with the first Python on the `$PATH`.
+        - Shell supports backticks for substitution
+        - `which` command is on the `$PATH`
+        """
+        pkgs = _flatten_str_args("uv_pip_install", "packages", packages)
+
+        if requirements is None or isinstance(requirements, list):
+            requirements = requirements or []
+        else:
+            raise InvalidError("requirements must be None or a list of strings")
+
+        if not pkgs and not requirements:
+            return self
+        elif not _validate_packages(pkgs):
+            raise InvalidError(
+                "Package list for `Image.uv_pip_install` cannot contain other arguments;"
+                " try the `extra_options` parameter instead."
+            )
+
+        def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
+            commands = ["FROM base"]
+            UV_ROOT = "/.uv"
+            if uv_version is None:
+                commands.append(f"COPY --from=ghcr.io/astral-sh/uv:latest /uv {UV_ROOT}/uv")
+            else:
+                commands.append(f"COPY --from=ghcr.io/astral-sh/uv:{uv_version} /uv {UV_ROOT}/uv")
+
+            # NOTE: Using `which python` assumes:
+            # - python is on the PATH and uv is installing into the first python in the PATH
+            # - the shell supports backticks for substitution
+            # - `which` command is on the PATH
+            uv_pip_args = ["--python `which python`", "--compile-bytecode"]
+            context_files = {}
+
+            if find_links:
+                uv_pip_args.append(f"--find-links {shlex.quote(find_links)}")
+            if index_url:
+                uv_pip_args.append(f"--index-url {shlex.quote(index_url)}")
+            if extra_index_url:
+                uv_pip_args.append(f"--extra-index-url {shlex.quote(extra_index_url)}")
+            if pre:
+                uv_pip_args.append("--prerelease allow")
+            if extra_options:
+                uv_pip_args.append(extra_options)
+
+            if requirements:
+
+                def _generate_paths(idx: int, req: str) -> dict:
+                    local_path = os.path.expanduser(req)
+                    basename = os.path.basename(req)
+
+                    # The requirement files can have the same name but in different directories:
+                    # requirements=["test/requirements.txt", "a/b/c/requirements.txt"]
+                    # To uniquely identify these files, we add a `idx` prefix to every file's basename
+                    # - `test/requirements.txt` -> `/.0_requirements.txt` in context -> `/.uv/0/requirements.txt` to uv
+                    # - `a/b/c/requirements.txt` -> `/.1_requirements.txt` in context -> `/.uv/1/requirements.txt` to uv
+                    return {
+                        "local_path": local_path,
+                        "context_path": f"/.{idx}_{basename}",
+                        "dest_path": f"{UV_ROOT}/{idx}/{basename}",
+                    }
+
+                requirement_paths = [_generate_paths(idx, req) for idx, req in enumerate(requirements)]
+                requirements_cli = " ".join(f"--requirements {req['dest_path']}" for req in requirement_paths)
+                uv_pip_args.append(requirements_cli)
+
+                commands.extend([f"COPY {req['context_path']} {req['dest_path']}" for req in requirement_paths])
+                context_files.update({req["context_path"]: req["local_path"] for req in requirement_paths})
+
+            uv_pip_args.extend(shlex.quote(p) for p in sorted(pkgs))
+            uv_pip_args_joined = " ".join(uv_pip_args)
+
+            commands.append(f"RUN {UV_ROOT}/uv pip install {uv_pip_args_joined}")
+
+            return DockerfileSpec(commands=commands, context_files=context_files)
+
+        return _Image._from_args(
+            base_images={"base": self},
+            dockerfile_function=build_dockerfile,
+            force_build=self.force_build or force_build,
+            gpu_config=parse_gpu_config(gpu),
+            secrets=secrets,
+        )
+
     def poetry_install_from_file(
         self,
         poetry_pyproject_toml: str,
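For context, here is how the new method composes with the parameters defined above; the package pins, requirements file, index URL, and uv version are illustrative only:

```python
import modal

image = modal.Image.debian_slim().uv_pip_install(
    "torch==2.7.1",
    "numpy",
    requirements=["requirements.txt"],  # copied into the build context, passed via --requirements
    extra_index_url="https://pypi.example.com/simple",  # hypothetical private index
    uv_version="0.7.2",  # hypothetical pin; defaults to ghcr.io/astral-sh/uv:latest
)
```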
@@ -1312,6 +1420,149 @@ class _Image(_Object, type_prefix="im"):
         gpu_config=parse_gpu_config(gpu),
     )
 
+    def uv_sync(
+        self,
+        uv_project_dir: str = "./",  # Path to local uv managed project
+        *,
+        force_build: bool = False,  # Ignore cached builds, similar to 'docker build --no-cache'
+        groups: Optional[list[str]] = None,  # Dependency group to install using `uv sync --group`
+        extras: Optional[list[str]] = None,  # Optional dependencies to install using `uv sync --extra`
+        frozen: bool = True,  # If True, then we run `uv sync --frozen` when a uv.lock file is present
+        extra_options: str = "",  # Extra options to pass to `uv sync`
+        uv_version: Optional[str] = None,  # uv version to use
+        secrets: Sequence[_Secret] = [],
+        gpu: GPU_T = None,
+    ) -> "_Image":
+        """Creates a virtual environment with the dependencies in a uv managed project with `uv sync`.
+
+        **Examples**
+        ```python
+        image = modal.Image.debian_slim().uv_sync()
+        ```
+        """
+
+        def _normalize_items(items, name) -> list[str]:
+            if items is None:
+                return []
+            elif isinstance(items, list):
+                return items
+            else:
+                raise InvalidError(f"{name} must be None or a list of strings")
+
+        groups = _normalize_items(groups, "groups")
+        extras = _normalize_items(extras, "extras")
+
+        def _check_pyproject_toml(pyproject_toml: str, version: ImageBuilderVersion):
+            if not os.path.exists(pyproject_toml):
+                raise InvalidError(f"Expected {pyproject_toml} to exist")
+
+            import toml
+
+            with open(pyproject_toml) as f:
+                pyproject_toml_content = toml.load(f)
+
+            if (
+                "tool" in pyproject_toml_content
+                and "uv" in pyproject_toml_content["tool"]
+                and "workspace" in pyproject_toml_content["tool"]["uv"]
+            ):
+                raise InvalidError("uv workspaces are not supported")
+
+            if version > "2024.10":
+                # For builder version > 2024.10, modal is mounted at runtime and is not
+                # a requirement in `uv.lock`
+                return
+
+            dependencies = pyproject_toml_content["project"]["dependencies"]
+
+            for group in groups:
+                if (
+                    "dependency-groups" in pyproject_toml_content
+                    and group in pyproject_toml_content["dependency-groups"]
+                ):
+                    dependencies += pyproject_toml_content["dependency-groups"][group]
+
+            for extra in extras:
+                if (
+                    "project" in pyproject_toml_content
+                    and "optional-dependencies" in pyproject_toml_content["project"]
+                    and extra in pyproject_toml_content["project"]["optional-dependencies"]
+                ):
+                    dependencies += pyproject_toml_content["project"]["optional-dependencies"][extra]
+
+            PACKAGE_REGEX = re.compile(r"^[\w-]+")
+
+            def _extract_package(package) -> str:
+                m = PACKAGE_REGEX.match(package)
+                return m.group(0) if m else ""
+
+            if not any(_extract_package(dependency) == "modal" for dependency in dependencies):
+                raise InvalidError(
+                    "Image builder version <= 2024.10 requires modal to be specified in your pyproject.toml file"
+                )
+
+        def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
+            uv_project_dir_ = os.path.expanduser(uv_project_dir)
+            pyproject_toml = os.path.join(uv_project_dir_, "pyproject.toml")
+
+            UV_ROOT = "/.uv"
+            uv_sync_args = [
+                f"--project={UV_ROOT}",
+                "--no-install-workspace",  # Do not install the root project or any "uv workspace"
+                "--compile-bytecode",
+            ]
+
+            for group in groups:
+                uv_sync_args.append(f"--group={group}")
+            for extra in extras:
+                uv_sync_args.append(f"--extra={extra}")
+            if extra_options:
+                uv_sync_args.append(extra_options)
+
+            commands = ["FROM base"]
+
+            if uv_version is None:
+                commands.append(f"COPY --from=ghcr.io/astral-sh/uv:latest /uv {UV_ROOT}/uv")
+            else:
+                commands.append(f"COPY --from=ghcr.io/astral-sh/uv:{uv_version} /uv {UV_ROOT}/uv")
+
+            context_files = {}
+
+            _check_pyproject_toml(pyproject_toml, version)
+
+            context_files["/.pyproject.toml"] = pyproject_toml
+            commands.append(f"COPY /.pyproject.toml {UV_ROOT}/pyproject.toml")
+
+            uv_lock = os.path.join(uv_project_dir_, "uv.lock")
+            if os.path.exists(uv_lock):
+                context_files["/.uv.lock"] = uv_lock
+                commands.append(f"COPY /.uv.lock {UV_ROOT}/uv.lock")
+
+                if frozen:
+                    # Do not update `uv.lock` when we have one and `frozen=True`. This is the default because it
+                    # ensures that the runtime environment matches the local `uv.lock`.
+                    #
+                    # If `frozen=False`, then `uv sync` will update the dependencies in the `uv.lock` file
+                    # during build time.
+                    uv_sync_args.append("--frozen")
+
+            uv_sync_args_joined = " ".join(uv_sync_args).strip()
+
+            commands += [
+                f"RUN {UV_ROOT}/uv sync {uv_sync_args_joined}",
+                f"ENV PATH={UV_ROOT}/.venv/bin:$PATH",
+            ]
+
+            return DockerfileSpec(commands=commands, context_files=context_files)
+
+        return _Image._from_args(
+            base_images={"base": self},
+            dockerfile_function=build_dockerfile,
+            force_build=self.force_build or force_build,
+            secrets=secrets,
+            gpu_config=parse_gpu_config(gpu),
+        )
+
     def dockerfile_commands(
         self,
         *dockerfile_commands: Union[str, list[str]],
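And a usage sketch for `uv_sync`, with a hypothetical project layout and group/extra names; the parameters map directly onto the signature above:

```python
import modal

image = modal.Image.debian_slim().uv_sync(
    "./my-uv-project",  # directory containing pyproject.toml (and, ideally, uv.lock)
    groups=["dev"],     # forwarded as `uv sync --group dev`
    extras=["cuda"],    # forwarded as `uv sync --extra cuda`
    frozen=True,        # keep the locked versions from uv.lock (the default)
)
```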