modal 1.0.3.dev10__py3-none-any.whl → 1.2.3.dev7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of modal might be problematic. Click here for more details.

Files changed (160)
  1. modal/__init__.py +0 -2
  2. modal/__main__.py +3 -4
  3. modal/_billing.py +80 -0
  4. modal/_clustered_functions.py +7 -3
  5. modal/_clustered_functions.pyi +15 -3
  6. modal/_container_entrypoint.py +51 -69
  7. modal/_functions.py +508 -240
  8. modal/_grpc_client.py +171 -0
  9. modal/_load_context.py +105 -0
  10. modal/_object.py +81 -21
  11. modal/_output.py +58 -45
  12. modal/_partial_function.py +48 -73
  13. modal/_pty.py +7 -3
  14. modal/_resolver.py +26 -46
  15. modal/_runtime/asgi.py +4 -3
  16. modal/_runtime/container_io_manager.py +358 -220
  17. modal/_runtime/container_io_manager.pyi +296 -101
  18. modal/_runtime/execution_context.py +18 -2
  19. modal/_runtime/execution_context.pyi +64 -7
  20. modal/_runtime/gpu_memory_snapshot.py +262 -57
  21. modal/_runtime/user_code_imports.py +28 -58
  22. modal/_serialization.py +90 -6
  23. modal/_traceback.py +42 -1
  24. modal/_tunnel.pyi +380 -12
  25. modal/_utils/async_utils.py +84 -29
  26. modal/_utils/auth_token_manager.py +111 -0
  27. modal/_utils/blob_utils.py +181 -58
  28. modal/_utils/deprecation.py +19 -0
  29. modal/_utils/function_utils.py +91 -47
  30. modal/_utils/grpc_utils.py +89 -66
  31. modal/_utils/mount_utils.py +26 -1
  32. modal/_utils/name_utils.py +17 -3
  33. modal/_utils/task_command_router_client.py +536 -0
  34. modal/_utils/time_utils.py +34 -6
  35. modal/app.py +256 -88
  36. modal/app.pyi +909 -92
  37. modal/billing.py +5 -0
  38. modal/builder/2025.06.txt +18 -0
  39. modal/builder/PREVIEW.txt +18 -0
  40. modal/builder/base-images.json +58 -0
  41. modal/cli/_download.py +19 -3
  42. modal/cli/_traceback.py +3 -2
  43. modal/cli/app.py +4 -4
  44. modal/cli/cluster.py +15 -7
  45. modal/cli/config.py +5 -3
  46. modal/cli/container.py +7 -6
  47. modal/cli/dict.py +22 -16
  48. modal/cli/entry_point.py +12 -5
  49. modal/cli/environment.py +5 -4
  50. modal/cli/import_refs.py +3 -3
  51. modal/cli/launch.py +102 -5
  52. modal/cli/network_file_system.py +11 -12
  53. modal/cli/profile.py +3 -2
  54. modal/cli/programs/launch_instance_ssh.py +94 -0
  55. modal/cli/programs/run_jupyter.py +1 -1
  56. modal/cli/programs/run_marimo.py +95 -0
  57. modal/cli/programs/vscode.py +1 -1
  58. modal/cli/queues.py +57 -26
  59. modal/cli/run.py +91 -23
  60. modal/cli/secret.py +48 -22
  61. modal/cli/token.py +7 -8
  62. modal/cli/utils.py +4 -7
  63. modal/cli/volume.py +31 -25
  64. modal/client.py +15 -85
  65. modal/client.pyi +183 -62
  66. modal/cloud_bucket_mount.py +5 -3
  67. modal/cloud_bucket_mount.pyi +197 -5
  68. modal/cls.py +200 -126
  69. modal/cls.pyi +446 -68
  70. modal/config.py +29 -11
  71. modal/container_process.py +319 -19
  72. modal/container_process.pyi +190 -20
  73. modal/dict.py +290 -71
  74. modal/dict.pyi +835 -83
  75. modal/environments.py +15 -27
  76. modal/environments.pyi +46 -24
  77. modal/exception.py +14 -2
  78. modal/experimental/__init__.py +194 -40
  79. modal/experimental/flash.py +618 -0
  80. modal/experimental/flash.pyi +380 -0
  81. modal/experimental/ipython.py +11 -7
  82. modal/file_io.py +29 -36
  83. modal/file_io.pyi +251 -53
  84. modal/file_pattern_matcher.py +56 -16
  85. modal/functions.pyi +673 -92
  86. modal/gpu.py +1 -1
  87. modal/image.py +528 -176
  88. modal/image.pyi +1572 -145
  89. modal/io_streams.py +458 -128
  90. modal/io_streams.pyi +433 -52
  91. modal/mount.py +216 -151
  92. modal/mount.pyi +225 -78
  93. modal/network_file_system.py +45 -62
  94. modal/network_file_system.pyi +277 -56
  95. modal/object.pyi +93 -17
  96. modal/parallel_map.py +942 -129
  97. modal/parallel_map.pyi +294 -15
  98. modal/partial_function.py +0 -2
  99. modal/partial_function.pyi +234 -19
  100. modal/proxy.py +17 -8
  101. modal/proxy.pyi +36 -3
  102. modal/queue.py +270 -65
  103. modal/queue.pyi +817 -57
  104. modal/runner.py +115 -101
  105. modal/runner.pyi +205 -49
  106. modal/sandbox.py +512 -136
  107. modal/sandbox.pyi +845 -111
  108. modal/schedule.py +1 -1
  109. modal/secret.py +300 -70
  110. modal/secret.pyi +589 -34
  111. modal/serving.py +7 -11
  112. modal/serving.pyi +7 -8
  113. modal/snapshot.py +11 -8
  114. modal/snapshot.pyi +25 -4
  115. modal/token_flow.py +4 -4
  116. modal/token_flow.pyi +28 -8
  117. modal/volume.py +416 -158
  118. modal/volume.pyi +1117 -121
  119. {modal-1.0.3.dev10.dist-info → modal-1.2.3.dev7.dist-info}/METADATA +10 -9
  120. modal-1.2.3.dev7.dist-info/RECORD +195 -0
  121. modal_docs/mdmd/mdmd.py +17 -4
  122. modal_proto/api.proto +534 -79
  123. modal_proto/api_grpc.py +337 -1
  124. modal_proto/api_pb2.py +1522 -968
  125. modal_proto/api_pb2.pyi +1619 -134
  126. modal_proto/api_pb2_grpc.py +699 -4
  127. modal_proto/api_pb2_grpc.pyi +226 -14
  128. modal_proto/modal_api_grpc.py +175 -154
  129. modal_proto/sandbox_router.proto +145 -0
  130. modal_proto/sandbox_router_grpc.py +105 -0
  131. modal_proto/sandbox_router_pb2.py +149 -0
  132. modal_proto/sandbox_router_pb2.pyi +333 -0
  133. modal_proto/sandbox_router_pb2_grpc.py +203 -0
  134. modal_proto/sandbox_router_pb2_grpc.pyi +75 -0
  135. modal_proto/task_command_router.proto +144 -0
  136. modal_proto/task_command_router_grpc.py +105 -0
  137. modal_proto/task_command_router_pb2.py +149 -0
  138. modal_proto/task_command_router_pb2.pyi +333 -0
  139. modal_proto/task_command_router_pb2_grpc.py +203 -0
  140. modal_proto/task_command_router_pb2_grpc.pyi +75 -0
  141. modal_version/__init__.py +1 -1
  142. modal/requirements/PREVIEW.txt +0 -16
  143. modal/requirements/base-images.json +0 -26
  144. modal-1.0.3.dev10.dist-info/RECORD +0 -179
  145. modal_proto/modal_options_grpc.py +0 -3
  146. modal_proto/options.proto +0 -19
  147. modal_proto/options_grpc.py +0 -3
  148. modal_proto/options_pb2.py +0 -35
  149. modal_proto/options_pb2.pyi +0 -20
  150. modal_proto/options_pb2_grpc.py +0 -4
  151. modal_proto/options_pb2_grpc.pyi +0 -7
  152. /modal/{requirements → builder}/2023.12.312.txt +0 -0
  153. /modal/{requirements → builder}/2023.12.txt +0 -0
  154. /modal/{requirements → builder}/2024.04.txt +0 -0
  155. /modal/{requirements → builder}/2024.10.txt +0 -0
  156. /modal/{requirements → builder}/README.md +0 -0
  157. {modal-1.0.3.dev10.dist-info → modal-1.2.3.dev7.dist-info}/WHEEL +0 -0
  158. {modal-1.0.3.dev10.dist-info → modal-1.2.3.dev7.dist-info}/entry_points.txt +0 -0
  159. {modal-1.0.3.dev10.dist-info → modal-1.2.3.dev7.dist-info}/licenses/LICENSE +0 -0
  160. {modal-1.0.3.dev10.dist-info → modal-1.2.3.dev7.dist-info}/top_level.txt +0 -0
modal/image.py CHANGED
@@ -7,7 +7,7 @@ import shlex
7
7
  import sys
8
8
  import typing
9
9
  import warnings
10
- from collections.abc import Sequence
10
+ from collections.abc import Collection, Sequence
11
11
  from dataclasses import dataclass
12
12
  from inspect import isfunction
13
13
  from pathlib import Path, PurePosixPath
@@ -23,26 +23,29 @@ from typing import (
23
23
 
24
24
  from google.protobuf.message import Message
25
25
  from grpclib.exceptions import GRPCError, StreamTerminatedError
26
+ from typing_extensions import Self
26
27
 
28
+ from modal._serialization import serialize_data_format
27
29
  from modal_proto import api_pb2
28
30
 
31
+ from ._load_context import LoadContext
29
32
  from ._object import _Object, live_method_gen
30
33
  from ._resolver import Resolver
31
- from ._serialization import serialize
34
+ from ._serialization import get_preferred_payload_format, serialize
32
35
  from ._utils.async_utils import synchronize_api
33
36
  from ._utils.blob_utils import MAX_OBJECT_SIZE_BYTES
34
- from ._utils.deprecation import deprecation_warning
35
37
  from ._utils.docker_utils import (
36
38
  extract_copy_command_patterns,
37
39
  find_dockerignore_file,
38
40
  )
39
41
  from ._utils.function_utils import FunctionInfo
40
- from ._utils.grpc_utils import RETRYABLE_GRPC_STATUS_CODES, retry_transient_errors
42
+ from ._utils.grpc_utils import RETRYABLE_GRPC_STATUS_CODES
43
+ from ._utils.mount_utils import validate_only_modal_volumes
41
44
  from .client import _Client
42
45
  from .cloud_bucket_mount import _CloudBucketMount
43
46
  from .config import config, logger, user_config_path
44
47
  from .environments import _get_environment_cached
45
- from .exception import InvalidError, NotFoundError, RemoteError, VersionError
48
+ from .exception import ExecutionError, InvalidError, NotFoundError, RemoteError, VersionError
46
49
  from .file_pattern_matcher import NON_PYTHON_FILES, FilePatternMatcher, _ignore_fn
47
50
  from .gpu import GPU_T, parse_gpu_config
48
51
  from .mount import _Mount, python_standalone_mount_name
@@ -56,7 +59,7 @@ if typing.TYPE_CHECKING:
56
59
  import modal._functions
57
60
 
58
61
  # This is used for both type checking and runtime validation
59
- ImageBuilderVersion = Literal["2023.12", "2024.04", "2024.10", "PREVIEW"]
62
+ ImageBuilderVersion = Literal["2023.12", "2024.04", "2024.10", "2025.06", "PREVIEW"]
60
63
 
61
64
  # Note: we also define supported Python versions via logic at the top of the package __init__.py
62
65
  # so that we fail fast / clearly in unsupported containers. Additionally, we enumerate the supported
@@ -64,12 +67,13 @@ ImageBuilderVersion = Literal["2023.12", "2024.04", "2024.10", "PREVIEW"]
64
67
  # Consider consolidating these multiple sources of truth?
65
68
  SUPPORTED_PYTHON_SERIES: dict[ImageBuilderVersion, list[str]] = {
66
69
  "PREVIEW": ["3.9", "3.10", "3.11", "3.12", "3.13"],
70
+ "2025.06": ["3.9", "3.10", "3.11", "3.12", "3.13"],
67
71
  "2024.10": ["3.9", "3.10", "3.11", "3.12", "3.13"],
68
72
  "2024.04": ["3.9", "3.10", "3.11", "3.12"],
69
73
  "2023.12": ["3.9", "3.10", "3.11", "3.12"],
70
74
  }
71
75
 
72
- LOCAL_REQUIREMENTS_DIR = Path(__file__).parent / "requirements"
76
+ LOCAL_REQUIREMENTS_DIR = Path(__file__).parent / "builder"
73
77
  CONTAINER_REQUIREMENTS_PATH = "/modal_requirements.txt"
74
78
 
75
79
 
@@ -281,24 +285,12 @@ def _create_context_mount_function(
281
285
  ignore: Union[Sequence[str], Callable[[Path], bool], _AutoDockerIgnoreSentinel],
282
286
  dockerfile_cmds: list[str] = [],
283
287
  dockerfile_path: Optional[Path] = None,
284
- context_mount: Optional[_Mount] = None,
285
288
  context_dir: Optional[Union[Path, str]] = None,
286
289
  ):
287
290
  if dockerfile_path and dockerfile_cmds:
288
291
  raise InvalidError("Cannot provide both dockerfile and docker commands")
289
292
 
290
- if context_mount:
291
- if ignore is not AUTO_DOCKERIGNORE:
292
- raise InvalidError("Cannot set both `context_mount` and `ignore`")
293
- if context_dir is not None:
294
- raise InvalidError("Cannot set both `context_mount` and `context_dir`")
295
-
296
- def identity_context_mount_fn() -> Optional[_Mount]:
297
- return context_mount
298
-
299
- return identity_context_mount_fn
300
-
301
- elif ignore is AUTO_DOCKERIGNORE:
293
+ if ignore is AUTO_DOCKERIGNORE:
302
294
 
303
295
  def auto_created_context_mount_fn() -> Optional[_Mount]:
304
296
  nonlocal context_dir
@@ -416,7 +408,6 @@ class _Image(_Object, type_prefix="im"):
416
408
  self.force_build = False
417
409
 
418
410
  def _initialize_from_other(self, other: "_Image"):
419
- # used by .clone()
420
411
  self.inside_exceptions = other.inside_exceptions
421
412
  self.force_build = other.force_build
422
413
  self._serve_mounts = other._serve_mounts
@@ -444,12 +435,16 @@ class _Image(_Object, type_prefix="im"):
444
435
 
445
436
  base_image = self
446
437
 
447
- async def _load(self2: "_Image", resolver: Resolver, existing_object_id: Optional[str]):
438
+ async def _load(
439
+ self2: "_Image", resolver: Resolver, load_context: LoadContext, existing_object_id: Optional[str]
440
+ ):
448
441
  self2._hydrate_from_other(base_image) # same image id as base image as long as it's lazy
449
442
  self2._deferred_mounts = tuple(base_image._deferred_mounts) + (mount,)
450
443
  self2._serve_mounts = base_image._serve_mounts | ({mount} if mount.is_local() else set())
451
444
 
452
- img = _Image._from_loader(_load, "Image(local files)", deps=lambda: [base_image, mount])
445
+ img = _Image._from_loader(
446
+ _load, "Image(local files)", deps=lambda: [base_image, mount], load_context_overrides=LoadContext.empty()
447
+ )
453
448
  img._added_python_source_set = base_image._added_python_source_set
454
449
  return img
455
450
 
@@ -490,13 +485,15 @@ class _Image(_Object, type_prefix="im"):
490
485
  *,
491
486
  base_images: Optional[dict[str, "_Image"]] = None,
492
487
  dockerfile_function: Optional[Callable[[ImageBuilderVersion], DockerfileSpec]] = None,
493
- secrets: Optional[Sequence[_Secret]] = None,
488
+ secrets: Optional[Collection[_Secret]] = None,
494
489
  gpu_config: Optional[api_pb2.GPUConfig] = None,
495
490
  build_function: Optional["modal._functions._Function"] = None,
496
491
  build_function_input: Optional[api_pb2.FunctionInput] = None,
497
492
  image_registry_config: Optional[_ImageRegistryConfig] = None,
498
493
  context_mount_function: Optional[Callable[[], Optional[_Mount]]] = None,
499
494
  force_build: bool = False,
495
+ build_args: dict[str, str] = {},
496
+ validated_volumes: Optional[Sequence[tuple[str, _Volume]]] = None,
500
497
  # For internal use only.
501
498
  _namespace: "api_pb2.DeploymentNamespace.ValueType" = api_pb2.DEPLOYMENT_NAMESPACE_WORKSPACE,
502
499
  _do_assert_no_mount_layers: bool = True,
@@ -504,6 +501,9 @@ class _Image(_Object, type_prefix="im"):
504
501
  if base_images is None:
505
502
  base_images = {}
506
503
 
504
+ if validated_volumes is None:
505
+ validated_volumes = []
506
+
507
507
  if secrets is None:
508
508
  secrets = []
509
509
  if gpu_config is None:
@@ -524,20 +524,22 @@ class _Image(_Object, type_prefix="im"):
524
524
  deps += (build_function,)
525
525
  if image_registry_config and image_registry_config.secret:
526
526
  deps += (image_registry_config.secret,)
527
+ for _, vol in validated_volumes:
528
+ deps += (vol,)
527
529
  return deps
528
530
 
529
- async def _load(self: _Image, resolver: Resolver, existing_object_id: Optional[str]):
531
+ async def _load(self: _Image, resolver: Resolver, load_context: LoadContext, existing_object_id: Optional[str]):
530
532
  context_mount = context_mount_function() if context_mount_function else None
531
533
  if context_mount:
532
- await resolver.load(context_mount)
534
+ await resolver.load(context_mount, load_context)
533
535
 
534
536
  if _do_assert_no_mount_layers:
535
537
  for image in base_images.values():
536
538
  # base images can't have
537
539
  image._assert_no_mount_layers()
538
540
 
539
- assert resolver.app_id # type narrowing
540
- environment = await _get_environment_cached(resolver.environment_name or "", resolver.client)
541
+ assert load_context.app_id # type narrowing
542
+ environment = await _get_environment_cached(load_context.environment_name or "", load_context.client)
541
543
  # A bit hacky, but assume that the environment provides a valid builder version
542
544
  image_builder_version = cast(ImageBuilderVersion, environment._settings.image_builder_version)
543
545
  builder_version = _get_image_builder_version(image_builder_version)
@@ -602,6 +604,17 @@ class _Image(_Object, type_prefix="im"):
602
604
  build_function_id = ""
603
605
  _build_function = None
604
606
 
607
+ # Relies on dicts being ordered (true as of Python 3.6).
608
+ volume_mounts = [
609
+ api_pb2.VolumeMount(
610
+ mount_path=path,
611
+ volume_id=volume.object_id,
612
+ allow_background_commits=True,
613
+ read_only=volume._read_only,
614
+ )
615
+ for path, volume in validated_volumes
616
+ ]
617
+
605
618
  image_definition = api_pb2.Image(
606
619
  base_images=base_images_pb2s,
607
620
  dockerfile_commands=dockerfile.commands,
@@ -613,10 +626,12 @@ class _Image(_Object, type_prefix="im"):
613
626
  runtime=config.get("function_runtime"),
614
627
  runtime_debug=config.get("function_runtime_debug"),
615
628
  build_function=_build_function,
629
+ build_args=build_args,
630
+ volume_mounts=volume_mounts,
616
631
  )
617
632
 
618
633
  req = api_pb2.ImageGetOrCreateRequest(
619
- app_id=resolver.app_id,
634
+ app_id=load_context.app_id,
620
635
  image=image_definition,
621
636
  existing_image_id=existing_object_id or "", # TODO: ignored
622
637
  build_function_id=build_function_id,
@@ -628,7 +643,7 @@ class _Image(_Object, type_prefix="im"):
628
643
  allow_global_deployment=os.environ.get("MODAL_IMAGE_ALLOW_GLOBAL_DEPLOYMENT") == "1",
629
644
  ignore_cache=config.get("ignore_cache"),
630
645
  )
631
- resp = await retry_transient_errors(resolver.client.stub.ImageGetOrCreate, req)
646
+ resp = await load_context.client.stub.ImageGetOrCreate(req)
632
647
  image_id = resp.image_id
633
648
  result: api_pb2.GenericResult
634
649
  metadata: Optional[api_pb2.ImageMetadata] = None
@@ -641,7 +656,7 @@ class _Image(_Object, type_prefix="im"):
641
656
  else:
642
657
  # not built or in the process of building - wait for build
643
658
  logger.debug("Waiting for image %s" % image_id)
644
- resp = await _image_await_build_result(image_id, resolver.client)
659
+ resp = await _image_await_build_result(image_id, load_context.client)
645
660
  result = resp.result
646
661
  if resp.HasField("metadata"):
647
662
  metadata = resp.metadata
@@ -655,7 +670,13 @@ class _Image(_Object, type_prefix="im"):
655
670
  msg += " (Hint: Use `modal.enable_output()` to see logs from the process building the Image.)"
656
671
  raise RemoteError(msg)
657
672
  elif result.status == api_pb2.GenericResult.GENERIC_STATUS_TERMINATED:
658
- raise RemoteError(f"Image build for {image_id} terminated due to external shut-down. Please try again.")
673
+ msg = f"Image build for {image_id} terminated due to external shut-down. Please try again."
674
+ if result.exception:
675
+ msg = (
676
+ f"Image build for {image_id} terminated due to external shut-down with the exception:\n"
677
+ f"{result.exception}"
678
+ )
679
+ raise RemoteError(msg)
659
680
  elif result.status == api_pb2.GenericResult.GENERIC_STATUS_TIMEOUT:
660
681
  raise RemoteError(
661
682
  f"Image build for {image_id} timed out. Please try again with a larger `timeout` parameter."
@@ -665,7 +686,7 @@ class _Image(_Object, type_prefix="im"):
665
686
  else:
666
687
  raise RemoteError("Unknown status %s!" % result.status)
667
688
 
668
- self._hydrate(image_id, resolver.client, metadata)
689
+ self._hydrate(image_id, load_context.client, metadata)
669
690
  local_mounts = set()
670
691
  for base in base_images.values():
671
692
  local_mounts |= base._serve_mounts
@@ -674,7 +695,7 @@ class _Image(_Object, type_prefix="im"):
674
695
  self._serve_mounts = frozenset(local_mounts)
675
696
 
676
697
  rep = f"Image({dockerfile_function})"
677
- obj = _Image._from_loader(_load, rep, deps=_deps)
698
+ obj = _Image._from_loader(_load, rep, deps=_deps, load_context_overrides=LoadContext.empty())
678
699
  obj.force_build = force_build
679
700
  obj._added_python_source_set = frozenset.union(
680
701
  frozenset(), *(base._added_python_source_set for base in base_images.values())
@@ -797,28 +818,6 @@ class _Image(_Object, type_prefix="im"):
797
818
  mount = _Mount._add_local_dir(Path(local_path), PurePosixPath(remote_path), ignore=_ignore_fn(ignore))
798
819
  return self._add_mount_layer_or_copy(mount, copy=copy)
799
820
 
800
- def copy_local_file(self, local_path: Union[str, Path], remote_path: Union[str, Path] = "./") -> "_Image":
801
- """mdmd:hidden
802
- Copy a file into the image as a part of building it.
803
-
804
- This works in a similar way to [`COPY`](https://docs.docker.com/engine/reference/builder/#copy)
805
- works in a `Dockerfile`.
806
- """
807
- deprecation_warning(
808
- (2025, 1, 13),
809
- COPY_DEPRECATION_MESSAGE_PATTERN.format(replacement="image.add_local_file"),
810
- )
811
- basename = str(Path(local_path).name)
812
-
813
- def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
814
- return DockerfileSpec(commands=["FROM base", f"COPY {basename} {remote_path}"], context_files={})
815
-
816
- return _Image._from_args(
817
- base_images={"base": self},
818
- dockerfile_function=build_dockerfile,
819
- context_mount_function=lambda: _Mount._from_local_file(local_path, remote_path=f"/{basename}"),
820
- )
821
-
822
821
  def add_local_python_source(
823
822
  self, *modules: str, copy: bool = False, ignore: Union[Sequence[str], Callable[[Path], bool]] = NON_PYTHON_FILES
824
823
  ) -> "_Image":
@@ -863,95 +862,80 @@ class _Image(_Object, type_prefix="im"):
863
862
  img._added_python_source_set |= set(modules)
864
863
  return img
865
864
 
866
- def copy_local_dir(
867
- self,
868
- local_path: Union[str, Path],
869
- remote_path: Union[str, Path] = ".",
870
- # Predicate filter function for file exclusion, which should accept a filepath and return `True` for exclusion.
871
- # Defaults to excluding no files. If a Sequence is provided, it will be converted to a FilePatternMatcher.
872
- # Which follows dockerignore syntax.
873
- ignore: Union[Sequence[str], Callable[[Path], bool]] = [],
874
- ) -> "_Image":
875
- """mdmd:hidden
876
- **Deprecated**: Use image.add_local_dir instead
865
+ @staticmethod
866
+ async def from_id(image_id: str, client: Optional[_Client] = None) -> "_Image":
867
+ """Construct an Image from an id and look up the Image result.
877
868
 
878
- Copy a directory into the image as a part of building the image.
869
+ The ID of an Image object can be accessed using `.object_id`.
870
+ """
879
871
 
880
- This works in a similar way to [`COPY`](https://docs.docker.com/engine/reference/builder/#copy)
881
- works in a `Dockerfile`.
872
+ async def _load(self: _Image, resolver: Resolver, load_context: LoadContext, existing_object_id: Optional[str]):
873
+ resp = await load_context.client.stub.ImageFromId(api_pb2.ImageFromIdRequest(image_id=image_id))
874
+ self._hydrate(resp.image_id, load_context.client, resp.metadata)
882
875
 
883
- **Usage:**
876
+ rep = f"Image.from_id({image_id!r})"
877
+ obj = _Image._from_loader(_load, rep, load_context_overrides=LoadContext(client=client))
884
878
 
885
- ```python notest
886
- from pathlib import Path
887
- from modal import FilePatternMatcher
879
+ return obj
888
880
 
889
- image = modal.Image.debian_slim().copy_local_dir(
890
- "~/assets",
891
- remote_path="/assets",
892
- ignore=["**/*.venv"],
893
- )
881
+ async def build(self, app: "modal.app._App") -> "_Image":
882
+ """Eagerly build an image.
894
883
 
895
- image = modal.Image.debian_slim().copy_local_dir(
896
- "~/assets",
897
- remote_path="/assets",
898
- ignore=lambda p: p.is_relative_to(".venv"),
899
- )
884
+ If your image was previously built, then this method will not rebuild your image
885
+ and your cached image is returned.
900
886
 
901
- image = modal.Image.debian_slim().copy_local_dir(
902
- "~/assets",
903
- remote_path="/assets",
904
- ignore=FilePatternMatcher("**/*.txt"),
905
- )
887
+ **Examples**
906
888
 
907
- # When including files is simpler than excluding them, you can use the `~` operator to invert the matcher.
908
- image = modal.Image.debian_slim().copy_local_dir(
909
- "~/assets",
910
- remote_path="/assets",
911
- ignore=~FilePatternMatcher("**/*.py"),
912
- )
889
+ ```python
890
+ image = modal.Image.debian_slim().uv_pip_install("scipy", "numpy")
913
891
 
914
- # You can also read ignore patterns from a file.
915
- image = modal.Image.debian_slim().copy_local_dir(
916
- "~/assets",
917
- remote_path="/assets",
918
- ignore=FilePatternMatcher.from_file("/path/to/ignorefile"),
919
- )
892
+ app = modal.App.lookup("build-image", create_if_missing=True)
893
+ with modal.enable_output(): # To see logs in your local terminal
894
+ image.build(app)
895
+
896
+ # Save the image id
897
+ my_image_id = image.object_id
898
+
899
+ # Reference the image with the id or use it in another context.
900
+ built_image = modal.Image.from_id(my_image_id)
920
901
  ```
921
- """
922
- deprecation_warning(
923
- (2025, 1, 13),
924
- COPY_DEPRECATION_MESSAGE_PATTERN.format(replacement="image.add_local_dir"),
925
- )
926
902
 
927
- def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
928
- return DockerfileSpec(commands=["FROM base", f"COPY . {remote_path}"], context_files={})
903
+ Alternatively, you can pre-build an image and use it in a sandbox.
929
904
 
930
- return _Image._from_args(
931
- base_images={"base": self},
932
- dockerfile_function=build_dockerfile,
933
- context_mount_function=lambda: _Mount._add_local_dir(
934
- Path(local_path), PurePosixPath("/"), ignore=_ignore_fn(ignore)
935
- ),
936
- )
905
+ ```python notest
906
+ app = modal.App.lookup("sandbox-example", create_if_missing=True)
937
907
 
938
- @staticmethod
939
- async def from_id(image_id: str, client: Optional[_Client] = None) -> "_Image":
940
- """Construct an Image from an id and look up the Image result.
908
+ with modal.enable_output():
909
+ image = modal.Image.debian_slim().uv_pip_install("scipy")
910
+ image.build(app)
941
911
 
942
- The ID of an Image object can be accessed using `.object_id`.
943
- """
944
- if client is None:
945
- client = await _Client.from_env()
912
+ sb = modal.Sandbox.create("python", "-c", "import scipy; print(scipy)", app=app, image=image)
913
+ print(sb.stdout.read())
914
+ sb.terminate()
915
+ ```
946
916
 
947
- async def _load(self: _Image, resolver: Resolver, existing_object_id: Optional[str]):
948
- resp = await retry_transient_errors(client.stub.ImageFromId, api_pb2.ImageFromIdRequest(image_id=image_id))
949
- self._hydrate(resp.image_id, resolver.client, resp.metadata)
917
+ **Note**
950
918
 
951
- rep = f"Image.from_id({image_id!r})"
952
- obj = _Image._from_loader(_load, rep)
919
+ For defining Modal functions, images are built automatically when deploying or running an App.
920
+ You do not need to build the image explicitly:
953
921
 
954
- return obj
922
+ ```python notest
923
+ app = modal.App()
924
+ image = modal.Image.debian_slim()
925
+
926
+ # No need to explicitly build the image for defining a function.
927
+ @app.function(image=image)
928
+ def f():
929
+ ...
930
+ ```
931
+
932
+ """
933
+ if app.app_id is None:
934
+ raise InvalidError("App has not been initialized yet. Use the context manager `app.run()` or `App.lookup`")
935
+
936
+ resolver = Resolver()
937
+ await resolver.load(self, app._root_load_context)
938
+ return self
955
939
 
956
940
  def pip_install(
957
941
  self,
@@ -962,7 +946,8 @@ class _Image(_Object, type_prefix="im"):
962
946
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
963
947
  extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
964
948
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
965
- secrets: Sequence[_Secret] = [],
949
+ env: Optional[dict[str, Optional[str]]] = None,
950
+ secrets: Optional[Collection[_Secret]] = None,
966
951
  gpu: GPU_T = None,
967
952
  ) -> "_Image":
968
953
  """Install a list of Python packages using pip.
@@ -1009,6 +994,10 @@ class _Image(_Object, type_prefix="im"):
1009
994
  commands = [cmd.strip() for cmd in commands]
1010
995
  return DockerfileSpec(commands=commands, context_files={})
1011
996
 
997
+ secrets = secrets or []
998
+ if env:
999
+ secrets = [*secrets, _Secret.from_dict(env)]
1000
+
1012
1001
  gpu_config = parse_gpu_config(gpu)
1013
1002
  return _Image._from_args(
1014
1003
  base_images={"base": self},
@@ -1028,7 +1017,8 @@ class _Image(_Object, type_prefix="im"):
1028
1017
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
1029
1018
  extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
1030
1019
  gpu: GPU_T = None,
1031
- secrets: Sequence[_Secret] = [],
1020
+ env: Optional[dict[str, Optional[str]]] = None, # Environment variables to set in the container
1021
+ secrets: Optional[Collection[_Secret]] = None, # Secrets to inject into the container as environment variables
1032
1022
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1033
1023
  ) -> "_Image":
1034
1024
  """
@@ -1062,12 +1052,16 @@ class _Image(_Object, type_prefix="im"):
1062
1052
  )
1063
1053
  ```
1064
1054
  """
1055
+
1065
1056
  if not secrets:
1066
1057
  raise InvalidError(
1067
1058
  "No secrets provided to function. "
1068
1059
  "Installing private packages requires tokens to be passed via modal.Secret objects."
1069
1060
  )
1070
1061
 
1062
+ if env:
1063
+ secrets = [*secrets, _Secret.from_dict(env)]
1064
+
1071
1065
  invalid_repos = []
1072
1066
  install_urls = []
1073
1067
  for repo_ref in repositories:
@@ -1129,11 +1123,16 @@ class _Image(_Object, type_prefix="im"):
1129
1123
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
1130
1124
  extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
1131
1125
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1132
- secrets: Sequence[_Secret] = [],
1126
+ env: Optional[dict[str, Optional[str]]] = None,
1127
+ secrets: Optional[Collection[_Secret]] = None,
1133
1128
  gpu: GPU_T = None,
1134
1129
  ) -> "_Image":
1135
1130
  """Install a list of Python packages from a local `requirements.txt` file."""
1136
1131
 
1132
+ secrets = secrets or []
1133
+ if env:
1134
+ secrets = [*secrets, _Secret.from_dict(env)]
1135
+
1137
1136
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1138
1137
  requirements_txt_path = os.path.expanduser(requirements_txt)
1139
1138
  context_files = {"/.requirements.txt": requirements_txt_path}
@@ -1170,7 +1169,8 @@ class _Image(_Object, type_prefix="im"):
1170
1169
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
1171
1170
  extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
1172
1171
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1173
- secrets: Sequence[_Secret] = [],
1172
+ env: Optional[dict[str, Optional[str]]] = None,
1173
+ secrets: Optional[Collection[_Secret]] = None,
1174
1174
  gpu: GPU_T = None,
1175
1175
  ) -> "_Image":
1176
1176
  """Install dependencies specified by a local `pyproject.toml` file.
@@ -1181,6 +1181,10 @@ class _Image(_Object, type_prefix="im"):
1181
1181
  all of the packages in each listed section are installed as well.
1182
1182
  """
1183
1183
 
1184
+ secrets = secrets or []
1185
+ if env:
1186
+ secrets = [*secrets, _Secret.from_dict(env)]
1187
+
1184
1188
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1185
1189
  # Defer toml import so we don't need it in the container runtime environment
1186
1190
  import toml
@@ -1221,21 +1225,139 @@ class _Image(_Object, type_prefix="im"):
1221
1225
  gpu_config=parse_gpu_config(gpu),
1222
1226
  )
1223
1227
 
1228
+ def uv_pip_install(
1229
+ self,
1230
+ *packages: Union[str, list[str]], # A list of Python packages, eg. ["numpy", "matplotlib>=3.5.0"]
1231
+ requirements: Optional[list[str]] = None, # Passes -r (--requirements) to uv pip install
1232
+ find_links: Optional[str] = None, # Passes -f (--find-links) to uv pip install
1233
+ index_url: Optional[str] = None, # Passes -i (--index-url) to uv pip install
1234
+ extra_index_url: Optional[str] = None, # Passes --extra-index-url to uv pip install
1235
+ pre: bool = False, # Allow pre-releases using uv pip install --prerelease allow
1236
+ extra_options: str = "", # Additional options to pass to uv pip install, e.g. "--no-build-isolation"
1237
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1238
+ uv_version: Optional[str] = None, # uv version to use
1239
+ env: Optional[dict[str, Optional[str]]] = None,
1240
+ secrets: Optional[Collection[_Secret]] = None,
1241
+ gpu: GPU_T = None,
1242
+ ) -> "_Image":
1243
+ """Install a list of Python packages using uv pip install.
1244
+
1245
+ **Examples**
1246
+
1247
+ Simple installation:
1248
+ ```python
1249
+ image = modal.Image.debian_slim().uv_pip_install("torch==2.7.1", "numpy")
1250
+ ```
1251
+
1252
+ This method assumes that:
1253
+ - Python is on the `$PATH` and dependencies are installed with the first Python on the `$PATH`.
1254
+ - Shell supports `$()` for substitution
1255
+ - The `command` shell builtin is available (used to locate Python)
1256
+
1257
+ Added in v1.1.0.
1258
+ """
1259
+
1260
+ secrets = secrets or []
1261
+ if env:
1262
+ secrets = [*secrets, _Secret.from_dict(env)]
1263
+
1264
+ pkgs = _flatten_str_args("uv_pip_install", "packages", packages)
1265
+
1266
+ if requirements is None or isinstance(requirements, list):
1267
+ requirements = requirements or []
1268
+ else:
1269
+ raise InvalidError("requirements must be None or a list of strings")
1270
+
1271
+ if not pkgs and not requirements:
1272
+ return self
1273
+ elif not _validate_packages(pkgs):
1274
+ raise InvalidError(
1275
+ "Package list for `Image.uv_pip_install` cannot contain other arguments;"
1276
+ " try the `extra_options` parameter instead."
1277
+ )
1278
+
1279
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1280
+ commands = ["FROM base"]
1281
+ UV_ROOT = "/.uv"
1282
+ if uv_version is None:
1283
+ commands.append(f"COPY --from=ghcr.io/astral-sh/uv:latest /uv {UV_ROOT}/uv")
1284
+ else:
1285
+ commands.append(f"COPY --from=ghcr.io/astral-sh/uv:{uv_version} /uv {UV_ROOT}/uv")
1286
+
1287
+ # NOTE: Using $(command -v python) assumes:
1288
+ # - python is on the PATH and uv is installing into the first python in the PATH
1289
+ # - the shell supports $() for substitution
1290
+ # - `command` command is on the PATH
1291
+ uv_pip_args = ["--python $(command -v python)", "--compile-bytecode"]
1292
+ context_files = {}
1293
+
1294
+ if find_links:
1295
+ uv_pip_args.append(f"--find-links {shlex.quote(find_links)}")
1296
+ if index_url:
1297
+ uv_pip_args.append(f"--index-url {shlex.quote(index_url)}")
1298
+ if extra_index_url:
1299
+ uv_pip_args.append(f"--extra-index-url {shlex.quote(extra_index_url)}")
1300
+ if pre:
1301
+ uv_pip_args.append("--prerelease allow")
1302
+ if extra_options:
1303
+ uv_pip_args.append(extra_options)
1304
+
1305
+ if requirements:
1306
+
1307
+ def _generate_paths(idx: int, req: str) -> dict:
1308
+ local_path = os.path.expanduser(req)
1309
+ basename = os.path.basename(req)
1310
+
1311
+ # The requirement files can have the same name but in different directories:
1312
+ # requirements=["test/requirements.txt", "a/b/c/requirements.txt"]
1313
+ # To uniquely identify these files, we add a `idx` prefix to every file's basename
1314
+ # - `test/requirements.txt` -> `/.0_requirements.txt` in context -> `/.uv/0/requirements.txt` to uv
1315
+ # - `a/b/c/requirements.txt` -> `/.1_requirements.txt` in context -> `/.uv/1/requirements.txt` to uv
1316
+ return {
1317
+ "local_path": local_path,
1318
+ "context_path": f"/.{idx}_{basename}",
1319
+ "dest_path": f"{UV_ROOT}/{idx}/{basename}",
1320
+ }
1321
+
1322
+ requirement_paths = [_generate_paths(idx, req) for idx, req in enumerate(requirements)]
1323
+ requirements_cli = " ".join(f"--requirements {req['dest_path']}" for req in requirement_paths)
1324
+ uv_pip_args.append(requirements_cli)
1325
+
1326
+ commands.extend([f"COPY {req['context_path']} {req['dest_path']}" for req in requirement_paths])
1327
+ context_files.update({req["context_path"]: req["local_path"] for req in requirement_paths})
1328
+
1329
+ uv_pip_args.extend(shlex.quote(p) for p in sorted(pkgs))
1330
+ uv_pip_args_joined = " ".join(uv_pip_args)
1331
+
1332
+ commands.append(f"RUN {UV_ROOT}/uv pip install {uv_pip_args_joined}")
1333
+
1334
+ return DockerfileSpec(commands=commands, context_files=context_files)
1335
+
1336
+ return _Image._from_args(
1337
+ base_images={"base": self},
1338
+ dockerfile_function=build_dockerfile,
1339
+ force_build=self.force_build or force_build,
1340
+ gpu_config=parse_gpu_config(gpu),
1341
+ secrets=secrets,
1342
+ )
1343
+
1224
1344
  def poetry_install_from_file(
1225
1345
  self,
1226
1346
  poetry_pyproject_toml: str,
1227
1347
  poetry_lockfile: Optional[str] = None, # Path to lockfile. If not provided, uses poetry.lock in same directory.
1228
1348
  *,
1229
1349
  ignore_lockfile: bool = False, # If set to True, do not use poetry.lock, even when present
1230
- # If set to True, use old installer. See https://github.com/python-poetry/poetry/issues/3336
1231
- old_installer: bool = False,
1232
1350
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1233
1351
  # Selected optional dependency groups to install (See https://python-poetry.org/docs/cli/#install)
1234
1352
  with_: list[str] = [],
1235
1353
  # Selected optional dependency groups to exclude (See https://python-poetry.org/docs/cli/#install)
1236
1354
  without: list[str] = [],
1237
1355
  only: list[str] = [], # Only install dependency groups specified in this list.
1238
- secrets: Sequence[_Secret] = [],
1356
+ poetry_version: Optional[str] = "latest", # Version of poetry to install, or None to skip installation
1357
+ # If set to True, use old installer. See https://github.com/python-poetry/poetry/issues/3336
1358
+ old_installer: bool = False,
1359
+ env: Optional[dict[str, Optional[str]]] = None,
1360
+ secrets: Optional[Collection[_Secret]] = None,
1239
1361
  gpu: GPU_T = None,
1240
1362
  ) -> "_Image":
1241
1363
  """Install poetry *dependencies* specified by a local `pyproject.toml` file.
@@ -1245,12 +1367,26 @@ class _Image(_Object, type_prefix="im"):
1245
1367
 
1246
1368
  Note that the root project of the poetry project is not installed, only the dependencies.
1247
1369
  For including local python source files see `add_local_python_source`
1370
+
1371
+ Poetry will be installed to the Image (using pip) unless `poetry_version` is set to None.
1372
+ Note that the interpretation of `poetry_version="latest"` depends on the Modal Image Builder
1373
+ version, with versions 2024.10 and earlier limiting poetry to 1.x.
1248
1374
  """
1249
1375
 
1376
+ secrets = secrets or []
1377
+ if env:
1378
+ secrets = [*secrets, _Secret.from_dict(env)]
1379
+
1250
1380
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1251
1381
  context_files = {"/.pyproject.toml": os.path.expanduser(poetry_pyproject_toml)}
1252
1382
 
1253
- commands = ["FROM base", "RUN python -m pip install poetry~=1.7"]
1383
+ commands = ["FROM base"]
1384
+ if poetry_version is not None:
1385
+ if poetry_version == "latest":
1386
+ poetry_spec = "~=1.7" if version <= "2024.10" else ""
1387
+ else:
1388
+ poetry_spec = f"=={poetry_version}" # TODO: support other versions
1389
+ commands += [f"RUN python -m pip install poetry{poetry_spec}"]
1254
1390
 
1255
1391
  if old_installer:
1256
1392
  commands += ["RUN poetry config experimental.new-installer false"]
@@ -1281,7 +1417,8 @@ class _Image(_Object, type_prefix="im"):
1281
1417
 
1282
1418
  if only:
1283
1419
  install_cmd += f" --only {','.join(only)}"
1284
- install_cmd += " --compile" # no .pyc compilation slows down cold-start.
1420
+
1421
+ install_cmd += " --compile" # Always compile .pyc during build; avoid recompiling on every cold start
1285
1422
 
1286
1423
  commands += [
1287
1424
  "COPY /.pyproject.toml /tmp/poetry/pyproject.toml",
@@ -1299,13 +1436,181 @@ class _Image(_Object, type_prefix="im"):
1299
1436
  gpu_config=parse_gpu_config(gpu),
1300
1437
  )
1301
1438
 
1439
+ def uv_sync(
1440
+ self,
1441
+ uv_project_dir: str = "./", # Path to local uv managed project
1442
+ *,
1443
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1444
+ groups: Optional[list[str]] = None, # Dependency group to install using `uv sync --group`
1445
+ extras: Optional[list[str]] = None, # Optional dependencies to install using `uv sync --extra`
1446
+ frozen: bool = True, # If True, then we run `uv sync --frozen` when a uv.lock file is present
1447
+ extra_options: str = "", # Extra options to pass to `uv sync`
1448
+ uv_version: Optional[str] = None, # uv version to use
1449
+ env: Optional[dict[str, Optional[str]]] = None,
1450
+ secrets: Optional[Collection[_Secret]] = None,
1451
+ gpu: GPU_T = None,
1452
+ ) -> "_Image":
1453
+ """Creates a virtual environment with the dependencies in a uv managed project with `uv sync`.
1454
+
1455
+ **Examples**
1456
+ ```python
1457
+ image = modal.Image.debian_slim().uv_sync()
1458
+ ```
1459
+
1460
+ The `pyproject.toml` and `uv.lock` in `uv_project_dir` are automatically added to the build context. The
1461
+ `uv_project_dir` is relative to the current working directory of where `modal` is called.
1462
+
1463
+ NOTE: This does *not* install the project itself into the environment (this is equivalent to the
1464
+ `--no-install-project` flag in the `uv sync` command) and you would be expected to add any local python source
1465
+ files using `Image.add_local_python_source` or similar methods after this call.
1466
+
1467
+ This ensures that updates to your project code wouldn't require reinstalling third-party dependencies
1468
+ after every change.
1469
+
1470
+ uv workspaces are currently not supported.
1471
+
1472
+ Added in v1.1.0.
1473
+ """
1474
+
1475
+ secrets = secrets or []
1476
+ if env:
1477
+ secrets = [*secrets, _Secret.from_dict(env)]
1478
+
1479
+ def _normalize_items(items, name) -> list[str]:
1480
+ if items is None:
1481
+ return []
1482
+ elif isinstance(items, list):
1483
+ return items
1484
+ else:
1485
+ raise InvalidError(f"{name} must be None or a list of strings")
1486
+
1487
+ groups = _normalize_items(groups, "groups")
1488
+ extras = _normalize_items(extras, "extras")
1489
+
1490
+ def _check_pyproject_toml(pyproject_toml: str, version: ImageBuilderVersion):
1491
+ if not os.path.exists(pyproject_toml):
1492
+ raise InvalidError(f"Expected {pyproject_toml} to exist")
1493
+
1494
+ import toml
1495
+
1496
+ with open(pyproject_toml) as f:
1497
+ pyproject_toml_content = toml.load(f)
1498
+
1499
+ if (
1500
+ "tool" in pyproject_toml_content
1501
+ and "uv" in pyproject_toml_content["tool"]
1502
+ and "workspace" in pyproject_toml_content["tool"]["uv"]
1503
+ ):
1504
+ raise InvalidError("uv workspaces are not supported")
1505
+
1506
+ if version > "2024.10":
1507
+ # For builder version > 2024.10, modal is mounted at runtime and is not
1508
+ # a requirement in `uv.lock`
1509
+ return
1510
+
1511
+ try:
1512
+ dependencies = pyproject_toml_content["project"]["dependencies"]
1513
+ except KeyError as e:
1514
+ raise InvalidError(
1515
+ f"Invalid pyproject.toml file: missing key {e} in {pyproject_toml}. "
1516
+ "See https://packaging.python.org/en/latest/guides/writing-pyproject-toml for guidelines."
1517
+ )
1518
+
1519
+ for group in groups:
1520
+ if (
1521
+ "dependency-groups" in pyproject_toml_content
1522
+ and group in pyproject_toml_content["dependency-groups"]
1523
+ ):
1524
+ dependencies += pyproject_toml_content["dependency-groups"][group]
1525
+
1526
+ for extra in extras:
1527
+ if (
1528
+ "project" in pyproject_toml_content
1529
+ and "optional-dependencies" in pyproject_toml_content["project"]
1530
+ and extra in pyproject_toml_content["project"]["optional-dependencies"]
1531
+ ):
1532
+ dependencies += pyproject_toml_content["project"]["optional-dependencies"][extra]
1533
+
1534
+ PACKAGE_REGEX = re.compile(r"^[\w-]+")
1535
+
1536
+ def _extract_package(package) -> str:
1537
+ m = PACKAGE_REGEX.match(package)
1538
+ return m.group(0) if m else ""
1539
+
1540
+ if not any(_extract_package(dependency) == "modal" for dependency in dependencies):
1541
+ raise InvalidError(
1542
+ "Image builder version <= 2024.10 requires modal to be specified in your pyproject.toml file"
1543
+ )
1544
+
1545
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1546
+ uv_project_dir_ = os.path.expanduser(uv_project_dir)
1547
+ pyproject_toml = os.path.join(uv_project_dir_, "pyproject.toml")
1548
+
1549
+ UV_ROOT = "/.uv"
1550
+ uv_sync_args = [
1551
+ f"--project={UV_ROOT}",
1552
+ "--no-install-workspace", # Do not install the root project or any "uv workspace"
1553
+ "--compile-bytecode",
1554
+ ]
1555
+
1556
+ for group in groups:
1557
+ uv_sync_args.append(f"--group={group}")
1558
+ for extra in extras:
1559
+ uv_sync_args.append(f"--extra={extra}")
1560
+ if extra_options:
1561
+ uv_sync_args.append(extra_options)
1562
+
1563
+ commands = ["FROM base"]
1564
+
1565
+ if uv_version is None:
1566
+ commands.append(f"COPY --from=ghcr.io/astral-sh/uv:latest /uv {UV_ROOT}/uv")
1567
+ else:
1568
+ commands.append(f"COPY --from=ghcr.io/astral-sh/uv:{uv_version} /uv {UV_ROOT}/uv")
1569
+
1570
+ context_files = {}
1571
+
1572
+ _check_pyproject_toml(pyproject_toml, version)
1573
+
1574
+ context_files["/.pyproject.toml"] = pyproject_toml
1575
+ commands.append(f"COPY /.pyproject.toml {UV_ROOT}/pyproject.toml")
1576
+
1577
+ uv_lock = os.path.join(uv_project_dir_, "uv.lock")
1578
+ if os.path.exists(uv_lock):
1579
+ context_files["/.uv.lock"] = uv_lock
1580
+ commands.append(f"COPY /.uv.lock {UV_ROOT}/uv.lock")
1581
+
1582
+ if frozen:
1583
+ # Do not update `uv.lock` when one exists and `frozen=True`. This is the default because this
1584
+ # ensures that the runtime environment matches the local `uv.lock`.
1585
+ #
1586
+ # If `frozen=False`, then `uv sync` will update the dependencies in the `uv.lock` file
1587
+ # during build time.
1588
+ uv_sync_args.append("--frozen")
1589
+
1590
+ uv_sync_args_joined = " ".join(uv_sync_args).strip()
1591
+
1592
+ commands += [
1593
+ f"RUN {UV_ROOT}/uv sync {uv_sync_args_joined}",
1594
+ f"ENV PATH={UV_ROOT}/.venv/bin:$PATH",
1595
+ ]
1596
+
1597
+ return DockerfileSpec(commands=commands, context_files=context_files)
1598
+
1599
+ return _Image._from_args(
1600
+ base_images={"base": self},
1601
+ dockerfile_function=build_dockerfile,
1602
+ force_build=self.force_build or force_build,
1603
+ secrets=secrets,
1604
+ gpu_config=parse_gpu_config(gpu),
1605
+ )
1606
+
1302
1607
  def dockerfile_commands(
1303
1608
  self,
1304
1609
  *dockerfile_commands: Union[str, list[str]],
1305
1610
  context_files: dict[str, str] = {},
1306
- secrets: Sequence[_Secret] = [],
1611
+ env: Optional[dict[str, Optional[str]]] = None,
1612
+ secrets: Optional[Collection[_Secret]] = None,
1307
1613
  gpu: GPU_T = None,
1308
- context_mount: Optional[_Mount] = None, # Deprecated: the context is now inferred
1309
1614
  context_dir: Optional[Union[Path, str]] = None, # Context for relative COPY commands
1310
1615
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1311
1616
  ignore: Union[Sequence[str], Callable[[Path], bool]] = AUTO_DOCKERIGNORE,
@@ -1351,16 +1656,14 @@ class _Image(_Object, type_prefix="im"):
1351
1656
  )
1352
1657
  ```
1353
1658
  """
1354
- if context_mount is not None:
1355
- deprecation_warning(
1356
- (2025, 1, 13),
1357
- "The `context_mount` parameter of `Image.dockerfile_commands` is deprecated."
1358
- " Files are now automatically added to the build context based on the commands.",
1359
- )
1360
1659
  cmds = _flatten_str_args("dockerfile_commands", "dockerfile_commands", dockerfile_commands)
1361
1660
  if not cmds:
1362
1661
  return self
1363
1662
 
1663
+ secrets = secrets or []
1664
+ if env:
1665
+ secrets = [*secrets, _Secret.from_dict(env)]
1666
+
1364
1667
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1365
1668
  return DockerfileSpec(commands=["FROM base", *cmds], context_files=context_files)
1366
1669
 
@@ -1370,7 +1673,7 @@ class _Image(_Object, type_prefix="im"):
1370
1673
  secrets=secrets,
1371
1674
  gpu_config=parse_gpu_config(gpu),
1372
1675
  context_mount_function=_create_context_mount_function(
1373
- ignore=ignore, dockerfile_cmds=cmds, context_mount=context_mount, context_dir=context_dir
1676
+ ignore=ignore, dockerfile_cmds=cmds, context_dir=context_dir
1374
1677
  ),
1375
1678
  force_build=self.force_build or force_build,
1376
1679
  )
@@ -1379,7 +1682,7 @@ class _Image(_Object, type_prefix="im"):
1379
1682
  self,
1380
1683
  entrypoint_commands: list[str],
1381
1684
  ) -> "_Image":
1382
- """Set the entrypoint for the image."""
1685
+ """Set the ENTRYPOINT for the image."""
1383
1686
  if not isinstance(entrypoint_commands, list) or not all(isinstance(x, str) for x in entrypoint_commands):
1384
1687
  raise InvalidError("entrypoint_commands must be a list of strings.")
1385
1688
  args_str = _flatten_str_args("entrypoint", "entrypoint_commands", entrypoint_commands)
@@ -1404,11 +1707,18 @@ class _Image(_Object, type_prefix="im"):
1404
1707
  def run_commands(
1405
1708
  self,
1406
1709
  *commands: Union[str, list[str]],
1407
- secrets: Sequence[_Secret] = [],
1710
+ env: Optional[dict[str, Optional[str]]] = None,
1711
+ secrets: Optional[Collection[_Secret]] = None,
1712
+ volumes: Optional[dict[Union[str, PurePosixPath], _Volume]] = None,
1408
1713
  gpu: GPU_T = None,
1409
1714
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1410
1715
  ) -> "_Image":
1411
1716
  """Extend an image with a list of shell commands to run."""
1717
+
1718
+ secrets = secrets or []
1719
+ if env:
1720
+ secrets = [*secrets, _Secret.from_dict(env)]
1721
+
1412
1722
  cmds = _flatten_str_args("run_commands", "commands", commands)
1413
1723
  if not cmds:
1414
1724
  return self
@@ -1422,6 +1732,7 @@ class _Image(_Object, type_prefix="im"):
1422
1732
  secrets=secrets,
1423
1733
  gpu_config=parse_gpu_config(gpu),
1424
1734
  force_build=self.force_build or force_build,
1735
+ validated_volumes=validate_only_modal_volumes(volumes, "Image.run_commands"),
1425
1736
  )
1426
1737
 
1427
1738
  @staticmethod
@@ -1437,8 +1748,7 @@ class _Image(_Object, type_prefix="im"):
1437
1748
  python_version = "3.9" # Backcompat for old hardcoded default param
1438
1749
  validated_python_version = _validate_python_version(python_version, version)
1439
1750
  micromamba_version = _base_image_config("micromamba", version)
1440
- debian_codename = _base_image_config("debian", version)
1441
- tag = f"mambaorg/micromamba:{micromamba_version}-{debian_codename}-slim"
1751
+ tag = f"mambaorg/micromamba:{micromamba_version}"
1442
1752
  setup_commands = [
1443
1753
  'SHELL ["/usr/local/bin/_dockerfile_shell.sh"]',
1444
1754
  "ENV MAMBA_DOCKERFILE_ACTIVATE=1",
@@ -1468,10 +1778,16 @@ class _Image(_Object, type_prefix="im"):
1468
1778
  # A list of Conda channels, eg. ["conda-forge", "nvidia"].
1469
1779
  channels: list[str] = [],
1470
1780
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1471
- secrets: Sequence[_Secret] = [],
1781
+ env: Optional[dict[str, Optional[str]]] = None,
1782
+ secrets: Optional[Collection[_Secret]] = None,
1472
1783
  gpu: GPU_T = None,
1473
1784
  ) -> "_Image":
1474
1785
  """Install a list of additional packages using micromamba."""
1786
+
1787
+ secrets = secrets or []
1788
+ if env:
1789
+ secrets = [*secrets, _Secret.from_dict(env)]
1790
+
1475
1791
  pkgs = _flatten_str_args("micromamba_install", "packages", packages)
1476
1792
  if not pkgs and spec_file is None:
1477
1793
  return self
@@ -1623,7 +1939,7 @@ class _Image(_Object, type_prefix="im"):
1623
1939
  """Build a Modal image from a private image in Google Cloud Platform (GCP) Artifact Registry.
1624
1940
 
1625
1941
  You will need to pass a `modal.Secret` containing [your GCP service account key data](https://cloud.google.com/iam/docs/keys-create-delete#creating)
1626
- as `SERVICE_ACCOUNT_JSON`. This can be done from the [Secrets](/secrets) page.
1942
+ as `SERVICE_ACCOUNT_JSON`. This can be done from the [Secrets](https://modal.com/secrets) page.
1627
1943
  Your service account should be granted a specific role depending on the GCP registry used:
1628
1944
 
1629
1945
  - For Artifact Registry images (`pkg.dev` domains) use
@@ -1673,12 +1989,18 @@ class _Image(_Object, type_prefix="im"):
1673
1989
  ) -> "_Image":
1674
1990
  """Build a Modal image from a private image in AWS Elastic Container Registry (ECR).
1675
1991
 
1676
- You will need to pass a `modal.Secret` containing `AWS_ACCESS_KEY_ID`,
1677
- `AWS_SECRET_ACCESS_KEY`, and `AWS_REGION` to access the target ECR registry.
1992
+ You will need to pass a `modal.Secret` containing either IAM user credentials or OIDC
1993
+ configuration to access the target ECR registry.
1994
+
1995
+ For IAM user authentication, set `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_REGION`.
1996
+
1997
+ For OIDC authentication, set `AWS_ROLE_ARN` and `AWS_REGION`.
1678
1998
 
1679
1999
  IAM configuration details can be found in the AWS documentation for
1680
2000
  ["Private repository policies"](https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html).
1681
2001
 
2002
+ For more details on using an AWS role to access ECR, see the [OIDC integration guide](https://modal.com/docs/guide/oidc-integration).
2003
+
1682
2004
  See `Image.from_registry()` for information about the other parameters.
1683
2005
 
1684
2006
  **Example**
@@ -1710,12 +2032,13 @@ class _Image(_Object, type_prefix="im"):
1710
2032
  def from_dockerfile(
1711
2033
  path: Union[str, Path], # Filepath to Dockerfile.
1712
2034
  *,
1713
- context_mount: Optional[_Mount] = None, # Deprecated: the context is now inferred
1714
2035
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1715
2036
  context_dir: Optional[Union[Path, str]] = None, # Context for relative COPY commands
1716
- secrets: Sequence[_Secret] = [],
2037
+ env: Optional[dict[str, Optional[str]]] = None,
2038
+ secrets: Optional[Collection[_Secret]] = None,
1717
2039
  gpu: GPU_T = None,
1718
2040
  add_python: Optional[str] = None,
2041
+ build_args: dict[str, str] = {},
1719
2042
  ignore: Union[Sequence[str], Callable[[Path], bool]] = AUTO_DOCKERIGNORE,
1720
2043
  ) -> "_Image":
1721
2044
  """Build a Modal image from a local Dockerfile.
@@ -1767,12 +2090,10 @@ class _Image(_Object, type_prefix="im"):
1767
2090
  )
1768
2091
  ```
1769
2092
  """
1770
- if context_mount is not None:
1771
- deprecation_warning(
1772
- (2025, 1, 13),
1773
- "The `context_mount` parameter of `Image.from_dockerfile` is deprecated."
1774
- " Files are now automatically added to the build context based on the commands in the Dockerfile.",
1775
- )
2093
+
2094
+ secrets = secrets or []
2095
+ if env:
2096
+ secrets = [*secrets, _Secret.from_dict(env)]
1776
2097
 
1777
2098
  # --- Build the base dockerfile
1778
2099
 
@@ -1785,10 +2106,11 @@ class _Image(_Object, type_prefix="im"):
1785
2106
  base_image = _Image._from_args(
1786
2107
  dockerfile_function=build_dockerfile_base,
1787
2108
  context_mount_function=_create_context_mount_function(
1788
- ignore=ignore, dockerfile_path=Path(path), context_mount=context_mount, context_dir=context_dir
2109
+ ignore=ignore, dockerfile_path=Path(path), context_dir=context_dir
1789
2110
  ),
1790
2111
  gpu_config=gpu_config,
1791
2112
  secrets=secrets,
2113
+ build_args=build_args,
1792
2114
  )
1793
2115
 
1794
2116
  # --- Now add in the modal dependencies, and, optionally a Python distribution
@@ -1880,7 +2202,8 @@ class _Image(_Object, type_prefix="im"):
1880
2202
  self,
1881
2203
  *packages: Union[str, list[str]], # A list of packages, e.g. ["ssh", "libpq-dev"]
1882
2204
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1883
- secrets: Sequence[_Secret] = [],
2205
+ env: Optional[dict[str, Optional[str]]] = None,
2206
+ secrets: Optional[Collection[_Secret]] = None,
1884
2207
  gpu: GPU_T = None,
1885
2208
  ) -> "_Image":
1886
2209
  """Install a list of Debian packages using `apt`.
@@ -1905,6 +2228,10 @@ class _Image(_Object, type_prefix="im"):
1905
2228
  ]
1906
2229
  return DockerfileSpec(commands=commands, context_files={})
1907
2230
 
2231
+ secrets = secrets or []
2232
+ if env:
2233
+ secrets = [*secrets, _Secret.from_dict(env)]
2234
+
1908
2235
  return _Image._from_args(
1909
2236
  base_images={"base": self},
1910
2237
  dockerfile_function=build_dockerfile,
@@ -1917,24 +2244,26 @@ class _Image(_Object, type_prefix="im"):
1917
2244
  self,
1918
2245
  raw_f: Callable[..., Any],
1919
2246
  *,
1920
- secrets: Sequence[_Secret] = (), # Optional Modal Secret objects with environment variables for the container
1921
- gpu: Union[GPU_T, list[GPU_T]] = None, # Requested GPU or or list of acceptable GPUs( e.g. ["A10", "A100"])
2247
+ env: Optional[dict[str, Optional[str]]] = None, # Environment variables to set in the container
2248
+ secrets: Optional[Collection[_Secret]] = None, # Secrets to inject into the container as environment variables
1922
2249
  volumes: dict[Union[str, PurePosixPath], Union[_Volume, _CloudBucketMount]] = {}, # Volume mount paths
1923
2250
  network_file_systems: dict[Union[str, PurePosixPath], _NetworkFileSystem] = {}, # NFS mount paths
2251
+ gpu: Union[GPU_T, list[GPU_T]] = None, # Requested GPU or list of acceptable GPUs (e.g. ["A10", "A100"])
1924
2252
  cpu: Optional[float] = None, # How many CPU cores to request. This is a soft limit.
1925
2253
  memory: Optional[int] = None, # How much memory to request, in MiB. This is a soft limit.
1926
- timeout: Optional[int] = 60 * 60, # Maximum execution time of the function in seconds.
1927
- force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
2254
+ timeout: int = 60 * 60, # Maximum execution time of the function in seconds.
1928
2255
  cloud: Optional[str] = None, # Cloud provider to run the function on. Possible values are aws, gcp, oci, auto.
1929
2256
  region: Optional[Union[str, Sequence[str]]] = None, # Region or regions to run the function on.
2257
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1930
2258
  args: Sequence[Any] = (), # Positional arguments to the function.
1931
2259
  kwargs: dict[str, Any] = {}, # Keyword arguments to the function.
1932
- include_source: Optional[bool] = None,
2260
+ include_source: bool = True, # Whether the builder container should have the Function's source added
1933
2261
  ) -> "_Image":
1934
- """Run user-defined function `raw_f` as an image build step. The function runs just like an ordinary Modal
1935
- function, and any kwargs accepted by `@app.function` (such as `Mount`s, `NetworkFileSystem`s,
1936
- and resource requests) can be supplied to it.
1937
- After it finishes execution, a snapshot of the resulting container file system is saved as an image.
2262
+ """Run user-defined function `raw_f` as an image build step.
2263
+
2264
+ The function runs like an ordinary Modal Function, accepting a resource configuration and integrating
2265
+ with Modal features like Secrets and Volumes. Unlike ordinary Modal Functions, any changes to the
2266
+ filesystem state will be captured on container exit and saved as a new Image.
1938
2267
 
1939
2268
  **Note**
1940
2269
 
@@ -1958,6 +2287,11 @@ class _Image(_Object, type_prefix="im"):
1958
2287
  )
1959
2288
  ```
1960
2289
  """
2290
+
2291
+ secrets = secrets or []
2292
+ if env:
2293
+ secrets = [*secrets, _Secret.from_dict(env)]
2294
+
1961
2295
  from ._functions import _Function
1962
2296
 
1963
2297
  if not callable(raw_f):
@@ -1987,13 +2321,19 @@ class _Image(_Object, type_prefix="im"):
1987
2321
  include_source=include_source,
1988
2322
  )
1989
2323
  if len(args) + len(kwargs) > 0:
1990
- args_serialized = serialize((args, kwargs))
2324
+ data_format = get_preferred_payload_format()
2325
+ args_serialized = serialize_data_format((args, kwargs), data_format)
2326
+
1991
2327
  if len(args_serialized) > MAX_OBJECT_SIZE_BYTES:
1992
2328
  raise InvalidError(
1993
2329
  f"Arguments to `run_function` are too large ({len(args_serialized)} bytes). "
1994
2330
  f"Maximum size is {MAX_OBJECT_SIZE_BYTES} bytes."
1995
2331
  )
1996
- build_function_input = api_pb2.FunctionInput(args=args_serialized, data_format=api_pb2.DATA_FORMAT_PICKLE)
2332
+
2333
+ build_function_input = api_pb2.FunctionInput(
2334
+ args=args_serialized,
2335
+ data_format=data_format,
2336
+ )
1997
2337
  else:
1998
2338
  build_function_input = None
1999
2339
  return _Image._from_args(
@@ -2053,7 +2393,9 @@ class _Image(_Object, type_prefix="im"):
2053
2393
  )
2054
2394
 
2055
2395
  def cmd(self, cmd: list[str]) -> "_Image":
2056
- """Set the default entrypoint argument (`CMD`) for the image.
2396
+ """Set the default command (`CMD`) to run when a container is started.
2397
+
2398
+ Used with `modal.Sandbox`. Has no effect on `modal.Function`.
2057
2399
 
2058
2400
  **Example**
2059
2401
 
@@ -2123,5 +2465,15 @@ class _Image(_Object, type_prefix="im"):
2123
2465
  if task_log.data:
2124
2466
  yield task_log.data
2125
2467
 
2468
+ async def hydrate(self, client: Optional[_Client] = None) -> Self:
2469
+ """mdmd:hidden"""
2470
+ # Image inherits hydrate() from Object but can't be hydrated on demand
2471
+ # Overriding the method lets us hide it from the docs and raise a better error message
2472
+ if not self.is_hydrated:
2473
+ raise ExecutionError(
2474
+ "Images cannot currently be hydrated on demand; you can build an Image by running an App that uses it."
2475
+ )
2476
+ return self
2477
+
2126
2478
 
2127
2479
  Image = synchronize_api(_Image)