modal 1.0.6.dev58__py3-none-any.whl → 1.2.3.dev7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of modal might be problematic.

Files changed (147)
  1. modal/__main__.py +3 -4
  2. modal/_billing.py +80 -0
  3. modal/_clustered_functions.py +7 -3
  4. modal/_clustered_functions.pyi +4 -2
  5. modal/_container_entrypoint.py +41 -49
  6. modal/_functions.py +424 -195
  7. modal/_grpc_client.py +171 -0
  8. modal/_load_context.py +105 -0
  9. modal/_object.py +68 -20
  10. modal/_output.py +58 -45
  11. modal/_partial_function.py +36 -11
  12. modal/_pty.py +7 -3
  13. modal/_resolver.py +21 -35
  14. modal/_runtime/asgi.py +4 -3
  15. modal/_runtime/container_io_manager.py +301 -186
  16. modal/_runtime/container_io_manager.pyi +70 -61
  17. modal/_runtime/execution_context.py +18 -2
  18. modal/_runtime/execution_context.pyi +4 -1
  19. modal/_runtime/gpu_memory_snapshot.py +170 -63
  20. modal/_runtime/user_code_imports.py +28 -58
  21. modal/_serialization.py +57 -1
  22. modal/_utils/async_utils.py +33 -12
  23. modal/_utils/auth_token_manager.py +2 -5
  24. modal/_utils/blob_utils.py +110 -53
  25. modal/_utils/function_utils.py +49 -42
  26. modal/_utils/grpc_utils.py +80 -50
  27. modal/_utils/mount_utils.py +26 -1
  28. modal/_utils/name_utils.py +17 -3
  29. modal/_utils/task_command_router_client.py +536 -0
  30. modal/_utils/time_utils.py +34 -6
  31. modal/app.py +219 -83
  32. modal/app.pyi +229 -56
  33. modal/billing.py +5 -0
  34. modal/{requirements → builder}/2025.06.txt +1 -0
  35. modal/{requirements → builder}/PREVIEW.txt +1 -0
  36. modal/cli/_download.py +19 -3
  37. modal/cli/_traceback.py +3 -2
  38. modal/cli/app.py +4 -4
  39. modal/cli/cluster.py +15 -7
  40. modal/cli/config.py +5 -3
  41. modal/cli/container.py +7 -6
  42. modal/cli/dict.py +22 -16
  43. modal/cli/entry_point.py +12 -5
  44. modal/cli/environment.py +5 -4
  45. modal/cli/import_refs.py +3 -3
  46. modal/cli/launch.py +102 -5
  47. modal/cli/network_file_system.py +9 -13
  48. modal/cli/profile.py +3 -2
  49. modal/cli/programs/launch_instance_ssh.py +94 -0
  50. modal/cli/programs/run_jupyter.py +1 -1
  51. modal/cli/programs/run_marimo.py +95 -0
  52. modal/cli/programs/vscode.py +1 -1
  53. modal/cli/queues.py +57 -26
  54. modal/cli/run.py +58 -16
  55. modal/cli/secret.py +48 -22
  56. modal/cli/utils.py +3 -4
  57. modal/cli/volume.py +28 -25
  58. modal/client.py +13 -116
  59. modal/client.pyi +9 -91
  60. modal/cloud_bucket_mount.py +5 -3
  61. modal/cloud_bucket_mount.pyi +5 -1
  62. modal/cls.py +130 -102
  63. modal/cls.pyi +45 -85
  64. modal/config.py +29 -10
  65. modal/container_process.py +291 -13
  66. modal/container_process.pyi +95 -32
  67. modal/dict.py +282 -63
  68. modal/dict.pyi +423 -73
  69. modal/environments.py +15 -27
  70. modal/environments.pyi +5 -15
  71. modal/exception.py +8 -0
  72. modal/experimental/__init__.py +143 -38
  73. modal/experimental/flash.py +247 -78
  74. modal/experimental/flash.pyi +137 -9
  75. modal/file_io.py +14 -28
  76. modal/file_io.pyi +2 -2
  77. modal/file_pattern_matcher.py +25 -16
  78. modal/functions.pyi +134 -61
  79. modal/image.py +255 -86
  80. modal/image.pyi +300 -62
  81. modal/io_streams.py +436 -126
  82. modal/io_streams.pyi +236 -171
  83. modal/mount.py +62 -157
  84. modal/mount.pyi +45 -172
  85. modal/network_file_system.py +30 -53
  86. modal/network_file_system.pyi +16 -76
  87. modal/object.pyi +42 -8
  88. modal/parallel_map.py +821 -113
  89. modal/parallel_map.pyi +134 -0
  90. modal/partial_function.pyi +4 -1
  91. modal/proxy.py +16 -7
  92. modal/proxy.pyi +10 -2
  93. modal/queue.py +263 -61
  94. modal/queue.pyi +409 -66
  95. modal/runner.py +112 -92
  96. modal/runner.pyi +45 -27
  97. modal/sandbox.py +451 -124
  98. modal/sandbox.pyi +513 -67
  99. modal/secret.py +291 -67
  100. modal/secret.pyi +425 -19
  101. modal/serving.py +7 -11
  102. modal/serving.pyi +7 -8
  103. modal/snapshot.py +11 -8
  104. modal/token_flow.py +4 -4
  105. modal/volume.py +344 -98
  106. modal/volume.pyi +464 -68
  107. {modal-1.0.6.dev58.dist-info → modal-1.2.3.dev7.dist-info}/METADATA +9 -8
  108. modal-1.2.3.dev7.dist-info/RECORD +195 -0
  109. modal_docs/mdmd/mdmd.py +11 -1
  110. modal_proto/api.proto +399 -67
  111. modal_proto/api_grpc.py +241 -1
  112. modal_proto/api_pb2.py +1395 -1000
  113. modal_proto/api_pb2.pyi +1239 -79
  114. modal_proto/api_pb2_grpc.py +499 -4
  115. modal_proto/api_pb2_grpc.pyi +162 -14
  116. modal_proto/modal_api_grpc.py +175 -160
  117. modal_proto/sandbox_router.proto +145 -0
  118. modal_proto/sandbox_router_grpc.py +105 -0
  119. modal_proto/sandbox_router_pb2.py +149 -0
  120. modal_proto/sandbox_router_pb2.pyi +333 -0
  121. modal_proto/sandbox_router_pb2_grpc.py +203 -0
  122. modal_proto/sandbox_router_pb2_grpc.pyi +75 -0
  123. modal_proto/task_command_router.proto +144 -0
  124. modal_proto/task_command_router_grpc.py +105 -0
  125. modal_proto/task_command_router_pb2.py +149 -0
  126. modal_proto/task_command_router_pb2.pyi +333 -0
  127. modal_proto/task_command_router_pb2_grpc.py +203 -0
  128. modal_proto/task_command_router_pb2_grpc.pyi +75 -0
  129. modal_version/__init__.py +1 -1
  130. modal-1.0.6.dev58.dist-info/RECORD +0 -183
  131. modal_proto/modal_options_grpc.py +0 -3
  132. modal_proto/options.proto +0 -19
  133. modal_proto/options_grpc.py +0 -3
  134. modal_proto/options_pb2.py +0 -35
  135. modal_proto/options_pb2.pyi +0 -20
  136. modal_proto/options_pb2_grpc.py +0 -4
  137. modal_proto/options_pb2_grpc.pyi +0 -7
  138. /modal/{requirements → builder}/2023.12.312.txt +0 -0
  139. /modal/{requirements → builder}/2023.12.txt +0 -0
  140. /modal/{requirements → builder}/2024.04.txt +0 -0
  141. /modal/{requirements → builder}/2024.10.txt +0 -0
  142. /modal/{requirements → builder}/README.md +0 -0
  143. /modal/{requirements → builder}/base-images.json +0 -0
  144. {modal-1.0.6.dev58.dist-info → modal-1.2.3.dev7.dist-info}/WHEEL +0 -0
  145. {modal-1.0.6.dev58.dist-info → modal-1.2.3.dev7.dist-info}/entry_points.txt +0 -0
  146. {modal-1.0.6.dev58.dist-info → modal-1.2.3.dev7.dist-info}/licenses/LICENSE +0 -0
  147. {modal-1.0.6.dev58.dist-info → modal-1.2.3.dev7.dist-info}/top_level.txt +0 -0
modal/image.py CHANGED
@@ -7,7 +7,7 @@ import shlex
  import sys
  import typing
  import warnings
- from collections.abc import Sequence
+ from collections.abc import Collection, Sequence
  from dataclasses import dataclass
  from inspect import isfunction
  from pathlib import Path, PurePosixPath
@@ -25,20 +25,22 @@ from google.protobuf.message import Message
  from grpclib.exceptions import GRPCError, StreamTerminatedError
  from typing_extensions import Self

+ from modal._serialization import serialize_data_format
  from modal_proto import api_pb2

+ from ._load_context import LoadContext
  from ._object import _Object, live_method_gen
  from ._resolver import Resolver
- from ._serialization import serialize
+ from ._serialization import get_preferred_payload_format, serialize
  from ._utils.async_utils import synchronize_api
  from ._utils.blob_utils import MAX_OBJECT_SIZE_BYTES
- from ._utils.deprecation import deprecation_warning
  from ._utils.docker_utils import (
  extract_copy_command_patterns,
  find_dockerignore_file,
  )
  from ._utils.function_utils import FunctionInfo
- from ._utils.grpc_utils import RETRYABLE_GRPC_STATUS_CODES, retry_transient_errors
+ from ._utils.grpc_utils import RETRYABLE_GRPC_STATUS_CODES
+ from ._utils.mount_utils import validate_only_modal_volumes
  from .client import _Client
  from .cloud_bucket_mount import _CloudBucketMount
  from .config import config, logger, user_config_path
@@ -71,7 +73,7 @@ SUPPORTED_PYTHON_SERIES: dict[ImageBuilderVersion, list[str]] = {
  "2023.12": ["3.9", "3.10", "3.11", "3.12"],
  }

- LOCAL_REQUIREMENTS_DIR = Path(__file__).parent / "requirements"
+ LOCAL_REQUIREMENTS_DIR = Path(__file__).parent / "builder"
  CONTAINER_REQUIREMENTS_PATH = "/modal_requirements.txt"

@@ -283,24 +285,12 @@ def _create_context_mount_function(
  ignore: Union[Sequence[str], Callable[[Path], bool], _AutoDockerIgnoreSentinel],
  dockerfile_cmds: list[str] = [],
  dockerfile_path: Optional[Path] = None,
- context_mount: Optional[_Mount] = None,
  context_dir: Optional[Union[Path, str]] = None,
  ):
  if dockerfile_path and dockerfile_cmds:
  raise InvalidError("Cannot provide both dockerfile and docker commands")

- if context_mount:
- if ignore is not AUTO_DOCKERIGNORE:
- raise InvalidError("Cannot set both `context_mount` and `ignore`")
- if context_dir is not None:
- raise InvalidError("Cannot set both `context_mount` and `context_dir`")
-
- def identity_context_mount_fn() -> Optional[_Mount]:
- return context_mount
-
- return identity_context_mount_fn
-
- elif ignore is AUTO_DOCKERIGNORE:
+ if ignore is AUTO_DOCKERIGNORE:

  def auto_created_context_mount_fn() -> Optional[_Mount]:
  nonlocal context_dir
@@ -445,12 +435,16 @@ class _Image(_Object, type_prefix="im"):

  base_image = self

- async def _load(self2: "_Image", resolver: Resolver, existing_object_id: Optional[str]):
+ async def _load(
+ self2: "_Image", resolver: Resolver, load_context: LoadContext, existing_object_id: Optional[str]
+ ):
  self2._hydrate_from_other(base_image) # same image id as base image as long as it's lazy
  self2._deferred_mounts = tuple(base_image._deferred_mounts) + (mount,)
  self2._serve_mounts = base_image._serve_mounts | ({mount} if mount.is_local() else set())

- img = _Image._from_loader(_load, "Image(local files)", deps=lambda: [base_image, mount])
+ img = _Image._from_loader(
+ _load, "Image(local files)", deps=lambda: [base_image, mount], load_context_overrides=LoadContext.empty()
+ )
  img._added_python_source_set = base_image._added_python_source_set
  return img

@@ -491,7 +485,7 @@ class _Image(_Object, type_prefix="im"):
  *,
  base_images: Optional[dict[str, "_Image"]] = None,
  dockerfile_function: Optional[Callable[[ImageBuilderVersion], DockerfileSpec]] = None,
- secrets: Optional[Sequence[_Secret]] = None,
+ secrets: Optional[Collection[_Secret]] = None,
  gpu_config: Optional[api_pb2.GPUConfig] = None,
  build_function: Optional["modal._functions._Function"] = None,
  build_function_input: Optional[api_pb2.FunctionInput] = None,
@@ -499,6 +493,7 @@ class _Image(_Object, type_prefix="im"):
  context_mount_function: Optional[Callable[[], Optional[_Mount]]] = None,
  force_build: bool = False,
  build_args: dict[str, str] = {},
+ validated_volumes: Optional[Sequence[tuple[str, _Volume]]] = None,
  # For internal use only.
  _namespace: "api_pb2.DeploymentNamespace.ValueType" = api_pb2.DEPLOYMENT_NAMESPACE_WORKSPACE,
  _do_assert_no_mount_layers: bool = True,
@@ -506,6 +501,9 @@ class _Image(_Object, type_prefix="im"):
  if base_images is None:
  base_images = {}

+ if validated_volumes is None:
+ validated_volumes = []
+
  if secrets is None:
  secrets = []
  if gpu_config is None:
@@ -526,20 +524,22 @@ class _Image(_Object, type_prefix="im"):
  deps += (build_function,)
  if image_registry_config and image_registry_config.secret:
  deps += (image_registry_config.secret,)
+ for _, vol in validated_volumes:
+ deps += (vol,)
  return deps

- async def _load(self: _Image, resolver: Resolver, existing_object_id: Optional[str]):
+ async def _load(self: _Image, resolver: Resolver, load_context: LoadContext, existing_object_id: Optional[str]):
  context_mount = context_mount_function() if context_mount_function else None
  if context_mount:
- await resolver.load(context_mount)
+ await resolver.load(context_mount, load_context)

  if _do_assert_no_mount_layers:
  for image in base_images.values():
  # base images can't have
  image._assert_no_mount_layers()

- assert resolver.app_id # type narrowing
- environment = await _get_environment_cached(resolver.environment_name or "", resolver.client)
+ assert load_context.app_id # type narrowing
+ environment = await _get_environment_cached(load_context.environment_name or "", load_context.client)
  # A bit hacky,but assume that the environment provides a valid builder version
  image_builder_version = cast(ImageBuilderVersion, environment._settings.image_builder_version)
  builder_version = _get_image_builder_version(image_builder_version)
@@ -604,6 +604,17 @@ class _Image(_Object, type_prefix="im"):
  build_function_id = ""
  _build_function = None

+ # Relies on dicts being ordered (true as of Python 3.6).
+ volume_mounts = [
+ api_pb2.VolumeMount(
+ mount_path=path,
+ volume_id=volume.object_id,
+ allow_background_commits=True,
+ read_only=volume._read_only,
+ )
+ for path, volume in validated_volumes
+ ]
+
  image_definition = api_pb2.Image(
  base_images=base_images_pb2s,
  dockerfile_commands=dockerfile.commands,
@@ -616,10 +627,11 @@ class _Image(_Object, type_prefix="im"):
  runtime_debug=config.get("function_runtime_debug"),
  build_function=_build_function,
  build_args=build_args,
+ volume_mounts=volume_mounts,
  )

  req = api_pb2.ImageGetOrCreateRequest(
- app_id=resolver.app_id,
+ app_id=load_context.app_id,
  image=image_definition,
  existing_image_id=existing_object_id or "", # TODO: ignored
  build_function_id=build_function_id,
@@ -631,7 +643,7 @@ class _Image(_Object, type_prefix="im"):
  allow_global_deployment=os.environ.get("MODAL_IMAGE_ALLOW_GLOBAL_DEPLOYMENT") == "1",
  ignore_cache=config.get("ignore_cache"),
  )
- resp = await retry_transient_errors(resolver.client.stub.ImageGetOrCreate, req)
+ resp = await load_context.client.stub.ImageGetOrCreate(req)
  image_id = resp.image_id
  result: api_pb2.GenericResult
  metadata: Optional[api_pb2.ImageMetadata] = None
@@ -644,7 +656,7 @@ class _Image(_Object, type_prefix="im"):
  else:
  # not built or in the process of building - wait for build
  logger.debug("Waiting for image %s" % image_id)
- resp = await _image_await_build_result(image_id, resolver.client)
+ resp = await _image_await_build_result(image_id, load_context.client)
  result = resp.result
  if resp.HasField("metadata"):
  metadata = resp.metadata
@@ -658,7 +670,13 @@ class _Image(_Object, type_prefix="im"):
  msg += " (Hint: Use `modal.enable_output()` to see logs from the process building the Image.)"
  raise RemoteError(msg)
  elif result.status == api_pb2.GenericResult.GENERIC_STATUS_TERMINATED:
- raise RemoteError(f"Image build for {image_id} terminated due to external shut-down. Please try again.")
+ msg = f"Image build for {image_id} terminated due to external shut-down. Please try again."
+ if result.exception:
+ msg = (
+ f"Image build for {image_id} terminated due to external shut-down with the exception:\n"
+ f"{result.exception}"
+ )
+ raise RemoteError(msg)
  elif result.status == api_pb2.GenericResult.GENERIC_STATUS_TIMEOUT:
  raise RemoteError(
  f"Image build for {image_id} timed out. Please try again with a larger `timeout` parameter."
@@ -668,7 +686,7 @@ class _Image(_Object, type_prefix="im"):
  else:
  raise RemoteError("Unknown status %s!" % result.status)

- self._hydrate(image_id, resolver.client, metadata)
+ self._hydrate(image_id, load_context.client, metadata)
  local_mounts = set()
  for base in base_images.values():
  local_mounts |= base._serve_mounts
@@ -677,7 +695,7 @@ class _Image(_Object, type_prefix="im"):
  self._serve_mounts = frozenset(local_mounts)

  rep = f"Image({dockerfile_function})"
- obj = _Image._from_loader(_load, rep, deps=_deps)
+ obj = _Image._from_loader(_load, rep, deps=_deps, load_context_overrides=LoadContext.empty())
  obj.force_build = force_build
  obj._added_python_source_set = frozenset.union(
  frozenset(), *(base._added_python_source_set for base in base_images.values())
@@ -850,18 +868,75 @@ class _Image(_Object, type_prefix="im"):

  The ID of an Image object can be accessed using `.object_id`.
  """
- if client is None:
- client = await _Client.from_env()

- async def _load(self: _Image, resolver: Resolver, existing_object_id: Optional[str]):
- resp = await retry_transient_errors(client.stub.ImageFromId, api_pb2.ImageFromIdRequest(image_id=image_id))
- self._hydrate(resp.image_id, resolver.client, resp.metadata)
+ async def _load(self: _Image, resolver: Resolver, load_context: LoadContext, existing_object_id: Optional[str]):
+ resp = await load_context.client.stub.ImageFromId(api_pb2.ImageFromIdRequest(image_id=image_id))
+ self._hydrate(resp.image_id, load_context.client, resp.metadata)

  rep = f"Image.from_id({image_id!r})"
- obj = _Image._from_loader(_load, rep)
+ obj = _Image._from_loader(_load, rep, load_context_overrides=LoadContext(client=client))

  return obj

+ async def build(self, app: "modal.app._App") -> "_Image":
+ """Eagerly build an image.
+
+ If your image was previously built, then this method will not rebuild your image
+ and your cached image is returned.
+
+ **Examples**
+
+ ```python
+ image = modal.Image.debian_slim().uv_pip_install("scipy", "numpy")
+
+ app = modal.App.lookup("build-image", create_if_missing=True)
+ with modal.enable_output(): # To see logs in your local terminal
+ image.build(app)
+
+ # Save the image id
+ my_image_id = image.object_id
+
+ # Reference the image by its id or use it in another context.
+ built_image = modal.Image.from_id(my_image_id)
+ ```
+
+ Alternatively, you can pre-build an image and use it in a sandbox.
+
+ ```python notest
+ app = modal.App.lookup("sandbox-example", create_if_missing=True)
+
+ with modal.enable_output():
+ image = modal.Image.debian_slim().uv_pip_install("scipy")
+ image.build(app)
+
+ sb = modal.Sandbox.create("python", "-c", "import scipy; print(scipy)", app=app, image=image)
+ print(sb.stdout.read())
+ sb.terminate()
+ ```
+
+ **Note**
+
+ For defining Modal functions, images are built automatically when deploying or running an App.
+ You do not need to build the image explicitly:
+
+ ```python notest
+ app = modal.App()
+ image = modal.Image.debian_slim()
+
+ # No need to explicitly build the image for defining a function.
+ @app.function(image=image)
+ def f():
+ ...
+ ```
+
+ """
+ if app.app_id is None:
+ raise InvalidError("App has not been initialized yet. Use the context manager `app.run()` or `App.lookup`")
+
+ resolver = Resolver()
+ await resolver.load(self, app._root_load_context)
+ return self
+
  def pip_install(
  self,
  *packages: Union[str, list[str]], # A list of Python packages, eg. ["numpy", "matplotlib>=3.5.0"]
@@ -871,7 +946,8 @@ class _Image(_Object, type_prefix="im"):
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
  extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
- secrets: Sequence[_Secret] = [],
+ env: Optional[dict[str, Optional[str]]] = None,
+ secrets: Optional[Collection[_Secret]] = None,
  gpu: GPU_T = None,
  ) -> "_Image":
  """Install a list of Python packages using pip.
@@ -918,6 +994,10 @@ class _Image(_Object, type_prefix="im"):
  commands = [cmd.strip() for cmd in commands]
  return DockerfileSpec(commands=commands, context_files={})

+ secrets = secrets or []
+ if env:
+ secrets = [*secrets, _Secret.from_dict(env)]
+
  gpu_config = parse_gpu_config(gpu)
  return _Image._from_args(
  base_images={"base": self},
@@ -937,7 +1017,8 @@ class _Image(_Object, type_prefix="im"):
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
  extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
  gpu: GPU_T = None,
- secrets: Sequence[_Secret] = [],
+ env: Optional[dict[str, Optional[str]]] = None, # Environment variables to set in the container
+ secrets: Optional[Collection[_Secret]] = None, # Secrets to inject into the container as environment variables
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
  ) -> "_Image":
  """
@@ -971,12 +1052,16 @@ class _Image(_Object, type_prefix="im"):
  )
  ```
  """
+
  if not secrets:
  raise InvalidError(
  "No secrets provided to function. "
  "Installing private packages requires tokens to be passed via modal.Secret objects."
  )

+ if env:
+ secrets = [*secrets, _Secret.from_dict(env)]
+
  invalid_repos = []
  install_urls = []
  for repo_ref in repositories:
@@ -1038,11 +1123,16 @@ class _Image(_Object, type_prefix="im"):
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
  extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
- secrets: Sequence[_Secret] = [],
+ env: Optional[dict[str, Optional[str]]] = None,
+ secrets: Optional[Collection[_Secret]] = None,
  gpu: GPU_T = None,
  ) -> "_Image":
  """Install a list of Python packages from a local `requirements.txt` file."""

+ secrets = secrets or []
+ if env:
+ secrets = [*secrets, _Secret.from_dict(env)]
+
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
  requirements_txt_path = os.path.expanduser(requirements_txt)
  context_files = {"/.requirements.txt": requirements_txt_path}
@@ -1079,7 +1169,8 @@ class _Image(_Object, type_prefix="im"):
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
  extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
- secrets: Sequence[_Secret] = [],
+ env: Optional[dict[str, Optional[str]]] = None,
+ secrets: Optional[Collection[_Secret]] = None,
  gpu: GPU_T = None,
  ) -> "_Image":
  """Install dependencies specified by a local `pyproject.toml` file.
@@ -1090,6 +1181,10 @@ class _Image(_Object, type_prefix="im"):
  all of the packages in each listed section are installed as well.
  """

+ secrets = secrets or []
+ if env:
+ secrets = [*secrets, _Secret.from_dict(env)]
+
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
  # Defer toml import so we don't need it in the container runtime environment
  import toml
@@ -1141,7 +1236,8 @@ class _Image(_Object, type_prefix="im"):
  extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation"
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
  uv_version: Optional[str] = None, # uv version to use
- secrets: Sequence[_Secret] = [],
+ env: Optional[dict[str, Optional[str]]] = None,
+ secrets: Optional[Collection[_Secret]] = None,
  gpu: GPU_T = None,
  ) -> "_Image":
  """Install a list of Python packages using uv pip install.
@@ -1157,7 +1253,14 @@ class _Image(_Object, type_prefix="im"):
  - Python is on the `$PATH` and dependencies are installed with the first Python on the `$PATH`.
  - Shell supports backticks for substitution
  - `which` command is on the `$PATH`
+
+ Added in v1.1.0.
  """
+
+ secrets = secrets or []
+ if env:
+ secrets = [*secrets, _Secret.from_dict(env)]
+
  pkgs = _flatten_str_args("uv_pip_install", "packages", packages)

  if requirements is None or isinstance(requirements, list):
@@ -1181,11 +1284,11 @@ class _Image(_Object, type_prefix="im"):
  else:
  commands.append(f"COPY --from=ghcr.io/astral-sh/uv:{uv_version} /uv {UV_ROOT}/uv")

- # NOTE: Using `which python` assumes:
+ # NOTE: Using $(command -v python) assumes:
  # - python is on the PATH and uv is installing into the first python in the PATH
- # - the shell supports backticks for substitution
- # - `which` command is on the PATH
- uv_pip_args = ["--python `which python`", "--compile-bytecode"]
+ # - the shell supports $() for substitution
+ # - `command` command is on the PATH
+ uv_pip_args = ["--python $(command -v python)", "--compile-bytecode"]
  context_files = {}

  if find_links:
@@ -1253,7 +1356,8 @@ class _Image(_Object, type_prefix="im"):
  poetry_version: Optional[str] = "latest", # Version of poetry to install, or None to skip installation
  # If set to True, use old installer. See https://github.com/python-poetry/poetry/issues/3336
  old_installer: bool = False,
- secrets: Sequence[_Secret] = [],
+ env: Optional[dict[str, Optional[str]]] = None,
+ secrets: Optional[Collection[_Secret]] = None,
  gpu: GPU_T = None,
  ) -> "_Image":
  """Install poetry *dependencies* specified by a local `pyproject.toml` file.
@@ -1269,6 +1373,10 @@ class _Image(_Object, type_prefix="im"):
  version, with versions 2024.10 and earlier limiting poetry to 1.x.
  """

+ secrets = secrets or []
+ if env:
+ secrets = [*secrets, _Secret.from_dict(env)]
+
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
  context_files = {"/.pyproject.toml": os.path.expanduser(poetry_pyproject_toml)}

@@ -1338,7 +1446,8 @@ class _Image(_Object, type_prefix="im"):
  frozen: bool = True, # If True, then we run `uv sync --frozen` when a uv.lock file is present
  extra_options: str = "", # Extra options to pass to `uv sync`
  uv_version: Optional[str] = None, # uv version to use
- secrets: Sequence[_Secret] = [],
+ env: Optional[dict[str, Optional[str]]] = None,
+ secrets: Optional[Collection[_Secret]] = None,
  gpu: GPU_T = None,
  ) -> "_Image":
  """Creates a virtual environment with the dependencies in a uv managed project with `uv sync`.
@@ -1347,8 +1456,26 @@ class _Image(_Object, type_prefix="im"):
  ```python
  image = modal.Image.debian_slim().uv_sync()
  ```
+
+ The `pyproject.toml` and `uv.lock` in `uv_project_dir` are automatically added to the build context. The
+ `uv_project_dir` is relative to the current working directory of where `modal` is called.
+
+ NOTE: This does *not* install the project itself into the environment (this is equivalent to the
+ `--no-install-project` flag in the `uv sync` command) and you would be expected to add any local python source
+ files using `Image.add_local_python_source` or similar methods after this call.
+
+ This ensures that updates to your project code wouldn't require reinstalling third-party dependencies
+ after every change.
+
+ uv workspaces are currently not supported.
+
+ Added in v1.1.0.
  """

+
+ secrets = secrets or []
+ if env:
+ secrets = [*secrets, _Secret.from_dict(env)]
+
  def _normalize_items(items, name) -> list[str]:
  if items is None:
  return []
1508
  # a requirement in `uv.lock`
1382
1509
  return
1383
1510
 
1384
- dependencies = pyproject_toml_content["project"]["dependencies"]
1511
+ try:
1512
+ dependencies = pyproject_toml_content["project"]["dependencies"]
1513
+ except KeyError as e:
1514
+ raise InvalidError(
1515
+ f"Invalid pyproject.toml file: missing key {e} in {pyproject_toml}. "
1516
+ "See https://packaging.python.org/en/latest/guides/writing-pyproject-toml for guidelines."
1517
+ )
1385
1518
 
1386
1519
  for group in groups:
1387
1520
  if (
@@ -1447,7 +1580,7 @@ class _Image(_Object, type_prefix="im"):
1447
1580
  commands.append(f"COPY /.uv.lock {UV_ROOT}/uv.lock")
1448
1581
 
1449
1582
  if frozen:
1450
- # Do not update `uv.lock` when we have one when `frozen=True`. This it ehd efault because this
1583
+ # Do not update `uv.lock` when we have one when `frozen=True`. This is the default because this
1451
1584
  # ensures that the runtime environment matches the local `uv.lock`.
1452
1585
  #
1453
1586
  # If `frozen=False`, then `uv sync` will update the the dependencies in the `uv.lock` file
@@ -1475,9 +1608,9 @@ class _Image(_Object, type_prefix="im"):
1475
1608
  self,
1476
1609
  *dockerfile_commands: Union[str, list[str]],
1477
1610
  context_files: dict[str, str] = {},
1478
- secrets: Sequence[_Secret] = [],
1611
+ env: Optional[dict[str, Optional[str]]] = None,
1612
+ secrets: Optional[Collection[_Secret]] = None,
1479
1613
  gpu: GPU_T = None,
1480
- context_mount: Optional[_Mount] = None, # Deprecated: the context is now inferred
1481
1614
  context_dir: Optional[Union[Path, str]] = None, # Context for relative COPY commands
1482
1615
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1483
1616
  ignore: Union[Sequence[str], Callable[[Path], bool]] = AUTO_DOCKERIGNORE,
@@ -1523,16 +1656,14 @@ class _Image(_Object, type_prefix="im"):
1523
1656
  )
1524
1657
  ```
1525
1658
  """
1526
- if context_mount is not None:
1527
- deprecation_warning(
1528
- (2025, 1, 13),
1529
- "The `context_mount` parameter of `Image.dockerfile_commands` is deprecated."
1530
- " Files are now automatically added to the build context based on the commands.",
1531
- )
1532
1659
  cmds = _flatten_str_args("dockerfile_commands", "dockerfile_commands", dockerfile_commands)
1533
1660
  if not cmds:
1534
1661
  return self
1535
1662
 
1663
+ secrets = secrets or []
1664
+ if env:
1665
+ secrets = [*secrets, _Secret.from_dict(env)]
1666
+
1536
1667
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1537
1668
  return DockerfileSpec(commands=["FROM base", *cmds], context_files=context_files)
1538
1669
 
@@ -1542,7 +1673,7 @@ class _Image(_Object, type_prefix="im"):
1542
1673
  secrets=secrets,
1543
1674
  gpu_config=parse_gpu_config(gpu),
1544
1675
  context_mount_function=_create_context_mount_function(
1545
- ignore=ignore, dockerfile_cmds=cmds, context_mount=context_mount, context_dir=context_dir
1676
+ ignore=ignore, dockerfile_cmds=cmds, context_dir=context_dir
1546
1677
  ),
1547
1678
  force_build=self.force_build or force_build,
1548
1679
  )
@@ -1551,7 +1682,7 @@ class _Image(_Object, type_prefix="im"):
1551
1682
  self,
1552
1683
  entrypoint_commands: list[str],
1553
1684
  ) -> "_Image":
1554
- """Set the entrypoint for the image."""
1685
+ """Set the ENTRYPOINT for the image."""
1555
1686
  if not isinstance(entrypoint_commands, list) or not all(isinstance(x, str) for x in entrypoint_commands):
1556
1687
  raise InvalidError("entrypoint_commands must be a list of strings.")
1557
1688
  args_str = _flatten_str_args("entrypoint", "entrypoint_commands", entrypoint_commands)
@@ -1576,11 +1707,18 @@ class _Image(_Object, type_prefix="im"):
1576
1707
  def run_commands(
1577
1708
  self,
1578
1709
  *commands: Union[str, list[str]],
1579
- secrets: Sequence[_Secret] = [],
1710
+ env: Optional[dict[str, Optional[str]]] = None,
1711
+ secrets: Optional[Collection[_Secret]] = None,
1712
+ volumes: Optional[dict[Union[str, PurePosixPath], _Volume]] = None,
1580
1713
  gpu: GPU_T = None,
1581
1714
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1582
1715
  ) -> "_Image":
1583
1716
  """Extend an image with a list of shell commands to run."""
1717
+
1718
+ secrets = secrets or []
1719
+ if env:
1720
+ secrets = [*secrets, _Secret.from_dict(env)]
1721
+
1584
1722
  cmds = _flatten_str_args("run_commands", "commands", commands)
1585
1723
  if not cmds:
1586
1724
  return self
@@ -1594,6 +1732,7 @@ class _Image(_Object, type_prefix="im"):
1594
1732
  secrets=secrets,
1595
1733
  gpu_config=parse_gpu_config(gpu),
1596
1734
  force_build=self.force_build or force_build,
1735
+ validated_volumes=validate_only_modal_volumes(volumes, "Image.run_commands"),
1597
1736
  )
1598
1737
 
1599
1738
  @staticmethod
@@ -1639,10 +1778,16 @@ class _Image(_Object, type_prefix="im"):
1639
1778
  # A list of Conda channels, eg. ["conda-forge", "nvidia"].
1640
1779
  channels: list[str] = [],
1641
1780
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1642
- secrets: Sequence[_Secret] = [],
1781
+ env: Optional[dict[str, Optional[str]]] = None,
1782
+ secrets: Optional[Collection[_Secret]] = None,
1643
1783
  gpu: GPU_T = None,
1644
1784
  ) -> "_Image":
1645
1785
  """Install a list of additional packages using micromamba."""
1786
+
1787
+ secrets = secrets or []
1788
+ if env:
1789
+ secrets = [*secrets, _Secret.from_dict(env)]
1790
+
1646
1791
  pkgs = _flatten_str_args("micromamba_install", "packages", packages)
1647
1792
  if not pkgs and spec_file is None:
1648
1793
  return self
@@ -1844,12 +1989,18 @@ class _Image(_Object, type_prefix="im"):
1844
1989
  ) -> "_Image":
1845
1990
  """Build a Modal image from a private image in AWS Elastic Container Registry (ECR).
1846
1991
 
1847
- You will need to pass a `modal.Secret` containing `AWS_ACCESS_KEY_ID`,
1848
- `AWS_SECRET_ACCESS_KEY`, and `AWS_REGION` to access the target ECR registry.
1992
+ You will need to pass a `modal.Secret` containing either IAM user credentials or OIDC
1993
+ configuration to access the target ECR registry.
1994
+
1995
+ For IAM user authentication, set `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_REGION`.
1996
+
1997
+ For OIDC authentication, set `AWS_ROLE_ARN` and `AWS_REGION`.
1849
1998
 
1850
1999
  IAM configuration details can be found in the AWS documentation for
1851
2000
  ["Private repository policies"](https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html).
1852
2001
 
2002
+ For more details on using an AWS role to access ECR, see the [OIDC integration guide](https://modal.com/docs/guide/oidc-integration).
2003
+
1853
2004
  See `Image.from_registry()` for information about the other parameters.
1854
2005
 
1855
2006
  **Example**
@@ -1881,10 +2032,10 @@ class _Image(_Object, type_prefix="im"):
1881
2032
  def from_dockerfile(
1882
2033
  path: Union[str, Path], # Filepath to Dockerfile.
1883
2034
  *,
1884
- context_mount: Optional[_Mount] = None, # Deprecated: the context is now inferred
1885
2035
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1886
2036
  context_dir: Optional[Union[Path, str]] = None, # Context for relative COPY commands
1887
- secrets: Sequence[_Secret] = [],
2037
+ env: Optional[dict[str, Optional[str]]] = None,
2038
+ secrets: Optional[Collection[_Secret]] = None,
1888
2039
  gpu: GPU_T = None,
1889
2040
  add_python: Optional[str] = None,
1890
2041
  build_args: dict[str, str] = {},
@@ -1939,12 +2090,10 @@ class _Image(_Object, type_prefix="im"):
1939
2090
  )
1940
2091
  ```
1941
2092
  """
1942
- if context_mount is not None:
1943
- deprecation_warning(
1944
- (2025, 1, 13),
1945
- "The `context_mount` parameter of `Image.from_dockerfile` is deprecated."
1946
- " Files are now automatically added to the build context based on the commands in the Dockerfile.",
1947
- )
2093
+
2094
+ secrets = secrets or []
2095
+ if env:
2096
+ secrets = [*secrets, _Secret.from_dict(env)]
1948
2097
 
1949
2098
  # --- Build the base dockerfile
1950
2099
 
@@ -1957,7 +2106,7 @@ class _Image(_Object, type_prefix="im"):
1957
2106
  base_image = _Image._from_args(
1958
2107
  dockerfile_function=build_dockerfile_base,
1959
2108
  context_mount_function=_create_context_mount_function(
1960
- ignore=ignore, dockerfile_path=Path(path), context_mount=context_mount, context_dir=context_dir
2109
+ ignore=ignore, dockerfile_path=Path(path), context_dir=context_dir
1961
2110
  ),
1962
2111
  gpu_config=gpu_config,
1963
2112
  secrets=secrets,
@@ -2053,7 +2202,8 @@ class _Image(_Object, type_prefix="im"):
2053
2202
  self,
2054
2203
  *packages: Union[str, list[str]], # A list of packages, e.g. ["ssh", "libpq-dev"]
2055
2204
  force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
2056
- secrets: Sequence[_Secret] = [],
2205
+ env: Optional[dict[str, Optional[str]]] = None,
2206
+ secrets: Optional[Collection[_Secret]] = None,
2057
2207
  gpu: GPU_T = None,
2058
2208
  ) -> "_Image":
2059
2209
  """Install a list of Debian packages using `apt`.
@@ -2078,6 +2228,10 @@ class _Image(_Object, type_prefix="im"):
2078
2228
  ]
2079
2229
  return DockerfileSpec(commands=commands, context_files={})
2080
2230
 
2231
+ secrets = secrets or []
2232
+ if env:
2233
+ secrets = [*secrets, _Secret.from_dict(env)]
2234
+
2081
2235
  return _Image._from_args(
2082
2236
  base_images={"base": self},
2083
2237
  dockerfile_function=build_dockerfile,
@@ -2090,24 +2244,26 @@ class _Image(_Object, type_prefix="im"):
2090
2244
  self,
2091
2245
  raw_f: Callable[..., Any],
2092
2246
  *,
2093
- secrets: Sequence[_Secret] = (), # Optional Modal Secret objects with environment variables for the container
2094
- gpu: Union[GPU_T, list[GPU_T]] = None, # Requested GPU or or list of acceptable GPUs( e.g. ["A10", "A100"])
2247
+ env: Optional[dict[str, Optional[str]]] = None, # Environment variables to set in the container
2248
+ secrets: Optional[Collection[_Secret]] = None, # Secrets to inject into the container as environment variables
2095
2249
  volumes: dict[Union[str, PurePosixPath], Union[_Volume, _CloudBucketMount]] = {}, # Volume mount paths
2096
2250
  network_file_systems: dict[Union[str, PurePosixPath], _NetworkFileSystem] = {}, # NFS mount paths
2251
+ gpu: Union[GPU_T, list[GPU_T]] = None, # Requested GPU or or list of acceptable GPUs( e.g. ["A10", "A100"])
2097
2252
  cpu: Optional[float] = None, # How many CPU cores to request. This is a soft limit.
2098
2253
  memory: Optional[int] = None, # How much memory to request, in MiB. This is a soft limit.
2099
- timeout: Optional[int] = 60 * 60, # Maximum execution time of the function in seconds.
2100
- force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
2254
+ timeout: int = 60 * 60, # Maximum execution time of the function in seconds.
2101
2255
  cloud: Optional[str] = None, # Cloud provider to run the function on. Possible values are aws, gcp, oci, auto.
2102
2256
  region: Optional[Union[str, Sequence[str]]] = None, # Region or regions to run the function on.
2257
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
2103
2258
  args: Sequence[Any] = (), # Positional arguments to the function.
2104
2259
  kwargs: dict[str, Any] = {}, # Keyword arguments to the function.
2105
- include_source: Optional[bool] = None,
2260
+ include_source: bool = True, # Whether the builder container should have the Function's source added
2106
2261
  ) -> "_Image":
2107
- """Run user-defined function `raw_f` as an image build step. The function runs just like an ordinary Modal
2108
- function, and any kwargs accepted by `@app.function` (such as `Mount`s, `NetworkFileSystem`s,
2109
- and resource requests) can be supplied to it.
2110
- After it finishes execution, a snapshot of the resulting container file system is saved as an image.
2262
+ """Run user-defined function `raw_f` as an image build step.
2263
+
2264
+ The function runs like an ordinary Modal Function, accepting a resource configuration and integrating
2265
+ with Modal features like Secrets and Volumes. Unlike ordinary Modal Functions, any changes to the
2266
+ filesystem state will be captured on container exit and saved as a new Image.
2111
2267
 
2112
2268
  **Note**
2113
2269
 
@@ -2131,6 +2287,11 @@ class _Image(_Object, type_prefix="im"):
2131
2287
  )
2132
2288
  ```
2133
2289
  """
2290
+
2291
+ secrets = secrets or []
2292
+ if env:
2293
+ secrets = [*secrets, _Secret.from_dict(env)]
2294
+
2134
2295
  from ._functions import _Function
2135
2296
 
2136
2297
  if not callable(raw_f):
@@ -2160,13 +2321,19 @@ class _Image(_Object, type_prefix="im"):
2160
2321
  include_source=include_source,
2161
2322
  )
2162
2323
  if len(args) + len(kwargs) > 0:
2163
- args_serialized = serialize((args, kwargs))
2324
+ data_format = get_preferred_payload_format()
2325
+ args_serialized = serialize_data_format((args, kwargs), data_format)
2326
+
2164
2327
  if len(args_serialized) > MAX_OBJECT_SIZE_BYTES:
2165
2328
  raise InvalidError(
2166
2329
  f"Arguments to `run_function` are too large ({len(args_serialized)} bytes). "
2167
2330
  f"Maximum size is {MAX_OBJECT_SIZE_BYTES} bytes."
2168
2331
  )
2169
- build_function_input = api_pb2.FunctionInput(args=args_serialized, data_format=api_pb2.DATA_FORMAT_PICKLE)
2332
+
2333
+ build_function_input = api_pb2.FunctionInput(
2334
+ args=args_serialized,
2335
+ data_format=data_format,
2336
+ )
2170
2337
  else:
2171
2338
  build_function_input = None
2172
2339
  return _Image._from_args(
@@ -2226,7 +2393,9 @@ class _Image(_Object, type_prefix="im"):
2226
2393
  )
2227
2394
 
2228
2395
  def cmd(self, cmd: list[str]) -> "_Image":
2229
- """Set the default entrypoint argument (`CMD`) for the image.
2396
+ """Set the default command (`CMD`) to run when a container is started.
2397
+
2398
+ Used with `modal.Sandbox`. Has no effect on `modal.Function`.
2230
2399
 
2231
2400
  **Example**
2232
2401