modal 0.62.115__py3-none-any.whl → 0.72.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (220)
  1. modal/__init__.py +13 -9
  2. modal/__main__.py +41 -3
  3. modal/_clustered_functions.py +80 -0
  4. modal/_clustered_functions.pyi +22 -0
  5. modal/_container_entrypoint.py +402 -398
  6. modal/_ipython.py +3 -13
  7. modal/_location.py +17 -10
  8. modal/_output.py +243 -99
  9. modal/_pty.py +2 -2
  10. modal/_resolver.py +55 -60
  11. modal/_resources.py +26 -7
  12. modal/_runtime/__init__.py +1 -0
  13. modal/_runtime/asgi.py +519 -0
  14. modal/_runtime/container_io_manager.py +1025 -0
  15. modal/{execution_context.py → _runtime/execution_context.py} +11 -2
  16. modal/_runtime/telemetry.py +169 -0
  17. modal/_runtime/user_code_imports.py +356 -0
  18. modal/_serialization.py +123 -6
  19. modal/_traceback.py +47 -187
  20. modal/_tunnel.py +50 -14
  21. modal/_tunnel.pyi +19 -36
  22. modal/_utils/app_utils.py +3 -17
  23. modal/_utils/async_utils.py +386 -104
  24. modal/_utils/blob_utils.py +157 -186
  25. modal/_utils/bytes_io_segment_payload.py +97 -0
  26. modal/_utils/deprecation.py +89 -0
  27. modal/_utils/docker_utils.py +98 -0
  28. modal/_utils/function_utils.py +299 -98
  29. modal/_utils/grpc_testing.py +47 -34
  30. modal/_utils/grpc_utils.py +54 -21
  31. modal/_utils/hash_utils.py +51 -10
  32. modal/_utils/http_utils.py +39 -9
  33. modal/_utils/logger.py +2 -1
  34. modal/_utils/mount_utils.py +34 -16
  35. modal/_utils/name_utils.py +58 -0
  36. modal/_utils/package_utils.py +14 -1
  37. modal/_utils/pattern_utils.py +205 -0
  38. modal/_utils/rand_pb_testing.py +3 -3
  39. modal/_utils/shell_utils.py +15 -49
  40. modal/_vendor/a2wsgi_wsgi.py +62 -72
  41. modal/_vendor/cloudpickle.py +1 -1
  42. modal/_watcher.py +12 -10
  43. modal/app.py +561 -323
  44. modal/app.pyi +474 -262
  45. modal/call_graph.py +7 -6
  46. modal/cli/_download.py +22 -6
  47. modal/cli/_traceback.py +200 -0
  48. modal/cli/app.py +203 -42
  49. modal/cli/config.py +12 -5
  50. modal/cli/container.py +61 -13
  51. modal/cli/dict.py +128 -0
  52. modal/cli/entry_point.py +26 -13
  53. modal/cli/environment.py +40 -9
  54. modal/cli/import_refs.py +21 -48
  55. modal/cli/launch.py +28 -14
  56. modal/cli/network_file_system.py +57 -21
  57. modal/cli/profile.py +1 -1
  58. modal/cli/programs/run_jupyter.py +34 -9
  59. modal/cli/programs/vscode.py +58 -8
  60. modal/cli/queues.py +131 -0
  61. modal/cli/run.py +199 -96
  62. modal/cli/secret.py +5 -4
  63. modal/cli/token.py +7 -2
  64. modal/cli/utils.py +74 -8
  65. modal/cli/volume.py +97 -56
  66. modal/client.py +248 -144
  67. modal/client.pyi +156 -124
  68. modal/cloud_bucket_mount.py +43 -30
  69. modal/cloud_bucket_mount.pyi +32 -25
  70. modal/cls.py +528 -141
  71. modal/cls.pyi +189 -145
  72. modal/config.py +32 -15
  73. modal/container_process.py +177 -0
  74. modal/container_process.pyi +82 -0
  75. modal/dict.py +50 -54
  76. modal/dict.pyi +120 -164
  77. modal/environments.py +106 -5
  78. modal/environments.pyi +77 -25
  79. modal/exception.py +30 -43
  80. modal/experimental.py +62 -2
  81. modal/file_io.py +537 -0
  82. modal/file_io.pyi +235 -0
  83. modal/file_pattern_matcher.py +196 -0
  84. modal/functions.py +846 -428
  85. modal/functions.pyi +446 -387
  86. modal/gpu.py +57 -44
  87. modal/image.py +943 -417
  88. modal/image.pyi +584 -245
  89. modal/io_streams.py +434 -0
  90. modal/io_streams.pyi +122 -0
  91. modal/mount.py +223 -90
  92. modal/mount.pyi +241 -243
  93. modal/network_file_system.py +85 -86
  94. modal/network_file_system.pyi +151 -110
  95. modal/object.py +66 -36
  96. modal/object.pyi +166 -143
  97. modal/output.py +63 -0
  98. modal/parallel_map.py +73 -47
  99. modal/parallel_map.pyi +51 -63
  100. modal/partial_function.py +272 -107
  101. modal/partial_function.pyi +219 -120
  102. modal/proxy.py +15 -12
  103. modal/proxy.pyi +3 -8
  104. modal/queue.py +96 -72
  105. modal/queue.pyi +210 -135
  106. modal/requirements/2024.04.txt +2 -1
  107. modal/requirements/2024.10.txt +16 -0
  108. modal/requirements/README.md +21 -0
  109. modal/requirements/base-images.json +22 -0
  110. modal/retries.py +45 -4
  111. modal/runner.py +325 -203
  112. modal/runner.pyi +124 -110
  113. modal/running_app.py +27 -4
  114. modal/sandbox.py +509 -231
  115. modal/sandbox.pyi +396 -169
  116. modal/schedule.py +2 -2
  117. modal/scheduler_placement.py +20 -3
  118. modal/secret.py +41 -25
  119. modal/secret.pyi +62 -42
  120. modal/serving.py +39 -49
  121. modal/serving.pyi +37 -43
  122. modal/stream_type.py +15 -0
  123. modal/token_flow.py +5 -3
  124. modal/token_flow.pyi +37 -32
  125. modal/volume.py +123 -137
  126. modal/volume.pyi +228 -221
  127. {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/METADATA +5 -5
  128. modal-0.72.13.dist-info/RECORD +174 -0
  129. {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/top_level.txt +0 -1
  130. modal_docs/gen_reference_docs.py +3 -1
  131. modal_docs/mdmd/mdmd.py +0 -1
  132. modal_docs/mdmd/signatures.py +1 -2
  133. modal_global_objects/images/base_images.py +28 -0
  134. modal_global_objects/mounts/python_standalone.py +2 -2
  135. modal_proto/__init__.py +1 -1
  136. modal_proto/api.proto +1231 -531
  137. modal_proto/api_grpc.py +750 -430
  138. modal_proto/api_pb2.py +2102 -1176
  139. modal_proto/api_pb2.pyi +8859 -0
  140. modal_proto/api_pb2_grpc.py +1329 -675
  141. modal_proto/api_pb2_grpc.pyi +1416 -0
  142. modal_proto/modal_api_grpc.py +149 -0
  143. modal_proto/modal_options_grpc.py +3 -0
  144. modal_proto/options_pb2.pyi +20 -0
  145. modal_proto/options_pb2_grpc.pyi +7 -0
  146. modal_proto/py.typed +0 -0
  147. modal_version/__init__.py +1 -1
  148. modal_version/_version_generated.py +2 -2
  149. modal/_asgi.py +0 -370
  150. modal/_container_exec.py +0 -128
  151. modal/_container_io_manager.py +0 -646
  152. modal/_container_io_manager.pyi +0 -412
  153. modal/_sandbox_shell.py +0 -49
  154. modal/app_utils.py +0 -20
  155. modal/app_utils.pyi +0 -17
  156. modal/execution_context.pyi +0 -37
  157. modal/shared_volume.py +0 -23
  158. modal/shared_volume.pyi +0 -24
  159. modal-0.62.115.dist-info/RECORD +0 -207
  160. modal_global_objects/images/conda.py +0 -15
  161. modal_global_objects/images/debian_slim.py +0 -15
  162. modal_global_objects/images/micromamba.py +0 -15
  163. test/__init__.py +0 -1
  164. test/aio_test.py +0 -12
  165. test/async_utils_test.py +0 -279
  166. test/blob_test.py +0 -67
  167. test/cli_imports_test.py +0 -149
  168. test/cli_test.py +0 -674
  169. test/client_test.py +0 -203
  170. test/cloud_bucket_mount_test.py +0 -22
  171. test/cls_test.py +0 -636
  172. test/config_test.py +0 -149
  173. test/conftest.py +0 -1485
  174. test/container_app_test.py +0 -50
  175. test/container_test.py +0 -1405
  176. test/cpu_test.py +0 -23
  177. test/decorator_test.py +0 -85
  178. test/deprecation_test.py +0 -34
  179. test/dict_test.py +0 -51
  180. test/e2e_test.py +0 -68
  181. test/error_test.py +0 -7
  182. test/function_serialization_test.py +0 -32
  183. test/function_test.py +0 -791
  184. test/function_utils_test.py +0 -101
  185. test/gpu_test.py +0 -159
  186. test/grpc_utils_test.py +0 -82
  187. test/helpers.py +0 -47
  188. test/image_test.py +0 -814
  189. test/live_reload_test.py +0 -80
  190. test/lookup_test.py +0 -70
  191. test/mdmd_test.py +0 -329
  192. test/mount_test.py +0 -162
  193. test/mounted_files_test.py +0 -327
  194. test/network_file_system_test.py +0 -188
  195. test/notebook_test.py +0 -66
  196. test/object_test.py +0 -41
  197. test/package_utils_test.py +0 -25
  198. test/queue_test.py +0 -115
  199. test/resolver_test.py +0 -59
  200. test/retries_test.py +0 -67
  201. test/runner_test.py +0 -85
  202. test/sandbox_test.py +0 -191
  203. test/schedule_test.py +0 -15
  204. test/scheduler_placement_test.py +0 -57
  205. test/secret_test.py +0 -89
  206. test/serialization_test.py +0 -50
  207. test/stub_composition_test.py +0 -10
  208. test/stub_test.py +0 -361
  209. test/test_asgi_wrapper.py +0 -234
  210. test/token_flow_test.py +0 -18
  211. test/traceback_test.py +0 -135
  212. test/tunnel_test.py +0 -29
  213. test/utils_test.py +0 -88
  214. test/version_test.py +0 -14
  215. test/volume_test.py +0 -397
  216. test/watcher_test.py +0 -58
  217. test/webhook_test.py +0 -145
  218. {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/LICENSE +0 -0
  219. {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/WHEEL +0 -0
  220. {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/entry_points.txt +0 -0
modal/image.py CHANGED
@@ -1,15 +1,25 @@
1
1
  # Copyright Modal Labs 2022
2
2
  import contextlib
3
+ import json
3
4
  import os
4
5
  import re
5
6
  import shlex
6
7
  import sys
7
8
  import typing
8
9
  import warnings
10
+ from collections.abc import Sequence
9
11
  from dataclasses import dataclass
10
12
  from inspect import isfunction
11
13
  from pathlib import Path, PurePosixPath
12
- from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Set, Tuple, Union, get_args
14
+ from typing import (
15
+ Any,
16
+ Callable,
17
+ Literal,
18
+ Optional,
19
+ Union,
20
+ cast,
21
+ get_args,
22
+ )
13
23
 
14
24
  from google.protobuf.message import Message
15
25
  from grpclib.exceptions import GRPCError, StreamTerminatedError
@@ -20,99 +30,118 @@ from ._resolver import Resolver
20
30
  from ._serialization import serialize
21
31
  from ._utils.async_utils import synchronize_api
22
32
  from ._utils.blob_utils import MAX_OBJECT_SIZE_BYTES
33
+ from ._utils.deprecation import deprecation_error, deprecation_warning
34
+ from ._utils.docker_utils import (
35
+ extract_copy_command_patterns,
36
+ find_dockerignore_file,
37
+ )
23
38
  from ._utils.function_utils import FunctionInfo
24
- from ._utils.grpc_utils import RETRYABLE_GRPC_STATUS_CODES, retry_transient_errors, unary_stream
39
+ from ._utils.grpc_utils import RETRYABLE_GRPC_STATUS_CODES, retry_transient_errors
40
+ from .client import _Client
41
+ from .cloud_bucket_mount import _CloudBucketMount
25
42
  from .config import config, logger, user_config_path
26
- from .exception import InvalidError, NotFoundError, RemoteError, VersionError, deprecation_error, deprecation_warning
43
+ from .environments import _get_environment_cached
44
+ from .exception import InvalidError, NotFoundError, RemoteError, VersionError
45
+ from .file_pattern_matcher import NON_PYTHON_FILES, FilePatternMatcher, _ignore_fn
27
46
  from .gpu import GPU_T, parse_gpu_config
28
47
  from .mount import _Mount, python_standalone_mount_name
29
48
  from .network_file_system import _NetworkFileSystem
30
- from .object import _Object
49
+ from .object import _Object, live_method_gen
50
+ from .output import _get_output_manager
51
+ from .scheduler_placement import SchedulerPlacement
31
52
  from .secret import _Secret
53
+ from .volume import _Volume
32
54
 
33
55
  if typing.TYPE_CHECKING:
34
56
  import modal.functions
35
57
 
36
-
37
58
  # This is used for both type checking and runtime validation
38
- ImageBuilderVersion = Literal["2023.12", "2024.04"]
59
+ ImageBuilderVersion = Literal["2023.12", "2024.04", "2024.10"]
39
60
 
40
61
  # Note: we also define supported Python versions via logic at the top of the package __init__.py
41
62
  # so that we fail fast / clearly in unsupported containers. Additionally, we enumerate the supported
42
63
  # Python versions in mount.py where we specify the "standalone Python versions" we create mounts for.
43
64
  # Consider consolidating these multiple sources of truth?
44
- SUPPORTED_PYTHON_SERIES: Set[str] = {"3.8", "3.9", "3.10", "3.11", "3.12"}
65
+ SUPPORTED_PYTHON_SERIES: dict[ImageBuilderVersion, list[str]] = {
66
+ "2024.10": ["3.9", "3.10", "3.11", "3.12", "3.13"],
67
+ "2024.04": ["3.9", "3.10", "3.11", "3.12"],
68
+ "2023.12": ["3.9", "3.10", "3.11", "3.12"],
69
+ }
45
70
 
71
+ LOCAL_REQUIREMENTS_DIR = Path(__file__).parent / "requirements"
46
72
  CONTAINER_REQUIREMENTS_PATH = "/modal_requirements.txt"
47
73
 
48
74
 
49
- def _validate_python_version(version: Optional[str], allow_micro_granularity: bool = True) -> str:
50
- if version is None:
75
+ class _AutoDockerIgnoreSentinel:
76
+ def __repr__(self) -> str:
77
+ return f"{__name__}.AUTO_DOCKERIGNORE"
78
+
79
+ def __call__(self, _: Path) -> bool:
80
+ raise NotImplementedError("This is only a placeholder. Do not call")
81
+
82
+
83
+ AUTO_DOCKERIGNORE = _AutoDockerIgnoreSentinel()
84
+
85
+ COPY_DEPRECATION_MESSAGE_PATTERN = """modal.Image.copy_* methods will soon be deprecated.
86
+
87
+ Use {replacement} instead, which is functionally and performance-wise equivalent.
88
+ """
89
+
90
+
91
+ def _validate_python_version(
92
+ python_version: Optional[str], builder_version: ImageBuilderVersion, allow_micro_granularity: bool = True
93
+ ) -> str:
94
+ if python_version is None:
51
95
  # If Python version is unspecified, match the local version, up to the minor component
52
- version = series_version = "{0}.{1}".format(*sys.version_info)
53
- elif not isinstance(version, str):
54
- raise InvalidError(f"Python version must be specified as a string, not {type(version).__name__}")
55
- elif not re.match(r"^3(?:\.\d{1,2}){1,2}$", version):
56
- raise InvalidError(f"Invalid Python version: {version!r}")
96
+ python_version = series_version = "{}.{}".format(*sys.version_info)
97
+ elif not isinstance(python_version, str):
98
+ raise InvalidError(f"Python version must be specified as a string, not {type(python_version).__name__}")
99
+ elif not re.match(r"^3(?:\.\d{1,2}){1,2}(rc\d*)?$", python_version):
100
+ raise InvalidError(f"Invalid Python version: {python_version!r}")
57
101
  else:
58
- components = version.split(".")
102
+ components = python_version.split(".")
59
103
  if len(components) == 3 and not allow_micro_granularity:
60
104
  raise InvalidError(
61
105
  "Python version must be specified as 'major.minor' for this interface;"
62
- f" micro-level specification ({version!r}) is not valid."
106
+ f" micro-level specification ({python_version!r}) is not valid."
63
107
  )
64
- series_version = "{0}.{1}".format(*components)
108
+ series_version = "{}.{}".format(*components)
65
109
 
66
- if series_version not in SUPPORTED_PYTHON_SERIES:
110
+ supported_series = SUPPORTED_PYTHON_SERIES[builder_version]
111
+ if series_version not in supported_series:
67
112
  raise InvalidError(
68
- f"Unsupported Python version: {version!r}."
69
- f" Modal supports versions in the following series: {SUPPORTED_PYTHON_SERIES!r}."
113
+ f"Unsupported Python version: {python_version!r}."
114
+ f" When using the {builder_version!r} Image builder, Modal supports the following series:"
115
+ f" {supported_series!r}."
70
116
  )
71
- return version
117
+ return python_version
72
118
 
73
119
 
74
120
  def _dockerhub_python_version(builder_version: ImageBuilderVersion, python_version: Optional[str] = None) -> str:
75
- python_version = _validate_python_version(python_version)
76
- components = python_version.split(".")
121
+ python_version = _validate_python_version(python_version, builder_version)
122
+ version_components = python_version.split(".")
77
123
 
78
124
  # When user specifies a full Python version, use that
79
- if len(components) > 2:
125
+ if len(version_components) > 2:
80
126
  return python_version
81
127
 
82
128
  # Otherwise, use the same series, but a specific micro version, corresponding to the latest
83
129
  # available from https://hub.docker.com/_/python at the time of each image builder release.
84
- latest_micro_version = {
85
- "2023.12": {
86
- "3.12": "1",
87
- "3.11": "0",
88
- "3.10": "8",
89
- "3.9": "15",
90
- "3.8": "15",
91
- },
92
- "2024.04": {
93
- "3.12": "2",
94
- "3.11": "8",
95
- "3.10": "14",
96
- "3.9": "19",
97
- "3.8": "19",
98
- },
99
- }
100
- python_series = "{0}.{1}".format(*components)
101
- micro_version = latest_micro_version[builder_version][python_series]
102
- python_version = f"{python_series}.{micro_version}"
103
- return python_version
130
+ # This allows us to publish one pre-built debian-slim image per Python series.
131
+ python_versions = _base_image_config("python", builder_version)
132
+ series_to_micro_version = dict(tuple(v.rsplit(".", 1)) for v in python_versions)
133
+ python_series_requested = "{}.{}".format(*version_components)
134
+ micro_version = series_to_micro_version[python_series_requested]
135
+ return f"{python_series_requested}.{micro_version}"
104
136
 
105
137
 
106
- def _dockerhub_debian_codename(builder_version: ImageBuilderVersion) -> str:
107
- return {"2023.12": "bullseye", "2024.04": "bookworm"}[builder_version]
138
+ def _base_image_config(group: str, builder_version: ImageBuilderVersion) -> Any:
139
+ with open(LOCAL_REQUIREMENTS_DIR / "base-images.json") as f:
140
+ data = json.load(f)
141
+ return data[group][builder_version]
108
142
 
109
143
 
110
144
  def _get_modal_requirements_path(builder_version: ImageBuilderVersion, python_version: Optional[str] = None) -> str:
111
- # Locate Modal client requirements data
112
- import modal
113
-
114
- modal_path = Path(modal.__path__[0])
115
-
116
145
  # When we added Python 3.12 support, we needed to update a few dependencies but did not yet
117
146
  # support versioned builds, so we put them in a separate 3.12-specific requirements file.
118
147
  # When the python_version is not specified in the Image API, we fall back to the local version.
@@ -122,20 +151,22 @@ def _get_modal_requirements_path(builder_version: ImageBuilderVersion, python_ve
122
151
  python_version = python_version or sys.version
123
152
  suffix = ".312" if builder_version == "2023.12" and python_version.startswith("3.12") else ""
124
153
 
125
- return str(modal_path / "requirements" / f"{builder_version}{suffix}.txt")
154
+ return str(LOCAL_REQUIREMENTS_DIR / f"{builder_version}{suffix}.txt")
126
155
 
127
156
 
128
157
  def _get_modal_requirements_command(version: ImageBuilderVersion) -> str:
129
- command = "pip install"
130
- if version <= "2023.12":
131
- args = f"-r {CONTAINER_REQUIREMENTS_PATH}"
132
- else:
133
- args = f"--no-cache --no-deps -r {CONTAINER_REQUIREMENTS_PATH}"
134
- return f"{command} {args}"
158
+ if version == "2023.12":
159
+ prefix = "pip install"
160
+ elif version == "2024.04":
161
+ prefix = "pip install --no-cache --no-deps"
162
+ else: # Currently, 2024.10+
163
+ prefix = "uv pip install --system --compile-bytecode --no-cache --no-deps"
135
164
 
165
+ return f"{prefix} -r {CONTAINER_REQUIREMENTS_PATH}"
136
166
 
137
- def _flatten_str_args(function_name: str, arg_name: str, args: Tuple[Union[str, List[str]], ...]) -> List[str]:
138
- """Takes a tuple of strings, or string lists, and flattens it.
167
+
168
+ def _flatten_str_args(function_name: str, arg_name: str, args: Sequence[Union[str, list[str]]]) -> list[str]:
169
+ """Takes a sequence of strings, or string lists, and flattens it.
139
170
 
140
171
  Raises an error if any of the elements are not strings or string lists.
141
172
  """
@@ -143,7 +174,7 @@ def _flatten_str_args(function_name: str, arg_name: str, args: Tuple[Union[str,
143
174
  def is_str_list(x):
144
175
  return isinstance(x, list) and all(isinstance(y, str) for y in x)
145
176
 
146
- ret: List[str] = []
177
+ ret: list[str] = []
147
178
  for x in args:
148
179
  if isinstance(x, str):
149
180
  ret.append(x)
@@ -154,11 +185,29 @@ def _flatten_str_args(function_name: str, arg_name: str, args: Tuple[Union[str,
154
185
  return ret
155
186
 
156
187
 
188
+ def _validate_packages(packages: list[str]) -> bool:
189
+ """Validates that a list of packages does not contain any command-line options."""
190
+ return not any(pkg.startswith("-") for pkg in packages)
191
+
192
+
193
+ def _warn_invalid_packages(old_command: str) -> None:
194
+ deprecation_warning(
195
+ (2024, 7, 3),
196
+ "Passing flags to `pip` via the `packages` argument of `pip_install` is deprecated."
197
+ " Please pass flags via the `extra_options` argument instead."
198
+ "\nNote that this will cause a rebuild of this image layer."
199
+ " To avoid rebuilding, you can pass the following to `run_commands` instead:"
200
+ f'\n`image.run_commands("{old_command}")`',
201
+ show_source=False,
202
+ )
203
+
204
+
157
205
  def _make_pip_install_args(
158
206
  find_links: Optional[str] = None, # Passes -f (--find-links) pip install
159
207
  index_url: Optional[str] = None, # Passes -i (--index-url) to pip install
160
208
  extra_index_url: Optional[str] = None, # Passes --extra-index-url to pip install
161
209
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
210
+ extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
162
211
  ) -> str:
163
212
  flags = [
164
213
  ("--find-links", find_links), # TODO(erikbern): allow multiple?
@@ -168,25 +217,30 @@ def _make_pip_install_args(
168
217
 
169
218
  args = " ".join(f"{flag} {shlex.quote(value)}" for flag, value in flags if value is not None)
170
219
  if pre:
171
- args += " --pre"
220
+ args += " --pre" # TODO: remove extra whitespace in future image builder version
221
+
222
+ if extra_options:
223
+ if args:
224
+ args += " "
225
+ args += f"{extra_options}"
172
226
 
173
227
  return args
174
228
 
175
229
 
176
- def _get_image_builder_version(client_version: str) -> ImageBuilderVersion:
177
- if config_version := config.get("image_builder_version"):
178
- version = config_version
230
+ def _get_image_builder_version(server_version: ImageBuilderVersion) -> ImageBuilderVersion:
231
+ if local_config_version := config.get("image_builder_version"):
232
+ version = local_config_version
179
233
  if (env_var := "MODAL_IMAGE_BUILDER_VERSION") in os.environ:
180
234
  version_source = f" (based on your `{env_var}` environment variable)"
181
235
  else:
182
236
  version_source = f" (based on your local config file at `{user_config_path}`)"
183
237
  else:
184
- version = client_version
185
238
  version_source = ""
239
+ version = server_version
186
240
 
187
- supported_versions: Set[ImageBuilderVersion] = set(get_args(ImageBuilderVersion))
241
+ supported_versions: set[ImageBuilderVersion] = set(get_args(ImageBuilderVersion))
188
242
  if version not in supported_versions:
189
- if config_version is not None:
243
+ if local_config_version is not None:
190
244
  update_suggestion = "or remove your local configuration"
191
245
  elif version < min(supported_versions):
192
246
  update_suggestion = "your image builder version using the Modal dashboard"
@@ -202,13 +256,81 @@ def _get_image_builder_version(client_version: str) -> ImageBuilderVersion:
202
256
  return version
203
257
 
204
258
 
259
+ def _create_context_mount(
260
+ docker_commands: Sequence[str],
261
+ ignore_fn: Callable[[Path], bool],
262
+ context_dir: Path,
263
+ ) -> Optional[_Mount]:
264
+ """
265
+ Creates a context mount from a list of docker commands.
266
+
267
+ 1. Paths are evaluated relative to context_dir.
268
+ 2. First selects inclusions based on COPY commands in the list of commands.
269
+ 3. Then ignore any files as per the ignore predicate.
270
+ """
271
+ copy_patterns = extract_copy_command_patterns(docker_commands)
272
+ if not copy_patterns:
273
+ return None # no mount needed
274
+ include_fn = FilePatternMatcher(*copy_patterns)
275
+
276
+ def ignore_with_include(source: Path) -> bool:
277
+ relative_source = source.relative_to(context_dir)
278
+ if not include_fn(relative_source) or ignore_fn(relative_source):
279
+ return True
280
+
281
+ return False
282
+
283
+ return _Mount._add_local_dir(Path("./"), PurePosixPath("/"), ignore=ignore_with_include)
284
+
285
+
286
+ def _create_context_mount_function(
287
+ ignore: Union[Sequence[str], Callable[[Path], bool]],
288
+ dockerfile_cmds: list[str] = [],
289
+ dockerfile_path: Optional[Path] = None,
290
+ context_mount: Optional[_Mount] = None,
291
+ ):
292
+ if dockerfile_path and dockerfile_cmds:
293
+ raise InvalidError("Cannot provide both dockerfile and docker commands")
294
+
295
+ if context_mount:
296
+ if ignore is not AUTO_DOCKERIGNORE:
297
+ raise InvalidError("Cannot set both `context_mount` and `ignore`")
298
+
299
+ def identity_context_mount_fn() -> Optional[_Mount]:
300
+ return context_mount
301
+
302
+ return identity_context_mount_fn
303
+ elif ignore is AUTO_DOCKERIGNORE:
304
+
305
+ def auto_created_context_mount_fn() -> Optional[_Mount]:
306
+ context_dir = Path.cwd()
307
+ dockerignore_file = find_dockerignore_file(context_dir, dockerfile_path)
308
+ ignore_fn = (
309
+ FilePatternMatcher(*dockerignore_file.read_text("utf8").splitlines())
310
+ if dockerignore_file
311
+ else _ignore_fn(())
312
+ )
313
+
314
+ cmds = dockerfile_path.read_text("utf8").splitlines() if dockerfile_path else dockerfile_cmds
315
+ return _create_context_mount(cmds, ignore_fn=ignore_fn, context_dir=context_dir)
316
+
317
+ return auto_created_context_mount_fn
318
+
319
+ def auto_created_context_mount_fn() -> Optional[_Mount]:
320
+ # use COPY commands and ignore patterns to construct implicit context mount
321
+ cmds = dockerfile_path.read_text("utf8").splitlines() if dockerfile_path else dockerfile_cmds
322
+ return _create_context_mount(cmds, ignore_fn=_ignore_fn(ignore), context_dir=Path.cwd())
323
+
324
+ return auto_created_context_mount_fn
325
+
326
+
205
327
  class _ImageRegistryConfig:
206
328
  """mdmd:hidden"""
207
329
 
208
330
  def __init__(
209
331
  self,
210
332
  # TODO: change to _PUBLIC after worker starts handling it.
211
- registry_auth_type: int = api_pb2.REGISTRY_AUTH_TYPE_UNSPECIFIED,
333
+ registry_auth_type: "api_pb2.RegistryAuthType.ValueType" = api_pb2.REGISTRY_AUTH_TYPE_UNSPECIFIED,
212
334
  secret: Optional[_Secret] = None,
213
335
  ):
214
336
  self.registry_auth_type = registry_auth_type
@@ -217,53 +339,160 @@ class _ImageRegistryConfig:
217
339
  def get_proto(self) -> api_pb2.ImageRegistryConfig:
218
340
  return api_pb2.ImageRegistryConfig(
219
341
  registry_auth_type=self.registry_auth_type,
220
- secret_id=(self.secret.object_id if self.secret else None),
342
+ secret_id=(self.secret.object_id if self.secret else ""),
221
343
  )
222
344
 
223
345
 
224
346
  @dataclass
225
347
  class DockerfileSpec:
226
348
  # Ideally we would use field() with default_factory=, but doesn't work with synchronicity type-stub gen
227
- commands: List[str]
228
- context_files: Dict[str, str]
349
+ commands: list[str]
350
+ context_files: dict[str, str]
351
+
352
+
353
+ async def _image_await_build_result(image_id: str, client: _Client) -> api_pb2.ImageJoinStreamingResponse:
354
+ last_entry_id: str = ""
355
+ result_response: Optional[api_pb2.ImageJoinStreamingResponse] = None
356
+
357
+ async def join():
358
+ nonlocal last_entry_id, result_response
359
+
360
+ request = api_pb2.ImageJoinStreamingRequest(image_id=image_id, timeout=55, last_entry_id=last_entry_id)
361
+ async for response in client.stub.ImageJoinStreaming.unary_stream(request):
362
+ if response.entry_id:
363
+ last_entry_id = response.entry_id
364
+ if response.result.status:
365
+ result_response = response
366
+ # can't return yet, since there may still be logs streaming back in subsequent responses
367
+ for task_log in response.task_logs:
368
+ if task_log.task_progress.pos or task_log.task_progress.len:
369
+ assert task_log.task_progress.progress_type == api_pb2.IMAGE_SNAPSHOT_UPLOAD
370
+ if output_mgr := _get_output_manager():
371
+ output_mgr.update_snapshot_progress(image_id, task_log.task_progress)
372
+ elif task_log.data:
373
+ if output_mgr := _get_output_manager():
374
+ await output_mgr.put_log_content(task_log)
375
+ if output_mgr := _get_output_manager():
376
+ output_mgr.flush_lines()
377
+
378
+ # Handle up to n exceptions while fetching logs
379
+ retry_count = 0
380
+ while result_response is None:
381
+ try:
382
+ await join()
383
+ except (StreamTerminatedError, GRPCError) as exc:
384
+ if isinstance(exc, GRPCError) and exc.status not in RETRYABLE_GRPC_STATUS_CODES:
385
+ raise exc
386
+ retry_count += 1
387
+ if retry_count >= 3:
388
+ raise exc
389
+ return result_response
229
390
 
230
391
 
231
392
  class _Image(_Object, type_prefix="im"):
232
393
  """Base class for container images to run functions in.
233
394
 
234
395
  Do not construct this class directly; instead use one of its static factory methods,
235
- such as `modal.Image.debian_slim`, `modal.Image.from_registry`, or `modal.Image.conda`.
396
+ such as `modal.Image.debian_slim`, `modal.Image.from_registry`, or `modal.Image.micromamba`.
236
397
  """
237
398
 
238
399
  force_build: bool
239
- inside_exceptions: List[Exception]
400
+ inside_exceptions: list[Exception]
401
+ _serve_mounts: frozenset[_Mount] # used for mounts watching in `modal serve`
402
+ _deferred_mounts: Sequence[
403
+ _Mount
404
+ ] # added as mounts on any container referencing the Image, see `def _mount_layers`
405
+ _metadata: Optional[api_pb2.ImageMetadata] = None # set on hydration, private for now
240
406
 
241
407
  def _initialize_from_empty(self):
242
408
  self.inside_exceptions = []
243
-
244
- def _hydrate_metadata(self, message: Optional[Message]):
245
- env_image_id = config.get("image_id")
409
+ self._serve_mounts = frozenset()
410
+ self._deferred_mounts = ()
411
+ self.force_build = False
412
+
413
+ def _initialize_from_other(self, other: "_Image"):
414
+ # used by .clone()
415
+ self.inside_exceptions = other.inside_exceptions
416
+ self.force_build = other.force_build
417
+ self._serve_mounts = other._serve_mounts
418
+ self._deferred_mounts = other._deferred_mounts
419
+
420
+ def _hydrate_metadata(self, metadata: Optional[Message]):
421
+ env_image_id = config.get("image_id") # set as an env var in containers
246
422
  if env_image_id == self.object_id:
247
423
  for exc in self.inside_exceptions:
424
+ # This raises exceptions from `with image.imports()` blocks
425
+ # if the hydrated image is the one used by the container
248
426
  raise exc
249
427
 
428
+ if metadata:
429
+ assert isinstance(metadata, api_pb2.ImageMetadata)
430
+ self._metadata = metadata
431
+
432
+ def _add_mount_layer_or_copy(self, mount: _Mount, copy: bool = False):
433
+ if copy:
434
+ return self.copy_mount(mount, remote_path="/")
435
+
436
+ base_image = self
437
+
438
+ async def _load(self2: "_Image", resolver: Resolver, existing_object_id: Optional[str]):
439
+ self2._hydrate_from_other(base_image) # same image id as base image as long as it's lazy
440
+ self2._deferred_mounts = tuple(base_image._deferred_mounts) + (mount,)
441
+ self2._serve_mounts = base_image._serve_mounts | ({mount} if mount.is_local() else set())
442
+
443
+ return _Image._from_loader(_load, "Image(local files)", deps=lambda: [base_image, mount])
444
+
445
+ @property
446
+ def _mount_layers(self) -> typing.Sequence[_Mount]:
447
+ """Non-evaluated mount layers on the image
448
+
449
+ When the image is used by a Modal container, these mounts need to be attached as well to
450
+ represent the full image content, as they haven't yet been represented as a layer in the
451
+ image.
452
+
453
+ When the image is used as a base image for a new layer (that is not itself a mount layer)
454
+ these mounts need to first be inserted as a copy operation (.copy_mount) into the image.
455
+ """
456
+ return self._deferred_mounts
457
+
458
+ def _assert_no_mount_layers(self):
459
+ if self._mount_layers:
460
+ raise InvalidError(
461
+ "An image tried to run a build step after using `image.add_local_*` to include local files.\n"
462
+ "\n"
463
+ "Run `image.add_local_*` commands last in your image build to avoid rebuilding images with every local "
464
+ "file change. Modal will then add these files to containers on startup instead, saving build time.\n"
465
+ "If you need to run other build steps after adding local files, set `copy=True` to copy the files "
466
+ "directly into the image, at the expense of some added build time.\n"
467
+ "\n"
468
+ "Example:\n"
469
+ "\n"
470
+ "my_image = (\n"
471
+ " Image.debian_slim()\n"
472
+ ' .add_local_file("data.json", copy=True)\n'
473
+ ' .run_commands("python -m mypak") # this now works!\n'
474
+ ")\n"
475
+ )
476
+
250
477
  @staticmethod
251
478
  def _from_args(
252
479
  *,
253
- base_images: Optional[Dict[str, "_Image"]] = None,
480
+ base_images: Optional[dict[str, "_Image"]] = None,
254
481
  dockerfile_function: Optional[Callable[[ImageBuilderVersion], DockerfileSpec]] = None,
255
482
  secrets: Optional[Sequence[_Secret]] = None,
256
483
  gpu_config: Optional[api_pb2.GPUConfig] = None,
257
484
  build_function: Optional["modal.functions._Function"] = None,
258
485
  build_function_input: Optional[api_pb2.FunctionInput] = None,
259
486
  image_registry_config: Optional[_ImageRegistryConfig] = None,
260
- context_mount: Optional[_Mount] = None,
487
+ context_mount_function: Optional[Callable[[], Optional[_Mount]]] = None,
261
488
  force_build: bool = False,
262
489
  # For internal use only.
263
- _namespace: int = api_pb2.DEPLOYMENT_NAMESPACE_WORKSPACE,
490
+ _namespace: "api_pb2.DeploymentNamespace.ValueType" = api_pb2.DEPLOYMENT_NAMESPACE_WORKSPACE,
491
+ _do_assert_no_mount_layers: bool = True,
264
492
  ):
265
493
  if base_images is None:
266
494
  base_images = {}
495
+
267
496
  if secrets is None:
268
497
  secrets = []
269
498
  if gpu_config is None:
@@ -278,18 +507,29 @@ class _Image(_Object, type_prefix="im"):
278
507
  if build_function and len(base_images) != 1:
279
508
  raise InvalidError("Cannot run a build function with multiple base images!")
280
509
 
281
- def _deps() -> List[_Object]:
282
- deps: List[_Object] = list(base_images.values()) + list(secrets)
510
+ def _deps() -> Sequence[_Object]:
511
+ deps = tuple(base_images.values()) + tuple(secrets)
283
512
  if build_function:
284
- deps.append(build_function)
285
- if context_mount:
286
- deps.append(context_mount)
287
- if image_registry_config.secret:
288
- deps.append(image_registry_config.secret)
513
+ deps += (build_function,)
514
+ if image_registry_config and image_registry_config.secret:
515
+ deps += (image_registry_config.secret,)
289
516
  return deps
290
517
 
291
518
  async def _load(self: _Image, resolver: Resolver, existing_object_id: Optional[str]):
292
- builder_version = _get_image_builder_version(resolver.client.image_builder_version)
519
+ context_mount = context_mount_function() if context_mount_function else None
520
+ if context_mount:
521
+ await resolver.load(context_mount)
522
+
523
+ if _do_assert_no_mount_layers:
524
+ for image in base_images.values():
525
+ # base images can't have
526
+ image._assert_no_mount_layers()
527
+
528
+ assert resolver.app_id # type narrowing
529
+ environment = await _get_environment_cached(resolver.environment_name or "", resolver.client)
530
+ # A bit hacky,but assume that the environment provides a valid builder version
531
+ image_builder_version = cast(ImageBuilderVersion, environment._settings.image_builder_version)
532
+ builder_version = _get_image_builder_version(image_builder_version)
293
533
 
294
534
  if dockerfile_function is None:
295
535
  dockerfile = DockerfileSpec(commands=[], context_files={})
@@ -301,7 +541,9 @@ class _Image(_Object, type_prefix="im"):
301
541
  "No commands were provided for the image — have you tried using modal.Image.debian_slim()?"
302
542
  )
303
543
  if dockerfile.commands and build_function:
304
- raise InvalidError("Cannot provide both a build function and Dockerfile commands!")
544
+ raise InvalidError(
545
+ "Cannot provide both build function and Dockerfile commands in the same image layer!"
546
+ )
305
547
 
306
548
  base_images_pb2s = [
307
549
  api_pb2.BaseImage(
@@ -318,8 +560,9 @@ class _Image(_Object, type_prefix="im"):
318
560
 
319
561
  if build_function:
320
562
  build_function_id = build_function.object_id
321
-
322
563
  globals = build_function._get_info().get_globals()
564
+ attrs = build_function._get_info().get_cls_var_attrs()
565
+ globals = {**globals, **attrs}
323
566
  filtered_globals = {}
324
567
  for k, v in globals.items():
325
568
  if isfunction(v):
@@ -329,21 +572,23 @@ class _Image(_Object, type_prefix="im"):
329
572
  except Exception:
330
573
  # Skip unserializable values for now.
331
574
  logger.warning(
332
- f"Skipping unserializable global variable {k} for {build_function._get_info().function_name}. Changes to this variable won't invalidate the image."
575
+ f"Skipping unserializable global variable {k} for "
576
+ f"{build_function._get_info().function_name}. "
577
+ "Changes to this variable won't invalidate the image."
333
578
  )
334
579
  continue
335
580
  filtered_globals[k] = v
336
581
 
337
582
  # Cloudpickle function serialization produces unstable values.
338
583
  # TODO: better way to filter out types that don't have a stable hash?
339
- build_function_globals = serialize(filtered_globals) if filtered_globals else None
584
+ build_function_globals = serialize(filtered_globals) if filtered_globals else b""
340
585
  _build_function = api_pb2.BuildFunction(
341
586
  definition=build_function.get_build_def(),
342
587
  globals=build_function_globals,
343
588
  input=build_function_input,
344
589
  )
345
590
  else:
346
- build_function_id = None
591
+ build_function_id = ""
347
592
  _build_function = None
348
593
 
349
594
  image_definition = api_pb2.Image(
@@ -352,7 +597,7 @@ class _Image(_Object, type_prefix="im"):
352
597
  context_files=context_file_pb2s,
353
598
  secret_ids=[secret.object_id for secret in secrets],
354
599
  gpu=bool(gpu_config.type), # Note: as of 2023-01-27, server still uses this
355
- context_mount_id=(context_mount.object_id if context_mount else None),
600
+ context_mount_id=(context_mount.object_id if context_mount else ""),
356
601
  gpu_config=gpu_config, # Note: as of 2023-01-27, server ignores this
357
602
  image_registry_config=image_registry_config.get_proto(),
358
603
  runtime=config.get("function_runtime"),
@@ -363,47 +608,32 @@ class _Image(_Object, type_prefix="im"):
363
608
  req = api_pb2.ImageGetOrCreateRequest(
364
609
  app_id=resolver.app_id,
365
610
  image=image_definition,
366
- existing_image_id=existing_object_id, # TODO: ignored
611
+ existing_image_id=existing_object_id or "", # TODO: ignored
367
612
  build_function_id=build_function_id,
368
613
  force_build=config.get("force_build") or force_build,
369
614
  namespace=_namespace,
370
615
  builder_version=builder_version,
616
+ # Failsafe mechanism to prevent inadvertant updates to the global images.
617
+ # Only admins can publish to the global namespace, but they have to additionally request it.
618
+ allow_global_deployment=os.environ.get("MODAL_IMAGE_ALLOW_GLOBAL_DEPLOYMENT", "0") == "1",
371
619
  )
372
620
  resp = await retry_transient_errors(resolver.client.stub.ImageGetOrCreate, req)
373
621
  image_id = resp.image_id
374
-
375
- logger.debug("Waiting for image %s" % image_id)
376
- last_entry_id: Optional[str] = None
377
- result: Optional[api_pb2.GenericResult] = None
378
-
379
- async def join():
380
- nonlocal last_entry_id, result
381
-
382
- request = api_pb2.ImageJoinStreamingRequest(image_id=image_id, timeout=55, last_entry_id=last_entry_id)
383
- async for response in unary_stream(resolver.client.stub.ImageJoinStreaming, request):
384
- if response.entry_id:
385
- last_entry_id = response.entry_id
386
- if response.result.status:
387
- result = response.result
388
- for task_log in response.task_logs:
389
- if task_log.task_progress.pos or task_log.task_progress.len:
390
- assert task_log.task_progress.progress_type == api_pb2.IMAGE_SNAPSHOT_UPLOAD
391
- resolver.image_snapshot_update(image_id, task_log.task_progress)
392
- elif task_log.data:
393
- await resolver.console_write(task_log)
394
- resolver.console_flush()
395
-
396
- # Handle up to n exceptions while fetching logs
397
- retry_count = 0
398
- while result is None:
399
- try:
400
- await join()
401
- except (StreamTerminatedError, GRPCError) as exc:
402
- if isinstance(exc, GRPCError) and exc.status not in RETRYABLE_GRPC_STATUS_CODES:
403
- raise exc
404
- retry_count += 1
405
- if retry_count >= 3:
406
- raise exc
622
+ result: api_pb2.GenericResult
623
+ metadata: Optional[api_pb2.ImageMetadata] = None
624
+
625
+ if resp.result.status:
626
+ # image already built
627
+ result = resp.result
628
+ if resp.HasField("metadata"):
629
+ metadata = resp.metadata
630
+ else:
631
+ # not built or in the process of building - wait for build
632
+ logger.debug("Waiting for image %s" % image_id)
633
+ resp = await _image_await_build_result(image_id, resolver.client)
634
+ result = resp.result
635
+ if resp.HasField("metadata"):
636
+ metadata = resp.metadata
407
637
 
408
638
  if result.status == api_pb2.GenericResult.GENERIC_STATUS_FAILURE:
409
639
  raise RemoteError(f"Image build for {image_id} failed with the exception:\n{result.exception}")
@@ -418,28 +648,19 @@ class _Image(_Object, type_prefix="im"):
418
648
  else:
419
649
  raise RemoteError("Unknown status %s!" % result.status)
420
650
 
421
- self._hydrate(image_id, resolver.client, None)
651
+ self._hydrate(image_id, resolver.client, metadata)
652
+ local_mounts = set()
653
+ for base in base_images.values():
654
+ local_mounts |= base._serve_mounts
655
+ if context_mount and context_mount.is_local():
656
+ local_mounts.add(context_mount)
657
+ self._serve_mounts = frozenset(local_mounts)
422
658
 
423
- rep = "Image()"
659
+ rep = f"Image({dockerfile_function})"
424
660
  obj = _Image._from_loader(_load, rep, deps=_deps)
425
661
  obj.force_build = force_build
426
662
  return obj
427
663
 
428
- def extend(self, **kwargs) -> "_Image":
429
- """Deprecated! This is a low-level method not intended to be part of the public API."""
430
- deprecation_warning(
431
- (2024, 3, 7),
432
- "`Image.extend` is deprecated; please use a higher-level method, such as `Image.dockerfile_commands`.",
433
- )
434
-
435
- def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
436
- return DockerfileSpec(
437
- commands=kwargs.pop("dockerfile_commands", []),
438
- context_files=kwargs.pop("context_files", {}),
439
- )
440
-
441
- return _Image._from_args(base_images={"base": self}, dockerfile_function=build_dockerfile, **kwargs)
442
-
443
664
  def copy_mount(self, mount: _Mount, remote_path: Union[str, Path] = ".") -> "_Image":
444
665
  """Copy the entire contents of a `modal.Mount` into an image.
445
666
  Useful when files only available locally are required during the image
@@ -465,16 +686,114 @@ class _Image(_Object, type_prefix="im"):
465
686
  return _Image._from_args(
466
687
  base_images={"base": self},
467
688
  dockerfile_function=build_dockerfile,
468
- context_mount=mount,
689
+ context_mount_function=lambda: mount,
690
+ )
691
+
692
+ def add_local_file(self, local_path: Union[str, Path], remote_path: str, *, copy: bool = False) -> "_Image":
693
+ """Adds a local file to the image at `remote_path` within the container
694
+
695
+ By default (`copy=False`), the files are added to containers on startup and are not built into the actual Image,
696
+ which speeds up deployment.
697
+
698
+ Set `copy=True` to copy the files into an Image layer at build time instead, similar to how
699
+ [`COPY`](https://docs.docker.com/engine/reference/builder/#copy) works in a `Dockerfile`.
700
+
701
+ copy=True can slow down iteration since it requires a rebuild of the Image and any subsequent
702
+ build steps whenever the included files change, but it is required if you want to run additional
703
+ build steps after this one.
704
+ """
705
+ if not PurePosixPath(remote_path).is_absolute():
706
+ # TODO(elias): implement relative to absolute resolution using image workdir metadata
707
+ # + make default remote_path="./"
708
+ # This requires deferring the Mount creation until after "self" (the base image) has been resolved
709
+ # so we know the workdir of the operation.
710
+ raise InvalidError("image.add_local_file() currently only supports absolute remote_path values")
711
+
712
+ if remote_path.endswith("/"):
713
+ remote_path = remote_path + Path(local_path).name
714
+
715
+ mount = _Mount._from_local_file(local_path, remote_path)
716
+ return self._add_mount_layer_or_copy(mount, copy=copy)
717
+
718
+ def add_local_dir(
719
+ self,
720
+ local_path: Union[str, Path],
721
+ remote_path: str,
722
+ *,
723
+ copy: bool = False,
724
+ # Predicate filter function for file exclusion, which should accept a filepath and return `True` for exclusion.
725
+ # Defaults to excluding no files. If a Sequence is provided, it will be converted to a FilePatternMatcher.
726
+ # Which follows dockerignore syntax.
727
+ ignore: Union[Sequence[str], Callable[[Path], bool]] = [],
728
+ ) -> "_Image":
729
+ """Adds a local directory's content to the image at `remote_path` within the container
730
+
731
+ By default (`copy=False`), the files are added to containers on startup and are not built into the actual Image,
732
+ which speeds up deployment.
733
+
734
+ Set `copy=True` to copy the files into an Image layer at build time instead, similar to how
735
+ [`COPY`](https://docs.docker.com/engine/reference/builder/#copy) works in a `Dockerfile`.
736
+
737
+ copy=True can slow down iteration since it requires a rebuild of the Image and any subsequent
738
+ build steps whenever the included files change, but it is required if you want to run additional
739
+ build steps after this one.
740
+
741
+ **Usage:**
742
+
743
+ ```python
744
+ from modal import FilePatternMatcher
745
+
746
+ image = modal.Image.debian_slim().add_local_dir(
747
+ "~/assets",
748
+ remote_path="/assets",
749
+ ignore=["*.venv"],
750
+ )
751
+
752
+ image = modal.Image.debian_slim().add_local_dir(
753
+ "~/assets",
754
+ remote_path="/assets",
755
+ ignore=lambda p: p.is_relative_to(".venv"),
756
+ )
757
+
758
+ image = modal.Image.debian_slim().add_local_dir(
759
+ "~/assets",
760
+ remote_path="/assets",
761
+ ignore=FilePatternMatcher("**/*.txt"),
762
+ )
763
+
764
+ # When including files is simpler than excluding them, you can use the `~` operator to invert the matcher.
765
+ image = modal.Image.debian_slim().add_local_dir(
766
+ "~/assets",
767
+ remote_path="/assets",
768
+ ignore=~FilePatternMatcher("**/*.py"),
769
+ )
770
+
771
+ # You can also read ignore patterns from a file.
772
+ image = modal.Image.debian_slim().add_local_dir(
773
+ "~/assets",
774
+ remote_path="/assets",
775
+ ignore=FilePatternMatcher.from_file("/path/to/ignorefile"),
469
776
  )
777
+ ```
778
+ """
779
+ if not PurePosixPath(remote_path).is_absolute():
780
+ # TODO(elias): implement relative to absolute resolution using image workdir metadata
781
+ # + make default remote_path="./"
782
+ raise InvalidError("image.add_local_dir() currently only supports absolute remote_path values")
783
+
784
+ mount = _Mount._add_local_dir(Path(local_path), PurePosixPath(remote_path), ignore=_ignore_fn(ignore))
785
+ return self._add_mount_layer_or_copy(mount, copy=copy)
470
786
 
471
787
  def copy_local_file(self, local_path: Union[str, Path], remote_path: Union[str, Path] = "./") -> "_Image":
472
788
  """Copy a file into the image as a part of building it.
473
789
 
474
- This works in a similar way to [`COPY`](https://docs.docker.com/engine/reference/builder/#copy) in a `Dockerfile`.
790
+ This works in a similar way to [`COPY`](https://docs.docker.com/engine/reference/builder/#copy)
791
+ works in a `Dockerfile`.
475
792
  """
793
+ deprecation_warning(
794
+ (2024, 1, 13), COPY_DEPRECATION_MESSAGE_PATTERN.format(replacement="image.add_local_file"), pending=True
795
+ )
476
796
  basename = str(Path(local_path).name)
477
- mount = _Mount.from_local_file(local_path, remote_path=f"/{basename}")
478
797
 
479
798
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
480
799
  return DockerfileSpec(commands=["FROM base", f"COPY {basename} {remote_path}"], context_files={})
@@ -482,15 +801,103 @@ class _Image(_Object, type_prefix="im"):
482
801
  return _Image._from_args(
483
802
  base_images={"base": self},
484
803
  dockerfile_function=build_dockerfile,
485
- context_mount=mount,
804
+ context_mount_function=lambda: _Mount._from_local_file(local_path, remote_path=f"/{basename}"),
486
805
  )
487
806
 
488
- def copy_local_dir(self, local_path: Union[str, Path], remote_path: Union[str, Path] = ".") -> "_Image":
807
+ def add_local_python_source(
808
+ self, *modules: str, copy: bool = False, ignore: Union[Sequence[str], Callable[[Path], bool]] = NON_PYTHON_FILES
809
+ ) -> "_Image":
810
+ """Adds locally available Python packages/modules to containers
811
+
812
+ Adds all files from the specified Python package or module to containers running the Image.
813
+
814
+ Packages are added to the `/root` directory of containers, which is on the `PYTHONPATH`
815
+ of any executed Modal Functions, enabling import of the module by that name.
816
+
817
+ By default (`copy=False`), the files are added to containers on startup and are not built into the actual Image,
818
+ which speeds up deployment.
819
+
820
+ Set `copy=True` to copy the files into an Image layer at build time instead. This can slow down iteration since
821
+ it requires a rebuild of the Image and any subsequent build steps whenever the included files change, but it is
822
+ required if you want to run additional build steps after this one.
823
+
824
+ **Note:** This excludes all dot-prefixed subdirectories or files and all `.pyc`/`__pycache__` files.
825
+ To add full directories with finer control, use `.add_local_dir()` instead and specify `/root` as
826
+ the destination directory.
827
+
828
+ By default only includes `.py`-files in the source modules. Set the `ignore` argument to a list of patterns
829
+ or a callable to override this behavior, e.g.:
830
+
831
+ ```py
832
+ # includes everything except data.json
833
+ modal.Image.debian_slim().add_local_python_source("mymodule", ignore=["data.json"])
834
+
835
+ # exclude large files
836
+ modal.Image.debian_slim().add_local_python_source(
837
+ "mymodule",
838
+ ignore=lambda p: p.stat().st_size > 1e9
839
+ )
840
+ ```
841
+ """
842
+ mount = _Mount._from_local_python_packages(*modules, ignore=ignore)
843
+ return self._add_mount_layer_or_copy(mount, copy=copy)
844
+
845
+ def copy_local_dir(
846
+ self,
847
+ local_path: Union[str, Path],
848
+ remote_path: Union[str, Path] = ".",
849
+ # Predicate filter function for file exclusion, which should accept a filepath and return `True` for exclusion.
850
+ # Defaults to excluding no files. If a Sequence is provided, it will be converted to a FilePatternMatcher.
851
+ # Which follows dockerignore syntax.
852
+ ignore: Union[Sequence[str], Callable[[Path], bool]] = [],
853
+ ) -> "_Image":
489
854
  """Copy a directory into the image as a part of building the image.
490
855
 
491
- This works in a similar way to [`COPY`](https://docs.docker.com/engine/reference/builder/#copy) in a `Dockerfile`.
856
+ This works in a similar way to [`COPY`](https://docs.docker.com/engine/reference/builder/#copy)
857
+ works in a `Dockerfile`.
858
+
859
+ **Usage:**
860
+
861
+ ```python
862
+ from pathlib import Path
863
+ from modal import FilePatternMatcher
864
+
865
+ image = modal.Image.debian_slim().copy_local_dir(
866
+ "~/assets",
867
+ remote_path="/assets",
868
+ ignore=["**/*.venv"],
869
+ )
870
+
871
+ image = modal.Image.debian_slim().copy_local_dir(
872
+ "~/assets",
873
+ remote_path="/assets",
874
+ ignore=lambda p: p.is_relative_to(".venv"),
875
+ )
876
+
877
+ image = modal.Image.debian_slim().copy_local_dir(
878
+ "~/assets",
879
+ remote_path="/assets",
880
+ ignore=FilePatternMatcher("**/*.txt"),
881
+ )
882
+
883
+ # When including files is simpler than excluding them, you can use the `~` operator to invert the matcher.
884
+ image = modal.Image.debian_slim().copy_local_dir(
885
+ "~/assets",
886
+ remote_path="/assets",
887
+ ignore=~FilePatternMatcher("**/*.py"),
888
+ )
889
+
890
+ # You can also read ignore patterns from a file.
891
+ image = modal.Image.debian_slim().copy_local_dir(
892
+ "~/assets",
893
+ remote_path="/assets",
894
+ ignore=FilePatternMatcher.from_file("/path/to/ignorefile"),
895
+ )
896
+ ```
492
897
  """
493
- mount = _Mount.from_local_dir(local_path, remote_path="/")
898
+ deprecation_warning(
899
+ (2024, 1, 13), COPY_DEPRECATION_MESSAGE_PATTERN.format(replacement="image.add_local_dir"), pending=True
900
+ )
494
901
 
495
902
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
496
903
  return DockerfileSpec(commands=["FROM base", f"COPY . {remote_path}"], context_files={})
@@ -498,36 +905,78 @@ class _Image(_Object, type_prefix="im"):
498
905
  return _Image._from_args(
499
906
  base_images={"base": self},
500
907
  dockerfile_function=build_dockerfile,
501
- context_mount=mount,
908
+ context_mount_function=lambda: _Mount._add_local_dir(
909
+ Path(local_path), PurePosixPath("/"), ignore=_ignore_fn(ignore)
910
+ ),
502
911
  )
503
912
 
913
+ @staticmethod
914
+ async def from_id(image_id: str, client: Optional[_Client] = None) -> "_Image":
915
+ """Construct an Image from an id and look up the Image result.
916
+
917
+ The ID of an Image object can be accessed using `.object_id`.
918
+ """
919
+ if client is None:
920
+ client = await _Client.from_env()
921
+
922
+ async def _load(self: _Image, resolver: Resolver, existing_object_id: Optional[str]):
923
+ resp = await retry_transient_errors(client.stub.ImageFromId, api_pb2.ImageFromIdRequest(image_id=image_id))
924
+ self._hydrate(resp.image_id, resolver.client, resp.metadata)
925
+
926
+ rep = "Image()"
927
+ obj = _Image._from_loader(_load, rep)
928
+
929
+ return obj
930
+
504
931
  def pip_install(
505
932
  self,
506
- *packages: Union[str, List[str]], # A list of Python packages, eg. ["numpy", "matplotlib>=3.5.0"]
933
+ *packages: Union[str, list[str]], # A list of Python packages, eg. ["numpy", "matplotlib>=3.5.0"]
507
934
  find_links: Optional[str] = None, # Passes -f (--find-links) pip install
508
935
  index_url: Optional[str] = None, # Passes -i (--index-url) to pip install
509
936
  extra_index_url: Optional[str] = None, # Passes --extra-index-url to pip install
510
937
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
511
- force_build: bool = False,
938
+ extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
939
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
512
940
  secrets: Sequence[_Secret] = [],
513
941
  gpu: GPU_T = None,
514
942
  ) -> "_Image":
515
943
  """Install a list of Python packages using pip.
516
944
 
517
- **Example**
945
+ **Examples**
518
946
 
947
+ Simple installation:
519
948
  ```python
520
949
  image = modal.Image.debian_slim().pip_install("click", "httpx~=0.23.3")
521
950
  ```
951
+
952
+ More complex installation:
953
+ ```python
954
+ image = (
955
+ modal.Image.from_registry(
956
+ "nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.11"
957
+ )
958
+ .pip_install(
959
+ "ninja",
960
+ "packaging",
961
+ "wheel",
962
+ "transformers==4.40.2",
963
+ )
964
+ .pip_install(
965
+ "flash-attn==2.5.8", extra_options="--no-build-isolation"
966
+ )
967
+ )
968
+ ```
522
969
  """
523
970
  pkgs = _flatten_str_args("pip_install", "packages", packages)
524
971
  if not pkgs:
525
972
  return self
526
973
 
527
974
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
528
- package_args = " ".join(shlex.quote(pkg) for pkg in sorted(pkgs))
529
- extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre)
975
+ package_args = shlex.join(sorted(pkgs))
976
+ extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre, extra_options)
530
977
  commands = ["FROM base", f"RUN python -m pip install {package_args} {extra_args}"]
978
+ if not _validate_packages(pkgs):
979
+ _warn_invalid_packages(commands[-1].split("RUN ")[-1])
531
980
  if version > "2023.12": # Back-compat for legacy trailing space with empty extra_args
532
981
  commands = [cmd.strip() for cmd in commands]
533
982
  return DockerfileSpec(commands=commands, context_files={})
@@ -549,9 +998,10 @@ class _Image(_Object, type_prefix="im"):
549
998
  index_url: Optional[str] = None, # Passes -i (--index-url) to pip install
550
999
  extra_index_url: Optional[str] = None, # Passes --extra-index-url to pip install
551
1000
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
1001
+ extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
552
1002
  gpu: GPU_T = None,
553
1003
  secrets: Sequence[_Secret] = [],
554
- force_build: bool = False,
1004
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
555
1005
  ) -> "_Image":
556
1006
  """
557
1007
  Install a list of Python packages from private git repositories using pip.
@@ -586,7 +1036,8 @@ class _Image(_Object, type_prefix="im"):
586
1036
  """
587
1037
  if not secrets:
588
1038
  raise InvalidError(
589
- "No secrets provided to function. Installing private packages requires tokens to be passed via modal.Secret objects."
1039
+ "No secrets provided to function. "
1040
+ "Installing private packages requires tokens to be passed via modal.Secret objects."
590
1041
  )
591
1042
 
592
1043
  invalid_repos = []
@@ -614,14 +1065,16 @@ class _Image(_Object, type_prefix="im"):
614
1065
  commands = ["FROM base"]
615
1066
  if any(r.startswith("github") for r in repositories):
616
1067
  commands.append(
617
- f"RUN bash -c \"[[ -v GITHUB_TOKEN ]] || (echo 'GITHUB_TOKEN env var not set by provided modal.Secret(s): {secret_names}' && exit 1)\"",
1068
+ 'RUN bash -c "[[ -v GITHUB_TOKEN ]] || '
1069
+ f"(echo 'GITHUB_TOKEN env var not set by provided modal.Secret(s): {secret_names}' && exit 1)\"",
618
1070
  )
619
1071
  if any(r.startswith("gitlab") for r in repositories):
620
1072
  commands.append(
621
- f"RUN bash -c \"[[ -v GITLAB_TOKEN ]] || (echo 'GITLAB_TOKEN env var not set by provided modal.Secret(s): {secret_names}' && exit 1)\"",
1073
+ 'RUN bash -c "[[ -v GITLAB_TOKEN ]] || '
1074
+ f"(echo 'GITLAB_TOKEN env var not set by provided modal.Secret(s): {secret_names}' && exit 1)\"",
622
1075
  )
623
1076
 
624
- extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre)
1077
+ extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre, extra_options)
625
1078
  commands.extend(["RUN apt-get update && apt-get install -y git"])
626
1079
  commands.extend([f'RUN python3 -m pip install "{url}" {extra_args}' for url in install_urls])
627
1080
  if version > "2023.12": # Back-compat for legacy trailing space with empty extra_args
@@ -646,7 +1099,8 @@ class _Image(_Object, type_prefix="im"):
646
1099
  index_url: Optional[str] = None, # Passes -i (--index-url) to pip install
647
1100
  extra_index_url: Optional[str] = None, # Passes --extra-index-url to pip install
648
1101
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
649
- force_build: bool = False,
1102
+ extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
1103
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
650
1104
  secrets: Sequence[_Secret] = [],
651
1105
  gpu: GPU_T = None,
652
1106
  ) -> "_Image":
@@ -658,7 +1112,7 @@ class _Image(_Object, type_prefix="im"):
658
1112
 
659
1113
  null_find_links_arg = " " if version == "2023.12" else ""
660
1114
  find_links_arg = f" -f {find_links}" if find_links else null_find_links_arg
661
- extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre)
1115
+ extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre, extra_options)
662
1116
 
663
1117
  commands = [
664
1118
  "FROM base",
@@ -680,13 +1134,14 @@ class _Image(_Object, type_prefix="im"):
680
1134
  def pip_install_from_pyproject(
681
1135
  self,
682
1136
  pyproject_toml: str,
683
- optional_dependencies: List[str] = [],
1137
+ optional_dependencies: list[str] = [],
684
1138
  *,
685
1139
  find_links: Optional[str] = None, # Passes -f (--find-links) pip install
686
1140
  index_url: Optional[str] = None, # Passes -i (--index-url) to pip install
687
1141
  extra_index_url: Optional[str] = None, # Passes --extra-index-url to pip install
688
1142
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
689
- force_build: bool = False,
1143
+ extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
1144
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
690
1145
  secrets: Sequence[_Secret] = [],
691
1146
  gpu: GPU_T = None,
692
1147
  ) -> "_Image":
@@ -708,8 +1163,10 @@ class _Image(_Object, type_prefix="im"):
708
1163
  if "project" not in config or "dependencies" not in config["project"]:
709
1164
  msg = (
710
1165
  "No [project.dependencies] section in pyproject.toml file. "
711
- "If your pyproject.toml instead declares [tool.poetry.dependencies], use `Image.poetry_install_from_file()`. "
712
- "See https://packaging.python.org/en/latest/guides/writing-pyproject-toml for further file format guidelines."
1166
+ "If your pyproject.toml instead declares [tool.poetry.dependencies], "
1167
+ "use `Image.poetry_install_from_file()`. "
1168
+ "See https://packaging.python.org/en/latest/guides/writing-pyproject-toml "
1169
+ "for further file format guidelines."
713
1170
  )
714
1171
  raise ValueError(msg)
715
1172
  else:
@@ -720,8 +1177,8 @@ class _Image(_Object, type_prefix="im"):
720
1177
  if dep_group_name in optionals:
721
1178
  dependencies.extend(optionals[dep_group_name])
722
1179
 
723
- extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre)
724
- package_args = " ".join(shlex.quote(pkg) for pkg in sorted(dependencies))
1180
+ extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre, extra_options)
1181
+ package_args = shlex.join(sorted(dependencies))
725
1182
  commands = ["FROM base", f"RUN python -m pip install {package_args} {extra_args}"]
726
1183
  if version > "2023.12": # Back-compat for legacy trailing space
727
1184
  commands = [cmd.strip() for cmd in commands]
@@ -745,13 +1202,13 @@ class _Image(_Object, type_prefix="im"):
745
1202
  ignore_lockfile: bool = False,
746
1203
  # If set to True, use old installer. See https://github.com/python-poetry/poetry/issues/3336
747
1204
  old_installer: bool = False,
748
- force_build: bool = False,
1205
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
749
1206
  # Selected optional dependency groups to install (See https://python-poetry.org/docs/cli/#install)
750
- with_: List[str] = [],
1207
+ with_: list[str] = [],
751
1208
  # Selected optional dependency groups to exclude (See https://python-poetry.org/docs/cli/#install)
752
- without: List[str] = [],
1209
+ without: list[str] = [],
753
1210
  # Only install dependency groups specifed in this list.
754
- only: List[str] = [],
1211
+ only: list[str] = [],
755
1212
  *,
756
1213
  secrets: Sequence[_Secret] = [],
757
1214
  gpu: GPU_T = None,
@@ -761,8 +1218,8 @@ class _Image(_Object, type_prefix="im"):
761
1218
  If not provided as argument the path to the lockfile is inferred. However, the
762
1219
  file has to exist, unless `ignore_lockfile` is set to `True`.
763
1220
 
764
- Note that the root project of the poetry project is not installed,
765
- only the dependencies. For including local packages see `modal.Mount.from_local_python_packages`
1221
+ Note that the root project of the poetry project is not installed, only the dependencies.
1222
+ For including local python source files see `add_local_python_source`
766
1223
  """
767
1224
 
768
1225
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
@@ -779,7 +1236,8 @@ class _Image(_Object, type_prefix="im"):
779
1236
  p = Path(poetry_pyproject_toml).parent / "poetry.lock"
780
1237
  if not p.exists():
781
1238
  raise NotFoundError(
782
- f"poetry.lock not found at inferred location: {p.absolute()}. If a lockfile is not needed, `ignore_lockfile=True` can be used."
1239
+ f"poetry.lock not found at inferred location: {p.absolute()}. "
1240
+ "If a lockfile is not needed, `ignore_lockfile=True` can be used."
783
1241
  )
784
1242
  poetry_lockfile = p.as_posix()
785
1243
  context_files["/.poetry.lock"] = poetry_lockfile
@@ -818,15 +1276,63 @@ class _Image(_Object, type_prefix="im"):
818
1276
 
819
1277
  def dockerfile_commands(
820
1278
  self,
821
- *dockerfile_commands: Union[str, List[str]],
822
- context_files: Dict[str, str] = {},
1279
+ *dockerfile_commands: Union[str, list[str]],
1280
+ context_files: dict[str, str] = {},
823
1281
  secrets: Sequence[_Secret] = [],
824
1282
  gpu: GPU_T = None,
825
1283
  # modal.Mount with local files to supply as build context for COPY commands
826
1284
  context_mount: Optional[_Mount] = None,
827
- force_build: bool = False,
1285
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1286
+ ignore: Union[Sequence[str], Callable[[Path], bool]] = AUTO_DOCKERIGNORE,
828
1287
  ) -> "_Image":
829
- """Extend an image with arbitrary Dockerfile-like commands."""
1288
+ """
1289
+ Extend an image with arbitrary Dockerfile-like commands.
1290
+
1291
+ **Usage:**
1292
+
1293
+ ```python
1294
+ from modal import FilePatternMatcher
1295
+
1296
+ # By default a .dockerignore file is used if present in the current working directory
1297
+ image = modal.Image.debian_slim().dockerfile_commands(
1298
+ ["COPY data /data"],
1299
+ )
1300
+
1301
+ image = modal.Image.debian_slim().dockerfile_commands(
1302
+ ["COPY data /data"],
1303
+ ignore=["*.venv"],
1304
+ )
1305
+
1306
+ image = modal.Image.debian_slim().dockerfile_commands(
1307
+ ["COPY data /data"],
1308
+ ignore=lambda p: p.is_relative_to(".venv"),
1309
+ )
1310
+
1311
+ image = modal.Image.debian_slim().dockerfile_commands(
1312
+ ["COPY data /data"],
1313
+ ignore=FilePatternMatcher("**/*.txt"),
1314
+ )
1315
+
1316
+ # When including files is simpler than excluding them, you can use the `~` operator to invert the matcher.
1317
+ image = modal.Image.debian_slim().dockerfile_commands(
1318
+ ["COPY data /data"],
1319
+ ignore=~FilePatternMatcher("**/*.py"),
1320
+ )
1321
+
1322
+ # You can also read ignore patterns from a file.
1323
+ image = modal.Image.debian_slim().dockerfile_commands(
1324
+ ["COPY data /data"],
1325
+ ignore=FilePatternMatcher.from_file("/path/to/dockerignore"),
1326
+ )
1327
+ ```
1328
+ """
1329
+ if context_mount is not None:
1330
+ deprecation_warning(
1331
+ (2025, 1, 13),
1332
+ "`context_mount` is deprecated."
1333
+ + " Files are now automatically added to the build context based on the commands.",
1334
+ pending=True,
1335
+ )
830
1336
  cmds = _flatten_str_args("dockerfile_commands", "dockerfile_commands", dockerfile_commands)
831
1337
  if not cmds:
832
1338
  return self
@@ -838,17 +1344,41 @@ class _Image(_Object, type_prefix="im"):
838
1344
  base_images={"base": self},
839
1345
  dockerfile_function=build_dockerfile,
840
1346
  secrets=secrets,
841
- gpu_config=parse_gpu_config(gpu, raise_on_true=False),
842
- context_mount=context_mount,
1347
+ gpu_config=parse_gpu_config(gpu),
1348
+ context_mount_function=_create_context_mount_function(
1349
+ ignore=ignore, dockerfile_cmds=cmds, context_mount=context_mount
1350
+ ),
843
1351
  force_build=self.force_build or force_build,
844
1352
  )
845
1353
 
1354
+ def entrypoint(
1355
+ self,
1356
+ entrypoint_commands: list[str],
1357
+ ) -> "_Image":
1358
+ """Set the entrypoint for the image."""
1359
+ args_str = _flatten_str_args("entrypoint", "entrypoint_files", entrypoint_commands)
1360
+ args_str = '"' + '", "'.join(args_str) + '"' if args_str else ""
1361
+ dockerfile_cmd = f"ENTRYPOINT [{args_str}]"
1362
+
1363
+ return self.dockerfile_commands(dockerfile_cmd)
1364
+
1365
+ def shell(
1366
+ self,
1367
+ shell_commands: list[str],
1368
+ ) -> "_Image":
1369
+ """Overwrite default shell for the image."""
1370
+ args_str = _flatten_str_args("shell", "shell_commands", shell_commands)
1371
+ args_str = '"' + '", "'.join(args_str) + '"' if args_str else ""
1372
+ dockerfile_cmd = f"SHELL [{args_str}]"
1373
+
1374
+ return self.dockerfile_commands(dockerfile_cmd)
1375
+
846
1376
  def run_commands(
847
1377
  self,
848
- *commands: Union[str, List[str]],
1378
+ *commands: Union[str, list[str]],
849
1379
  secrets: Sequence[_Secret] = [],
850
1380
  gpu: GPU_T = None,
851
- force_build: bool = False,
1381
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
852
1382
  ) -> "_Image":
853
1383
  """Extend an image with a list of shell commands to run."""
854
1384
  cmds = _flatten_str_args("run_commands", "commands", commands)
@@ -862,164 +1392,63 @@ class _Image(_Object, type_prefix="im"):
862
1392
  base_images={"base": self},
863
1393
  dockerfile_function=build_dockerfile,
864
1394
  secrets=secrets,
865
- gpu_config=parse_gpu_config(gpu, raise_on_true=False),
1395
+ gpu_config=parse_gpu_config(gpu),
866
1396
  force_build=self.force_build or force_build,
867
1397
  )
868
1398
 
869
1399
  @staticmethod
870
- def conda(python_version: Optional[str] = None, force_build: bool = False) -> "_Image":
871
- """
872
- A Conda base image, using miniconda3 and derived from the official Docker Hub image.
873
- In most cases, using [`Image.micromamba()`](/docs/reference/modal.Image#micromamba) with [`micromamba_install`](/docs/reference/modal.Image#micromamba_install) is recommended over `Image.conda()`, as it leads to significantly faster image build times.
874
- """
875
-
876
- def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
877
- nonlocal python_version
878
- if version == "2023.12" and python_version is None:
879
- python_version = "3.9" # Backcompat for old hardcoded default param
880
- validated_python_version = _validate_python_version(python_version)
881
- debian_codename = _dockerhub_debian_codename(version)
882
- requirements_path = _get_modal_requirements_path(version, python_version)
883
- context_files = {CONTAINER_REQUIREMENTS_PATH: requirements_path}
884
-
885
- # Doesn't use the official continuumio/miniconda3 image as a base. That image has maintenance
886
- # issues (https://github.com/ContinuumIO/docker-images/issues) and building our own is more flexible.
887
- conda_install_script = "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh"
888
- commands = [
889
- f"FROM debian:{debian_codename}", # the -slim images lack files required by Conda.
890
- # Temporarily add utility packages for conda installation.
891
- "RUN apt-get --quiet update && apt-get --quiet --yes install curl bzip2 \\",
892
- f"&& curl --silent --show-error --location {conda_install_script} --output /tmp/miniconda.sh \\",
893
- # Install miniconda to a filesystem location on the $PATH of Modal container tasks.
894
- # -b = install in batch mode w/o manual intervention.
895
- # -f = allow install prefix to already exist.
896
- # -p = the install prefix location.
897
- "&& bash /tmp/miniconda.sh -bfp /usr/local \\ ",
898
- "&& rm -rf /tmp/miniconda.sh",
899
- # Biggest and most stable community-led Conda channel.
900
- "RUN conda config --add channels conda-forge \\ ",
901
- # softlinking can put conda in a broken state, surfacing error on uninstall like:
902
- # `No such device or address: '/usr/local/lib/libz.so' -> '/usr/local/lib/libz.so.c~'`
903
- "&& conda config --set allow_softlinks false \\ ",
904
- # Install requested Python version from conda-forge channel; base debian image has only 3.7.
905
- f"&& conda install --yes --channel conda-forge python={validated_python_version} \\ ",
906
- "&& conda update conda \\ ",
907
- # Remove now unneeded packages and files.
908
- "&& apt-get --quiet --yes remove curl bzip2 \\ ",
909
- "&& apt-get --quiet --yes autoremove \\ ",
910
- "&& apt-get autoclean \\ ",
911
- "&& rm -rf /var/lib/apt/lists/* /var/log/dpkg.log \\ ",
912
- "&& conda clean --all --yes",
913
- # Setup .bashrc for conda.
914
- "RUN conda init bash --verbose",
915
- f"COPY {CONTAINER_REQUIREMENTS_PATH} {CONTAINER_REQUIREMENTS_PATH}",
916
- # .bashrc is explicitly sourced because RUN is a non-login shell and doesn't run bash.
917
- "RUN . /root/.bashrc && conda activate base \\ ",
918
- # Ensure that packaging tools are up to date and install client dependenices
919
- f"&& python -m pip install --upgrade {'pip' if version == '2023.12' else 'pip wheel'} \\ ",
920
- f"&& python -m {_get_modal_requirements_command(version)}",
921
- ]
922
- if version > "2023.12":
923
- commands.append(f"RUN rm {CONTAINER_REQUIREMENTS_PATH}")
924
- return DockerfileSpec(commands=commands, context_files=context_files)
925
-
926
- base = _Image._from_args(
927
- dockerfile_function=build_dockerfile,
928
- force_build=force_build,
929
- _namespace=api_pb2.DEPLOYMENT_NAMESPACE_GLOBAL,
930
- )
931
-
932
- return base.dockerfile_commands(
933
- [
934
- "ENV CONDA_EXE=/usr/local/bin/conda",
935
- "ENV CONDA_PREFIX=/usr/local",
936
- "ENV CONDA_PROMPT_MODIFIER=(base)",
937
- "ENV CONDA_SHLVL=1",
938
- "ENV CONDA_PYTHON_EXE=/usr/local/bin/python",
939
- "ENV CONDA_DEFAULT_ENV=base",
940
- ]
1400
+ def conda(python_version: Optional[str] = None, force_build: bool = False):
1401
+ """mdmd:hidden"""
1402
+ message = (
1403
+ "`Image.conda` is deprecated."
1404
+ " Please use the faster and more reliable `Image.micromamba` constructor instead."
941
1405
  )
1406
+ deprecation_error((2024, 5, 2), message)
942
1407
 
943
1408
  def conda_install(
944
1409
  self,
945
- *packages: Union[str, List[str]], # A list of Python packages, eg. ["numpy", "matplotlib>=3.5.0"]
946
- channels: List[str] = [], # A list of Conda channels, eg. ["conda-forge", "nvidia"]
947
- force_build: bool = False,
1410
+ *packages: Union[str, list[str]], # A list of Python packages, eg. ["numpy", "matplotlib>=3.5.0"]
1411
+ channels: list[str] = [], # A list of Conda channels, eg. ["conda-forge", "nvidia"]
1412
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
948
1413
  secrets: Sequence[_Secret] = [],
949
1414
  gpu: GPU_T = None,
950
- ) -> "_Image":
951
- """Install a list of additional packages using Conda. Note that in most cases, using [`Image.micromamba()`](/docs/reference/modal.Image#micromamba) with [`micromamba_install`](/docs/reference/modal.Image#micromamba_install)
952
- is recommended over `conda_install`, as it leads to significantly faster image build times."""
953
-
954
- pkgs = _flatten_str_args("conda_install", "packages", packages)
955
- if not pkgs:
956
- return self
957
-
958
- def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
959
- package_args = " ".join(shlex.quote(pkg) for pkg in pkgs)
960
- channel_args = "".join(f" -c {channel}" for channel in channels)
961
-
962
- commands = [
963
- "FROM base",
964
- f"RUN conda install {package_args}{channel_args} --yes \\ ",
965
- "&& conda clean --yes --index-cache --tarballs --tempfiles --logfiles",
966
- ]
967
- return DockerfileSpec(commands=commands, context_files={})
968
-
969
- return _Image._from_args(
970
- base_images={"base": self},
971
- dockerfile_function=build_dockerfile,
972
- force_build=self.force_build or force_build,
973
- secrets=secrets,
974
- gpu_config=parse_gpu_config(gpu),
1415
+ ):
1416
+ """mdmd:hidden"""
1417
+ message = (
1418
+ "`Image.conda_install` is deprecated."
1419
+ " Please use the faster and more reliable `Image.micromamba_install` instead."
975
1420
  )
1421
+ deprecation_error((2024, 5, 2), message)
976
1422
 
977
1423
  def conda_update_from_environment(
978
1424
  self,
979
1425
  environment_yml: str,
980
- force_build: bool = False,
1426
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
981
1427
  *,
982
1428
  secrets: Sequence[_Secret] = [],
983
1429
  gpu: GPU_T = None,
984
- ) -> "_Image":
985
- """Update a Conda environment using dependencies from a given environment.yml file."""
986
-
987
- def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
988
- context_files = {"/environment.yml": os.path.expanduser(environment_yml)}
989
-
990
- commands = [
991
- "FROM base",
992
- "COPY /environment.yml /environment.yml",
993
- "RUN conda env update --name base -f /environment.yml \\ ",
994
- "&& conda clean --yes --index-cache --tarballs --tempfiles --logfiles",
995
- ]
996
- return DockerfileSpec(commands=commands, context_files=context_files)
997
-
998
- return _Image._from_args(
999
- base_images={"base": self},
1000
- dockerfile_function=build_dockerfile,
1001
- force_build=self.force_build or force_build,
1002
- secrets=secrets,
1003
- gpu_config=parse_gpu_config(gpu),
1430
+ ):
1431
+ """mdmd:hidden"""
1432
+ message = (
1433
+ "Image.conda_update_from_environment` is deprecated."
1434
+ " Please use the `Image.micromamba_install` method (with the `spec_file` parameter) instead."
1004
1435
  )
1436
+ deprecation_error((2024, 5, 2), message)
1005
1437
 
1006
1438
  @staticmethod
1007
1439
  def micromamba(
1008
1440
  python_version: Optional[str] = None,
1009
- force_build: bool = False,
1441
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1010
1442
  ) -> "_Image":
1011
- """
1012
- A Micromamba base image. Micromamba allows for fast building of small Conda-based containers.
1013
- In most cases it will be faster than using [`Image.conda()`](/docs/reference/modal.Image#conda).
1014
- """
1443
+ """A Micromamba base image. Micromamba allows for fast building of small Conda-based containers."""
1015
1444
 
1016
1445
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1017
1446
  nonlocal python_version
1018
1447
  if version == "2023.12" and python_version is None:
1019
1448
  python_version = "3.9" # Backcompat for old hardcoded default param
1020
- validated_python_version = _validate_python_version(python_version)
1021
- micromamba_version = {"2023.12": "1.3.1", "2024.04": "1.5.8"}[version]
1022
- debian_codename = _dockerhub_debian_codename(version)
1449
+ validated_python_version = _validate_python_version(python_version, version)
1450
+ micromamba_version = _base_image_config("micromamba", version)
1451
+ debian_codename = _base_image_config("debian", version)
1023
1452
  tag = f"mambaorg/micromamba:{micromamba_version}-{debian_codename}-slim"
1024
1453
  setup_commands = [
1025
1454
  'SHELL ["/usr/local/bin/_dockerfile_shell.sh"]',
@@ -1039,28 +1468,36 @@ class _Image(_Object, type_prefix="im"):
1039
1468
  def micromamba_install(
1040
1469
  self,
1041
1470
  # A list of Python packages, eg. ["numpy", "matplotlib>=3.5.0"]
1042
- *packages: Union[str, List[str]],
1043
- # A list of Conda channels, eg. ["conda-forge", "nvidia"]
1044
- channels: List[str] = [],
1045
- force_build: bool = False,
1471
+ *packages: Union[str, list[str]],
1472
+ # A local path to a file containing package specifications
1473
+ spec_file: Optional[str] = None,
1474
+ # A list of Conda channels, eg. ["conda-forge", "nvidia"].
1475
+ channels: list[str] = [],
1476
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1046
1477
  secrets: Sequence[_Secret] = [],
1047
1478
  gpu: GPU_T = None,
1048
1479
  ) -> "_Image":
1049
1480
  """Install a list of additional packages using micromamba."""
1050
-
1051
1481
  pkgs = _flatten_str_args("micromamba_install", "packages", packages)
1052
- if not pkgs:
1482
+ if not pkgs and spec_file is None:
1053
1483
  return self
1054
1484
 
1055
1485
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1056
- package_args = " ".join(shlex.quote(pkg) for pkg in pkgs)
1486
+ package_args = shlex.join(pkgs)
1057
1487
  channel_args = "".join(f" -c {channel}" for channel in channels)
1058
1488
 
1489
+ space = " " if package_args else ""
1490
+ remote_spec_file = "" if spec_file is None else f"/{os.path.basename(spec_file)}"
1491
+ file_arg = "" if spec_file is None else f"{space}-f {remote_spec_file} -n base"
1492
+ copy_commands = [] if spec_file is None else [f"COPY {remote_spec_file} {remote_spec_file}"]
1493
+
1059
1494
  commands = [
1060
1495
  "FROM base",
1061
- f"RUN micromamba install {package_args}{channel_args} --yes",
1496
+ *copy_commands,
1497
+ f"RUN micromamba install {package_args}{file_arg}{channel_args} --yes",
1062
1498
  ]
1063
- return DockerfileSpec(commands=commands, context_files={})
1499
+ context_files = {} if spec_file is None else {remote_spec_file: os.path.expanduser(spec_file)}
1500
+ return DockerfileSpec(commands=commands, context_files=context_files)
1064
1501
 
1065
1502
  return _Image._from_args(
1066
1503
  base_images={"base": self},
@@ -1074,22 +1511,30 @@ class _Image(_Object, type_prefix="im"):
1074
1511
  def _registry_setup_commands(
1075
1512
  tag: str,
1076
1513
  builder_version: ImageBuilderVersion,
1077
- setup_commands: List[str],
1514
+ setup_commands: list[str],
1078
1515
  add_python: Optional[str] = None,
1079
- ) -> List[str]:
1080
- add_python_commands: List[str] = []
1516
+ ) -> list[str]:
1517
+ add_python_commands: list[str] = []
1081
1518
  if add_python:
1082
- _validate_python_version(add_python, allow_micro_granularity=False)
1519
+ _validate_python_version(add_python, builder_version, allow_micro_granularity=False)
1083
1520
  add_python_commands = [
1084
1521
  "COPY /python/. /usr/local",
1085
- "RUN ln -s /usr/local/bin/python3 /usr/local/bin/python",
1086
1522
  "ENV TERMINFO_DIRS=/etc/terminfo:/lib/terminfo:/usr/share/terminfo:/usr/lib/terminfo",
1087
1523
  ]
1088
-
1524
+ python_minor = add_python.split(".")[1]
1525
+ if int(python_minor) < 13:
1526
+ # Previous versions did not include the `python` binary, but later ones do.
1527
+ # (The important factor is not the Python version itself, but the standalone dist version.)
1528
+ # We insert the command in the list at the position it was previously always added
1529
+ # for backwards compatibility with existing images.
1530
+ add_python_commands.insert(1, "RUN ln -s /usr/local/bin/python3 /usr/local/bin/python")
1531
+
1532
+ # Note: this change is because we install dependencies with uv in 2024.10+
1533
+ requirements_prefix = "python -m " if builder_version < "2024.10" else ""
1089
1534
  modal_requirements_commands = [
1090
1535
  f"COPY {CONTAINER_REQUIREMENTS_PATH} {CONTAINER_REQUIREMENTS_PATH}",
1091
- f"RUN python -m pip install --upgrade {'pip' if builder_version == '2023.12' else 'pip wheel'}",
1092
- f"RUN python -m {_get_modal_requirements_command(builder_version)}",
1536
+ f"RUN python -m pip install --upgrade {_base_image_config('package_tools', builder_version)}",
1537
+ f"RUN {requirements_prefix}{_get_modal_requirements_command(builder_version)}",
1093
1538
  ]
1094
1539
  if builder_version > "2023.12":
1095
1540
  modal_requirements_commands.append(f"RUN rm {CONTAINER_REQUIREMENTS_PATH}")
@@ -1106,8 +1551,8 @@ class _Image(_Object, type_prefix="im"):
1106
1551
  tag: str,
1107
1552
  *,
1108
1553
  secret: Optional[_Secret] = None,
1109
- setup_dockerfile_commands: List[str] = [],
1110
- force_build: bool = False,
1554
+ setup_dockerfile_commands: list[str] = [],
1555
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1111
1556
  add_python: Optional[str] = None,
1112
1557
  **kwargs,
1113
1558
  ) -> "_Image":
@@ -1116,19 +1561,19 @@ class _Image(_Object, type_prefix="im"):
1116
1561
  The image must be built for the `linux/amd64` platform.
1117
1562
 
1118
1563
  If your image does not come with Python installed, you can use the `add_python` parameter
1119
- to specify a version of Python to add to the image. Supported versions are `3.8`, `3.9`,
1120
- `3.10`, `3.11`, and `3.12`. Otherwise, the image is expected to have Python>3.8 available
1121
- on PATH as `python`, along with `pip`.
1564
+ to specify a version of Python to add to the image. Otherwise, the image is expected to
1565
+ have Python on PATH as `python`, along with `pip`.
1122
1566
 
1123
1567
  You may also use `setup_dockerfile_commands` to run Dockerfile commands before the
1124
1568
  remaining commands run. This might be useful if you want a custom Python installation or to
1125
1569
  set a `SHELL`. Prefer `run_commands()` when possible though.
1126
1570
 
1127
1571
  To authenticate against a private registry with static credentials, you must set the `secret` parameter to
1128
- a `modal.Secret` containing a username (`REGISTRY_USERNAME`) and an access token or password (`REGISTRY_PASSWORD`).
1572
+ a `modal.Secret` containing a username (`REGISTRY_USERNAME`) and
1573
+ an access token or password (`REGISTRY_PASSWORD`).
1129
1574
 
1130
- To authenticate against private registries with credentials from a cloud provider, use `Image.from_gcp_artifact_registry()`
1131
- or `Image.from_aws_ecr()`.
1575
+ To authenticate against private registries with credentials from a cloud provider,
1576
+ use `Image.from_gcp_artifact_registry()` or `Image.from_aws_ecr()`.
1132
1577
 
1133
1578
  **Examples**
1134
1579
 
@@ -1138,11 +1583,15 @@ class _Image(_Object, type_prefix="im"):
1138
1583
  modal.Image.from_registry("nvcr.io/nvidia/pytorch:22.12-py3")
1139
1584
  ```
1140
1585
  """
1141
- context_mount = None
1142
- if add_python:
1143
- context_mount = _Mount.from_name(
1144
- python_standalone_mount_name(add_python),
1145
- namespace=api_pb2.DEPLOYMENT_NAMESPACE_GLOBAL,
1586
+
1587
+ def context_mount_function() -> Optional[_Mount]:
1588
+ return (
1589
+ _Mount.from_name(
1590
+ python_standalone_mount_name(add_python),
1591
+ namespace=api_pb2.DEPLOYMENT_NAMESPACE_GLOBAL,
1592
+ )
1593
+ if add_python
1594
+ else None
1146
1595
  )
1147
1596
 
1148
1597
  if "image_registry_config" not in kwargs and secret is not None:
@@ -1155,7 +1604,7 @@ class _Image(_Object, type_prefix="im"):
1155
1604
 
1156
1605
  return _Image._from_args(
1157
1606
  dockerfile_function=build_dockerfile,
1158
- context_mount=context_mount,
1607
+ context_mount_function=context_mount_function,
1159
1608
  force_build=force_build,
1160
1609
  **kwargs,
1161
1610
  )
@@ -1165,21 +1614,24 @@ class _Image(_Object, type_prefix="im"):
1165
1614
  tag: str,
1166
1615
  secret: Optional[_Secret] = None,
1167
1616
  *,
1168
- setup_dockerfile_commands: List[str] = [],
1169
- force_build: bool = False,
1617
+ setup_dockerfile_commands: list[str] = [],
1618
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1170
1619
  add_python: Optional[str] = None,
1171
1620
  **kwargs,
1172
1621
  ) -> "_Image":
1173
1622
  """Build a Modal image from a private image in Google Cloud Platform (GCP) Artifact Registry.
1174
1623
 
1175
1624
  You will need to pass a `modal.Secret` containing [your GCP service account key data](https://cloud.google.com/iam/docs/keys-create-delete#creating)
1176
- as `SERVICE_ACCOUNT_JSON`. This can be done from the [Secrets](/secrets) page. Your service account should be granted a specific
1177
- role depending on the GCP registry used:
1625
+ as `SERVICE_ACCOUNT_JSON`. This can be done from the [Secrets](/secrets) page.
1626
+ Your service account should be granted a specific role depending on the GCP registry used:
1178
1627
 
1179
- - For Artifact Registry images (`pkg.dev` domains) use the ["Artifact Registry Reader"](https://cloud.google.com/artifact-registry/docs/access-control#roles) role
1180
- - For Container Registry images (`gcr.io` domains) use the ["Storage Object Viewer"](https://cloud.google.com/artifact-registry/docs/transition/setup-gcr-repo#permissions) role
1628
+ - For Artifact Registry images (`pkg.dev` domains) use
1629
+ the ["Artifact Registry Reader"](https://cloud.google.com/artifact-registry/docs/access-control#roles) role
1630
+ - For Container Registry images (`gcr.io` domains) use
1631
+ the ["Storage Object Viewer"](https://cloud.google.com/artifact-registry/docs/transition/setup-gcr-repo) role
1181
1632
 
1182
- **Note:** This method does not use `GOOGLE_APPLICATION_CREDENTIALS` as that variable accepts a path to a JSON file, not the actual JSON string.
1633
+ **Note:** This method does not use `GOOGLE_APPLICATION_CREDENTIALS` as that
1634
+ variable accepts a path to a JSON file, not the actual JSON string.
1183
1635
 
1184
1636
  See `Image.from_registry()` for information about the other parameters.
1185
1637
 
@@ -1188,7 +1640,10 @@ class _Image(_Object, type_prefix="im"):
1188
1640
  ```python
1189
1641
  modal.Image.from_gcp_artifact_registry(
1190
1642
  "us-east1-docker.pkg.dev/my-project-1234/my-repo/my-image:my-version",
1191
- secret=modal.Secret.from_name("my-gcp-secret"),
1643
+ secret=modal.Secret.from_name(
1644
+ "my-gcp-secret",
1645
+ required_keys=["SERVICE_ACCOUNT_JSON"],
1646
+ ),
1192
1647
  add_python="3.11",
1193
1648
  )
1194
1649
  ```
@@ -1210,15 +1665,15 @@ class _Image(_Object, type_prefix="im"):
1210
1665
  tag: str,
1211
1666
  secret: Optional[_Secret] = None,
1212
1667
  *,
1213
- setup_dockerfile_commands: List[str] = [],
1214
- force_build: bool = False,
1668
+ setup_dockerfile_commands: list[str] = [],
1669
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1215
1670
  add_python: Optional[str] = None,
1216
1671
  **kwargs,
1217
1672
  ) -> "_Image":
1218
1673
  """Build a Modal image from a private image in AWS Elastic Container Registry (ECR).
1219
1674
 
1220
- You will need to pass a `modal.Secret` containing an AWS key (`AWS_ACCESS_KEY_ID`) and
1221
- secret (`AWS_SECRET_ACCESS_KEY`) with permissions to access the target ECR registry.
1675
+ You will need to pass a `modal.Secret` containing `AWS_ACCESS_KEY_ID`,
1676
+ `AWS_SECRET_ACCESS_KEY`, and `AWS_REGION` to access the target ECR registry.
1222
1677
 
1223
1678
  IAM configuration details can be found in the AWS documentation for
1224
1679
  ["Private repository policies"](https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html).
@@ -1230,7 +1685,10 @@ class _Image(_Object, type_prefix="im"):
1230
1685
  ```python
1231
1686
  modal.Image.from_aws_ecr(
1232
1687
  "000000000000.dkr.ecr.us-east-1.amazonaws.com/my-private-registry:my-version",
1233
- secret=modal.Secret.from_name("aws"),
1688
+ secret=modal.Secret.from_name(
1689
+ "aws",
1690
+ required_keys=["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"],
1691
+ ),
1234
1692
  add_python="3.11",
1235
1693
  )
1236
1694
  ```
@@ -1249,28 +1707,75 @@ class _Image(_Object, type_prefix="im"):
1249
1707
 
1250
1708
  @staticmethod
1251
1709
  def from_dockerfile(
1710
+ # Filepath to Dockerfile.
1252
1711
  path: Union[str, Path],
1253
- context_mount: Optional[
1254
- _Mount
1255
- ] = None, # modal.Mount with local files to supply as build context for COPY commands
1712
+ # modal.Mount with local files to supply as build context for COPY commands.
1713
+ # NOTE: The remote_path of the Mount should match the Dockerfile's WORKDIR.
1714
+ context_mount: Optional[_Mount] = None,
1715
+ # Ignore cached builds, similar to 'docker build --no-cache'
1256
1716
  force_build: bool = False,
1257
1717
  *,
1258
1718
  secrets: Sequence[_Secret] = [],
1259
1719
  gpu: GPU_T = None,
1260
1720
  add_python: Optional[str] = None,
1721
+ ignore: Union[Sequence[str], Callable[[Path], bool]] = AUTO_DOCKERIGNORE,
1261
1722
  ) -> "_Image":
1262
1723
  """Build a Modal image from a local Dockerfile.
1263
1724
 
1264
1725
  If your Dockerfile does not have Python installed, you can use the `add_python` parameter
1265
- to specify a version of Python to add to the image. Supported versions are `3.8`, `3.9`,
1266
- `3.10`, `3.11`, and `3.12`.
1726
+ to specify a version of Python to add to the image.
1267
1727
 
1268
- **Example**
1728
+ **Usage:**
1269
1729
 
1270
1730
  ```python
1271
- image = modal.Image.from_dockerfile("./Dockerfile", add_python="3.12")
1731
+ from modal import FilePatternMatcher
1732
+
1733
+ # By default a .dockerignore file is used if present in the current working directory
1734
+ image = modal.Image.from_dockerfile(
1735
+ "./Dockerfile",
1736
+ add_python="3.12",
1737
+ )
1738
+
1739
+ image = modal.Image.from_dockerfile(
1740
+ "./Dockerfile",
1741
+ add_python="3.12",
1742
+ ignore=["*.venv"],
1743
+ )
1744
+
1745
+ image = modal.Image.from_dockerfile(
1746
+ "./Dockerfile",
1747
+ add_python="3.12",
1748
+ ignore=lambda p: p.is_relative_to(".venv"),
1749
+ )
1750
+
1751
+ image = modal.Image.from_dockerfile(
1752
+ "./Dockerfile",
1753
+ add_python="3.12",
1754
+ ignore=FilePatternMatcher("**/*.txt"),
1755
+ )
1756
+
1757
+ # When including files is simpler than excluding them, you can use the `~` operator to invert the matcher.
1758
+ image = modal.Image.from_dockerfile(
1759
+ "./Dockerfile",
1760
+ add_python="3.12",
1761
+ ignore=~FilePatternMatcher("**/*.py"),
1762
+ )
1763
+
1764
+ # You can also read ignore patterns from a file.
1765
+ image = modal.Image.from_dockerfile(
1766
+ "./Dockerfile",
1767
+ add_python="3.12",
1768
+ ignore=FilePatternMatcher.from_file("/path/to/dockerignore"),
1769
+ )
1272
1770
  ```
1273
1771
  """
1772
+ if context_mount is not None:
1773
+ deprecation_warning(
1774
+ (2025, 1, 13),
1775
+ "`context_mount` is deprecated."
1776
+ + " Files are now automatically added to the build context based on the commands in the Dockerfile.",
1777
+ pending=True,
1778
+ )
1274
1779
 
1275
1780
  # --- Build the base dockerfile
1276
1781
 
@@ -1282,7 +1787,9 @@ class _Image(_Object, type_prefix="im"):
1282
1787
  gpu_config = parse_gpu_config(gpu)
1283
1788
  base_image = _Image._from_args(
1284
1789
  dockerfile_function=build_dockerfile_base,
1285
- context_mount=context_mount,
1790
+ context_mount_function=_create_context_mount_function(
1791
+ ignore=ignore, dockerfile_path=Path(path), context_mount=context_mount
1792
+ ),
1286
1793
  gpu_config=gpu_config,
1287
1794
  secrets=secrets,
1288
1795
  )
@@ -1291,13 +1798,15 @@ class _Image(_Object, type_prefix="im"):
1291
1798
  # This happening in two steps is probably a vestigial consequence of previous limitations,
1292
1799
  # but it will be difficult to merge them without forcing rebuilds of images.
1293
1800
 
1294
- if add_python:
1295
- context_mount = _Mount.from_name(
1296
- python_standalone_mount_name(add_python),
1297
- namespace=api_pb2.DEPLOYMENT_NAMESPACE_GLOBAL,
1801
+ def add_python_mount():
1802
+ return (
1803
+ _Mount.from_name(
1804
+ python_standalone_mount_name(add_python),
1805
+ namespace=api_pb2.DEPLOYMENT_NAMESPACE_GLOBAL,
1806
+ )
1807
+ if add_python
1808
+ else None
1298
1809
  )
1299
- else:
1300
- context_mount = None
1301
1810
 
1302
1811
  def build_dockerfile_python(version: ImageBuilderVersion) -> DockerfileSpec:
1303
1812
  commands = _Image._registry_setup_commands("base", version, [], add_python)
@@ -1308,26 +1817,28 @@ class _Image(_Object, type_prefix="im"):
1308
1817
  return _Image._from_args(
1309
1818
  base_images={"base": base_image},
1310
1819
  dockerfile_function=build_dockerfile_python,
1311
- context_mount=context_mount,
1820
+ context_mount_function=add_python_mount,
1312
1821
  force_build=force_build,
1313
1822
  )
1314
1823
 
1315
1824
  @staticmethod
1316
1825
  def debian_slim(python_version: Optional[str] = None, force_build: bool = False) -> "_Image":
1317
1826
  """Default image, based on the official `python` Docker images."""
1827
+ if isinstance(python_version, float):
1828
+ raise TypeError("The `python_version` argument should be a string, not a float.")
1318
1829
 
1319
1830
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1320
1831
  requirements_path = _get_modal_requirements_path(version, python_version)
1321
1832
  context_files = {CONTAINER_REQUIREMENTS_PATH: requirements_path}
1322
1833
  full_python_version = _dockerhub_python_version(version, python_version)
1323
- debian_codename = _dockerhub_debian_codename(version)
1834
+ debian_codename = _base_image_config("debian", version)
1324
1835
 
1325
1836
  commands = [
1326
1837
  f"FROM python:{full_python_version}-slim-{debian_codename}",
1327
1838
  f"COPY {CONTAINER_REQUIREMENTS_PATH} {CONTAINER_REQUIREMENTS_PATH}",
1328
1839
  "RUN apt-get update",
1329
1840
  "RUN apt-get install -y gcc gfortran build-essential",
1330
- f"RUN pip install --upgrade {'pip' if version == '2023.12' else 'pip wheel'}",
1841
+ f"RUN pip install --upgrade {_base_image_config('package_tools', version)}",
1331
1842
  f"RUN {_get_modal_requirements_command(version)}",
1332
1843
  # Set debian front-end to non-interactive to avoid users getting stuck with input prompts.
1333
1844
  "RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections",
@@ -1344,8 +1855,8 @@ class _Image(_Object, type_prefix="im"):
1344
1855
 
1345
1856
  def apt_install(
1346
1857
  self,
1347
- *packages: Union[str, List[str]], # A list of packages, e.g. ["ssh", "libpq-dev"]
1348
- force_build: bool = False,
1858
+ *packages: Union[str, list[str]], # A list of packages, e.g. ["ssh", "libpq-dev"]
1859
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1349
1860
  secrets: Sequence[_Secret] = [],
1350
1861
  gpu: GPU_T = None,
1351
1862
  ) -> "_Image":
@@ -1361,7 +1872,7 @@ class _Image(_Object, type_prefix="im"):
1361
1872
  if not pkgs:
1362
1873
  return self
1363
1874
 
1364
- package_args = " ".join(shlex.quote(pkg) for pkg in pkgs)
1875
+ package_args = shlex.join(pkgs)
1365
1876
 
1366
1877
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1367
1878
  commands = [
@@ -1381,28 +1892,33 @@ class _Image(_Object, type_prefix="im"):
1381
1892
 
1382
1893
  def run_function(
1383
1894
  self,
1384
- raw_f: Callable,
1895
+ raw_f: Callable[..., Any],
1385
1896
  secrets: Sequence[_Secret] = (), # Optional Modal Secret objects with environment variables for the container
1386
- gpu: GPU_T = None, # GPU specification as string ("any", "T4", "A10G", ...) or object (`modal.GPU.A100()`, ...)
1387
- mounts: Sequence[_Mount] = (),
1388
- shared_volumes: Dict[Union[str, PurePosixPath], _NetworkFileSystem] = {},
1389
- network_file_systems: Dict[Union[str, PurePosixPath], _NetworkFileSystem] = {},
1897
+ gpu: Union[
1898
+ GPU_T, list[GPU_T]
1899
+ ] = None, # GPU request as string ("any", "T4", ...), object (`modal.GPU.A100()`, ...), or a list of either
1900
+ mounts: Sequence[_Mount] = (), # Mounts attached to the function
1901
+ volumes: dict[Union[str, PurePosixPath], Union[_Volume, _CloudBucketMount]] = {}, # Volume mount paths
1902
+ network_file_systems: dict[Union[str, PurePosixPath], _NetworkFileSystem] = {}, # NFS mount paths
1390
1903
  cpu: Optional[float] = None, # How many CPU cores to request. This is a soft limit.
1391
1904
  memory: Optional[int] = None, # How much memory to request, in MiB. This is a soft limit.
1392
- timeout: Optional[int] = 86400, # Maximum execution time of the function in seconds.
1393
- force_build: bool = False,
1394
- secret: Optional[_Secret] = None, # Deprecated: use `secrets`.
1905
+ timeout: Optional[int] = 60 * 60, # Maximum execution time of the function in seconds.
1906
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1907
+ cloud: Optional[str] = None, # Cloud provider to run the function on. Possible values are aws, gcp, oci, auto.
1908
+ region: Optional[Union[str, Sequence[str]]] = None, # Region or regions to run the function on.
1395
1909
  args: Sequence[Any] = (), # Positional arguments to the function.
1396
- kwargs: Dict[str, Any] = {}, # Keyword arguments to the function.
1910
+ kwargs: dict[str, Any] = {}, # Keyword arguments to the function.
1397
1911
  ) -> "_Image":
1398
1912
  """Run user-defined function `raw_f` as an image build step. The function runs just like an ordinary Modal
1399
- function, and any kwargs accepted by `@app.function` (such as `Mount`s, `NetworkFileSystem`s, and resource requests) can
1400
- be supplied to it. After it finishes execution, a snapshot of the resulting container file system is saved as an image.
1913
+ function, and any kwargs accepted by `@app.function` (such as `Mount`s, `NetworkFileSystem`s,
1914
+ and resource requests) can be supplied to it.
1915
+ After it finishes execution, a snapshot of the resulting container file system is saved as an image.
1401
1916
 
1402
1917
  **Note**
1403
1918
 
1404
- Only the source code of `raw_f`, the contents of `**kwargs`, and any referenced *global* variables are used to determine whether the image has changed
1405
- and needs to be rebuilt. If this function references other functions or variables, the image will not be rebuilt if you
1919
+ Only the source code of `raw_f`, the contents of `**kwargs`, and any referenced *global* variables
1920
+ are used to determine whether the image has changed and needs to be rebuilt.
1921
+ If this function references other functions or variables, the image will not be rebuilt if you
1406
1922
  make changes to them. You can force a rebuild by changing the function's source code itself.
1407
1923
 
1408
1924
  **Example**
@@ -1422,23 +1938,27 @@ class _Image(_Object, type_prefix="im"):
1422
1938
  """
1423
1939
  from .functions import _Function
1424
1940
 
1425
- info = FunctionInfo(raw_f)
1941
+ if not callable(raw_f):
1942
+ raise InvalidError(f"Argument to Image.run_function must be a function, not {type(raw_f).__name__}.")
1943
+ elif raw_f.__name__ == "<lambda>":
1944
+ # It may be possible to support lambdas eventually, but for now we don't handle them well, so reject quickly
1945
+ raise InvalidError("Image.run_function does not support lambda functions.")
1426
1946
 
1427
- if shared_volumes or network_file_systems:
1428
- warnings.warn(
1429
- "Mounting NetworkFileSystems or Volumes is usually not advised with `run_function`."
1430
- " If you are trying to download model weights, downloading it to the image itself is recommended and sufficient."
1431
- )
1947
+ scheduler_placement = SchedulerPlacement(region=region) if region else None
1948
+
1949
+ info = FunctionInfo(raw_f)
1432
1950
 
1433
1951
  function = _Function.from_args(
1434
1952
  info,
1435
1953
  app=None,
1436
- image=self,
1437
- secret=secret,
1954
+ image=self, # type: ignore[reportArgumentType] # TODO: probably conflict with type stub?
1438
1955
  secrets=secrets,
1439
1956
  gpu=gpu,
1440
1957
  mounts=mounts,
1958
+ volumes=volumes,
1441
1959
  network_file_systems=network_file_systems,
1960
+ cloud=cloud,
1961
+ scheduler_placement=scheduler_placement,
1442
1962
  memory=memory,
1443
1963
  timeout=timeout,
1444
1964
  cpu=cpu,
@@ -1461,17 +1981,15 @@ class _Image(_Object, type_prefix="im"):
1461
1981
  force_build=self.force_build or force_build,
1462
1982
  )
1463
1983
 
1464
- def env(self, vars: Dict[str, str]) -> "_Image":
1465
- """Sets the environmental variables of the image.
1984
+ def env(self, vars: dict[str, str]) -> "_Image":
1985
+ """Sets the environment variables in an Image.
1466
1986
 
1467
1987
  **Example**
1468
1988
 
1469
1989
  ```python
1470
1990
  image = (
1471
- modal.Image.conda()
1472
- .env({"CONDA_OVERRIDE_CUDA": "11.2"})
1473
- .conda_install("jax", "cuda-nvcc", channels=["conda-forge", "nvidia"])
1474
- .pip_install("dm-haiku", "optax")
1991
+ modal.Image.debian_slim()
1992
+ .env({"HF_HUB_ENABLE_HF_TRANSFER": "1"})
1475
1993
  )
1476
1994
  ```
1477
1995
  """
@@ -1485,7 +2003,7 @@ class _Image(_Object, type_prefix="im"):
1485
2003
  dockerfile_function=build_dockerfile,
1486
2004
  )
1487
2005
 
1488
- def workdir(self, path: str) -> "_Image":
2006
+ def workdir(self, path: Union[str, PurePosixPath]) -> "_Image":
1489
2007
  """Set the working directory for subsequent image build steps and function execution.
1490
2008
 
1491
2009
  **Example**
@@ -1501,7 +2019,7 @@ class _Image(_Object, type_prefix="im"):
1501
2019
  """
1502
2020
 
1503
2021
  def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1504
- commands = ["FROM base", f"WORKDIR {shlex.quote(path)}"]
2022
+ commands = ["FROM base", f"WORKDIR {shlex.quote(str(path))}"]
1505
2023
  return DockerfileSpec(commands=commands, context_files={})
1506
2024
 
1507
2025
  return _Image._from_args(
@@ -1539,17 +2057,25 @@ class _Image(_Object, type_prefix="im"):
1539
2057
  if not isinstance(exc, ImportError):
1540
2058
  warnings.warn(f"Warning: caught a non-ImportError exception in an `imports()` block: {repr(exc)}")
1541
2059
 
1542
- def run_inside(self):
1543
- """`Image.run_inside` is deprecated - use `Image.imports` instead.
1544
-
1545
- **Usage:**
2060
+ @live_method_gen
2061
+ async def _logs(self) -> typing.AsyncGenerator[str, None]:
2062
+ """Streams logs from an image, or returns logs from an already completed image.
1546
2063
 
1547
- ```python notest
1548
- with image.imports():
1549
- import torch
1550
- ```
2064
+ This method is considered private since its interface may change - use it at your own risk!
1551
2065
  """
1552
- deprecation_error((2023, 12, 15), Image.run_inside.__doc__)
2066
+ last_entry_id: str = ""
2067
+
2068
+ request = api_pb2.ImageJoinStreamingRequest(
2069
+ image_id=self._object_id, timeout=55, last_entry_id=last_entry_id, include_logs_for_finished=True
2070
+ )
2071
+ async for response in self._client.stub.ImageJoinStreaming.unary_stream(request):
2072
+ if response.result.status:
2073
+ return
2074
+ if response.entry_id:
2075
+ last_entry_id = response.entry_id
2076
+ for task_log in response.task_logs:
2077
+ if task_log.data:
2078
+ yield task_log.data
1553
2079
 
1554
2080
 
1555
2081
  Image = synchronize_api(_Image)