modal 0.62.16__py3-none-any.whl → 0.72.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (220) hide show
  1. modal/__init__.py +17 -13
  2. modal/__main__.py +41 -3
  3. modal/_clustered_functions.py +80 -0
  4. modal/_clustered_functions.pyi +22 -0
  5. modal/_container_entrypoint.py +420 -937
  6. modal/_ipython.py +3 -13
  7. modal/_location.py +17 -10
  8. modal/_output.py +243 -99
  9. modal/_pty.py +2 -2
  10. modal/_resolver.py +55 -59
  11. modal/_resources.py +51 -0
  12. modal/_runtime/__init__.py +1 -0
  13. modal/_runtime/asgi.py +519 -0
  14. modal/_runtime/container_io_manager.py +1036 -0
  15. modal/_runtime/execution_context.py +89 -0
  16. modal/_runtime/telemetry.py +169 -0
  17. modal/_runtime/user_code_imports.py +356 -0
  18. modal/_serialization.py +134 -9
  19. modal/_traceback.py +47 -187
  20. modal/_tunnel.py +52 -16
  21. modal/_tunnel.pyi +19 -36
  22. modal/_utils/app_utils.py +3 -17
  23. modal/_utils/async_utils.py +479 -100
  24. modal/_utils/blob_utils.py +157 -186
  25. modal/_utils/bytes_io_segment_payload.py +97 -0
  26. modal/_utils/deprecation.py +89 -0
  27. modal/_utils/docker_utils.py +98 -0
  28. modal/_utils/function_utils.py +460 -171
  29. modal/_utils/grpc_testing.py +47 -31
  30. modal/_utils/grpc_utils.py +62 -109
  31. modal/_utils/hash_utils.py +61 -19
  32. modal/_utils/http_utils.py +39 -9
  33. modal/_utils/logger.py +2 -1
  34. modal/_utils/mount_utils.py +34 -16
  35. modal/_utils/name_utils.py +58 -0
  36. modal/_utils/package_utils.py +14 -1
  37. modal/_utils/pattern_utils.py +205 -0
  38. modal/_utils/rand_pb_testing.py +5 -7
  39. modal/_utils/shell_utils.py +15 -49
  40. modal/_vendor/a2wsgi_wsgi.py +62 -72
  41. modal/_vendor/cloudpickle.py +1 -1
  42. modal/_watcher.py +14 -12
  43. modal/app.py +1003 -314
  44. modal/app.pyi +540 -264
  45. modal/call_graph.py +7 -6
  46. modal/cli/_download.py +63 -53
  47. modal/cli/_traceback.py +200 -0
  48. modal/cli/app.py +205 -45
  49. modal/cli/config.py +12 -5
  50. modal/cli/container.py +62 -14
  51. modal/cli/dict.py +128 -0
  52. modal/cli/entry_point.py +26 -13
  53. modal/cli/environment.py +40 -9
  54. modal/cli/import_refs.py +64 -58
  55. modal/cli/launch.py +32 -18
  56. modal/cli/network_file_system.py +64 -83
  57. modal/cli/profile.py +1 -1
  58. modal/cli/programs/run_jupyter.py +35 -10
  59. modal/cli/programs/vscode.py +60 -10
  60. modal/cli/queues.py +131 -0
  61. modal/cli/run.py +234 -131
  62. modal/cli/secret.py +8 -7
  63. modal/cli/token.py +7 -2
  64. modal/cli/utils.py +79 -10
  65. modal/cli/volume.py +110 -109
  66. modal/client.py +250 -144
  67. modal/client.pyi +157 -118
  68. modal/cloud_bucket_mount.py +108 -34
  69. modal/cloud_bucket_mount.pyi +32 -38
  70. modal/cls.py +535 -148
  71. modal/cls.pyi +190 -146
  72. modal/config.py +41 -19
  73. modal/container_process.py +177 -0
  74. modal/container_process.pyi +82 -0
  75. modal/dict.py +111 -65
  76. modal/dict.pyi +136 -131
  77. modal/environments.py +106 -5
  78. modal/environments.pyi +77 -25
  79. modal/exception.py +34 -43
  80. modal/experimental.py +61 -2
  81. modal/extensions/ipython.py +5 -5
  82. modal/file_io.py +537 -0
  83. modal/file_io.pyi +235 -0
  84. modal/file_pattern_matcher.py +197 -0
  85. modal/functions.py +906 -911
  86. modal/functions.pyi +466 -430
  87. modal/gpu.py +57 -44
  88. modal/image.py +1089 -479
  89. modal/image.pyi +584 -228
  90. modal/io_streams.py +434 -0
  91. modal/io_streams.pyi +122 -0
  92. modal/mount.py +314 -101
  93. modal/mount.pyi +241 -235
  94. modal/network_file_system.py +92 -92
  95. modal/network_file_system.pyi +152 -110
  96. modal/object.py +67 -36
  97. modal/object.pyi +166 -143
  98. modal/output.py +63 -0
  99. modal/parallel_map.py +434 -0
  100. modal/parallel_map.pyi +75 -0
  101. modal/partial_function.py +282 -117
  102. modal/partial_function.pyi +222 -129
  103. modal/proxy.py +15 -12
  104. modal/proxy.pyi +3 -8
  105. modal/queue.py +182 -65
  106. modal/queue.pyi +218 -118
  107. modal/requirements/2024.04.txt +29 -0
  108. modal/requirements/2024.10.txt +16 -0
  109. modal/requirements/README.md +21 -0
  110. modal/requirements/base-images.json +22 -0
  111. modal/retries.py +48 -7
  112. modal/runner.py +459 -156
  113. modal/runner.pyi +135 -71
  114. modal/running_app.py +38 -0
  115. modal/sandbox.py +514 -236
  116. modal/sandbox.pyi +397 -169
  117. modal/schedule.py +4 -4
  118. modal/scheduler_placement.py +20 -3
  119. modal/secret.py +56 -31
  120. modal/secret.pyi +62 -42
  121. modal/serving.py +51 -56
  122. modal/serving.pyi +44 -36
  123. modal/stream_type.py +15 -0
  124. modal/token_flow.py +5 -3
  125. modal/token_flow.pyi +37 -32
  126. modal/volume.py +285 -157
  127. modal/volume.pyi +249 -184
  128. {modal-0.62.16.dist-info → modal-0.72.11.dist-info}/METADATA +7 -7
  129. modal-0.72.11.dist-info/RECORD +174 -0
  130. {modal-0.62.16.dist-info → modal-0.72.11.dist-info}/top_level.txt +0 -1
  131. modal_docs/gen_reference_docs.py +3 -1
  132. modal_docs/mdmd/mdmd.py +0 -1
  133. modal_docs/mdmd/signatures.py +5 -2
  134. modal_global_objects/images/base_images.py +28 -0
  135. modal_global_objects/mounts/python_standalone.py +2 -2
  136. modal_proto/__init__.py +1 -1
  137. modal_proto/api.proto +1288 -533
  138. modal_proto/api_grpc.py +856 -456
  139. modal_proto/api_pb2.py +2165 -1157
  140. modal_proto/api_pb2.pyi +8859 -0
  141. modal_proto/api_pb2_grpc.py +1674 -855
  142. modal_proto/api_pb2_grpc.pyi +1416 -0
  143. modal_proto/modal_api_grpc.py +149 -0
  144. modal_proto/modal_options_grpc.py +3 -0
  145. modal_proto/options_pb2.pyi +20 -0
  146. modal_proto/options_pb2_grpc.pyi +7 -0
  147. modal_proto/py.typed +0 -0
  148. modal_version/__init__.py +1 -1
  149. modal_version/_version_generated.py +2 -2
  150. modal/_asgi.py +0 -370
  151. modal/_container_entrypoint.pyi +0 -378
  152. modal/_container_exec.py +0 -128
  153. modal/_sandbox_shell.py +0 -49
  154. modal/shared_volume.py +0 -23
  155. modal/shared_volume.pyi +0 -24
  156. modal/stub.py +0 -783
  157. modal/stub.pyi +0 -332
  158. modal-0.62.16.dist-info/RECORD +0 -198
  159. modal_global_objects/images/conda.py +0 -15
  160. modal_global_objects/images/debian_slim.py +0 -15
  161. modal_global_objects/images/micromamba.py +0 -15
  162. test/__init__.py +0 -1
  163. test/aio_test.py +0 -12
  164. test/async_utils_test.py +0 -262
  165. test/blob_test.py +0 -67
  166. test/cli_imports_test.py +0 -149
  167. test/cli_test.py +0 -659
  168. test/client_test.py +0 -194
  169. test/cls_test.py +0 -630
  170. test/config_test.py +0 -137
  171. test/conftest.py +0 -1420
  172. test/container_app_test.py +0 -32
  173. test/container_test.py +0 -1389
  174. test/cpu_test.py +0 -23
  175. test/decorator_test.py +0 -85
  176. test/deprecation_test.py +0 -34
  177. test/dict_test.py +0 -33
  178. test/e2e_test.py +0 -68
  179. test/error_test.py +0 -7
  180. test/function_serialization_test.py +0 -32
  181. test/function_test.py +0 -653
  182. test/function_utils_test.py +0 -101
  183. test/gpu_test.py +0 -159
  184. test/grpc_utils_test.py +0 -141
  185. test/helpers.py +0 -42
  186. test/image_test.py +0 -669
  187. test/live_reload_test.py +0 -80
  188. test/lookup_test.py +0 -70
  189. test/mdmd_test.py +0 -329
  190. test/mount_test.py +0 -162
  191. test/mounted_files_test.py +0 -329
  192. test/network_file_system_test.py +0 -181
  193. test/notebook_test.py +0 -66
  194. test/object_test.py +0 -41
  195. test/package_utils_test.py +0 -25
  196. test/queue_test.py +0 -97
  197. test/resolver_test.py +0 -58
  198. test/retries_test.py +0 -67
  199. test/runner_test.py +0 -85
  200. test/sandbox_test.py +0 -191
  201. test/schedule_test.py +0 -15
  202. test/scheduler_placement_test.py +0 -29
  203. test/secret_test.py +0 -78
  204. test/serialization_test.py +0 -42
  205. test/stub_composition_test.py +0 -10
  206. test/stub_test.py +0 -360
  207. test/test_asgi_wrapper.py +0 -234
  208. test/token_flow_test.py +0 -18
  209. test/traceback_test.py +0 -135
  210. test/tunnel_test.py +0 -29
  211. test/utils_test.py +0 -88
  212. test/version_test.py +0 -14
  213. test/volume_test.py +0 -341
  214. test/watcher_test.py +0 -30
  215. test/webhook_test.py +0 -146
  216. /modal/{requirements.312.txt → requirements/2023.12.312.txt} +0 -0
  217. /modal/{requirements.txt → requirements/2023.12.txt} +0 -0
  218. {modal-0.62.16.dist-info → modal-0.72.11.dist-info}/LICENSE +0 -0
  219. {modal-0.62.16.dist-info → modal-0.72.11.dist-info}/WHEEL +0 -0
  220. {modal-0.62.16.dist-info → modal-0.72.11.dist-info}/entry_points.txt +0 -0
modal/image.py CHANGED
@@ -1,14 +1,25 @@
1
1
  # Copyright Modal Labs 2022
2
2
  import contextlib
3
+ import json
3
4
  import os
5
+ import re
4
6
  import shlex
5
7
  import sys
6
8
  import typing
7
9
  import warnings
10
+ from collections.abc import Sequence
8
11
  from dataclasses import dataclass
9
12
  from inspect import isfunction
10
13
  from pathlib import Path, PurePosixPath
11
- from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
14
+ from typing import (
15
+ Any,
16
+ Callable,
17
+ Literal,
18
+ Optional,
19
+ Union,
20
+ cast,
21
+ get_args,
22
+ )
12
23
 
13
24
  from google.protobuf.message import Message
14
25
  from grpclib.exceptions import GRPCError, StreamTerminatedError
@@ -19,78 +30,151 @@ from ._resolver import Resolver
19
30
  from ._serialization import serialize
20
31
  from ._utils.async_utils import synchronize_api
21
32
  from ._utils.blob_utils import MAX_OBJECT_SIZE_BYTES
33
+ from ._utils.deprecation import deprecation_error, deprecation_warning
34
+ from ._utils.docker_utils import (
35
+ extract_copy_command_patterns,
36
+ find_dockerignore_file,
37
+ )
22
38
  from ._utils.function_utils import FunctionInfo
23
- from ._utils.grpc_utils import RETRYABLE_GRPC_STATUS_CODES, retry_transient_errors, unary_stream
24
- from .config import config, logger
25
- from .exception import InvalidError, NotFoundError, RemoteError, deprecation_warning
39
+ from ._utils.grpc_utils import RETRYABLE_GRPC_STATUS_CODES, retry_transient_errors
40
+ from .client import _Client
41
+ from .cloud_bucket_mount import _CloudBucketMount
42
+ from .config import config, logger, user_config_path
43
+ from .environments import _get_environment_cached
44
+ from .exception import InvalidError, NotFoundError, RemoteError, VersionError
45
+ from .file_pattern_matcher import NON_PYTHON_FILES, FilePatternMatcher, _ignore_fn
26
46
  from .gpu import GPU_T, parse_gpu_config
27
47
  from .mount import _Mount, python_standalone_mount_name
28
48
  from .network_file_system import _NetworkFileSystem
29
- from .object import _Object
49
+ from .object import _Object, live_method_gen
50
+ from .output import _get_output_manager
51
+ from .scheduler_placement import SchedulerPlacement
30
52
  from .secret import _Secret
53
+ from .volume import _Volume
31
54
 
55
+ if typing.TYPE_CHECKING:
56
+ import modal.functions
32
57
 
33
- def _validate_python_version(version: str) -> None:
34
- components = version.split(".")
35
- supported_versions = {"3.12", "3.11", "3.10", "3.9", "3.8"}
36
- if len(components) == 2 and version in supported_versions:
37
- return
38
- elif len(components) == 3:
39
- raise InvalidError(
40
- f"major.minor.patch version specification not valid. Supported major.minor versions are {supported_versions}."
41
- )
42
- raise InvalidError(f"Unsupported version {version}. Supported versions are {supported_versions}.")
58
+ # This is used for both type checking and runtime validation
59
+ ImageBuilderVersion = Literal["2023.12", "2024.04", "2024.10"]
43
60
 
61
+ # Note: we also define supported Python versions via logic at the top of the package __init__.py
62
+ # so that we fail fast / clearly in unsupported containers. Additionally, we enumerate the supported
63
+ # Python versions in mount.py where we specify the "standalone Python versions" we create mounts for.
64
+ # Consider consolidating these multiple sources of truth?
65
+ SUPPORTED_PYTHON_SERIES: dict[ImageBuilderVersion, list[str]] = {
66
+ "2024.10": ["3.9", "3.10", "3.11", "3.12", "3.13"],
67
+ "2024.04": ["3.9", "3.10", "3.11", "3.12"],
68
+ "2023.12": ["3.9", "3.10", "3.11", "3.12"],
69
+ }
44
70
 
45
- def _dockerhub_python_version(python_version=None):
46
- if python_version is None:
47
- python_version = "%d.%d" % sys.version_info[:2]
71
+ LOCAL_REQUIREMENTS_DIR = Path(__file__).parent / "requirements"
72
+ CONTAINER_REQUIREMENTS_PATH = "/modal_requirements.txt"
48
73
 
49
- parts = python_version.split(".")
50
74
 
51
- if len(parts) > 2:
52
- return python_version
75
+ class _AutoDockerIgnoreSentinel:
76
+ def __repr__(self) -> str:
77
+ return f"{__name__}.AUTO_DOCKERIGNORE"
53
78
 
54
- # We use the same major/minor version, but the highest micro version
55
- # See https://hub.docker.com/_/python
56
- latest_micro_version = {
57
- "3.12": "1",
58
- "3.11": "0",
59
- "3.10": "8",
60
- "3.9": "15",
61
- "3.8": "15",
62
- }
63
- major_minor_version = ".".join(parts[:2])
64
- python_version = major_minor_version + "." + latest_micro_version[major_minor_version]
65
- return python_version
79
+ def __call__(self, _: Path) -> bool:
80
+ raise NotImplementedError("This is only a placeholder. Do not call")
81
+
82
+
83
+ AUTO_DOCKERIGNORE = _AutoDockerIgnoreSentinel()
66
84
 
85
+ COPY_DEPRECATION_MESSAGE_PATTERN = """modal.Image.copy_* methods will soon be deprecated.
67
86
 
68
- def _get_client_requirements_path(python_version: Optional[str] = None) -> str:
69
- # Locate Modal client requirements.txt
70
- import modal
87
+ Use {replacement} instead, which is functionally and performance-wise equivalent.
88
+ """
71
89
 
72
- modal_path = modal.__path__[0]
90
+
91
+ def _validate_python_version(
92
+ python_version: Optional[str], builder_version: ImageBuilderVersion, allow_micro_granularity: bool = True
93
+ ) -> str:
73
94
  if python_version is None:
74
- major, minor, *_ = sys.version_info
95
+ # If Python version is unspecified, match the local version, up to the minor component
96
+ python_version = series_version = "{}.{}".format(*sys.version_info)
97
+ elif not isinstance(python_version, str):
98
+ raise InvalidError(f"Python version must be specified as a string, not {type(python_version).__name__}")
99
+ elif not re.match(r"^3(?:\.\d{1,2}){1,2}(rc\d*)?$", python_version):
100
+ raise InvalidError(f"Invalid Python version: {python_version!r}")
75
101
  else:
76
- major, minor = python_version.split("-")[0].split(".")[:2]
77
- suffix = {(3, 12): ".312"}.get((int(major), int(minor)), "")
78
- return os.path.join(modal_path, f"requirements{suffix}.txt")
102
+ components = python_version.split(".")
103
+ if len(components) == 3 and not allow_micro_granularity:
104
+ raise InvalidError(
105
+ "Python version must be specified as 'major.minor' for this interface;"
106
+ f" micro-level specification ({python_version!r}) is not valid."
107
+ )
108
+ series_version = "{}.{}".format(*components)
109
+
110
+ supported_series = SUPPORTED_PYTHON_SERIES[builder_version]
111
+ if series_version not in supported_series:
112
+ raise InvalidError(
113
+ f"Unsupported Python version: {python_version!r}."
114
+ f" When using the {builder_version!r} Image builder, Modal supports the following series:"
115
+ f" {supported_series!r}."
116
+ )
117
+ return python_version
118
+
119
+
120
+ def _dockerhub_python_version(builder_version: ImageBuilderVersion, python_version: Optional[str] = None) -> str:
121
+ python_version = _validate_python_version(python_version, builder_version)
122
+ version_components = python_version.split(".")
123
+
124
+ # When user specifies a full Python version, use that
125
+ if len(version_components) > 2:
126
+ return python_version
79
127
 
128
+ # Otherwise, use the same series, but a specific micro version, corresponding to the latest
129
+ # available from https://hub.docker.com/_/python at the time of each image builder release.
130
+ # This allows us to publish one pre-built debian-slim image per Python series.
131
+ python_versions = _base_image_config("python", builder_version)
132
+ series_to_micro_version = dict(tuple(v.rsplit(".", 1)) for v in python_versions)
133
+ python_series_requested = "{}.{}".format(*version_components)
134
+ micro_version = series_to_micro_version[python_series_requested]
135
+ return f"{python_series_requested}.{micro_version}"
80
136
 
81
- def _flatten_str_args(function_name: str, arg_name: str, args: Tuple[Union[str, List[str]], ...]) -> List[str]:
82
- """Takes a tuple of strings, or string lists, and flattens it.
137
+
138
+ def _base_image_config(group: str, builder_version: ImageBuilderVersion) -> Any:
139
+ with open(LOCAL_REQUIREMENTS_DIR / "base-images.json") as f:
140
+ data = json.load(f)
141
+ return data[group][builder_version]
142
+
143
+
144
+ def _get_modal_requirements_path(builder_version: ImageBuilderVersion, python_version: Optional[str] = None) -> str:
145
+ # When we added Python 3.12 support, we needed to update a few dependencies but did not yet
146
+ # support versioned builds, so we put them in a separate 3.12-specific requirements file.
147
+ # When the python_version is not specified in the Image API, we fall back to the local version.
148
+ # Note that this is buggy if you're using a registry or dockerfile Image that (implicitly) contains 3.12
149
+ # and have a different local version. We can't really fix that; but users can update their image builder.
150
+ # We can get rid of this complexity entirely when we drop support for 2023.12.
151
+ python_version = python_version or sys.version
152
+ suffix = ".312" if builder_version == "2023.12" and python_version.startswith("3.12") else ""
153
+
154
+ return str(LOCAL_REQUIREMENTS_DIR / f"{builder_version}{suffix}.txt")
155
+
156
+
157
+ def _get_modal_requirements_command(version: ImageBuilderVersion) -> str:
158
+ if version == "2023.12":
159
+ prefix = "pip install"
160
+ elif version == "2024.04":
161
+ prefix = "pip install --no-cache --no-deps"
162
+ else: # Currently, 2024.10+
163
+ prefix = "uv pip install --system --compile-bytecode --no-cache --no-deps"
164
+
165
+ return f"{prefix} -r {CONTAINER_REQUIREMENTS_PATH}"
166
+
167
+
168
+ def _flatten_str_args(function_name: str, arg_name: str, args: Sequence[Union[str, list[str]]]) -> list[str]:
169
+ """Takes a sequence of strings, or string lists, and flattens it.
83
170
 
84
171
  Raises an error if any of the elements are not strings or string lists.
85
172
  """
86
- # TODO(erikbern): maybe we can just build something intelligent that checks
87
- # based on type annotations in real time?
88
- # Or use something like this? https://github.com/FelixTheC/strongtyping
89
173
 
90
174
  def is_str_list(x):
91
175
  return isinstance(x, list) and all(isinstance(y, str) for y in x)
92
176
 
93
- ret: List[str] = []
177
+ ret: list[str] = []
94
178
  for x in args:
95
179
  if isinstance(x, str):
96
180
  ret.append(x)
@@ -101,11 +185,29 @@ def _flatten_str_args(function_name: str, arg_name: str, args: Tuple[Union[str,
101
185
  return ret
102
186
 
103
187
 
188
+ def _validate_packages(packages: list[str]) -> bool:
189
+ """Validates that a list of packages does not contain any command-line options."""
190
+ return not any(pkg.startswith("-") for pkg in packages)
191
+
192
+
193
+ def _warn_invalid_packages(old_command: str) -> None:
194
+ deprecation_warning(
195
+ (2024, 7, 3),
196
+ "Passing flags to `pip` via the `packages` argument of `pip_install` is deprecated."
197
+ " Please pass flags via the `extra_options` argument instead."
198
+ "\nNote that this will cause a rebuild of this image layer."
199
+ " To avoid rebuilding, you can pass the following to `run_commands` instead:"
200
+ f'\n`image.run_commands("{old_command}")`',
201
+ show_source=False,
202
+ )
203
+
204
+
104
205
  def _make_pip_install_args(
105
206
  find_links: Optional[str] = None, # Passes -f (--find-links) pip install
106
207
  index_url: Optional[str] = None, # Passes -i (--index-url) to pip install
107
208
  extra_index_url: Optional[str] = None, # Passes --extra-index-url to pip install
108
209
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
210
+ extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
109
211
  ) -> str:
110
212
  flags = [
111
213
  ("--find-links", find_links), # TODO(erikbern): allow multiple?
@@ -113,20 +215,122 @@ def _make_pip_install_args(
113
215
  ("--extra-index-url", extra_index_url), # TODO(erikbern): allow multiple?
114
216
  ]
115
217
 
116
- args = " ".join(flag + " " + shlex.quote(value) for flag, value in flags if value is not None)
218
+ args = " ".join(f"{flag} {shlex.quote(value)}" for flag, value in flags if value is not None)
117
219
  if pre:
118
- args += " --pre"
220
+ args += " --pre" # TODO: remove extra whitespace in future image builder version
221
+
222
+ if extra_options:
223
+ if args:
224
+ args += " "
225
+ args += f"{extra_options}"
119
226
 
120
227
  return args
121
228
 
122
229
 
230
+ def _get_image_builder_version(server_version: ImageBuilderVersion) -> ImageBuilderVersion:
231
+ if local_config_version := config.get("image_builder_version"):
232
+ version = local_config_version
233
+ if (env_var := "MODAL_IMAGE_BUILDER_VERSION") in os.environ:
234
+ version_source = f" (based on your `{env_var}` environment variable)"
235
+ else:
236
+ version_source = f" (based on your local config file at `{user_config_path}`)"
237
+ else:
238
+ version_source = ""
239
+ version = server_version
240
+
241
+ supported_versions: set[ImageBuilderVersion] = set(get_args(ImageBuilderVersion))
242
+ if version not in supported_versions:
243
+ if local_config_version is not None:
244
+ update_suggestion = "or remove your local configuration"
245
+ elif version < min(supported_versions):
246
+ update_suggestion = "your image builder version using the Modal dashboard"
247
+ else:
248
+ update_suggestion = "your client library (pip install --upgrade modal)"
249
+ raise VersionError(
250
+ "This version of the modal client supports the following image builder versions:"
251
+ f" {supported_versions!r}."
252
+ f"\n\nYou are using {version!r}{version_source}."
253
+ f" Please update {update_suggestion}."
254
+ )
255
+
256
+ return version
257
+
258
+
259
+ def _create_context_mount(
260
+ docker_commands: Sequence[str],
261
+ ignore_fn: Callable[[Path], bool],
262
+ context_dir: Path,
263
+ ) -> Optional[_Mount]:
264
+ """
265
+ Creates a context mount from a list of docker commands.
266
+
267
+ 1. Paths are evaluated relative to context_dir.
268
+ 2. First selects inclusions based on COPY commands in the list of commands.
269
+ 3. Then ignore any files as per the ignore predicate.
270
+ """
271
+ copy_patterns = extract_copy_command_patterns(docker_commands)
272
+ if not copy_patterns:
273
+ return None # no mount needed
274
+ include_fn = FilePatternMatcher(*copy_patterns)
275
+
276
+ def ignore_with_include(source: Path) -> bool:
277
+ relative_source = source.relative_to(context_dir)
278
+ if not include_fn(relative_source) or ignore_fn(relative_source):
279
+ return True
280
+
281
+ return False
282
+
283
+ return _Mount._add_local_dir(Path("./"), PurePosixPath("/"), ignore=ignore_with_include)
284
+
285
+
286
+ def _create_context_mount_function(
287
+ ignore: Union[Sequence[str], Callable[[Path], bool]],
288
+ dockerfile_cmds: list[str] = [],
289
+ dockerfile_path: Optional[Path] = None,
290
+ context_mount: Optional[_Mount] = None,
291
+ ):
292
+ if dockerfile_path and dockerfile_cmds:
293
+ raise InvalidError("Cannot provide both dockerfile and docker commands")
294
+
295
+ if context_mount:
296
+ if ignore is not AUTO_DOCKERIGNORE:
297
+ raise InvalidError("Cannot set both `context_mount` and `ignore`")
298
+
299
+ def identity_context_mount_fn() -> Optional[_Mount]:
300
+ return context_mount
301
+
302
+ return identity_context_mount_fn
303
+ elif ignore is AUTO_DOCKERIGNORE:
304
+
305
+ def auto_created_context_mount_fn() -> Optional[_Mount]:
306
+ context_dir = Path.cwd()
307
+ dockerignore_file = find_dockerignore_file(context_dir, dockerfile_path)
308
+ ignore_fn = (
309
+ FilePatternMatcher(*dockerignore_file.read_text("utf8").splitlines())
310
+ if dockerignore_file
311
+ else _ignore_fn(())
312
+ )
313
+
314
+ cmds = dockerfile_path.read_text("utf8").splitlines() if dockerfile_path else dockerfile_cmds
315
+ return _create_context_mount(cmds, ignore_fn=ignore_fn, context_dir=context_dir)
316
+
317
+ return auto_created_context_mount_fn
318
+
319
+ def auto_created_context_mount_fn() -> Optional[_Mount]:
320
+ # use COPY commands and ignore patterns to construct implicit context mount
321
+ cmds = dockerfile_path.read_text("utf8").splitlines() if dockerfile_path else dockerfile_cmds
322
+ return _create_context_mount(cmds, ignore_fn=_ignore_fn(ignore), context_dir=Path.cwd())
323
+
324
+ return auto_created_context_mount_fn
325
+
326
+
123
327
  class _ImageRegistryConfig:
124
328
  """mdmd:hidden"""
125
329
 
126
330
  def __init__(
127
331
  self,
128
332
  # TODO: change to _PUBLIC after worker starts handling it.
129
- registry_auth_type: int = api_pb2.REGISTRY_AUTH_TYPE_UNSPECIFIED,
333
+ registry_auth_type: "api_pb2.RegistryAuthType.ValueType" = api_pb2.REGISTRY_AUTH_TYPE_UNSPECIFIED,
130
334
  secret: Optional[_Secret] = None,
131
335
  ):
132
336
  self.registry_auth_type = registry_auth_type
@@ -135,57 +339,160 @@ class _ImageRegistryConfig:
135
339
  def get_proto(self) -> api_pb2.ImageRegistryConfig:
136
340
  return api_pb2.ImageRegistryConfig(
137
341
  registry_auth_type=self.registry_auth_type,
138
- secret_id=(self.secret.object_id if self.secret else None),
342
+ secret_id=(self.secret.object_id if self.secret else ""),
139
343
  )
140
344
 
141
345
 
142
- if typing.TYPE_CHECKING:
143
- import modal.functions
144
-
145
-
146
346
  @dataclass
147
347
  class DockerfileSpec:
148
348
  # Ideally we would use field() with default_factory=, but doesn't work with synchronicity type-stub gen
149
- commands: List[str]
150
- context_files: Dict[str, str]
349
+ commands: list[str]
350
+ context_files: dict[str, str]
351
+
352
+
353
+ async def _image_await_build_result(image_id: str, client: _Client) -> api_pb2.ImageJoinStreamingResponse:
354
+ last_entry_id: str = ""
355
+ result_response: Optional[api_pb2.ImageJoinStreamingResponse] = None
356
+
357
+ async def join():
358
+ nonlocal last_entry_id, result_response
359
+
360
+ request = api_pb2.ImageJoinStreamingRequest(image_id=image_id, timeout=55, last_entry_id=last_entry_id)
361
+ async for response in client.stub.ImageJoinStreaming.unary_stream(request):
362
+ if response.entry_id:
363
+ last_entry_id = response.entry_id
364
+ if response.result.status:
365
+ result_response = response
366
+ # can't return yet, since there may still be logs streaming back in subsequent responses
367
+ for task_log in response.task_logs:
368
+ if task_log.task_progress.pos or task_log.task_progress.len:
369
+ assert task_log.task_progress.progress_type == api_pb2.IMAGE_SNAPSHOT_UPLOAD
370
+ if output_mgr := _get_output_manager():
371
+ output_mgr.update_snapshot_progress(image_id, task_log.task_progress)
372
+ elif task_log.data:
373
+ if output_mgr := _get_output_manager():
374
+ await output_mgr.put_log_content(task_log)
375
+ if output_mgr := _get_output_manager():
376
+ output_mgr.flush_lines()
377
+
378
+ # Handle up to n exceptions while fetching logs
379
+ retry_count = 0
380
+ while result_response is None:
381
+ try:
382
+ await join()
383
+ except (StreamTerminatedError, GRPCError) as exc:
384
+ if isinstance(exc, GRPCError) and exc.status not in RETRYABLE_GRPC_STATUS_CODES:
385
+ raise exc
386
+ retry_count += 1
387
+ if retry_count >= 3:
388
+ raise exc
389
+ return result_response
151
390
 
152
391
 
153
392
  class _Image(_Object, type_prefix="im"):
154
393
  """Base class for container images to run functions in.
155
394
 
156
395
  Do not construct this class directly; instead use one of its static factory methods,
157
- such as `modal.Image.debian_slim`, `modal.Image.from_registry`, or `modal.Image.conda`.
396
+ such as `modal.Image.debian_slim`, `modal.Image.from_registry`, or `modal.Image.micromamba`.
158
397
  """
159
398
 
160
399
  force_build: bool
161
- inside_exceptions: List[Exception]
400
+ inside_exceptions: list[Exception]
401
+ _serve_mounts: frozenset[_Mount] # used for mounts watching in `modal serve`
402
+ _deferred_mounts: Sequence[
403
+ _Mount
404
+ ] # added as mounts on any container referencing the Image, see `def _mount_layers`
405
+ _metadata: Optional[api_pb2.ImageMetadata] = None # set on hydration, private for now
162
406
 
163
407
  def _initialize_from_empty(self):
164
408
  self.inside_exceptions = []
165
-
166
- def _hydrate_metadata(self, message: Optional[Message]):
167
- env_image_id = config.get("image_id")
409
+ self._serve_mounts = frozenset()
410
+ self._deferred_mounts = ()
411
+ self.force_build = False
412
+
413
+ def _initialize_from_other(self, other: "_Image"):
414
+ # used by .clone()
415
+ self.inside_exceptions = other.inside_exceptions
416
+ self.force_build = other.force_build
417
+ self._serve_mounts = other._serve_mounts
418
+ self._deferred_mounts = other._deferred_mounts
419
+
420
+ def _hydrate_metadata(self, metadata: Optional[Message]):
421
+ env_image_id = config.get("image_id") # set as an env var in containers
168
422
  if env_image_id == self.object_id:
169
423
  for exc in self.inside_exceptions:
424
+ # This raises exceptions from `with image.imports()` blocks
425
+ # if the hydrated image is the one used by the container
170
426
  raise exc
171
427
 
428
+ if metadata:
429
+ assert isinstance(metadata, api_pb2.ImageMetadata)
430
+ self._metadata = metadata
431
+
432
+ def _add_mount_layer_or_copy(self, mount: _Mount, copy: bool = False):
433
+ if copy:
434
+ return self.copy_mount(mount, remote_path="/")
435
+
436
+ base_image = self
437
+
438
+ async def _load(self2: "_Image", resolver: Resolver, existing_object_id: Optional[str]):
439
+ self2._hydrate_from_other(base_image) # same image id as base image as long as it's lazy
440
+ self2._deferred_mounts = tuple(base_image._deferred_mounts) + (mount,)
441
+ self2._serve_mounts = base_image._serve_mounts | ({mount} if mount.is_local() else set())
442
+
443
+ return _Image._from_loader(_load, "Image(local files)", deps=lambda: [base_image, mount])
444
+
445
+ @property
446
+ def _mount_layers(self) -> typing.Sequence[_Mount]:
447
+ """Non-evaluated mount layers on the image
448
+
449
+ When the image is used by a Modal container, these mounts need to be attached as well to
450
+ represent the full image content, as they haven't yet been represented as a layer in the
451
+ image.
452
+
453
+ When the image is used as a base image for a new layer (that is not itself a mount layer)
454
+ these mounts need to first be inserted as a copy operation (.copy_mount) into the image.
455
+ """
456
+ return self._deferred_mounts
457
+
458
+ def _assert_no_mount_layers(self):
459
+ if self._mount_layers:
460
+ raise InvalidError(
461
+ "An image tried to run a build step after using `image.add_local_*` to include local files.\n"
462
+ "\n"
463
+ "Run `image.add_local_*` commands last in your image build to avoid rebuilding images with every local "
464
+ "file change. Modal will then add these files to containers on startup instead, saving build time.\n"
465
+ "If you need to run other build steps after adding local files, set `copy=True` to copy the files "
466
+ "directly into the image, at the expense of some added build time.\n"
467
+ "\n"
468
+ "Example:\n"
469
+ "\n"
470
+ "my_image = (\n"
471
+ " Image.debian_slim()\n"
472
+ ' .add_local_file("data.json", copy=True)\n'
473
+ ' .run_commands("python -m mypak") # this now works!\n'
474
+ ")\n"
475
+ )
476
+
172
477
  @staticmethod
173
478
  def _from_args(
174
479
  *,
175
- base_images: Optional[Dict[str, "_Image"]] = None,
176
- dockerfile_function: Optional[Callable[[], DockerfileSpec]] = None,
480
+ base_images: Optional[dict[str, "_Image"]] = None,
481
+ dockerfile_function: Optional[Callable[[ImageBuilderVersion], DockerfileSpec]] = None,
177
482
  secrets: Optional[Sequence[_Secret]] = None,
178
483
  gpu_config: Optional[api_pb2.GPUConfig] = None,
179
484
  build_function: Optional["modal.functions._Function"] = None,
180
485
  build_function_input: Optional[api_pb2.FunctionInput] = None,
181
486
  image_registry_config: Optional[_ImageRegistryConfig] = None,
182
- context_mount: Optional[_Mount] = None,
487
+ context_mount_function: Optional[Callable[[], Optional[_Mount]]] = None,
183
488
  force_build: bool = False,
184
489
  # For internal use only.
185
- _namespace: int = api_pb2.DEPLOYMENT_NAMESPACE_WORKSPACE,
490
+ _namespace: "api_pb2.DeploymentNamespace.ValueType" = api_pb2.DEPLOYMENT_NAMESPACE_WORKSPACE,
491
+ _do_assert_no_mount_layers: bool = True,
186
492
  ):
187
493
  if base_images is None:
188
494
  base_images = {}
495
+
189
496
  if secrets is None:
190
497
  secrets = []
191
498
  if gpu_config is None:
@@ -200,28 +507,43 @@ class _Image(_Object, type_prefix="im"):
200
507
  if build_function and len(base_images) != 1:
201
508
  raise InvalidError("Cannot run a build function with multiple base images!")
202
509
 
203
- def _deps() -> List[_Object]:
204
- deps: List[_Object] = list(base_images.values()) + list(secrets)
510
+ def _deps() -> Sequence[_Object]:
511
+ deps = tuple(base_images.values()) + tuple(secrets)
205
512
  if build_function:
206
- deps.append(build_function)
207
- if context_mount:
208
- deps.append(context_mount)
209
- if image_registry_config.secret:
210
- deps.append(image_registry_config.secret)
513
+ deps += (build_function,)
514
+ if image_registry_config and image_registry_config.secret:
515
+ deps += (image_registry_config.secret,)
211
516
  return deps
212
517
 
213
518
  async def _load(self: _Image, resolver: Resolver, existing_object_id: Optional[str]):
519
+ context_mount = context_mount_function() if context_mount_function else None
520
+ if context_mount:
521
+ await resolver.load(context_mount)
522
+
523
+ if _do_assert_no_mount_layers:
524
+ for image in base_images.values():
525
+ # base images can't have
526
+ image._assert_no_mount_layers()
527
+
528
+ assert resolver.app_id # type narrowing
529
+ environment = await _get_environment_cached(resolver.environment_name or "", resolver.client)
530
+ # A bit hacky,but assume that the environment provides a valid builder version
531
+ image_builder_version = cast(ImageBuilderVersion, environment._settings.image_builder_version)
532
+ builder_version = _get_image_builder_version(image_builder_version)
533
+
214
534
  if dockerfile_function is None:
215
535
  dockerfile = DockerfileSpec(commands=[], context_files={})
216
536
  else:
217
- dockerfile = dockerfile_function()
537
+ dockerfile = dockerfile_function(builder_version)
218
538
 
219
539
  if not dockerfile.commands and not build_function:
220
540
  raise InvalidError(
221
541
  "No commands were provided for the image — have you tried using modal.Image.debian_slim()?"
222
542
  )
223
543
  if dockerfile.commands and build_function:
224
- raise InvalidError("Cannot provide both a build function and Dockerfile commands!")
544
+ raise InvalidError(
545
+ "Cannot provide both build function and Dockerfile commands in the same image layer!"
546
+ )
225
547
 
226
548
  base_images_pb2s = [
227
549
  api_pb2.BaseImage(
@@ -238,8 +560,9 @@ class _Image(_Object, type_prefix="im"):
238
560
 
239
561
  if build_function:
240
562
  build_function_id = build_function.object_id
241
-
242
563
  globals = build_function._get_info().get_globals()
564
+ attrs = build_function._get_info().get_cls_var_attrs()
565
+ globals = {**globals, **attrs}
243
566
  filtered_globals = {}
244
567
  for k, v in globals.items():
245
568
  if isfunction(v):
@@ -249,21 +572,23 @@ class _Image(_Object, type_prefix="im"):
249
572
  except Exception:
250
573
  # Skip unserializable values for now.
251
574
  logger.warning(
252
- f"Skipping unserializable global variable {k} for {build_function._get_info().function_name}. Changes to this variable won't invalidate the image."
575
+ f"Skipping unserializable global variable {k} for "
576
+ f"{build_function._get_info().function_name}. "
577
+ "Changes to this variable won't invalidate the image."
253
578
  )
254
579
  continue
255
580
  filtered_globals[k] = v
256
581
 
257
582
  # Cloudpickle function serialization produces unstable values.
258
583
  # TODO: better way to filter out types that don't have a stable hash?
259
- build_function_globals = serialize(filtered_globals) if filtered_globals else None
584
+ build_function_globals = serialize(filtered_globals) if filtered_globals else b""
260
585
  _build_function = api_pb2.BuildFunction(
261
586
  definition=build_function.get_build_def(),
262
587
  globals=build_function_globals,
263
588
  input=build_function_input,
264
589
  )
265
590
  else:
266
- build_function_id = None
591
+ build_function_id = ""
267
592
  _build_function = None
268
593
 
269
594
  image_definition = api_pb2.Image(
@@ -272,7 +597,7 @@ class _Image(_Object, type_prefix="im"):
272
597
  context_files=context_file_pb2s,
273
598
  secret_ids=[secret.object_id for secret in secrets],
274
599
  gpu=bool(gpu_config.type), # Note: as of 2023-01-27, server still uses this
275
- context_mount_id=(context_mount.object_id if context_mount else None),
600
+ context_mount_id=(context_mount.object_id if context_mount else ""),
276
601
  gpu_config=gpu_config, # Note: as of 2023-01-27, server ignores this
277
602
  image_registry_config=image_registry_config.get_proto(),
278
603
  runtime=config.get("function_runtime"),
@@ -283,46 +608,32 @@ class _Image(_Object, type_prefix="im"):
283
608
  req = api_pb2.ImageGetOrCreateRequest(
284
609
  app_id=resolver.app_id,
285
610
  image=image_definition,
286
- existing_image_id=existing_object_id, # TODO: ignored
611
+ existing_image_id=existing_object_id or "", # TODO: ignored
287
612
  build_function_id=build_function_id,
288
613
  force_build=config.get("force_build") or force_build,
289
614
  namespace=_namespace,
615
+ builder_version=builder_version,
616
+ # Failsafe mechanism to prevent inadvertant updates to the global images.
617
+ # Only admins can publish to the global namespace, but they have to additionally request it.
618
+ allow_global_deployment=os.environ.get("MODAL_IMAGE_ALLOW_GLOBAL_DEPLOYMENT", "0") == "1",
290
619
  )
291
620
  resp = await retry_transient_errors(resolver.client.stub.ImageGetOrCreate, req)
292
621
  image_id = resp.image_id
293
-
294
- logger.debug("Waiting for image %s" % image_id)
295
- last_entry_id: Optional[str] = None
296
- result: Optional[api_pb2.GenericResult] = None
297
-
298
- async def join():
299
- nonlocal last_entry_id, result
300
-
301
- request = api_pb2.ImageJoinStreamingRequest(image_id=image_id, timeout=55, last_entry_id=last_entry_id)
302
- async for response in unary_stream(resolver.client.stub.ImageJoinStreaming, request):
303
- if response.entry_id:
304
- last_entry_id = response.entry_id
305
- if response.result.status:
306
- result = response.result
307
- for task_log in response.task_logs:
308
- if task_log.task_progress.pos or task_log.task_progress.len:
309
- assert task_log.task_progress.progress_type == api_pb2.IMAGE_SNAPSHOT_UPLOAD
310
- resolver.image_snapshot_update(image_id, task_log.task_progress)
311
- elif task_log.data:
312
- await resolver.console_write(task_log)
313
- resolver.console_flush()
314
-
315
- # Handle up to n exceptions while fetching logs
316
- retry_count = 0
317
- while result is None:
318
- try:
319
- await join()
320
- except (StreamTerminatedError, GRPCError) as exc:
321
- if isinstance(exc, GRPCError) and exc.status not in RETRYABLE_GRPC_STATUS_CODES:
322
- raise exc
323
- retry_count += 1
324
- if retry_count >= 3:
325
- raise exc
622
+ result: api_pb2.GenericResult
623
+ metadata: Optional[api_pb2.ImageMetadata] = None
624
+
625
+ if resp.result.status:
626
+ # image already built
627
+ result = resp.result
628
+ if resp.HasField("metadata"):
629
+ metadata = resp.metadata
630
+ else:
631
+ # not built or in the process of building - wait for build
632
+ logger.debug("Waiting for image %s" % image_id)
633
+ resp = await _image_await_build_result(image_id, resolver.client)
634
+ result = resp.result
635
+ if resp.HasField("metadata"):
636
+ metadata = resp.metadata
326
637
 
327
638
  if result.status == api_pb2.GenericResult.GENERIC_STATUS_FAILURE:
328
639
  raise RemoteError(f"Image build for {image_id} failed with the exception:\n{result.exception}")
@@ -337,28 +648,19 @@ class _Image(_Object, type_prefix="im"):
337
648
  else:
338
649
  raise RemoteError("Unknown status %s!" % result.status)
339
650
 
340
- self._hydrate(image_id, resolver.client, None)
651
+ self._hydrate(image_id, resolver.client, metadata)
652
+ local_mounts = set()
653
+ for base in base_images.values():
654
+ local_mounts |= base._serve_mounts
655
+ if context_mount and context_mount.is_local():
656
+ local_mounts.add(context_mount)
657
+ self._serve_mounts = frozenset(local_mounts)
341
658
 
342
- rep = "Image()"
659
+ rep = f"Image({dockerfile_function})"
343
660
  obj = _Image._from_loader(_load, rep, deps=_deps)
344
661
  obj.force_build = force_build
345
662
  return obj
346
663
 
347
- def extend(self, **kwargs) -> "_Image":
348
- """Deprecated! This is a low-level method not intended to be part of the public API."""
349
- deprecation_warning(
350
- (2024, 3, 7),
351
- "`Image.extend` is deprecated; please use a higher-level method, such as `Image.dockerfile_commands`.",
352
- )
353
-
354
- def build_dockerfile():
355
- return DockerfileSpec(
356
- commands=kwargs.pop("dockerfile_commands", []),
357
- context_files=kwargs.pop("context_files", {}),
358
- )
359
-
360
- return _Image._from_args(base_images={"base": self}, dockerfile_function=build_dockerfile, **kwargs)
361
-
362
664
  def copy_mount(self, mount: _Mount, remote_path: Union[str, Path] = ".") -> "_Image":
363
665
  """Copy the entire contents of a `modal.Mount` into an image.
364
666
  Useful when files only available locally are required during the image
@@ -377,90 +679,314 @@ class _Image(_Object, type_prefix="im"):
377
679
  if not isinstance(mount, _Mount):
378
680
  raise InvalidError("The mount argument to copy has to be a Modal Mount object")
379
681
 
380
- def build_dockerfile() -> DockerfileSpec:
682
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
381
683
  commands = ["FROM base", f"COPY . {remote_path}"] # copy everything from the supplied mount
382
684
  return DockerfileSpec(commands=commands, context_files={})
383
685
 
384
686
  return _Image._from_args(
385
687
  base_images={"base": self},
386
688
  dockerfile_function=build_dockerfile,
387
- context_mount=mount,
689
+ context_mount_function=lambda: mount,
690
+ )
691
+
692
+ def add_local_file(self, local_path: Union[str, Path], remote_path: str, *, copy: bool = False) -> "_Image":
693
+ """Adds a local file to the image at `remote_path` within the container
694
+
695
+ By default (`copy=False`), the files are added to containers on startup and are not built into the actual Image,
696
+ which speeds up deployment.
697
+
698
+ Set `copy=True` to copy the files into an Image layer at build time instead, similar to how
699
+ [`COPY`](https://docs.docker.com/engine/reference/builder/#copy) works in a `Dockerfile`.
700
+
701
+ copy=True can slow down iteration since it requires a rebuild of the Image and any subsequent
702
+ build steps whenever the included files change, but it is required if you want to run additional
703
+ build steps after this one.
704
+ """
705
+ if not PurePosixPath(remote_path).is_absolute():
706
+ # TODO(elias): implement relative to absolute resolution using image workdir metadata
707
+ # + make default remote_path="./"
708
+ # This requires deferring the Mount creation until after "self" (the base image) has been resolved
709
+ # so we know the workdir of the operation.
710
+ raise InvalidError("image.add_local_file() currently only supports absolute remote_path values")
711
+
712
+ if remote_path.endswith("/"):
713
+ remote_path = remote_path + Path(local_path).name
714
+
715
+ mount = _Mount._from_local_file(local_path, remote_path)
716
+ return self._add_mount_layer_or_copy(mount, copy=copy)
717
+
718
+ def add_local_dir(
719
+ self,
720
+ local_path: Union[str, Path],
721
+ remote_path: str,
722
+ *,
723
+ copy: bool = False,
724
+ # Predicate filter function for file exclusion, which should accept a filepath and return `True` for exclusion.
725
+ # Defaults to excluding no files. If a Sequence is provided, it will be converted to a FilePatternMatcher.
726
+ # Which follows dockerignore syntax.
727
+ ignore: Union[Sequence[str], Callable[[Path], bool]] = [],
728
+ ) -> "_Image":
729
+ """Adds a local directory's content to the image at `remote_path` within the container
730
+
731
+ By default (`copy=False`), the files are added to containers on startup and are not built into the actual Image,
732
+ which speeds up deployment.
733
+
734
+ Set `copy=True` to copy the files into an Image layer at build time instead, similar to how
735
+ [`COPY`](https://docs.docker.com/engine/reference/builder/#copy) works in a `Dockerfile`.
736
+
737
+ copy=True can slow down iteration since it requires a rebuild of the Image and any subsequent
738
+ build steps whenever the included files change, but it is required if you want to run additional
739
+ build steps after this one.
740
+
741
+ **Usage:**
742
+
743
+ ```python
744
+ from pathlib import Path
745
+ from modal import FilePatternMatcher
746
+
747
+ image = modal.Image.debian_slim().add_local_dir(
748
+ "~/assets",
749
+ remote_path="/assets",
750
+ ignore=["*.venv"],
751
+ )
752
+
753
+ image = modal.Image.debian_slim().add_local_dir(
754
+ "~/assets",
755
+ remote_path="/assets",
756
+ ignore=lambda p: p.is_relative_to(".venv"),
757
+ )
758
+
759
+ image = modal.Image.debian_slim().add_local_dir(
760
+ "~/assets",
761
+ remote_path="/assets",
762
+ ignore=FilePatternMatcher("**/*.txt"),
388
763
  )
389
764
 
765
+ # When including files is simpler than excluding them, you can use the `~` operator to invert the matcher.
766
+ image = modal.Image.debian_slim().add_local_dir(
767
+ "~/assets",
768
+ remote_path="/assets",
769
+ ignore=~FilePatternMatcher("**/*.py"),
770
+ )
771
+
772
+ # You can also read ignore patterns from a file.
773
+ image = modal.Image.debian_slim().add_local_dir(
774
+ "~/assets",
775
+ remote_path="/assets",
776
+ ignore=FilePatternMatcher.from_file(Path("/path/to/ignorefile")),
777
+ )
778
+ ```
779
+ """
780
+ if not PurePosixPath(remote_path).is_absolute():
781
+ # TODO(elias): implement relative to absolute resolution using image workdir metadata
782
+ # + make default remote_path="./"
783
+ raise InvalidError("image.add_local_dir() currently only supports absolute remote_path values")
784
+
785
+ mount = _Mount._add_local_dir(Path(local_path), PurePosixPath(remote_path), ignore=_ignore_fn(ignore))
786
+ return self._add_mount_layer_or_copy(mount, copy=copy)
787
+
390
788
  def copy_local_file(self, local_path: Union[str, Path], remote_path: Union[str, Path] = "./") -> "_Image":
391
789
  """Copy a file into the image as a part of building it.
392
790
 
393
- This works in a similar way to [`COPY`](https://docs.docker.com/engine/reference/builder/#copy) in a `Dockerfile`.
791
+ This works in a similar way to [`COPY`](https://docs.docker.com/engine/reference/builder/#copy)
792
+ works in a `Dockerfile`.
394
793
  """
794
+ deprecation_warning(
795
+ (2024, 1, 13), COPY_DEPRECATION_MESSAGE_PATTERN.format(replacement="image.add_local_file"), pending=True
796
+ )
395
797
  basename = str(Path(local_path).name)
396
- mount = _Mount.from_local_file(local_path, remote_path=f"/{basename}")
397
798
 
398
- def build_dockerfile() -> DockerfileSpec:
799
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
399
800
  return DockerfileSpec(commands=["FROM base", f"COPY {basename} {remote_path}"], context_files={})
400
801
 
401
802
  return _Image._from_args(
402
803
  base_images={"base": self},
403
804
  dockerfile_function=build_dockerfile,
404
- context_mount=mount,
805
+ context_mount_function=lambda: _Mount._from_local_file(local_path, remote_path=f"/{basename}"),
806
+ )
807
+
808
+ def add_local_python_source(
809
+ self, *modules: str, copy: bool = False, ignore: Union[Sequence[str], Callable[[Path], bool]] = NON_PYTHON_FILES
810
+ ) -> "_Image":
811
+ """Adds locally available Python packages/modules to containers
812
+
813
+ Adds all files from the specified Python package or module to containers running the Image.
814
+
815
+ Packages are added to the `/root` directory of containers, which is on the `PYTHONPATH`
816
+ of any executed Modal Functions, enabling import of the module by that name.
817
+
818
+ By default (`copy=False`), the files are added to containers on startup and are not built into the actual Image,
819
+ which speeds up deployment.
820
+
821
+ Set `copy=True` to copy the files into an Image layer at build time instead. This can slow down iteration since
822
+ it requires a rebuild of the Image and any subsequent build steps whenever the included files change, but it is
823
+ required if you want to run additional build steps after this one.
824
+
825
+ **Note:** This excludes all dot-prefixed subdirectories or files and all `.pyc`/`__pycache__` files.
826
+ To add full directories with finer control, use `.add_local_dir()` instead and specify `/root` as
827
+ the destination directory.
828
+
829
+ By default only includes `.py`-files in the source modules. Set the `ignore` argument to a list of patterns
830
+ or a callable to override this behavior, e.g.:
831
+
832
+ ```py
833
+ # includes everything except data.json
834
+ modal.Image.debian_slim().add_local_python_source("mymodule", ignore=["data.json"])
835
+
836
+ # exclude large files
837
+ modal.Image.debian_slim().add_local_python_source(
838
+ "mymodule",
839
+ ignore=lambda p: p.stat().st_size > 1e9
405
840
  )
841
+ ```
842
+ """
843
+ mount = _Mount._from_local_python_packages(*modules, ignore=ignore)
844
+ return self._add_mount_layer_or_copy(mount, copy=copy)
406
845
 
407
- def copy_local_dir(self, local_path: Union[str, Path], remote_path: Union[str, Path] = ".") -> "_Image":
846
+ def copy_local_dir(
847
+ self,
848
+ local_path: Union[str, Path],
849
+ remote_path: Union[str, Path] = ".",
850
+ # Predicate filter function for file exclusion, which should accept a filepath and return `True` for exclusion.
851
+ # Defaults to excluding no files. If a Sequence is provided, it will be converted to a FilePatternMatcher.
852
+ # Which follows dockerignore syntax.
853
+ ignore: Union[Sequence[str], Callable[[Path], bool]] = [],
854
+ ) -> "_Image":
408
855
  """Copy a directory into the image as a part of building the image.
409
856
 
410
- This works in a similar way to [`COPY`](https://docs.docker.com/engine/reference/builder/#copy) in a `Dockerfile`.
857
+ This works in a similar way to [`COPY`](https://docs.docker.com/engine/reference/builder/#copy)
858
+ works in a `Dockerfile`.
859
+
860
+ **Usage:**
861
+
862
+ ```python
863
+ from pathlib import Path
864
+ from modal import FilePatternMatcher
865
+
866
+ image = modal.Image.debian_slim().copy_local_dir(
867
+ "~/assets",
868
+ remote_path="/assets",
869
+ ignore=["**/*.venv"],
870
+ )
871
+
872
+ image = modal.Image.debian_slim().copy_local_dir(
873
+ "~/assets",
874
+ remote_path="/assets",
875
+ ignore=lambda p: p.is_relative_to(".venv"),
876
+ )
877
+
878
+ image = modal.Image.debian_slim().copy_local_dir(
879
+ "~/assets",
880
+ remote_path="/assets",
881
+ ignore=FilePatternMatcher("**/*.txt"),
882
+ )
883
+
884
+ # When including files is simpler than excluding them, you can use the `~` operator to invert the matcher.
885
+ image = modal.Image.debian_slim().copy_local_dir(
886
+ "~/assets",
887
+ remote_path="/assets",
888
+ ignore=~FilePatternMatcher("**/*.py"),
889
+ )
890
+
891
+ # You can also read ignore patterns from a file.
892
+ image = modal.Image.debian_slim().copy_local_dir(
893
+ "~/assets",
894
+ remote_path="/assets",
895
+ ignore=FilePatternMatcher.from_file(Path("/path/to/ignorefile")),
896
+ )
897
+ ```
411
898
  """
412
- mount = _Mount.from_local_dir(local_path, remote_path="/")
899
+ deprecation_warning(
900
+ (2024, 1, 13), COPY_DEPRECATION_MESSAGE_PATTERN.format(replacement="image.add_local_dir"), pending=True
901
+ )
413
902
 
414
- def build_dockerfile() -> DockerfileSpec:
903
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
415
904
  return DockerfileSpec(commands=["FROM base", f"COPY . {remote_path}"], context_files={})
416
905
 
417
906
  return _Image._from_args(
418
907
  base_images={"base": self},
419
908
  dockerfile_function=build_dockerfile,
420
- context_mount=mount,
909
+ context_mount_function=lambda: _Mount._add_local_dir(
910
+ Path(local_path), PurePosixPath("/"), ignore=_ignore_fn(ignore)
911
+ ),
421
912
  )
422
913
 
914
+ @staticmethod
915
+ async def from_id(image_id: str, client: Optional[_Client] = None) -> "_Image":
916
+ """Construct an Image from an id and look up the Image result.
917
+
918
+ The ID of an Image object can be accessed using `.object_id`.
919
+ """
920
+ if client is None:
921
+ client = await _Client.from_env()
922
+
923
+ async def _load(self: _Image, resolver: Resolver, existing_object_id: Optional[str]):
924
+ resp = await retry_transient_errors(client.stub.ImageFromId, api_pb2.ImageFromIdRequest(image_id=image_id))
925
+ self._hydrate(resp.image_id, resolver.client, resp.metadata)
926
+
927
+ rep = "Image()"
928
+ obj = _Image._from_loader(_load, rep)
929
+
930
+ return obj
931
+
423
932
  def pip_install(
424
933
  self,
425
- *packages: Union[str, List[str]], # A list of Python packages, eg. ["numpy", "matplotlib>=3.5.0"]
934
+ *packages: Union[str, list[str]], # A list of Python packages, eg. ["numpy", "matplotlib>=3.5.0"]
426
935
  find_links: Optional[str] = None, # Passes -f (--find-links) pip install
427
936
  index_url: Optional[str] = None, # Passes -i (--index-url) to pip install
428
937
  extra_index_url: Optional[str] = None, # Passes --extra-index-url to pip install
429
938
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
430
- force_build: bool = False,
939
+ extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
940
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
431
941
  secrets: Sequence[_Secret] = [],
432
942
  gpu: GPU_T = None,
433
943
  ) -> "_Image":
434
944
  """Install a list of Python packages using pip.
435
945
 
436
- **Example**
946
+ **Examples**
437
947
 
948
+ Simple installation:
438
949
  ```python
439
950
  image = modal.Image.debian_slim().pip_install("click", "httpx~=0.23.3")
440
951
  ```
952
+
953
+ More complex installation:
954
+ ```python
955
+ image = (
956
+ modal.Image.from_registry(
957
+ "nvidia/cuda:12.2.0-devel-ubuntu22.04", add_python="3.11"
958
+ )
959
+ .pip_install(
960
+ "ninja",
961
+ "packaging",
962
+ "wheel",
963
+ "transformers==4.40.2",
964
+ )
965
+ .pip_install(
966
+ "flash-attn==2.5.8", extra_options="--no-build-isolation"
967
+ )
968
+ )
969
+ ```
441
970
  """
442
971
  pkgs = _flatten_str_args("pip_install", "packages", packages)
443
972
  if not pkgs:
444
973
  return self
445
974
 
446
- def build_dockerfile() -> DockerfileSpec:
447
- extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre)
448
- package_args = " ".join(shlex.quote(pkg) for pkg in sorted(pkgs))
449
-
450
- commands = [
451
- "FROM base",
452
- f"RUN python -m pip install {package_args} {extra_args}",
453
- # TODO(erikbern): if extra_args is empty, we add a superfluous space at the end.
454
- # However removing it at this point would cause image hashes to change.
455
- # Maybe let's remove it later when/if client requirements change.
456
- ]
975
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
976
+ package_args = shlex.join(sorted(pkgs))
977
+ extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre, extra_options)
978
+ commands = ["FROM base", f"RUN python -m pip install {package_args} {extra_args}"]
979
+ if not _validate_packages(pkgs):
980
+ _warn_invalid_packages(commands[-1].split("RUN ")[-1])
981
+ if version > "2023.12": # Back-compat for legacy trailing space with empty extra_args
982
+ commands = [cmd.strip() for cmd in commands]
457
983
  return DockerfileSpec(commands=commands, context_files={})
458
984
 
459
985
  gpu_config = parse_gpu_config(gpu)
460
986
  return _Image._from_args(
461
987
  base_images={"base": self},
462
988
  dockerfile_function=build_dockerfile,
463
- force_build=self.force_build or force_build, # TODO shouldn't forcing upstream build always rerun this?
989
+ force_build=self.force_build or force_build,
464
990
  gpu_config=gpu_config,
465
991
  secrets=secrets,
466
992
  )
@@ -473,9 +999,10 @@ class _Image(_Object, type_prefix="im"):
473
999
  index_url: Optional[str] = None, # Passes -i (--index-url) to pip install
474
1000
  extra_index_url: Optional[str] = None, # Passes --extra-index-url to pip install
475
1001
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
1002
+ extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
476
1003
  gpu: GPU_T = None,
477
1004
  secrets: Sequence[_Secret] = [],
478
- force_build: bool = False,
1005
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
479
1006
  ) -> "_Image":
480
1007
  """
481
1008
  Install a list of Python packages from private git repositories using pip.
@@ -510,7 +1037,8 @@ class _Image(_Object, type_prefix="im"):
510
1037
  """
511
1038
  if not secrets:
512
1039
  raise InvalidError(
513
- "No secrets provided to function. Installing private packages requires tokens to be passed via modal.Secret objects."
1040
+ "No secrets provided to function. "
1041
+ "Installing private packages requires tokens to be passed via modal.Secret objects."
514
1042
  )
515
1043
 
516
1044
  invalid_repos = []
@@ -534,20 +1062,24 @@ class _Image(_Object, type_prefix="im"):
534
1062
 
535
1063
  secret_names = ",".join([s.app_name if hasattr(s, "app_name") else str(s) for s in secrets]) # type: ignore
536
1064
 
537
- def build_dockerfile() -> DockerfileSpec:
1065
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
538
1066
  commands = ["FROM base"]
539
1067
  if any(r.startswith("github") for r in repositories):
540
1068
  commands.append(
541
- f"RUN bash -c \"[[ -v GITHUB_TOKEN ]] || (echo 'GITHUB_TOKEN env var not set by provided modal.Secret(s): {secret_names}' && exit 1)\"",
1069
+ 'RUN bash -c "[[ -v GITHUB_TOKEN ]] || '
1070
+ f"(echo 'GITHUB_TOKEN env var not set by provided modal.Secret(s): {secret_names}' && exit 1)\"",
542
1071
  )
543
1072
  if any(r.startswith("gitlab") for r in repositories):
544
1073
  commands.append(
545
- f"RUN bash -c \"[[ -v GITLAB_TOKEN ]] || (echo 'GITLAB_TOKEN env var not set by provided modal.Secret(s): {secret_names}' && exit 1)\"",
1074
+ 'RUN bash -c "[[ -v GITLAB_TOKEN ]] || '
1075
+ f"(echo 'GITLAB_TOKEN env var not set by provided modal.Secret(s): {secret_names}' && exit 1)\"",
546
1076
  )
547
1077
 
548
- extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre)
1078
+ extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre, extra_options)
549
1079
  commands.extend(["RUN apt-get update && apt-get install -y git"])
550
1080
  commands.extend([f'RUN python3 -m pip install "{url}" {extra_args}' for url in install_urls])
1081
+ if version > "2023.12": # Back-compat for legacy trailing space with empty extra_args
1082
+ commands = [cmd.strip() for cmd in commands]
551
1083
  return DockerfileSpec(commands=commands, context_files={})
552
1084
 
553
1085
  gpu_config = parse_gpu_config(gpu)
@@ -568,24 +1100,28 @@ class _Image(_Object, type_prefix="im"):
568
1100
  index_url: Optional[str] = None, # Passes -i (--index-url) to pip install
569
1101
  extra_index_url: Optional[str] = None, # Passes --extra-index-url to pip install
570
1102
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
571
- force_build: bool = False,
1103
+ extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
1104
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
572
1105
  secrets: Sequence[_Secret] = [],
573
1106
  gpu: GPU_T = None,
574
1107
  ) -> "_Image":
575
1108
  """Install a list of Python packages from a local `requirements.txt` file."""
576
1109
 
577
- def build_dockerfile() -> DockerfileSpec:
1110
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
578
1111
  requirements_txt_path = os.path.expanduser(requirements_txt)
579
1112
  context_files = {"/.requirements.txt": requirements_txt_path}
580
1113
 
581
- find_links_arg = f"-f {find_links}" if find_links else ""
582
- extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre)
1114
+ null_find_links_arg = " " if version == "2023.12" else ""
1115
+ find_links_arg = f" -f {find_links}" if find_links else null_find_links_arg
1116
+ extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre, extra_options)
583
1117
 
584
1118
  commands = [
585
1119
  "FROM base",
586
1120
  "COPY /.requirements.txt /.requirements.txt",
587
- f"RUN python -m pip install -r /.requirements.txt {find_links_arg} {extra_args}",
1121
+ f"RUN python -m pip install -r /.requirements.txt{find_links_arg} {extra_args}",
588
1122
  ]
1123
+ if version > "2023.12": # Back-compat for legacy whitespace with empty find_link / extra args
1124
+ commands = [cmd.strip() for cmd in commands]
589
1125
  return DockerfileSpec(commands=commands, context_files=context_files)
590
1126
 
591
1127
  return _Image._from_args(
@@ -599,13 +1135,14 @@ class _Image(_Object, type_prefix="im"):
599
1135
  def pip_install_from_pyproject(
600
1136
  self,
601
1137
  pyproject_toml: str,
602
- optional_dependencies: List[str] = [],
1138
+ optional_dependencies: list[str] = [],
603
1139
  *,
604
1140
  find_links: Optional[str] = None, # Passes -f (--find-links) pip install
605
1141
  index_url: Optional[str] = None, # Passes -i (--index-url) to pip install
606
1142
  extra_index_url: Optional[str] = None, # Passes --extra-index-url to pip install
607
1143
  pre: bool = False, # Passes --pre (allow pre-releases) to pip install
608
- force_build: bool = False,
1144
+ extra_options: str = "", # Additional options to pass to pip install, e.g. "--no-build-isolation --no-clean"
1145
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
609
1146
  secrets: Sequence[_Secret] = [],
610
1147
  gpu: GPU_T = None,
611
1148
  ) -> "_Image":
@@ -617,7 +1154,7 @@ class _Image(_Object, type_prefix="im"):
617
1154
  all of the packages in each listed section are installed as well.
618
1155
  """
619
1156
 
620
- def build_dockerfile() -> DockerfileSpec:
1157
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
621
1158
  # Defer toml import so we don't need it in the container runtime environment
622
1159
  import toml
623
1160
 
@@ -627,8 +1164,10 @@ class _Image(_Object, type_prefix="im"):
627
1164
  if "project" not in config or "dependencies" not in config["project"]:
628
1165
  msg = (
629
1166
  "No [project.dependencies] section in pyproject.toml file. "
630
- "If your pyproject.toml instead declares [tool.poetry.dependencies], use `Image.poetry_install_from_file()`. "
631
- "See https://packaging.python.org/en/latest/guides/writing-pyproject-toml for further file format guidelines."
1167
+ "If your pyproject.toml instead declares [tool.poetry.dependencies], "
1168
+ "use `Image.poetry_install_from_file()`. "
1169
+ "See https://packaging.python.org/en/latest/guides/writing-pyproject-toml "
1170
+ "for further file format guidelines."
632
1171
  )
633
1172
  raise ValueError(msg)
634
1173
  else:
@@ -639,16 +1178,12 @@ class _Image(_Object, type_prefix="im"):
639
1178
  if dep_group_name in optionals:
640
1179
  dependencies.extend(optionals[dep_group_name])
641
1180
 
642
- extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre)
643
- package_args = " ".join(shlex.quote(pkg) for pkg in sorted(dependencies))
1181
+ extra_args = _make_pip_install_args(find_links, index_url, extra_index_url, pre, extra_options)
1182
+ package_args = shlex.join(sorted(dependencies))
1183
+ commands = ["FROM base", f"RUN python -m pip install {package_args} {extra_args}"]
1184
+ if version > "2023.12": # Back-compat for legacy trailing space
1185
+ commands = [cmd.strip() for cmd in commands]
644
1186
 
645
- commands = [
646
- "FROM base",
647
- f"RUN python -m pip install {package_args} {extra_args}",
648
- # TODO(erikbern): if extra_args is empty, we add a superfluous space at the end.
649
- # However removing it at this point would cause image hashes to change.
650
- # Maybe let's remove it later when/if client requirements change.
651
- ]
652
1187
  return DockerfileSpec(commands=commands, context_files={})
653
1188
 
654
1189
  return _Image._from_args(
@@ -668,13 +1203,13 @@ class _Image(_Object, type_prefix="im"):
668
1203
  ignore_lockfile: bool = False,
669
1204
  # If set to True, use old installer. See https://github.com/python-poetry/poetry/issues/3336
670
1205
  old_installer: bool = False,
671
- force_build: bool = False,
1206
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
672
1207
  # Selected optional dependency groups to install (See https://python-poetry.org/docs/cli/#install)
673
- with_: List[str] = [],
1208
+ with_: list[str] = [],
674
1209
  # Selected optional dependency groups to exclude (See https://python-poetry.org/docs/cli/#install)
675
- without: List[str] = [],
1210
+ without: list[str] = [],
676
1211
  # Only install dependency groups specifed in this list.
677
- only: List[str] = [],
1212
+ only: list[str] = [],
678
1213
  *,
679
1214
  secrets: Sequence[_Secret] = [],
680
1215
  gpu: GPU_T = None,
@@ -684,11 +1219,11 @@ class _Image(_Object, type_prefix="im"):
684
1219
  If not provided as argument the path to the lockfile is inferred. However, the
685
1220
  file has to exist, unless `ignore_lockfile` is set to `True`.
686
1221
 
687
- Note that the root project of the poetry project is not installed,
688
- only the dependencies. For including local packages see `modal.Mount.from_local_python_packages`
1222
+ Note that the root project of the poetry project is not installed, only the dependencies.
1223
+ For including local python source files see `add_local_python_source`
689
1224
  """
690
1225
 
691
- def build_dockerfile() -> DockerfileSpec:
1226
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
692
1227
  context_files = {"/.pyproject.toml": os.path.expanduser(poetry_pyproject_toml)}
693
1228
 
694
1229
  commands = ["FROM base", "RUN python -m pip install poetry~=1.7"]
@@ -702,14 +1237,17 @@ class _Image(_Object, type_prefix="im"):
702
1237
  p = Path(poetry_pyproject_toml).parent / "poetry.lock"
703
1238
  if not p.exists():
704
1239
  raise NotFoundError(
705
- f"poetry.lock not found at inferred location: {p.absolute()}. If a lockfile is not needed, `ignore_lockfile=True` can be used."
1240
+ f"poetry.lock not found at inferred location: {p.absolute()}. "
1241
+ "If a lockfile is not needed, `ignore_lockfile=True` can be used."
706
1242
  )
707
1243
  poetry_lockfile = p.as_posix()
708
1244
  context_files["/.poetry.lock"] = poetry_lockfile
709
1245
  commands += ["COPY /.poetry.lock /tmp/poetry/poetry.lock"]
710
1246
 
711
- # Indentation for back-compat TODO: fix when we update image_builder_version
712
- install_cmd = " poetry install --no-root"
1247
+ install_cmd = "poetry install --no-root"
1248
+ if version == "2023.12":
1249
+ # Backwards compatability for previous string, which started with whitespace
1250
+ install_cmd = " " + install_cmd
713
1251
 
714
1252
  if with_:
715
1253
  install_cmd += f" --with {','.join(with_)}"
@@ -739,205 +1277,188 @@ class _Image(_Object, type_prefix="im"):
739
1277
 
740
1278
  def dockerfile_commands(
741
1279
  self,
742
- *dockerfile_commands: Union[str, List[str]],
743
- context_files: Dict[str, str] = {},
1280
+ *dockerfile_commands: Union[str, list[str]],
1281
+ context_files: dict[str, str] = {},
744
1282
  secrets: Sequence[_Secret] = [],
745
1283
  gpu: GPU_T = None,
746
1284
  # modal.Mount with local files to supply as build context for COPY commands
747
1285
  context_mount: Optional[_Mount] = None,
748
- force_build: bool = False,
1286
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1287
+ ignore: Union[Sequence[str], Callable[[Path], bool]] = AUTO_DOCKERIGNORE,
749
1288
  ) -> "_Image":
750
- """Extend an image with arbitrary Dockerfile-like commands."""
1289
+ """
1290
+ Extend an image with arbitrary Dockerfile-like commands.
1291
+
1292
+ **Usage:**
1293
+
1294
+ ```python
1295
+ from pathlib import Path
1296
+ from modal import FilePatternMatcher
1297
+
1298
+ # By default a .dockerignore file is used if present in the current working directory
1299
+ image = modal.Image.debian_slim().dockerfile_commands(
1300
+ ["COPY data /data"],
1301
+ )
1302
+
1303
+ image = modal.Image.debian_slim().dockerfile_commands(
1304
+ ["COPY data /data"],
1305
+ ignore=["*.venv"],
1306
+ )
1307
+
1308
+ image = modal.Image.debian_slim().dockerfile_commands(
1309
+ ["COPY data /data"],
1310
+ ignore=lambda p: p.is_relative_to(".venv"),
1311
+ )
1312
+
1313
+ image = modal.Image.debian_slim().dockerfile_commands(
1314
+ ["COPY data /data"],
1315
+ ignore=FilePatternMatcher("**/*.txt"),
1316
+ )
1317
+
1318
+ # When including files is simpler than excluding them, you can use the `~` operator to invert the matcher.
1319
+ image = modal.Image.debian_slim().dockerfile_commands(
1320
+ ["COPY data /data"],
1321
+ ignore=~FilePatternMatcher("**/*.py"),
1322
+ )
1323
+
1324
+ # You can also read ignore patterns from a file.
1325
+ image = modal.Image.debian_slim().dockerfile_commands(
1326
+ ["COPY data /data"],
1327
+ ignore=FilePatternMatcher.from_file(Path("/path/to/dockerignore")),
1328
+ )
1329
+ ```
1330
+ """
1331
+ if context_mount is not None:
1332
+ deprecation_warning(
1333
+ (2025, 1, 13),
1334
+ "`context_mount` is deprecated."
1335
+ + " Files are now automatically added to the build context based on the commands.",
1336
+ pending=True,
1337
+ )
751
1338
  cmds = _flatten_str_args("dockerfile_commands", "dockerfile_commands", dockerfile_commands)
752
1339
  if not cmds:
753
1340
  return self
754
1341
 
755
- def build_dockerfile() -> DockerfileSpec:
1342
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
756
1343
  return DockerfileSpec(commands=["FROM base", *cmds], context_files=context_files)
757
1344
 
758
1345
  return _Image._from_args(
759
1346
  base_images={"base": self},
760
1347
  dockerfile_function=build_dockerfile,
761
1348
  secrets=secrets,
762
- gpu_config=parse_gpu_config(gpu, raise_on_true=False),
763
- context_mount=context_mount,
1349
+ gpu_config=parse_gpu_config(gpu),
1350
+ context_mount_function=_create_context_mount_function(
1351
+ ignore=ignore, dockerfile_cmds=cmds, context_mount=context_mount
1352
+ ),
764
1353
  force_build=self.force_build or force_build,
765
1354
  )
766
1355
 
1356
+ def entrypoint(
1357
+ self,
1358
+ entrypoint_commands: list[str],
1359
+ ) -> "_Image":
1360
+ """Set the entrypoint for the image."""
1361
+ args_str = _flatten_str_args("entrypoint", "entrypoint_files", entrypoint_commands)
1362
+ args_str = '"' + '", "'.join(args_str) + '"' if args_str else ""
1363
+ dockerfile_cmd = f"ENTRYPOINT [{args_str}]"
1364
+
1365
+ return self.dockerfile_commands(dockerfile_cmd)
1366
+
1367
+ def shell(
1368
+ self,
1369
+ shell_commands: list[str],
1370
+ ) -> "_Image":
1371
+ """Overwrite default shell for the image."""
1372
+ args_str = _flatten_str_args("shell", "shell_commands", shell_commands)
1373
+ args_str = '"' + '", "'.join(args_str) + '"' if args_str else ""
1374
+ dockerfile_cmd = f"SHELL [{args_str}]"
1375
+
1376
+ return self.dockerfile_commands(dockerfile_cmd)
1377
+
767
1378
  def run_commands(
768
1379
  self,
769
- *commands: Union[str, List[str]],
1380
+ *commands: Union[str, list[str]],
770
1381
  secrets: Sequence[_Secret] = [],
771
1382
  gpu: GPU_T = None,
772
- force_build: bool = False,
1383
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
773
1384
  ) -> "_Image":
774
1385
  """Extend an image with a list of shell commands to run."""
775
1386
  cmds = _flatten_str_args("run_commands", "commands", commands)
776
1387
  if not cmds:
777
1388
  return self
778
1389
 
779
- def build_dockerfile() -> DockerfileSpec:
1390
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
780
1391
  return DockerfileSpec(commands=["FROM base"] + [f"RUN {cmd}" for cmd in cmds], context_files={})
781
1392
 
782
1393
  return _Image._from_args(
783
1394
  base_images={"base": self},
784
1395
  dockerfile_function=build_dockerfile,
785
1396
  secrets=secrets,
786
- gpu_config=parse_gpu_config(gpu, raise_on_true=False),
1397
+ gpu_config=parse_gpu_config(gpu),
787
1398
  force_build=self.force_build or force_build,
788
1399
  )
789
1400
 
790
1401
  @staticmethod
791
- def conda(python_version: str = "3.9", force_build: bool = False) -> "_Image":
792
- """
793
- A Conda base image, using miniconda3 and derived from the official Docker Hub image.
794
- In most cases, using [`Image.micromamba()`](/docs/reference/modal.Image#micromamba) with [`micromamba_install`](/docs/reference/modal.Image#micromamba_install) is recommended over `Image.conda()`, as it leads to significantly faster image build times.
795
- """
796
- _validate_python_version(python_version)
797
-
798
- def build_dockerfile() -> DockerfileSpec:
799
- requirements_path = _get_client_requirements_path(python_version)
800
- context_files = {"/modal_requirements.txt": requirements_path}
801
-
802
- # Doesn't use the official continuumio/miniconda3 image as a base. That image has maintenance
803
- # issues (https://github.com/ContinuumIO/docker-images/issues) and building our own is more flexible.
804
- conda_install_script = "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh"
805
- commands = [
806
- "FROM debian:bullseye", # the -slim images lack files required by Conda.
807
- # Temporarily add utility packages for conda installation.
808
- "RUN apt-get --quiet update && apt-get --quiet --yes install curl bzip2 \\",
809
- f"&& curl --silent --show-error --location {conda_install_script} --output /tmp/miniconda.sh \\",
810
- # Install miniconda to a filesystem location on the $PATH of Modal container tasks.
811
- # -b = install in batch mode w/o manual intervention.
812
- # -f = allow install prefix to already exist.
813
- # -p = the install prefix location.
814
- "&& bash /tmp/miniconda.sh -bfp /usr/local \\ ",
815
- "&& rm -rf /tmp/miniconda.sh",
816
- # Biggest and most stable community-led Conda channel.
817
- "RUN conda config --add channels conda-forge \\ ",
818
- # softlinking can put conda in a broken state, surfacing error on uninstall like:
819
- # `No such device or address: '/usr/local/lib/libz.so' -> '/usr/local/lib/libz.so.c~'`
820
- "&& conda config --set allow_softlinks false \\ ",
821
- # Install requested Python version from conda-forge channel; base debian image has only 3.7.
822
- f"&& conda install --yes --channel conda-forge python={python_version} \\ ",
823
- "&& conda update conda \\ ",
824
- # Remove now unneeded packages and files.
825
- "&& apt-get --quiet --yes remove curl bzip2 \\ ",
826
- "&& apt-get --quiet --yes autoremove \\ ",
827
- "&& apt-get autoclean \\ ",
828
- "&& rm -rf /var/lib/apt/lists/* /var/log/dpkg.log \\ ",
829
- "&& conda clean --all --yes",
830
- # Setup .bashrc for conda.
831
- "RUN conda init bash --verbose",
832
- "COPY /modal_requirements.txt /modal_requirements.txt",
833
- # .bashrc is explicitly sourced because RUN is a non-login shell and doesn't run bash.
834
- "RUN . /root/.bashrc && conda activate base \\ ",
835
- "&& python -m pip install --upgrade pip \\ ",
836
- "&& python -m pip install -r /modal_requirements.txt",
837
- ]
838
- return DockerfileSpec(commands=commands, context_files=context_files)
839
-
840
- base = _Image._from_args(
841
- dockerfile_function=build_dockerfile,
842
- force_build=force_build,
843
- _namespace=api_pb2.DEPLOYMENT_NAMESPACE_GLOBAL,
844
- )
845
-
846
- # TODO include these in the base image once we version the build?
847
- return base.dockerfile_commands(
848
- [
849
- "ENV CONDA_EXE=/usr/local/bin/conda",
850
- "ENV CONDA_PREFIX=/usr/local",
851
- "ENV CONDA_PROMPT_MODIFIER=(base)",
852
- "ENV CONDA_SHLVL=1",
853
- "ENV CONDA_PYTHON_EXE=/usr/local/bin/python",
854
- "ENV CONDA_DEFAULT_ENV=base",
855
- ]
1402
+ def conda(python_version: Optional[str] = None, force_build: bool = False):
1403
+ """mdmd:hidden"""
1404
+ message = (
1405
+ "`Image.conda` is deprecated."
1406
+ " Please use the faster and more reliable `Image.micromamba` constructor instead."
856
1407
  )
1408
+ deprecation_error((2024, 5, 2), message)
857
1409
 
858
1410
  def conda_install(
859
1411
  self,
860
- *packages: Union[str, List[str]], # A list of Python packages, eg. ["numpy", "matplotlib>=3.5.0"]
861
- channels: List[str] = [], # A list of Conda channels, eg. ["conda-forge", "nvidia"]
862
- force_build: bool = False,
1412
+ *packages: Union[str, list[str]], # A list of Python packages, eg. ["numpy", "matplotlib>=3.5.0"]
1413
+ channels: list[str] = [], # A list of Conda channels, eg. ["conda-forge", "nvidia"]
1414
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
863
1415
  secrets: Sequence[_Secret] = [],
864
1416
  gpu: GPU_T = None,
865
- ) -> "_Image":
866
- """Install a list of additional packages using Conda. Note that in most cases, using [`Image.micromamba()`](/docs/reference/modal.Image#micromamba) with [`micromamba_install`](/docs/reference/modal.Image#micromamba_install)
867
- is recommended over `conda_install`, as it leads to significantly faster image build times."""
868
-
869
- pkgs = _flatten_str_args("conda_install", "packages", packages)
870
- if not pkgs:
871
- return self
872
-
873
- def build_dockerfile() -> DockerfileSpec:
874
- package_args = " ".join(shlex.quote(pkg) for pkg in pkgs)
875
- channel_args = "".join(f" -c {channel}" for channel in channels)
876
-
877
- commands = [
878
- "FROM base",
879
- f"RUN conda install {package_args}{channel_args} --yes \\ ",
880
- "&& conda clean --yes --index-cache --tarballs --tempfiles --logfiles",
881
- ]
882
- return DockerfileSpec(commands=commands, context_files={})
883
-
884
- return _Image._from_args(
885
- base_images={"base": self},
886
- dockerfile_function=build_dockerfile,
887
- force_build=self.force_build or force_build,
888
- secrets=secrets,
889
- gpu_config=parse_gpu_config(gpu),
1417
+ ):
1418
+ """mdmd:hidden"""
1419
+ message = (
1420
+ "`Image.conda_install` is deprecated."
1421
+ " Please use the faster and more reliable `Image.micromamba_install` instead."
890
1422
  )
1423
+ deprecation_error((2024, 5, 2), message)
891
1424
 
892
1425
  def conda_update_from_environment(
893
1426
  self,
894
1427
  environment_yml: str,
895
- force_build: bool = False,
1428
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
896
1429
  *,
897
1430
  secrets: Sequence[_Secret] = [],
898
1431
  gpu: GPU_T = None,
899
- ) -> "_Image":
900
- """Update a Conda environment using dependencies from a given environment.yml file."""
901
-
902
- def build_dockerfile() -> DockerfileSpec:
903
- context_files = {"/environment.yml": os.path.expanduser(environment_yml)}
904
-
905
- commands = [
906
- "FROM base",
907
- "COPY /environment.yml /environment.yml",
908
- "RUN conda env update --name base -f /environment.yml \\ ",
909
- "&& conda clean --yes --index-cache --tarballs --tempfiles --logfiles",
910
- ]
911
- return DockerfileSpec(commands=commands, context_files=context_files)
912
-
913
- return _Image._from_args(
914
- base_images={"base": self},
915
- dockerfile_function=build_dockerfile,
916
- force_build=self.force_build or force_build,
917
- secrets=secrets,
918
- gpu_config=parse_gpu_config(gpu),
1432
+ ):
1433
+ """mdmd:hidden"""
1434
+ message = (
1435
+ "Image.conda_update_from_environment` is deprecated."
1436
+ " Please use the `Image.micromamba_install` method (with the `spec_file` parameter) instead."
919
1437
  )
1438
+ deprecation_error((2024, 5, 2), message)
920
1439
 
921
1440
  @staticmethod
922
1441
  def micromamba(
923
- python_version: str = "3.9",
924
- force_build: bool = False,
1442
+ python_version: Optional[str] = None,
1443
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
925
1444
  ) -> "_Image":
926
- """
927
- A Micromamba base image. Micromamba allows for fast building of small Conda-based containers.
928
- In most cases it will be faster than using [`Image.conda()`](/docs/reference/modal.Image#conda).
929
- """
930
- _validate_python_version(python_version)
931
-
932
- def build_dockerfile() -> DockerfileSpec:
933
- tag = "mambaorg/micromamba:1.3.1-bullseye-slim"
1445
+ """A Micromamba base image. Micromamba allows for fast building of small Conda-based containers."""
1446
+
1447
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1448
+ nonlocal python_version
1449
+ if version == "2023.12" and python_version is None:
1450
+ python_version = "3.9" # Backcompat for old hardcoded default param
1451
+ validated_python_version = _validate_python_version(python_version, version)
1452
+ micromamba_version = _base_image_config("micromamba", version)
1453
+ debian_codename = _base_image_config("debian", version)
1454
+ tag = f"mambaorg/micromamba:{micromamba_version}-{debian_codename}-slim"
934
1455
  setup_commands = [
935
1456
  'SHELL ["/usr/local/bin/_dockerfile_shell.sh"]',
936
1457
  "ENV MAMBA_DOCKERFILE_ACTIVATE=1",
937
- f"RUN micromamba install -n base -y python={python_version} pip -c conda-forge",
1458
+ f"RUN micromamba install -n base -y python={validated_python_version} pip -c conda-forge",
938
1459
  ]
939
- commands = _Image._registry_setup_commands(tag, setup_commands, add_python=None)
940
- context_files = {"/modal_requirements.txt": _get_client_requirements_path(python_version)}
1460
+ commands = _Image._registry_setup_commands(tag, version, setup_commands)
1461
+ context_files = {CONTAINER_REQUIREMENTS_PATH: _get_modal_requirements_path(version, python_version)}
941
1462
  return DockerfileSpec(commands=commands, context_files=context_files)
942
1463
 
943
1464
  return _Image._from_args(
@@ -949,28 +1470,36 @@ class _Image(_Object, type_prefix="im"):
949
1470
  def micromamba_install(
950
1471
  self,
951
1472
  # A list of Python packages, eg. ["numpy", "matplotlib>=3.5.0"]
952
- *packages: Union[str, List[str]],
953
- # A list of Conda channels, eg. ["conda-forge", "nvidia"]
954
- channels: List[str] = [],
955
- force_build: bool = False,
1473
+ *packages: Union[str, list[str]],
1474
+ # A local path to a file containing package specifications
1475
+ spec_file: Optional[str] = None,
1476
+ # A list of Conda channels, eg. ["conda-forge", "nvidia"].
1477
+ channels: list[str] = [],
1478
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
956
1479
  secrets: Sequence[_Secret] = [],
957
1480
  gpu: GPU_T = None,
958
1481
  ) -> "_Image":
959
1482
  """Install a list of additional packages using micromamba."""
960
-
961
1483
  pkgs = _flatten_str_args("micromamba_install", "packages", packages)
962
- if not pkgs:
1484
+ if not pkgs and spec_file is None:
963
1485
  return self
964
1486
 
965
- def build_dockerfile() -> DockerfileSpec:
966
- package_args = " ".join(shlex.quote(pkg) for pkg in pkgs)
1487
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1488
+ package_args = shlex.join(pkgs)
967
1489
  channel_args = "".join(f" -c {channel}" for channel in channels)
968
1490
 
1491
+ space = " " if package_args else ""
1492
+ remote_spec_file = "" if spec_file is None else f"/{os.path.basename(spec_file)}"
1493
+ file_arg = "" if spec_file is None else f"{space}-f {remote_spec_file} -n base"
1494
+ copy_commands = [] if spec_file is None else [f"COPY {remote_spec_file} {remote_spec_file}"]
1495
+
969
1496
  commands = [
970
1497
  "FROM base",
971
- f"RUN micromamba install {package_args}{channel_args} --yes",
1498
+ *copy_commands,
1499
+ f"RUN micromamba install {package_args}{file_arg}{channel_args} --yes",
972
1500
  ]
973
- return DockerfileSpec(commands=commands, context_files={})
1501
+ context_files = {} if spec_file is None else {remote_spec_file: os.path.expanduser(spec_file)}
1502
+ return DockerfileSpec(commands=commands, context_files=context_files)
974
1503
 
975
1504
  return _Image._from_args(
976
1505
  base_images={"base": self},
@@ -981,25 +1510,42 @@ class _Image(_Object, type_prefix="im"):
981
1510
  )
982
1511
 
983
1512
  @staticmethod
984
- def _registry_setup_commands(tag: str, setup_commands: List[str], add_python: Optional[str]) -> List[str]:
985
- add_python_commands: List[str] = []
1513
+ def _registry_setup_commands(
1514
+ tag: str,
1515
+ builder_version: ImageBuilderVersion,
1516
+ setup_commands: list[str],
1517
+ add_python: Optional[str] = None,
1518
+ ) -> list[str]:
1519
+ add_python_commands: list[str] = []
986
1520
  if add_python:
1521
+ _validate_python_version(add_python, builder_version, allow_micro_granularity=False)
987
1522
  add_python_commands = [
988
1523
  "COPY /python/. /usr/local",
989
- "RUN ln -s /usr/local/bin/python3 /usr/local/bin/python",
990
1524
  "ENV TERMINFO_DIRS=/etc/terminfo:/lib/terminfo:/usr/share/terminfo:/usr/lib/terminfo",
991
1525
  ]
1526
+ python_minor = add_python.split(".")[1]
1527
+ if int(python_minor) < 13:
1528
+ # Previous versions did not include the `python` binary, but later ones do.
1529
+ # (The important factor is not the Python version itself, but the standalone dist version.)
1530
+ # We insert the command in the list at the position it was previously always added
1531
+ # for backwards compatibility with existing images.
1532
+ add_python_commands.insert(1, "RUN ln -s /usr/local/bin/python3 /usr/local/bin/python")
1533
+
1534
+ # Note: this change is because we install dependencies with uv in 2024.10+
1535
+ requirements_prefix = "python -m " if builder_version < "2024.10" else ""
1536
+ modal_requirements_commands = [
1537
+ f"COPY {CONTAINER_REQUIREMENTS_PATH} {CONTAINER_REQUIREMENTS_PATH}",
1538
+ f"RUN python -m pip install --upgrade {_base_image_config('package_tools', builder_version)}",
1539
+ f"RUN {requirements_prefix}{_get_modal_requirements_command(builder_version)}",
1540
+ ]
1541
+ if builder_version > "2023.12":
1542
+ modal_requirements_commands.append(f"RUN rm {CONTAINER_REQUIREMENTS_PATH}")
1543
+
992
1544
  return [
993
1545
  f"FROM {tag}",
994
1546
  *add_python_commands,
995
1547
  *setup_commands,
996
- "COPY /modal_requirements.txt /modal_requirements.txt",
997
- "RUN python -m pip install --upgrade pip",
998
- "RUN python -m pip install -r /modal_requirements.txt",
999
- # TODO: We should add this next line at some point to clean up the image, but it would
1000
- # trigger a hash change, so batch it with the next rebuild-triggering change.
1001
- #
1002
- # "RUN rm /modal_requirements.txt",
1548
+ *modal_requirements_commands,
1003
1549
  ]
1004
1550
 
1005
1551
  @staticmethod
@@ -1007,8 +1553,8 @@ class _Image(_Object, type_prefix="im"):
1007
1553
  tag: str,
1008
1554
  *,
1009
1555
  secret: Optional[_Secret] = None,
1010
- setup_dockerfile_commands: List[str] = [],
1011
- force_build: bool = False,
1556
+ setup_dockerfile_commands: list[str] = [],
1557
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1012
1558
  add_python: Optional[str] = None,
1013
1559
  **kwargs,
1014
1560
  ) -> "_Image":
@@ -1017,19 +1563,19 @@ class _Image(_Object, type_prefix="im"):
1017
1563
  The image must be built for the `linux/amd64` platform.
1018
1564
 
1019
1565
  If your image does not come with Python installed, you can use the `add_python` parameter
1020
- to specify a version of Python to add to the image. Supported versions are `3.8`, `3.9`,
1021
- `3.10`, `3.11`, and `3.12`. Otherwise, the image is expected to have Python>3.8 available
1022
- on PATH as `python`, along with `pip`.
1566
+ to specify a version of Python to add to the image. Otherwise, the image is expected to
1567
+ have Python on PATH as `python`, along with `pip`.
1023
1568
 
1024
1569
  You may also use `setup_dockerfile_commands` to run Dockerfile commands before the
1025
1570
  remaining commands run. This might be useful if you want a custom Python installation or to
1026
1571
  set a `SHELL`. Prefer `run_commands()` when possible though.
1027
1572
 
1028
1573
  To authenticate against a private registry with static credentials, you must set the `secret` parameter to
1029
- a `modal.Secret` containing a username (`REGISTRY_USERNAME`) and an access token or password (`REGISTRY_PASSWORD`).
1574
+ a `modal.Secret` containing a username (`REGISTRY_USERNAME`) and
1575
+ an access token or password (`REGISTRY_PASSWORD`).
1030
1576
 
1031
- To authenticate against private registries with credentials from a cloud provider, use `Image.from_gcp_artifact_registry()`
1032
- or `Image.from_aws_ecr()`.
1577
+ To authenticate against private registries with credentials from a cloud provider,
1578
+ use `Image.from_gcp_artifact_registry()` or `Image.from_aws_ecr()`.
1033
1579
 
1034
1580
  **Examples**
1035
1581
 
@@ -1039,24 +1585,28 @@ class _Image(_Object, type_prefix="im"):
1039
1585
  modal.Image.from_registry("nvcr.io/nvidia/pytorch:22.12-py3")
1040
1586
  ```
1041
1587
  """
1042
- context_mount = None
1043
- if add_python:
1044
- context_mount = _Mount.from_name(
1045
- python_standalone_mount_name(add_python),
1046
- namespace=api_pb2.DEPLOYMENT_NAMESPACE_GLOBAL,
1588
+
1589
+ def context_mount_function() -> Optional[_Mount]:
1590
+ return (
1591
+ _Mount.from_name(
1592
+ python_standalone_mount_name(add_python),
1593
+ namespace=api_pb2.DEPLOYMENT_NAMESPACE_GLOBAL,
1594
+ )
1595
+ if add_python
1596
+ else None
1047
1597
  )
1048
1598
 
1049
1599
  if "image_registry_config" not in kwargs and secret is not None:
1050
1600
  kwargs["image_registry_config"] = _ImageRegistryConfig(api_pb2.REGISTRY_AUTH_TYPE_STATIC_CREDS, secret)
1051
1601
 
1052
- def build_dockerfile() -> DockerfileSpec:
1053
- commands = _Image._registry_setup_commands(tag, setup_dockerfile_commands, add_python)
1054
- context_files = {"/modal_requirements.txt": _get_client_requirements_path(add_python)}
1602
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1603
+ commands = _Image._registry_setup_commands(tag, version, setup_dockerfile_commands, add_python)
1604
+ context_files = {CONTAINER_REQUIREMENTS_PATH: _get_modal_requirements_path(version, add_python)}
1055
1605
  return DockerfileSpec(commands=commands, context_files=context_files)
1056
1606
 
1057
1607
  return _Image._from_args(
1058
1608
  dockerfile_function=build_dockerfile,
1059
- context_mount=context_mount,
1609
+ context_mount_function=context_mount_function,
1060
1610
  force_build=force_build,
1061
1611
  **kwargs,
1062
1612
  )
@@ -1066,21 +1616,24 @@ class _Image(_Object, type_prefix="im"):
1066
1616
  tag: str,
1067
1617
  secret: Optional[_Secret] = None,
1068
1618
  *,
1069
- setup_dockerfile_commands: List[str] = [],
1070
- force_build: bool = False,
1619
+ setup_dockerfile_commands: list[str] = [],
1620
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1071
1621
  add_python: Optional[str] = None,
1072
1622
  **kwargs,
1073
1623
  ) -> "_Image":
1074
1624
  """Build a Modal image from a private image in Google Cloud Platform (GCP) Artifact Registry.
1075
1625
 
1076
1626
  You will need to pass a `modal.Secret` containing [your GCP service account key data](https://cloud.google.com/iam/docs/keys-create-delete#creating)
1077
- as `SERVICE_ACCOUNT_JSON`. This can be done from the [Secrets](/secrets) page. Your service account should be granted a specific
1078
- role depending on the GCP registry used:
1627
+ as `SERVICE_ACCOUNT_JSON`. This can be done from the [Secrets](/secrets) page.
1628
+ Your service account should be granted a specific role depending on the GCP registry used:
1079
1629
 
1080
- - For Artifact Registry images (`pkg.dev` domains) use the ["Artifact Registry Reader"](https://cloud.google.com/artifact-registry/docs/access-control#roles) role
1081
- - For Container Registry images (`gcr.io` domains) use the ["Storage Object Viewer"](https://cloud.google.com/artifact-registry/docs/transition/setup-gcr-repo#permissions) role
1630
+ - For Artifact Registry images (`pkg.dev` domains) use
1631
+ the ["Artifact Registry Reader"](https://cloud.google.com/artifact-registry/docs/access-control#roles) role
1632
+ - For Container Registry images (`gcr.io` domains) use
1633
+ the ["Storage Object Viewer"](https://cloud.google.com/artifact-registry/docs/transition/setup-gcr-repo) role
1082
1634
 
1083
- **Note:** This method does not use `GOOGLE_APPLICATION_CREDENTIALS` as that variable accepts a path to a JSON file, not the actual JSON string.
1635
+ **Note:** This method does not use `GOOGLE_APPLICATION_CREDENTIALS` as that
1636
+ variable accepts a path to a JSON file, not the actual JSON string.
1084
1637
 
1085
1638
  See `Image.from_registry()` for information about the other parameters.
1086
1639
 
@@ -1089,7 +1642,10 @@ class _Image(_Object, type_prefix="im"):
1089
1642
  ```python
1090
1643
  modal.Image.from_gcp_artifact_registry(
1091
1644
  "us-east1-docker.pkg.dev/my-project-1234/my-repo/my-image:my-version",
1092
- secret=modal.Secret.from_name("my-gcp-secret"),
1645
+ secret=modal.Secret.from_name(
1646
+ "my-gcp-secret",
1647
+ required_keys=["SERVICE_ACCOUNT_JSON"],
1648
+ ),
1093
1649
  add_python="3.11",
1094
1650
  )
1095
1651
  ```
@@ -1111,15 +1667,15 @@ class _Image(_Object, type_prefix="im"):
1111
1667
  tag: str,
1112
1668
  secret: Optional[_Secret] = None,
1113
1669
  *,
1114
- setup_dockerfile_commands: List[str] = [],
1115
- force_build: bool = False,
1670
+ setup_dockerfile_commands: list[str] = [],
1671
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1116
1672
  add_python: Optional[str] = None,
1117
1673
  **kwargs,
1118
1674
  ) -> "_Image":
1119
1675
  """Build a Modal image from a private image in AWS Elastic Container Registry (ECR).
1120
1676
 
1121
- You will need to pass a `modal.Secret` containing an AWS key (`AWS_ACCESS_KEY_ID`) and
1122
- secret (`AWS_SECRET_ACCESS_KEY`) with permissions to access the target ECR registry.
1677
+ You will need to pass a `modal.Secret` containing `AWS_ACCESS_KEY_ID`,
1678
+ `AWS_SECRET_ACCESS_KEY`, and `AWS_REGION` to access the target ECR registry.
1123
1679
 
1124
1680
  IAM configuration details can be found in the AWS documentation for
1125
1681
  ["Private repository policies"](https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html).
@@ -1131,7 +1687,10 @@ class _Image(_Object, type_prefix="im"):
1131
1687
  ```python
1132
1688
  modal.Image.from_aws_ecr(
1133
1689
  "000000000000.dkr.ecr.us-east-1.amazonaws.com/my-private-registry:my-version",
1134
- secret=modal.Secret.from_name("aws"),
1690
+ secret=modal.Secret.from_name(
1691
+ "aws",
1692
+ required_keys=["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"],
1693
+ ),
1135
1694
  add_python="3.11",
1136
1695
  )
1137
1696
  ```
@@ -1150,40 +1709,90 @@ class _Image(_Object, type_prefix="im"):
1150
1709
 
1151
1710
  @staticmethod
1152
1711
  def from_dockerfile(
1712
+ # Filepath to Dockerfile.
1153
1713
  path: Union[str, Path],
1154
- context_mount: Optional[
1155
- _Mount
1156
- ] = None, # modal.Mount with local files to supply as build context for COPY commands
1714
+ # modal.Mount with local files to supply as build context for COPY commands.
1715
+ # NOTE: The remote_path of the Mount should match the Dockerfile's WORKDIR.
1716
+ context_mount: Optional[_Mount] = None,
1717
+ # Ignore cached builds, similar to 'docker build --no-cache'
1157
1718
  force_build: bool = False,
1158
1719
  *,
1159
1720
  secrets: Sequence[_Secret] = [],
1160
1721
  gpu: GPU_T = None,
1161
1722
  add_python: Optional[str] = None,
1723
+ ignore: Union[Sequence[str], Callable[[Path], bool]] = AUTO_DOCKERIGNORE,
1162
1724
  ) -> "_Image":
1163
1725
  """Build a Modal image from a local Dockerfile.
1164
1726
 
1165
1727
  If your Dockerfile does not have Python installed, you can use the `add_python` parameter
1166
- to specify a version of Python to add to the image. Supported versions are `3.8`, `3.9`,
1167
- `3.10`, `3.11`, and `3.12`.
1728
+ to specify a version of Python to add to the image.
1168
1729
 
1169
- **Example**
1730
+ **Usage:**
1170
1731
 
1171
1732
  ```python
1172
- image = modal.Image.from_dockerfile("./Dockerfile", add_python="3.12")
1733
+ from pathlib import Path
1734
+ from modal import FilePatternMatcher
1735
+
1736
+ # By default a .dockerignore file is used if present in the current working directory
1737
+ image = modal.Image.from_dockerfile(
1738
+ "./Dockerfile",
1739
+ add_python="3.12",
1740
+ )
1741
+
1742
+ image = modal.Image.from_dockerfile(
1743
+ "./Dockerfile",
1744
+ add_python="3.12",
1745
+ ignore=["*.venv"],
1746
+ )
1747
+
1748
+ image = modal.Image.from_dockerfile(
1749
+ "./Dockerfile",
1750
+ add_python="3.12",
1751
+ ignore=lambda p: p.is_relative_to(".venv"),
1752
+ )
1753
+
1754
+ image = modal.Image.from_dockerfile(
1755
+ "./Dockerfile",
1756
+ add_python="3.12",
1757
+ ignore=FilePatternMatcher("**/*.txt"),
1758
+ )
1759
+
1760
+ # When including files is simpler than excluding them, you can use the `~` operator to invert the matcher.
1761
+ image = modal.Image.from_dockerfile(
1762
+ "./Dockerfile",
1763
+ add_python="3.12",
1764
+ ignore=~FilePatternMatcher("**/*.py"),
1765
+ )
1766
+
1767
+ # You can also read ignore patterns from a file.
1768
+ image = modal.Image.from_dockerfile(
1769
+ "./Dockerfile",
1770
+ add_python="3.12",
1771
+ ignore=FilePatternMatcher.from_file(Path("/path/to/dockerignore")),
1772
+ )
1173
1773
  ```
1174
1774
  """
1775
+ if context_mount is not None:
1776
+ deprecation_warning(
1777
+ (2025, 1, 13),
1778
+ "`context_mount` is deprecated."
1779
+ + " Files are now automatically added to the build context based on the commands in the Dockerfile.",
1780
+ pending=True,
1781
+ )
1175
1782
 
1176
1783
  # --- Build the base dockerfile
1177
1784
 
1178
- def build_base_dockerfile() -> DockerfileSpec:
1785
+ def build_dockerfile_base(version: ImageBuilderVersion) -> DockerfileSpec:
1179
1786
  with open(os.path.expanduser(path)) as f:
1180
1787
  commands = f.read().split("\n")
1181
1788
  return DockerfileSpec(commands=commands, context_files={})
1182
1789
 
1183
1790
  gpu_config = parse_gpu_config(gpu)
1184
1791
  base_image = _Image._from_args(
1185
- dockerfile_function=build_base_dockerfile,
1186
- context_mount=context_mount,
1792
+ dockerfile_function=build_dockerfile_base,
1793
+ context_mount_function=_create_context_mount_function(
1794
+ ignore=ignore, dockerfile_path=Path(path), context_mount=context_mount
1795
+ ),
1187
1796
  gpu_config=gpu_config,
1188
1797
  secrets=secrets,
1189
1798
  )
@@ -1192,67 +1801,54 @@ class _Image(_Object, type_prefix="im"):
1192
1801
  # This happening in two steps is probably a vestigial consequence of previous limitations,
1193
1802
  # but it will be difficult to merge them without forcing rebuilds of images.
1194
1803
 
1195
- if add_python:
1196
- context_mount = _Mount.from_name(
1197
- python_standalone_mount_name(add_python),
1198
- namespace=api_pb2.DEPLOYMENT_NAMESPACE_GLOBAL,
1804
+ def add_python_mount():
1805
+ return (
1806
+ _Mount.from_name(
1807
+ python_standalone_mount_name(add_python),
1808
+ namespace=api_pb2.DEPLOYMENT_NAMESPACE_GLOBAL,
1809
+ )
1810
+ if add_python
1811
+ else None
1199
1812
  )
1200
- else:
1201
- context_mount = None
1202
-
1203
- def enhance_dockerfile() -> DockerfileSpec:
1204
- requirements_path = _get_client_requirements_path(add_python)
1205
-
1206
- add_python_commands = []
1207
- if add_python:
1208
- add_python_commands = [
1209
- "COPY /python/. /usr/local",
1210
- "RUN ln -s /usr/local/bin/python3 /usr/local/bin/python",
1211
- "ENV TERMINFO_DIRS=/etc/terminfo:/lib/terminfo:/usr/share/terminfo:/usr/lib/terminfo",
1212
- ]
1213
-
1214
- commands = [
1215
- "FROM base",
1216
- *add_python_commands,
1217
- "COPY /modal_requirements.txt /modal_requirements.txt",
1218
- "RUN python -m pip install --upgrade pip",
1219
- "RUN python -m pip install -r /modal_requirements.txt",
1220
- ]
1221
-
1222
- context_files = {"/modal_requirements.txt": requirements_path}
1223
1813
 
1814
+ def build_dockerfile_python(version: ImageBuilderVersion) -> DockerfileSpec:
1815
+ commands = _Image._registry_setup_commands("base", version, [], add_python)
1816
+ requirements_path = _get_modal_requirements_path(version, add_python)
1817
+ context_files = {CONTAINER_REQUIREMENTS_PATH: requirements_path}
1224
1818
  return DockerfileSpec(commands=commands, context_files=context_files)
1225
1819
 
1226
1820
  return _Image._from_args(
1227
1821
  base_images={"base": base_image},
1228
- dockerfile_function=enhance_dockerfile,
1229
- context_mount=context_mount,
1822
+ dockerfile_function=build_dockerfile_python,
1823
+ context_mount_function=add_python_mount,
1230
1824
  force_build=force_build,
1231
1825
  )
1232
1826
 
1233
1827
  @staticmethod
1234
1828
  def debian_slim(python_version: Optional[str] = None, force_build: bool = False) -> "_Image":
1235
- """Default image, based on the official `python:X.Y.Z-slim-bullseye` Docker images."""
1829
+ """Default image, based on the official `python` Docker images."""
1830
+ if isinstance(python_version, float):
1831
+ raise TypeError("The `python_version` argument should be a string, not a float.")
1236
1832
 
1237
- def build_dockerfile() -> DockerfileSpec:
1238
- full_python_version = _dockerhub_python_version(python_version)
1833
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1834
+ requirements_path = _get_modal_requirements_path(version, python_version)
1835
+ context_files = {CONTAINER_REQUIREMENTS_PATH: requirements_path}
1836
+ full_python_version = _dockerhub_python_version(version, python_version)
1837
+ debian_codename = _base_image_config("debian", version)
1239
1838
 
1240
- requirements_path = _get_client_requirements_path(full_python_version)
1241
1839
  commands = [
1242
- f"FROM python:{full_python_version}-slim-bullseye",
1243
- "COPY /modal_requirements.txt /modal_requirements.txt",
1840
+ f"FROM python:{full_python_version}-slim-{debian_codename}",
1841
+ f"COPY {CONTAINER_REQUIREMENTS_PATH} {CONTAINER_REQUIREMENTS_PATH}",
1244
1842
  "RUN apt-get update",
1245
1843
  "RUN apt-get install -y gcc gfortran build-essential",
1246
- "RUN pip install --upgrade pip",
1247
- "RUN pip install -r /modal_requirements.txt",
1248
- # Set debian front-end to non-interactive to avoid users getting stuck with input
1249
- # prompts.
1844
+ f"RUN pip install --upgrade {_base_image_config('package_tools', version)}",
1845
+ f"RUN {_get_modal_requirements_command(version)}",
1846
+ # Set debian front-end to non-interactive to avoid users getting stuck with input prompts.
1250
1847
  "RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections",
1251
1848
  ]
1252
- return DockerfileSpec(
1253
- commands=commands,
1254
- context_files={"/modal_requirements.txt": requirements_path},
1255
- )
1849
+ if version > "2023.12":
1850
+ commands.append(f"RUN rm {CONTAINER_REQUIREMENTS_PATH}")
1851
+ return DockerfileSpec(commands=commands, context_files=context_files)
1256
1852
 
1257
1853
  return _Image._from_args(
1258
1854
  dockerfile_function=build_dockerfile,
@@ -1262,8 +1858,8 @@ class _Image(_Object, type_prefix="im"):
1262
1858
 
1263
1859
  def apt_install(
1264
1860
  self,
1265
- *packages: Union[str, List[str]], # A list of packages, e.g. ["ssh", "libpq-dev"]
1266
- force_build: bool = False,
1861
+ *packages: Union[str, list[str]], # A list of packages, e.g. ["ssh", "libpq-dev"]
1862
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1267
1863
  secrets: Sequence[_Secret] = [],
1268
1864
  gpu: GPU_T = None,
1269
1865
  ) -> "_Image":
@@ -1279,9 +1875,9 @@ class _Image(_Object, type_prefix="im"):
1279
1875
  if not pkgs:
1280
1876
  return self
1281
1877
 
1282
- package_args = " ".join(shlex.quote(pkg) for pkg in pkgs)
1878
+ package_args = shlex.join(pkgs)
1283
1879
 
1284
- def build_dockerfile() -> DockerfileSpec:
1880
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1285
1881
  commands = [
1286
1882
  "FROM base",
1287
1883
  "RUN apt-get update",
@@ -1299,28 +1895,33 @@ class _Image(_Object, type_prefix="im"):
1299
1895
 
1300
1896
  def run_function(
1301
1897
  self,
1302
- raw_f: Callable,
1898
+ raw_f: Callable[..., Any],
1303
1899
  secrets: Sequence[_Secret] = (), # Optional Modal Secret objects with environment variables for the container
1304
- gpu: GPU_T = None, # GPU specification as string ("any", "T4", "A10G", ...) or object (`modal.GPU.A100()`, ...)
1305
- mounts: Sequence[_Mount] = (),
1306
- shared_volumes: Dict[Union[str, PurePosixPath], _NetworkFileSystem] = {},
1307
- network_file_systems: Dict[Union[str, PurePosixPath], _NetworkFileSystem] = {},
1900
+ gpu: Union[
1901
+ GPU_T, list[GPU_T]
1902
+ ] = None, # GPU request as string ("any", "T4", ...), object (`modal.GPU.A100()`, ...), or a list of either
1903
+ mounts: Sequence[_Mount] = (), # Mounts attached to the function
1904
+ volumes: dict[Union[str, PurePosixPath], Union[_Volume, _CloudBucketMount]] = {}, # Volume mount paths
1905
+ network_file_systems: dict[Union[str, PurePosixPath], _NetworkFileSystem] = {}, # NFS mount paths
1308
1906
  cpu: Optional[float] = None, # How many CPU cores to request. This is a soft limit.
1309
1907
  memory: Optional[int] = None, # How much memory to request, in MiB. This is a soft limit.
1310
- timeout: Optional[int] = 86400, # Maximum execution time of the function in seconds.
1311
- force_build: bool = False,
1312
- secret: Optional[_Secret] = None, # Deprecated: use `secrets`.
1908
+ timeout: Optional[int] = 60 * 60, # Maximum execution time of the function in seconds.
1909
+ force_build: bool = False, # Ignore cached builds, similar to 'docker build --no-cache'
1910
+ cloud: Optional[str] = None, # Cloud provider to run the function on. Possible values are aws, gcp, oci, auto.
1911
+ region: Optional[Union[str, Sequence[str]]] = None, # Region or regions to run the function on.
1313
1912
  args: Sequence[Any] = (), # Positional arguments to the function.
1314
- kwargs: Dict[str, Any] = {}, # Keyword arguments to the function.
1913
+ kwargs: dict[str, Any] = {}, # Keyword arguments to the function.
1315
1914
  ) -> "_Image":
1316
1915
  """Run user-defined function `raw_f` as an image build step. The function runs just like an ordinary Modal
1317
- function, and any kwargs accepted by `@stub.function` (such as `Mount`s, `NetworkFileSystem`s, and resource requests) can
1318
- be supplied to it. After it finishes execution, a snapshot of the resulting container file system is saved as an image.
1916
+ function, and any kwargs accepted by `@app.function` (such as `Mount`s, `NetworkFileSystem`s,
1917
+ and resource requests) can be supplied to it.
1918
+ After it finishes execution, a snapshot of the resulting container file system is saved as an image.
1319
1919
 
1320
1920
  **Note**
1321
1921
 
1322
- Only the source code of `raw_f`, the contents of `**kwargs`, and any referenced *global* variables are used to determine whether the image has changed
1323
- and needs to be rebuilt. If this function references other functions or variables, the image will not be rebuilt if you
1922
+ Only the source code of `raw_f`, the contents of `**kwargs`, and any referenced *global* variables
1923
+ are used to determine whether the image has changed and needs to be rebuilt.
1924
+ If this function references other functions or variables, the image will not be rebuilt if you
1324
1925
  make changes to them. You can force a rebuild by changing the function's source code itself.
1325
1926
 
1326
1927
  **Example**
@@ -1340,23 +1941,27 @@ class _Image(_Object, type_prefix="im"):
1340
1941
  """
1341
1942
  from .functions import _Function
1342
1943
 
1343
- info = FunctionInfo(raw_f)
1944
+ if not callable(raw_f):
1945
+ raise InvalidError(f"Argument to Image.run_function must be a function, not {type(raw_f).__name__}.")
1946
+ elif raw_f.__name__ == "<lambda>":
1947
+ # It may be possible to support lambdas eventually, but for now we don't handle them well, so reject quickly
1948
+ raise InvalidError("Image.run_function does not support lambda functions.")
1344
1949
 
1345
- if shared_volumes or network_file_systems:
1346
- warnings.warn(
1347
- "Mounting NetworkFileSystems or Volumes is usually not advised with `run_function`."
1348
- " If you are trying to download model weights, downloading it to the image itself is recommended and sufficient."
1349
- )
1950
+ scheduler_placement = SchedulerPlacement(region=region) if region else None
1951
+
1952
+ info = FunctionInfo(raw_f)
1350
1953
 
1351
1954
  function = _Function.from_args(
1352
1955
  info,
1353
- stub=None,
1354
- image=self,
1355
- secret=secret,
1956
+ app=None,
1957
+ image=self, # type: ignore[reportArgumentType] # TODO: probably conflict with type stub?
1356
1958
  secrets=secrets,
1357
1959
  gpu=gpu,
1358
1960
  mounts=mounts,
1961
+ volumes=volumes,
1359
1962
  network_file_systems=network_file_systems,
1963
+ cloud=cloud,
1964
+ scheduler_placement=scheduler_placement,
1360
1965
  memory=memory,
1361
1966
  timeout=timeout,
1362
1967
  cpu=cpu,
@@ -1379,22 +1984,20 @@ class _Image(_Object, type_prefix="im"):
1379
1984
  force_build=self.force_build or force_build,
1380
1985
  )
1381
1986
 
1382
- def env(self, vars: Dict[str, str]) -> "_Image":
1383
- """Sets the environmental variables of the image.
1987
+ def env(self, vars: dict[str, str]) -> "_Image":
1988
+ """Sets the environment variables in an Image.
1384
1989
 
1385
1990
  **Example**
1386
1991
 
1387
1992
  ```python
1388
1993
  image = (
1389
- modal.Image.conda()
1390
- .env({"CONDA_OVERRIDE_CUDA": "11.2"})
1391
- .conda_install("jax", "cuda-nvcc", channels=["conda-forge", "nvidia"])
1392
- .pip_install("dm-haiku", "optax")
1994
+ modal.Image.debian_slim()
1995
+ .env({"HF_HUB_ENABLE_HF_TRANSFER": "1"})
1393
1996
  )
1394
1997
  ```
1395
1998
  """
1396
1999
 
1397
- def build_dockerfile() -> DockerfileSpec:
2000
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
1398
2001
  commands = ["FROM base"] + [f"ENV {key}={shlex.quote(val)}" for (key, val) in vars.items()]
1399
2002
  return DockerfileSpec(commands=commands, context_files={})
1400
2003
 
@@ -1403,7 +2006,7 @@ class _Image(_Object, type_prefix="im"):
1403
2006
  dockerfile_function=build_dockerfile,
1404
2007
  )
1405
2008
 
1406
- def workdir(self, path: str) -> "_Image":
2009
+ def workdir(self, path: Union[str, PurePosixPath]) -> "_Image":
1407
2010
  """Set the working directory for subsequent image build steps and function execution.
1408
2011
 
1409
2012
  **Example**
@@ -1418,8 +2021,8 @@ class _Image(_Object, type_prefix="im"):
1418
2021
  ```
1419
2022
  """
1420
2023
 
1421
- def build_dockerfile() -> DockerfileSpec:
1422
- commands = ["FROM base"] + [f"WORKDIR {shlex.quote(path)}"]
2024
+ def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
2025
+ commands = ["FROM base", f"WORKDIR {shlex.quote(str(path))}"]
1423
2026
  return DockerfileSpec(commands=commands, context_files={})
1424
2027
 
1425
2028
  return _Image._from_args(
@@ -1457,18 +2060,25 @@ class _Image(_Object, type_prefix="im"):
1457
2060
  if not isinstance(exc, ImportError):
1458
2061
  warnings.warn(f"Warning: caught a non-ImportError exception in an `imports()` block: {repr(exc)}")
1459
2062
 
1460
- def run_inside(self):
1461
- """`Image.run_inside` is deprecated - use `Image.imports` instead.
2063
+ @live_method_gen
2064
+ async def _logs(self) -> typing.AsyncGenerator[str, None]:
2065
+ """Streams logs from an image, or returns logs from an already completed image.
1462
2066
 
1463
- **Usage:**
1464
-
1465
- ```python notest
1466
- with image.imports():
1467
- import torch
1468
- ```
2067
+ This method is considered private since its interface may change - use it at your own risk!
1469
2068
  """
1470
- deprecation_warning((2023, 12, 15), Image.run_inside.__doc__)
1471
- return self.imports()
2069
+ last_entry_id: str = ""
2070
+
2071
+ request = api_pb2.ImageJoinStreamingRequest(
2072
+ image_id=self._object_id, timeout=55, last_entry_id=last_entry_id, include_logs_for_finished=True
2073
+ )
2074
+ async for response in self._client.stub.ImageJoinStreaming.unary_stream(request):
2075
+ if response.result.status:
2076
+ return
2077
+ if response.entry_id:
2078
+ last_entry_id = response.entry_id
2079
+ for task_log in response.task_logs:
2080
+ if task_log.data:
2081
+ yield task_log.data
1472
2082
 
1473
2083
 
1474
2084
  Image = synchronize_api(_Image)