modal 0.62.115__py3-none-any.whl → 0.72.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (220)
  1. modal/__init__.py +13 -9
  2. modal/__main__.py +41 -3
  3. modal/_clustered_functions.py +80 -0
  4. modal/_clustered_functions.pyi +22 -0
  5. modal/_container_entrypoint.py +402 -398
  6. modal/_ipython.py +3 -13
  7. modal/_location.py +17 -10
  8. modal/_output.py +243 -99
  9. modal/_pty.py +2 -2
  10. modal/_resolver.py +55 -60
  11. modal/_resources.py +26 -7
  12. modal/_runtime/__init__.py +1 -0
  13. modal/_runtime/asgi.py +519 -0
  14. modal/_runtime/container_io_manager.py +1025 -0
  15. modal/{execution_context.py → _runtime/execution_context.py} +11 -2
  16. modal/_runtime/telemetry.py +169 -0
  17. modal/_runtime/user_code_imports.py +356 -0
  18. modal/_serialization.py +123 -6
  19. modal/_traceback.py +47 -187
  20. modal/_tunnel.py +50 -14
  21. modal/_tunnel.pyi +19 -36
  22. modal/_utils/app_utils.py +3 -17
  23. modal/_utils/async_utils.py +386 -104
  24. modal/_utils/blob_utils.py +157 -186
  25. modal/_utils/bytes_io_segment_payload.py +97 -0
  26. modal/_utils/deprecation.py +89 -0
  27. modal/_utils/docker_utils.py +98 -0
  28. modal/_utils/function_utils.py +299 -98
  29. modal/_utils/grpc_testing.py +47 -34
  30. modal/_utils/grpc_utils.py +54 -21
  31. modal/_utils/hash_utils.py +51 -10
  32. modal/_utils/http_utils.py +39 -9
  33. modal/_utils/logger.py +2 -1
  34. modal/_utils/mount_utils.py +34 -16
  35. modal/_utils/name_utils.py +58 -0
  36. modal/_utils/package_utils.py +14 -1
  37. modal/_utils/pattern_utils.py +205 -0
  38. modal/_utils/rand_pb_testing.py +3 -3
  39. modal/_utils/shell_utils.py +15 -49
  40. modal/_vendor/a2wsgi_wsgi.py +62 -72
  41. modal/_vendor/cloudpickle.py +1 -1
  42. modal/_watcher.py +12 -10
  43. modal/app.py +561 -323
  44. modal/app.pyi +474 -262
  45. modal/call_graph.py +7 -6
  46. modal/cli/_download.py +22 -6
  47. modal/cli/_traceback.py +200 -0
  48. modal/cli/app.py +203 -42
  49. modal/cli/config.py +12 -5
  50. modal/cli/container.py +61 -13
  51. modal/cli/dict.py +128 -0
  52. modal/cli/entry_point.py +26 -13
  53. modal/cli/environment.py +40 -9
  54. modal/cli/import_refs.py +21 -48
  55. modal/cli/launch.py +28 -14
  56. modal/cli/network_file_system.py +57 -21
  57. modal/cli/profile.py +1 -1
  58. modal/cli/programs/run_jupyter.py +34 -9
  59. modal/cli/programs/vscode.py +58 -8
  60. modal/cli/queues.py +131 -0
  61. modal/cli/run.py +199 -96
  62. modal/cli/secret.py +5 -4
  63. modal/cli/token.py +7 -2
  64. modal/cli/utils.py +74 -8
  65. modal/cli/volume.py +97 -56
  66. modal/client.py +248 -144
  67. modal/client.pyi +156 -124
  68. modal/cloud_bucket_mount.py +43 -30
  69. modal/cloud_bucket_mount.pyi +32 -25
  70. modal/cls.py +528 -141
  71. modal/cls.pyi +189 -145
  72. modal/config.py +32 -15
  73. modal/container_process.py +177 -0
  74. modal/container_process.pyi +82 -0
  75. modal/dict.py +50 -54
  76. modal/dict.pyi +120 -164
  77. modal/environments.py +106 -5
  78. modal/environments.pyi +77 -25
  79. modal/exception.py +30 -43
  80. modal/experimental.py +62 -2
  81. modal/file_io.py +537 -0
  82. modal/file_io.pyi +235 -0
  83. modal/file_pattern_matcher.py +196 -0
  84. modal/functions.py +846 -428
  85. modal/functions.pyi +446 -387
  86. modal/gpu.py +57 -44
  87. modal/image.py +943 -417
  88. modal/image.pyi +584 -245
  89. modal/io_streams.py +434 -0
  90. modal/io_streams.pyi +122 -0
  91. modal/mount.py +223 -90
  92. modal/mount.pyi +241 -243
  93. modal/network_file_system.py +85 -86
  94. modal/network_file_system.pyi +151 -110
  95. modal/object.py +66 -36
  96. modal/object.pyi +166 -143
  97. modal/output.py +63 -0
  98. modal/parallel_map.py +73 -47
  99. modal/parallel_map.pyi +51 -63
  100. modal/partial_function.py +272 -107
  101. modal/partial_function.pyi +219 -120
  102. modal/proxy.py +15 -12
  103. modal/proxy.pyi +3 -8
  104. modal/queue.py +96 -72
  105. modal/queue.pyi +210 -135
  106. modal/requirements/2024.04.txt +2 -1
  107. modal/requirements/2024.10.txt +16 -0
  108. modal/requirements/README.md +21 -0
  109. modal/requirements/base-images.json +22 -0
  110. modal/retries.py +45 -4
  111. modal/runner.py +325 -203
  112. modal/runner.pyi +124 -110
  113. modal/running_app.py +27 -4
  114. modal/sandbox.py +509 -231
  115. modal/sandbox.pyi +396 -169
  116. modal/schedule.py +2 -2
  117. modal/scheduler_placement.py +20 -3
  118. modal/secret.py +41 -25
  119. modal/secret.pyi +62 -42
  120. modal/serving.py +39 -49
  121. modal/serving.pyi +37 -43
  122. modal/stream_type.py +15 -0
  123. modal/token_flow.py +5 -3
  124. modal/token_flow.pyi +37 -32
  125. modal/volume.py +123 -137
  126. modal/volume.pyi +228 -221
  127. {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/METADATA +5 -5
  128. modal-0.72.13.dist-info/RECORD +174 -0
  129. {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/top_level.txt +0 -1
  130. modal_docs/gen_reference_docs.py +3 -1
  131. modal_docs/mdmd/mdmd.py +0 -1
  132. modal_docs/mdmd/signatures.py +1 -2
  133. modal_global_objects/images/base_images.py +28 -0
  134. modal_global_objects/mounts/python_standalone.py +2 -2
  135. modal_proto/__init__.py +1 -1
  136. modal_proto/api.proto +1231 -531
  137. modal_proto/api_grpc.py +750 -430
  138. modal_proto/api_pb2.py +2102 -1176
  139. modal_proto/api_pb2.pyi +8859 -0
  140. modal_proto/api_pb2_grpc.py +1329 -675
  141. modal_proto/api_pb2_grpc.pyi +1416 -0
  142. modal_proto/modal_api_grpc.py +149 -0
  143. modal_proto/modal_options_grpc.py +3 -0
  144. modal_proto/options_pb2.pyi +20 -0
  145. modal_proto/options_pb2_grpc.pyi +7 -0
  146. modal_proto/py.typed +0 -0
  147. modal_version/__init__.py +1 -1
  148. modal_version/_version_generated.py +2 -2
  149. modal/_asgi.py +0 -370
  150. modal/_container_exec.py +0 -128
  151. modal/_container_io_manager.py +0 -646
  152. modal/_container_io_manager.pyi +0 -412
  153. modal/_sandbox_shell.py +0 -49
  154. modal/app_utils.py +0 -20
  155. modal/app_utils.pyi +0 -17
  156. modal/execution_context.pyi +0 -37
  157. modal/shared_volume.py +0 -23
  158. modal/shared_volume.pyi +0 -24
  159. modal-0.62.115.dist-info/RECORD +0 -207
  160. modal_global_objects/images/conda.py +0 -15
  161. modal_global_objects/images/debian_slim.py +0 -15
  162. modal_global_objects/images/micromamba.py +0 -15
  163. test/__init__.py +0 -1
  164. test/aio_test.py +0 -12
  165. test/async_utils_test.py +0 -279
  166. test/blob_test.py +0 -67
  167. test/cli_imports_test.py +0 -149
  168. test/cli_test.py +0 -674
  169. test/client_test.py +0 -203
  170. test/cloud_bucket_mount_test.py +0 -22
  171. test/cls_test.py +0 -636
  172. test/config_test.py +0 -149
  173. test/conftest.py +0 -1485
  174. test/container_app_test.py +0 -50
  175. test/container_test.py +0 -1405
  176. test/cpu_test.py +0 -23
  177. test/decorator_test.py +0 -85
  178. test/deprecation_test.py +0 -34
  179. test/dict_test.py +0 -51
  180. test/e2e_test.py +0 -68
  181. test/error_test.py +0 -7
  182. test/function_serialization_test.py +0 -32
  183. test/function_test.py +0 -791
  184. test/function_utils_test.py +0 -101
  185. test/gpu_test.py +0 -159
  186. test/grpc_utils_test.py +0 -82
  187. test/helpers.py +0 -47
  188. test/image_test.py +0 -814
  189. test/live_reload_test.py +0 -80
  190. test/lookup_test.py +0 -70
  191. test/mdmd_test.py +0 -329
  192. test/mount_test.py +0 -162
  193. test/mounted_files_test.py +0 -327
  194. test/network_file_system_test.py +0 -188
  195. test/notebook_test.py +0 -66
  196. test/object_test.py +0 -41
  197. test/package_utils_test.py +0 -25
  198. test/queue_test.py +0 -115
  199. test/resolver_test.py +0 -59
  200. test/retries_test.py +0 -67
  201. test/runner_test.py +0 -85
  202. test/sandbox_test.py +0 -191
  203. test/schedule_test.py +0 -15
  204. test/scheduler_placement_test.py +0 -57
  205. test/secret_test.py +0 -89
  206. test/serialization_test.py +0 -50
  207. test/stub_composition_test.py +0 -10
  208. test/stub_test.py +0 -361
  209. test/test_asgi_wrapper.py +0 -234
  210. test/token_flow_test.py +0 -18
  211. test/traceback_test.py +0 -135
  212. test/tunnel_test.py +0 -29
  213. test/utils_test.py +0 -88
  214. test/version_test.py +0 -14
  215. test/volume_test.py +0 -397
  216. test/watcher_test.py +0 -58
  217. test/webhook_test.py +0 -145
  218. {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/LICENSE +0 -0
  219. {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/WHEEL +0 -0
  220. {modal-0.62.115.dist-info → modal-0.72.13.dist-info}/entry_points.txt +0 -0
modal/gpu.py CHANGED
@@ -1,10 +1,10 @@
1
1
  # Copyright Modal Labs 2022
2
2
  from dataclasses import dataclass
3
- from typing import Optional, Union
3
+ from typing import Callable, Optional, Union
4
4
 
5
5
  from modal_proto import api_pb2
6
6
 
7
- from .exception import InvalidError, deprecation_error, deprecation_warning
7
+ from .exception import InvalidError
8
8
 
9
9
 
10
10
  @dataclass(frozen=True)
@@ -31,7 +31,7 @@ class T4(_GPUConfig):
31
31
 
32
32
  def __init__(
33
33
  self,
34
- count: int = 1, # Number of GPUs per container. Defaults to 1. Useful if you have very large models that don't fit on a single GPU.
34
+ count: int = 1, # Number of GPUs per container. Defaults to 1.
35
35
  ):
36
36
  super().__init__(api_pb2.GPU_TYPE_T4, count, 0)
37
37
 
@@ -43,12 +43,13 @@ class L4(_GPUConfig):
43
43
  """
44
44
  [NVIDIA L4 Tensor Core](https://www.nvidia.com/en-us/data-center/l4/) GPU class.
45
45
 
46
- A mid-tier data center GPU based on the Ada Lovelace architecture, providing 24GiB of GPU memory. Includes RTX (ray tracing) support.
46
+ A mid-tier data center GPU based on the Ada Lovelace architecture, providing 24GiB of GPU memory.
47
+ Includes RTX (ray tracing) support.
47
48
  """
48
49
 
49
50
  def __init__(
50
51
  self,
51
- count: int = 1, # Number of GPUs per container. Defaults to 1. Useful if you have very large models that don't fit on a single GPU.
52
+ count: int = 1, # Number of GPUs per container. Defaults to 1.
52
53
  ):
53
54
  super().__init__(api_pb2.GPU_TYPE_L4, count, 0)
54
55
 
@@ -66,22 +67,12 @@ class A100(_GPUConfig):
66
67
  def __init__(
67
68
  self,
68
69
  *,
69
- count: int = 1, # Number of GPUs per container. Defaults to 1. Useful if you have very large models that don't fit on a single GPU.
70
- memory: int = 0, # Deprecated. Use `size` instead.
70
+ count: int = 1, # Number of GPUs per container. Defaults to 1.
71
71
  size: Union[str, None] = None, # Select GiB configuration of GPU device: "40GB" or "80GB". Defaults to "40GB".
72
72
  ):
73
- allowed_memory_values = {40, 80}
74
73
  allowed_size_values = {"40GB", "80GB"}
75
- if memory == 20:
76
- raise ValueError(
77
- "A100 20GB is unsupported, consider `modal.A10G`, `modal.A100(memory_gb='40')`, or `modal.H100` instead"
78
- )
79
- elif memory and size:
80
- raise ValueError("Cannot specify both `memory` and `size`. Just specify `size`.")
81
- elif memory:
82
- if memory not in allowed_memory_values:
83
- raise ValueError(f"A100s can only have memory values of {allowed_memory_values} => memory={memory}")
84
- elif size:
74
+
75
+ if size:
85
76
  if size not in allowed_size_values:
86
77
  raise ValueError(
87
78
  f"size='{size}' is invalid. A100s can only have memory values of {allowed_size_values}."
@@ -106,14 +97,17 @@ class A10G(_GPUConfig):
106
97
  """
107
98
  [NVIDIA A10G Tensor Core](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) GPU class.
108
99
 
109
- A mid-tier data center GPU based on the Ampere architecture, providing 24 GiB of memory. A10G GPUs deliver up to 3.3x better ML training performance, 3x better ML inference performance,
100
+ A mid-tier data center GPU based on the Ampere architecture, providing 24 GiB of memory.
101
+ A10G GPUs deliver up to 3.3x better ML training performance, 3x better ML inference performance,
110
102
  and 3x better graphics performance, in comparison to NVIDIA T4 GPUs.
111
103
  """
112
104
 
113
105
  def __init__(
114
106
  self,
115
107
  *,
116
- count: int = 1, # Number of GPUs per container. Defaults to 1. Useful if you have very large models that don't fit on a single GPU.
108
+ # Number of GPUs per container. Defaults to 1.
109
+ # Useful if you have very large models that don't fit on a single GPU.
110
+ count: int = 1,
117
111
  ):
118
112
  super().__init__(api_pb2.GPU_TYPE_A10G, count)
119
113
 
@@ -125,13 +119,17 @@ class H100(_GPUConfig):
125
119
  """
126
120
  [NVIDIA H100 Tensor Core](https://www.nvidia.com/en-us/data-center/h100/) GPU class.
127
121
 
128
- The flagship data center GPU of the Hopper architecture. Enhanced support for FP8 precision and a Transformer Engine that provides up to 4X faster training over the prior generation for GPT-3 (175B) models.
122
+ The flagship data center GPU of the Hopper architecture.
123
+ Enhanced support for FP8 precision and a Transformer Engine that provides up to 4X faster training
124
+ over the prior generation for GPT-3 (175B) models.
129
125
  """
130
126
 
131
127
  def __init__(
132
128
  self,
133
129
  *,
134
- count: int = 1, # Number of GPUs per container. Defaults to 1. Useful if you have very large models that don't fit on a single GPU.
130
+ # Number of GPUs per container. Defaults to 1.
131
+ # Useful if you have very large models that don't fit on a single GPU.
132
+ count: int = 1,
135
133
  ):
136
134
  super().__init__(api_pb2.GPU_TYPE_H100, count)
137
135
 
@@ -139,6 +137,27 @@ class H100(_GPUConfig):
139
137
  return f"GPU(H100, count={self.count})"
140
138
 
141
139
 
140
+ class L40S(_GPUConfig):
141
+ """
142
+ [NVIDIA L40S](https://www.nvidia.com/en-us/data-center/l40s/) GPU class.
143
+
144
+ The L40S is a data center GPU for the Ada Lovelace architecture. It has 48 GB of on-chip
145
+ GDDR6 RAM and enhanced support for FP8 precision.
146
+ """
147
+
148
+ def __init__(
149
+ self,
150
+ *,
151
+ # Number of GPUs per container. Defaults to 1.
152
+ # Useful if you have very large models that don't fit on a single GPU.
153
+ count: int = 1,
154
+ ):
155
+ super().__init__(api_pb2.GPU_TYPE_L40S, count)
156
+
157
+ def __repr__(self):
158
+ return f"GPU(L40S, count={self.count})"
159
+
160
+
142
161
  class Any(_GPUConfig):
143
162
  """Selects any one of the GPU classes available within Modal, according to availability."""
144
163
 
@@ -149,24 +168,28 @@ class Any(_GPUConfig):
149
168
  return f"GPU(Any, count={self.count})"
150
169
 
151
170
 
152
- STRING_TO_GPU_CONFIG = {
171
+ STRING_TO_GPU_CONFIG: dict[str, Callable] = {
153
172
  "t4": T4,
154
173
  "l4": L4,
155
174
  "a100": A100,
175
+ "a100-80gb": lambda: A100(size="80GB"),
156
176
  "h100": H100,
157
177
  "a10g": A10G,
178
+ "l40s": L40S,
158
179
  "any": Any,
159
180
  }
160
- display_string_to_config = "\n".join(
161
- f'- "{key}" → `{cls()}`' for key, cls in STRING_TO_GPU_CONFIG.items() if key != "inf2"
162
- )
181
+ display_string_to_config = "\n".join(f'- "{key}" → `{c()}`' for key, c in STRING_TO_GPU_CONFIG.items() if key != "inf2")
163
182
  __doc__ = f"""
164
183
  **GPU configuration shortcodes**
165
184
 
166
- The following are the valid `str` values for the `gpu` parameter of [`@app.function`](/docs/reference/modal.Stub#function).
185
+ The following are the valid `str` values for the `gpu` parameter of
186
+ [`@app.function`](/docs/reference/modal.App#function).
167
187
 
168
188
  {display_string_to_config}
169
189
 
190
+ The shortcodes also support specifying count by suffixing `:N` to acquire `N` GPUs.
191
+ For example, `a10g:4` will provision 4 A10G GPUs.
192
+
170
193
  Other configurations can be created using the constructors documented below.
171
194
  """
172
195
 
@@ -174,7 +197,7 @@ Other configurations can be created using the constructors documented below.
174
197
  GPU_T = Union[None, bool, str, _GPUConfig]
175
198
 
176
199
 
177
- def _parse_gpu_config(value: GPU_T, raise_on_true: bool = True) -> Optional[_GPUConfig]:
200
+ def _parse_gpu_config(value: GPU_T) -> Optional[_GPUConfig]:
178
201
  if isinstance(value, _GPUConfig):
179
202
  return value
180
203
  elif isinstance(value, str):
@@ -187,37 +210,27 @@ def _parse_gpu_config(value: GPU_T, raise_on_true: bool = True) -> Optional[_GPU
187
210
  raise InvalidError(f"Invalid GPU count: {count_str}. Value must be an integer.")
188
211
 
189
212
  if value.lower() == "a100-20g":
190
- return A100(memory=20, count=count) # Triggers unsupported error underneath.
213
+ return A100(size="20GB", count=count) # Triggers unsupported error underneath.
191
214
  elif value.lower() == "a100-40gb":
192
215
  return A100(size="40GB", count=count)
193
216
  elif value.lower() == "a100-80gb":
194
217
  return A100(size="80GB", count=count)
195
218
  elif value.lower() not in STRING_TO_GPU_CONFIG:
196
219
  raise InvalidError(
197
- f"Invalid GPU type: {value}. Value must be one of {list(STRING_TO_GPU_CONFIG.keys())} (case-insensitive)."
220
+ f"Invalid GPU type: {value}. "
221
+ f"Value must be one of {list(STRING_TO_GPU_CONFIG.keys())} (case-insensitive)."
198
222
  )
199
223
  else:
200
224
  return STRING_TO_GPU_CONFIG[value.lower()](count=count)
201
- elif value is True:
202
- if raise_on_true:
203
- deprecation_error(
204
- (2022, 12, 19), 'Setting gpu=True is deprecated. Use `gpu="any"` or `gpu=modal.gpu.Any()` instead.'
205
- )
206
- else:
207
- # We didn't support targeting a GPU type for run_function until 2023-12-12
208
- deprecation_warning(
209
- (2023, 12, 13), 'Setting gpu=True is deprecated. Use `gpu="any"` or `gpu=modal.gpu.Any()` instead.'
210
- )
211
- return Any()
212
225
  elif value is None or value is False:
213
226
  return None
214
227
  else:
215
228
  raise InvalidError(f"Invalid GPU config: {value}. Value must be a string, a `GPUConfig` object, or `None`.")
216
229
 
217
230
 
218
- def parse_gpu_config(value: GPU_T, raise_on_true: bool = True) -> api_pb2.GPUConfig:
231
+ def parse_gpu_config(value: GPU_T) -> api_pb2.GPUConfig:
219
232
  """mdmd:hidden"""
220
- gpu_config = _parse_gpu_config(value, raise_on_true)
233
+ gpu_config = _parse_gpu_config(value)
221
234
  if gpu_config is None:
222
235
  return api_pb2.GPUConfig()
223
236
  return gpu_config._to_proto()
@@ -225,4 +238,4 @@ def parse_gpu_config(value: GPU_T, raise_on_true: bool = True) -> api_pb2.GPUCon
225
238
 
226
239
  def display_gpu_config(value: GPU_T) -> str:
227
240
  """mdmd:hidden"""
228
- return repr(_parse_gpu_config(value, False))
241
+ return repr(_parse_gpu_config(value))