modal 0.62.16__py3-none-any.whl → 0.72.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only, and reflects the changes between package versions as they appear in their respective public registries.
Files changed (220)
  1. modal/__init__.py +17 -13
  2. modal/__main__.py +41 -3
  3. modal/_clustered_functions.py +80 -0
  4. modal/_clustered_functions.pyi +22 -0
  5. modal/_container_entrypoint.py +420 -937
  6. modal/_ipython.py +3 -13
  7. modal/_location.py +17 -10
  8. modal/_output.py +243 -99
  9. modal/_pty.py +2 -2
  10. modal/_resolver.py +55 -59
  11. modal/_resources.py +51 -0
  12. modal/_runtime/__init__.py +1 -0
  13. modal/_runtime/asgi.py +519 -0
  14. modal/_runtime/container_io_manager.py +1036 -0
  15. modal/_runtime/execution_context.py +89 -0
  16. modal/_runtime/telemetry.py +169 -0
  17. modal/_runtime/user_code_imports.py +356 -0
  18. modal/_serialization.py +134 -9
  19. modal/_traceback.py +47 -187
  20. modal/_tunnel.py +52 -16
  21. modal/_tunnel.pyi +19 -36
  22. modal/_utils/app_utils.py +3 -17
  23. modal/_utils/async_utils.py +479 -100
  24. modal/_utils/blob_utils.py +157 -186
  25. modal/_utils/bytes_io_segment_payload.py +97 -0
  26. modal/_utils/deprecation.py +89 -0
  27. modal/_utils/docker_utils.py +98 -0
  28. modal/_utils/function_utils.py +460 -171
  29. modal/_utils/grpc_testing.py +47 -31
  30. modal/_utils/grpc_utils.py +62 -109
  31. modal/_utils/hash_utils.py +61 -19
  32. modal/_utils/http_utils.py +39 -9
  33. modal/_utils/logger.py +2 -1
  34. modal/_utils/mount_utils.py +34 -16
  35. modal/_utils/name_utils.py +58 -0
  36. modal/_utils/package_utils.py +14 -1
  37. modal/_utils/pattern_utils.py +205 -0
  38. modal/_utils/rand_pb_testing.py +5 -7
  39. modal/_utils/shell_utils.py +15 -49
  40. modal/_vendor/a2wsgi_wsgi.py +62 -72
  41. modal/_vendor/cloudpickle.py +1 -1
  42. modal/_watcher.py +14 -12
  43. modal/app.py +1003 -314
  44. modal/app.pyi +540 -264
  45. modal/call_graph.py +7 -6
  46. modal/cli/_download.py +63 -53
  47. modal/cli/_traceback.py +200 -0
  48. modal/cli/app.py +205 -45
  49. modal/cli/config.py +12 -5
  50. modal/cli/container.py +62 -14
  51. modal/cli/dict.py +128 -0
  52. modal/cli/entry_point.py +26 -13
  53. modal/cli/environment.py +40 -9
  54. modal/cli/import_refs.py +64 -58
  55. modal/cli/launch.py +32 -18
  56. modal/cli/network_file_system.py +64 -83
  57. modal/cli/profile.py +1 -1
  58. modal/cli/programs/run_jupyter.py +35 -10
  59. modal/cli/programs/vscode.py +60 -10
  60. modal/cli/queues.py +131 -0
  61. modal/cli/run.py +234 -131
  62. modal/cli/secret.py +8 -7
  63. modal/cli/token.py +7 -2
  64. modal/cli/utils.py +79 -10
  65. modal/cli/volume.py +110 -109
  66. modal/client.py +250 -144
  67. modal/client.pyi +157 -118
  68. modal/cloud_bucket_mount.py +108 -34
  69. modal/cloud_bucket_mount.pyi +32 -38
  70. modal/cls.py +535 -148
  71. modal/cls.pyi +190 -146
  72. modal/config.py +41 -19
  73. modal/container_process.py +177 -0
  74. modal/container_process.pyi +82 -0
  75. modal/dict.py +111 -65
  76. modal/dict.pyi +136 -131
  77. modal/environments.py +106 -5
  78. modal/environments.pyi +77 -25
  79. modal/exception.py +34 -43
  80. modal/experimental.py +61 -2
  81. modal/extensions/ipython.py +5 -5
  82. modal/file_io.py +537 -0
  83. modal/file_io.pyi +235 -0
  84. modal/file_pattern_matcher.py +197 -0
  85. modal/functions.py +906 -911
  86. modal/functions.pyi +466 -430
  87. modal/gpu.py +57 -44
  88. modal/image.py +1089 -479
  89. modal/image.pyi +584 -228
  90. modal/io_streams.py +434 -0
  91. modal/io_streams.pyi +122 -0
  92. modal/mount.py +314 -101
  93. modal/mount.pyi +241 -235
  94. modal/network_file_system.py +92 -92
  95. modal/network_file_system.pyi +152 -110
  96. modal/object.py +67 -36
  97. modal/object.pyi +166 -143
  98. modal/output.py +63 -0
  99. modal/parallel_map.py +434 -0
  100. modal/parallel_map.pyi +75 -0
  101. modal/partial_function.py +282 -117
  102. modal/partial_function.pyi +222 -129
  103. modal/proxy.py +15 -12
  104. modal/proxy.pyi +3 -8
  105. modal/queue.py +182 -65
  106. modal/queue.pyi +218 -118
  107. modal/requirements/2024.04.txt +29 -0
  108. modal/requirements/2024.10.txt +16 -0
  109. modal/requirements/README.md +21 -0
  110. modal/requirements/base-images.json +22 -0
  111. modal/retries.py +48 -7
  112. modal/runner.py +459 -156
  113. modal/runner.pyi +135 -71
  114. modal/running_app.py +38 -0
  115. modal/sandbox.py +514 -236
  116. modal/sandbox.pyi +397 -169
  117. modal/schedule.py +4 -4
  118. modal/scheduler_placement.py +20 -3
  119. modal/secret.py +56 -31
  120. modal/secret.pyi +62 -42
  121. modal/serving.py +51 -56
  122. modal/serving.pyi +44 -36
  123. modal/stream_type.py +15 -0
  124. modal/token_flow.py +5 -3
  125. modal/token_flow.pyi +37 -32
  126. modal/volume.py +285 -157
  127. modal/volume.pyi +249 -184
  128. {modal-0.62.16.dist-info → modal-0.72.11.dist-info}/METADATA +7 -7
  129. modal-0.72.11.dist-info/RECORD +174 -0
  130. {modal-0.62.16.dist-info → modal-0.72.11.dist-info}/top_level.txt +0 -1
  131. modal_docs/gen_reference_docs.py +3 -1
  132. modal_docs/mdmd/mdmd.py +0 -1
  133. modal_docs/mdmd/signatures.py +5 -2
  134. modal_global_objects/images/base_images.py +28 -0
  135. modal_global_objects/mounts/python_standalone.py +2 -2
  136. modal_proto/__init__.py +1 -1
  137. modal_proto/api.proto +1288 -533
  138. modal_proto/api_grpc.py +856 -456
  139. modal_proto/api_pb2.py +2165 -1157
  140. modal_proto/api_pb2.pyi +8859 -0
  141. modal_proto/api_pb2_grpc.py +1674 -855
  142. modal_proto/api_pb2_grpc.pyi +1416 -0
  143. modal_proto/modal_api_grpc.py +149 -0
  144. modal_proto/modal_options_grpc.py +3 -0
  145. modal_proto/options_pb2.pyi +20 -0
  146. modal_proto/options_pb2_grpc.pyi +7 -0
  147. modal_proto/py.typed +0 -0
  148. modal_version/__init__.py +1 -1
  149. modal_version/_version_generated.py +2 -2
  150. modal/_asgi.py +0 -370
  151. modal/_container_entrypoint.pyi +0 -378
  152. modal/_container_exec.py +0 -128
  153. modal/_sandbox_shell.py +0 -49
  154. modal/shared_volume.py +0 -23
  155. modal/shared_volume.pyi +0 -24
  156. modal/stub.py +0 -783
  157. modal/stub.pyi +0 -332
  158. modal-0.62.16.dist-info/RECORD +0 -198
  159. modal_global_objects/images/conda.py +0 -15
  160. modal_global_objects/images/debian_slim.py +0 -15
  161. modal_global_objects/images/micromamba.py +0 -15
  162. test/__init__.py +0 -1
  163. test/aio_test.py +0 -12
  164. test/async_utils_test.py +0 -262
  165. test/blob_test.py +0 -67
  166. test/cli_imports_test.py +0 -149
  167. test/cli_test.py +0 -659
  168. test/client_test.py +0 -194
  169. test/cls_test.py +0 -630
  170. test/config_test.py +0 -137
  171. test/conftest.py +0 -1420
  172. test/container_app_test.py +0 -32
  173. test/container_test.py +0 -1389
  174. test/cpu_test.py +0 -23
  175. test/decorator_test.py +0 -85
  176. test/deprecation_test.py +0 -34
  177. test/dict_test.py +0 -33
  178. test/e2e_test.py +0 -68
  179. test/error_test.py +0 -7
  180. test/function_serialization_test.py +0 -32
  181. test/function_test.py +0 -653
  182. test/function_utils_test.py +0 -101
  183. test/gpu_test.py +0 -159
  184. test/grpc_utils_test.py +0 -141
  185. test/helpers.py +0 -42
  186. test/image_test.py +0 -669
  187. test/live_reload_test.py +0 -80
  188. test/lookup_test.py +0 -70
  189. test/mdmd_test.py +0 -329
  190. test/mount_test.py +0 -162
  191. test/mounted_files_test.py +0 -329
  192. test/network_file_system_test.py +0 -181
  193. test/notebook_test.py +0 -66
  194. test/object_test.py +0 -41
  195. test/package_utils_test.py +0 -25
  196. test/queue_test.py +0 -97
  197. test/resolver_test.py +0 -58
  198. test/retries_test.py +0 -67
  199. test/runner_test.py +0 -85
  200. test/sandbox_test.py +0 -191
  201. test/schedule_test.py +0 -15
  202. test/scheduler_placement_test.py +0 -29
  203. test/secret_test.py +0 -78
  204. test/serialization_test.py +0 -42
  205. test/stub_composition_test.py +0 -10
  206. test/stub_test.py +0 -360
  207. test/test_asgi_wrapper.py +0 -234
  208. test/token_flow_test.py +0 -18
  209. test/traceback_test.py +0 -135
  210. test/tunnel_test.py +0 -29
  211. test/utils_test.py +0 -88
  212. test/version_test.py +0 -14
  213. test/volume_test.py +0 -341
  214. test/watcher_test.py +0 -30
  215. test/webhook_test.py +0 -146
  216. /modal/{requirements.312.txt → requirements/2023.12.312.txt} +0 -0
  217. /modal/{requirements.txt → requirements/2023.12.txt} +0 -0
  218. {modal-0.62.16.dist-info → modal-0.72.11.dist-info}/LICENSE +0 -0
  219. {modal-0.62.16.dist-info → modal-0.72.11.dist-info}/WHEEL +0 -0
  220. {modal-0.62.16.dist-info → modal-0.72.11.dist-info}/entry_points.txt +0 -0
modal/gpu.py CHANGED
@@ -1,10 +1,10 @@
1
1
  # Copyright Modal Labs 2022
2
2
  from dataclasses import dataclass
3
- from typing import Optional, Union
3
+ from typing import Callable, Optional, Union
4
4
 
5
5
  from modal_proto import api_pb2
6
6
 
7
- from .exception import InvalidError, deprecation_error, deprecation_warning
7
+ from .exception import InvalidError
8
8
 
9
9
 
10
10
  @dataclass(frozen=True)
@@ -31,7 +31,7 @@ class T4(_GPUConfig):
31
31
 
32
32
  def __init__(
33
33
  self,
34
- count: int = 1, # Number of GPUs per container. Defaults to 1. Useful if you have very large models that don't fit on a single GPU.
34
+ count: int = 1, # Number of GPUs per container. Defaults to 1.
35
35
  ):
36
36
  super().__init__(api_pb2.GPU_TYPE_T4, count, 0)
37
37
 
@@ -43,12 +43,13 @@ class L4(_GPUConfig):
43
43
  """
44
44
  [NVIDIA L4 Tensor Core](https://www.nvidia.com/en-us/data-center/l4/) GPU class.
45
45
 
46
- A mid-tier data center GPU based on the Ada Lovelace architecture, providing 24GiB of GPU memory. Includes RTX (ray tracing) support.
46
+ A mid-tier data center GPU based on the Ada Lovelace architecture, providing 24GiB of GPU memory.
47
+ Includes RTX (ray tracing) support.
47
48
  """
48
49
 
49
50
  def __init__(
50
51
  self,
51
- count: int = 1, # Number of GPUs per container. Defaults to 1. Useful if you have very large models that don't fit on a single GPU.
52
+ count: int = 1, # Number of GPUs per container. Defaults to 1.
52
53
  ):
53
54
  super().__init__(api_pb2.GPU_TYPE_L4, count, 0)
54
55
 
@@ -66,22 +67,12 @@ class A100(_GPUConfig):
66
67
  def __init__(
67
68
  self,
68
69
  *,
69
- count: int = 1, # Number of GPUs per container. Defaults to 1. Useful if you have very large models that don't fit on a single GPU.
70
- memory: int = 0, # Deprecated. Use `size` instead.
70
+ count: int = 1, # Number of GPUs per container. Defaults to 1.
71
71
  size: Union[str, None] = None, # Select GiB configuration of GPU device: "40GB" or "80GB". Defaults to "40GB".
72
72
  ):
73
- allowed_memory_values = {40, 80}
74
73
  allowed_size_values = {"40GB", "80GB"}
75
- if memory == 20:
76
- raise ValueError(
77
- "A100 20GB is unsupported, consider `modal.A10G`, `modal.A100(memory_gb='40')`, or `modal.H100` instead"
78
- )
79
- elif memory and size:
80
- raise ValueError("Cannot specify both `memory` and `size`. Just specify `size`.")
81
- elif memory:
82
- if memory not in allowed_memory_values:
83
- raise ValueError(f"A100s can only have memory values of {allowed_memory_values} => memory={memory}")
84
- elif size:
74
+
75
+ if size:
85
76
  if size not in allowed_size_values:
86
77
  raise ValueError(
87
78
  f"size='{size}' is invalid. A100s can only have memory values of {allowed_size_values}."
@@ -106,14 +97,17 @@ class A10G(_GPUConfig):
106
97
  """
107
98
  [NVIDIA A10G Tensor Core](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) GPU class.
108
99
 
109
- A mid-tier data center GPU based on the Ampere architecture, providing 24 GiB of memory. A10G GPUs deliver up to 3.3x better ML training performance, 3x better ML inference performance,
100
+ A mid-tier data center GPU based on the Ampere architecture, providing 24 GiB of memory.
101
+ A10G GPUs deliver up to 3.3x better ML training performance, 3x better ML inference performance,
110
102
  and 3x better graphics performance, in comparison to NVIDIA T4 GPUs.
111
103
  """
112
104
 
113
105
  def __init__(
114
106
  self,
115
107
  *,
116
- count: int = 1, # Number of GPUs per container. Defaults to 1. Useful if you have very large models that don't fit on a single GPU.
108
+ # Number of GPUs per container. Defaults to 1.
109
+ # Useful if you have very large models that don't fit on a single GPU.
110
+ count: int = 1,
117
111
  ):
118
112
  super().__init__(api_pb2.GPU_TYPE_A10G, count)
119
113
 
@@ -125,13 +119,17 @@ class H100(_GPUConfig):
125
119
  """
126
120
  [NVIDIA H100 Tensor Core](https://www.nvidia.com/en-us/data-center/h100/) GPU class.
127
121
 
128
- The flagship data center GPU of the Hopper architecture. Enhanced support for FP8 precision and a Transformer Engine that provides up to 4X faster training over the prior generation for GPT-3 (175B) models.
122
+ The flagship data center GPU of the Hopper architecture.
123
+ Enhanced support for FP8 precision and a Transformer Engine that provides up to 4X faster training
124
+ over the prior generation for GPT-3 (175B) models.
129
125
  """
130
126
 
131
127
  def __init__(
132
128
  self,
133
129
  *,
134
- count: int = 1, # Number of GPUs per container. Defaults to 1. Useful if you have very large models that don't fit on a single GPU.
130
+ # Number of GPUs per container. Defaults to 1.
131
+ # Useful if you have very large models that don't fit on a single GPU.
132
+ count: int = 1,
135
133
  ):
136
134
  super().__init__(api_pb2.GPU_TYPE_H100, count)
137
135
 
@@ -139,6 +137,27 @@ class H100(_GPUConfig):
139
137
  return f"GPU(H100, count={self.count})"
140
138
 
141
139
 
140
+ class L40S(_GPUConfig):
141
+ """
142
+ [NVIDIA L40S](https://www.nvidia.com/en-us/data-center/l40s/) GPU class.
143
+
144
+ The L40S is a data center GPU for the Ada Lovelace architecture. It has 48 GB of on-chip
145
+ GDDR6 RAM and enhanced support for FP8 precision.
146
+ """
147
+
148
+ def __init__(
149
+ self,
150
+ *,
151
+ # Number of GPUs per container. Defaults to 1.
152
+ # Useful if you have very large models that don't fit on a single GPU.
153
+ count: int = 1,
154
+ ):
155
+ super().__init__(api_pb2.GPU_TYPE_L40S, count)
156
+
157
+ def __repr__(self):
158
+ return f"GPU(L40S, count={self.count})"
159
+
160
+
142
161
  class Any(_GPUConfig):
143
162
  """Selects any one of the GPU classes available within Modal, according to availability."""
144
163
 
@@ -149,24 +168,28 @@ class Any(_GPUConfig):
149
168
  return f"GPU(Any, count={self.count})"
150
169
 
151
170
 
152
- STRING_TO_GPU_CONFIG = {
171
+ STRING_TO_GPU_CONFIG: dict[str, Callable] = {
153
172
  "t4": T4,
154
173
  "l4": L4,
155
174
  "a100": A100,
175
+ "a100-80gb": lambda: A100(size="80GB"),
156
176
  "h100": H100,
157
177
  "a10g": A10G,
178
+ "l40s": L40S,
158
179
  "any": Any,
159
180
  }
160
- display_string_to_config = "\n".join(
161
- f'- "{key}" → `{cls()}`' for key, cls in STRING_TO_GPU_CONFIG.items() if key != "inf2"
162
- )
181
+ display_string_to_config = "\n".join(f'- "{key}" → `{c()}`' for key, c in STRING_TO_GPU_CONFIG.items() if key != "inf2")
163
182
  __doc__ = f"""
164
183
  **GPU configuration shortcodes**
165
184
 
166
- The following are the valid `str` values for the `gpu` parameter of [`@stub.function`](/docs/reference/modal.Stub#function).
185
+ The following are the valid `str` values for the `gpu` parameter of
186
+ [`@app.function`](/docs/reference/modal.App#function).
167
187
 
168
188
  {display_string_to_config}
169
189
 
190
+ The shortcodes also support specifying count by suffixing `:N` to acquire `N` GPUs.
191
+ For example, `a10g:4` will provision 4 A10G GPUs.
192
+
170
193
  Other configurations can be created using the constructors documented below.
171
194
  """
172
195
 
@@ -174,7 +197,7 @@ Other configurations can be created using the constructors documented below.
174
197
  GPU_T = Union[None, bool, str, _GPUConfig]
175
198
 
176
199
 
177
- def _parse_gpu_config(value: GPU_T, raise_on_true: bool = True) -> Optional[_GPUConfig]:
200
+ def _parse_gpu_config(value: GPU_T) -> Optional[_GPUConfig]:
178
201
  if isinstance(value, _GPUConfig):
179
202
  return value
180
203
  elif isinstance(value, str):
@@ -187,37 +210,27 @@ def _parse_gpu_config(value: GPU_T, raise_on_true: bool = True) -> Optional[_GPU
187
210
  raise InvalidError(f"Invalid GPU count: {count_str}. Value must be an integer.")
188
211
 
189
212
  if value.lower() == "a100-20g":
190
- return A100(memory=20, count=count) # Triggers unsupported error underneath.
213
+ return A100(size="20GB", count=count) # Triggers unsupported error underneath.
191
214
  elif value.lower() == "a100-40gb":
192
215
  return A100(size="40GB", count=count)
193
216
  elif value.lower() == "a100-80gb":
194
217
  return A100(size="80GB", count=count)
195
218
  elif value.lower() not in STRING_TO_GPU_CONFIG:
196
219
  raise InvalidError(
197
- f"Invalid GPU type: {value}. Value must be one of {list(STRING_TO_GPU_CONFIG.keys())} (case-insensitive)."
220
+ f"Invalid GPU type: {value}. "
221
+ f"Value must be one of {list(STRING_TO_GPU_CONFIG.keys())} (case-insensitive)."
198
222
  )
199
223
  else:
200
224
  return STRING_TO_GPU_CONFIG[value.lower()](count=count)
201
- elif value is True:
202
- if raise_on_true:
203
- deprecation_error(
204
- (2022, 12, 19), 'Setting gpu=True is deprecated. Use `gpu="any"` or `gpu=modal.gpu.Any()` instead.'
205
- )
206
- else:
207
- # We didn't support targeting a GPU type for run_function until 2023-12-12
208
- deprecation_warning(
209
- (2023, 12, 13), 'Setting gpu=True is deprecated. Use `gpu="any"` or `gpu=modal.gpu.Any()` instead.'
210
- )
211
- return Any()
212
225
  elif value is None or value is False:
213
226
  return None
214
227
  else:
215
228
  raise InvalidError(f"Invalid GPU config: {value}. Value must be a string, a `GPUConfig` object, or `None`.")
216
229
 
217
230
 
218
- def parse_gpu_config(value: GPU_T, raise_on_true: bool = True) -> api_pb2.GPUConfig:
231
+ def parse_gpu_config(value: GPU_T) -> api_pb2.GPUConfig:
219
232
  """mdmd:hidden"""
220
- gpu_config = _parse_gpu_config(value, raise_on_true)
233
+ gpu_config = _parse_gpu_config(value)
221
234
  if gpu_config is None:
222
235
  return api_pb2.GPUConfig()
223
236
  return gpu_config._to_proto()
@@ -225,4 +238,4 @@ def parse_gpu_config(value: GPU_T, raise_on_true: bool = True) -> api_pb2.GPUCon
225
238
 
226
239
  def display_gpu_config(value: GPU_T) -> str:
227
240
  """mdmd:hidden"""
228
- return repr(_parse_gpu_config(value, False))
241
+ return repr(_parse_gpu_config(value))