modal 0.72.5__py3-none-any.whl → 0.72.48__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. modal/_container_entrypoint.py +5 -10
  2. modal/_object.py +297 -0
  3. modal/_resolver.py +7 -5
  4. modal/_runtime/container_io_manager.py +0 -11
  5. modal/_runtime/user_code_imports.py +7 -7
  6. modal/_serialization.py +4 -3
  7. modal/_tunnel.py +1 -1
  8. modal/app.py +14 -61
  9. modal/app.pyi +25 -25
  10. modal/cli/app.py +3 -2
  11. modal/cli/container.py +1 -1
  12. modal/cli/import_refs.py +185 -113
  13. modal/cli/launch.py +10 -5
  14. modal/cli/programs/run_jupyter.py +2 -2
  15. modal/cli/programs/vscode.py +3 -3
  16. modal/cli/run.py +134 -68
  17. modal/client.py +1 -0
  18. modal/client.pyi +18 -14
  19. modal/cloud_bucket_mount.py +4 -0
  20. modal/cloud_bucket_mount.pyi +4 -0
  21. modal/cls.py +33 -5
  22. modal/cls.pyi +20 -5
  23. modal/container_process.pyi +8 -6
  24. modal/dict.py +1 -1
  25. modal/dict.pyi +32 -29
  26. modal/environments.py +1 -1
  27. modal/environments.pyi +2 -1
  28. modal/experimental.py +47 -11
  29. modal/experimental.pyi +29 -0
  30. modal/file_io.pyi +30 -28
  31. modal/file_pattern_matcher.py +3 -4
  32. modal/functions.py +31 -23
  33. modal/functions.pyi +57 -50
  34. modal/gpu.py +19 -26
  35. modal/image.py +47 -19
  36. modal/image.pyi +28 -21
  37. modal/io_streams.pyi +14 -12
  38. modal/mount.py +14 -5
  39. modal/mount.pyi +28 -25
  40. modal/network_file_system.py +7 -7
  41. modal/network_file_system.pyi +27 -24
  42. modal/object.py +2 -265
  43. modal/object.pyi +46 -130
  44. modal/parallel_map.py +2 -2
  45. modal/parallel_map.pyi +10 -7
  46. modal/partial_function.py +22 -3
  47. modal/partial_function.pyi +45 -27
  48. modal/proxy.py +1 -1
  49. modal/proxy.pyi +2 -1
  50. modal/queue.py +1 -1
  51. modal/queue.pyi +26 -23
  52. modal/runner.py +14 -3
  53. modal/sandbox.py +11 -7
  54. modal/sandbox.pyi +30 -27
  55. modal/secret.py +1 -1
  56. modal/secret.pyi +2 -1
  57. modal/token_flow.pyi +6 -4
  58. modal/volume.py +1 -1
  59. modal/volume.pyi +36 -33
  60. {modal-0.72.5.dist-info → modal-0.72.48.dist-info}/METADATA +2 -2
  61. {modal-0.72.5.dist-info → modal-0.72.48.dist-info}/RECORD +73 -71
  62. modal_proto/api.proto +151 -4
  63. modal_proto/api_grpc.py +113 -0
  64. modal_proto/api_pb2.py +998 -795
  65. modal_proto/api_pb2.pyi +430 -11
  66. modal_proto/api_pb2_grpc.py +233 -1
  67. modal_proto/api_pb2_grpc.pyi +75 -3
  68. modal_proto/modal_api_grpc.py +7 -0
  69. modal_version/_version_generated.py +1 -1
  70. {modal-0.72.5.dist-info → modal-0.72.48.dist-info}/LICENSE +0 -0
  71. {modal-0.72.5.dist-info → modal-0.72.48.dist-info}/WHEEL +0 -0
  72. {modal-0.72.5.dist-info → modal-0.72.48.dist-info}/entry_points.txt +0 -0
  73. {modal-0.72.5.dist-info → modal-0.72.48.dist-info}/top_level.txt +0 -0
modal/functions.pyi CHANGED
@@ -1,5 +1,6 @@
 import collections.abc
 import google.protobuf.message
+import modal._object
 import modal._utils.async_utils
 import modal._utils.function_utils
 import modal.app
@@ -133,17 +134,20 @@ ReturnType = typing.TypeVar("ReturnType", covariant=True)

 OriginalReturnType = typing.TypeVar("OriginalReturnType", covariant=True)

-class _Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object._Object):
+SUPERSELF = typing.TypeVar("SUPERSELF", covariant=True)
+
+class _Function(typing.Generic[P, ReturnType, OriginalReturnType], modal._object._Object):
     _info: typing.Optional[modal._utils.function_utils.FunctionInfo]
     _serve_mounts: frozenset[modal.mount._Mount]
     _app: typing.Optional[modal.app._App]
     _obj: typing.Optional[modal.cls._Obj]
+    _webhook_config: typing.Optional[modal_proto.api_pb2.WebhookConfig]
     _web_url: typing.Optional[str]
     _function_name: typing.Optional[str]
     _is_method: bool
     _spec: typing.Optional[_FunctionSpec]
     _tag: str
-    _raw_f: typing.Callable[..., typing.Any]
+    _raw_f: typing.Optional[collections.abc.Callable[..., typing.Any]]
     _build_args: dict
     _is_generator: typing.Optional[bool]
     _cluster_size: typing.Optional[int]
@@ -197,7 +201,7 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object.
         _experimental_buffer_containers: typing.Optional[int] = None,
         _experimental_proxy_ip: typing.Optional[str] = None,
         _experimental_custom_scaling_factor: typing.Optional[float] = None,
-    ) -> None: ...
+    ) -> _Function: ...
     def _bind_parameters(
         self,
         obj: modal.cls._Obj,
@@ -228,6 +232,7 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object.
     def info(self) -> modal._utils.function_utils.FunctionInfo: ...
     @property
     def spec(self) -> _FunctionSpec: ...
+    def _is_web_endpoint(self) -> bool: ...
     def get_build_def(self) -> str: ...
     def _initialize_from_empty(self): ...
     def _hydrate_metadata(self, metadata: typing.Optional[google.protobuf.message.Message]): ...
@@ -254,10 +259,10 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object.
     def local(self, *args: P.args, **kwargs: P.kwargs) -> OriginalReturnType: ...
     async def _experimental_spawn(self, *args: P.args, **kwargs: P.kwargs) -> _FunctionCall[ReturnType]: ...
     async def spawn(self, *args: P.args, **kwargs: P.kwargs) -> _FunctionCall[ReturnType]: ...
-    def get_raw_f(self) -> typing.Callable[..., typing.Any]: ...
+    def get_raw_f(self) -> collections.abc.Callable[..., typing.Any]: ...
     async def get_current_stats(self) -> FunctionStats: ...

-    class __map_spec(typing_extensions.Protocol):
+    class __map_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(
             self, *input_iterators, kwargs={}, order_outputs: bool = True, return_exceptions: bool = False
         ) -> modal._utils.async_utils.AsyncOrSyncIterable: ...
@@ -269,9 +274,9 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object.
             return_exceptions: bool = False,
         ) -> typing.AsyncGenerator[typing.Any, None]: ...

-    map: __map_spec
+    map: __map_spec[typing_extensions.Self]

-    class __starmap_spec(typing_extensions.Protocol):
+    class __starmap_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(
             self,
             input_iterator: typing.Iterable[typing.Sequence[typing.Any]],
@@ -289,13 +294,13 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object.
             return_exceptions: bool = False,
         ) -> typing.AsyncIterable[typing.Any]: ...

-    starmap: __starmap_spec
+    starmap: __starmap_spec[typing_extensions.Self]

-    class __for_each_spec(typing_extensions.Protocol):
+    class __for_each_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self, *input_iterators, kwargs={}, ignore_exceptions: bool = False): ...
         async def aio(self, *input_iterators, kwargs={}, ignore_exceptions: bool = False): ...

-    for_each: __for_each_spec
+    for_each: __for_each_spec[typing_extensions.Self]

 ReturnType_INNER = typing.TypeVar("ReturnType_INNER", covariant=True)

@@ -306,12 +311,13 @@ class Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object.O
     _serve_mounts: frozenset[modal.mount.Mount]
     _app: typing.Optional[modal.app.App]
     _obj: typing.Optional[modal.cls.Obj]
+    _webhook_config: typing.Optional[modal_proto.api_pb2.WebhookConfig]
     _web_url: typing.Optional[str]
     _function_name: typing.Optional[str]
     _is_method: bool
     _spec: typing.Optional[_FunctionSpec]
     _tag: str
-    _raw_f: typing.Callable[..., typing.Any]
+    _raw_f: typing.Optional[collections.abc.Callable[..., typing.Any]]
     _build_args: dict
     _is_generator: typing.Optional[bool]
     _cluster_size: typing.Optional[int]
@@ -366,7 +372,7 @@ class Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object.O
         _experimental_buffer_containers: typing.Optional[int] = None,
         _experimental_proxy_ip: typing.Optional[str] = None,
         _experimental_custom_scaling_factor: typing.Optional[float] = None,
-    ) -> None: ...
+    ) -> Function: ...
     def _bind_parameters(
         self,
         obj: modal.cls.Obj,
@@ -375,11 +381,11 @@ class Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object.O
         kwargs: dict[str, typing.Any],
     ) -> Function: ...

-    class __keep_warm_spec(typing_extensions.Protocol):
+    class __keep_warm_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self, warm_pool_size: int) -> None: ...
         async def aio(self, warm_pool_size: int) -> None: ...

-    keep_warm: __keep_warm_spec
+    keep_warm: __keep_warm_spec[typing_extensions.Self]

     @classmethod
     def from_name(
@@ -416,6 +422,7 @@ class Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object.O
     def info(self) -> modal._utils.function_utils.FunctionInfo: ...
     @property
     def spec(self) -> _FunctionSpec: ...
+    def _is_web_endpoint(self) -> bool: ...
     def get_build_def(self) -> str: ...
     def _initialize_from_empty(self): ...
     def _hydrate_metadata(self, metadata: typing.Optional[google.protobuf.message.Message]): ...
@@ -428,7 +435,7 @@ class Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object.O
     @property
     def cluster_size(self) -> int: ...

-    class ___map_spec(typing_extensions.Protocol):
+    class ___map_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(
             self, input_queue: modal.parallel_map.SynchronizedQueue, order_outputs: bool, return_exceptions: bool
         ) -> typing.Generator[typing.Any, None, None]: ...
@@ -436,70 +443,70 @@ class Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object.O
             self, input_queue: modal.parallel_map.SynchronizedQueue, order_outputs: bool, return_exceptions: bool
         ) -> collections.abc.AsyncGenerator[typing.Any, None]: ...

-    _map: ___map_spec
+    _map: ___map_spec[typing_extensions.Self]

-    class ___call_function_spec(typing_extensions.Protocol[ReturnType_INNER]):
+    class ___call_function_spec(typing_extensions.Protocol[ReturnType_INNER, SUPERSELF]):
         def __call__(self, args, kwargs) -> ReturnType_INNER: ...
         async def aio(self, args, kwargs) -> ReturnType_INNER: ...

-    _call_function: ___call_function_spec[ReturnType]
+    _call_function: ___call_function_spec[ReturnType, typing_extensions.Self]

-    class ___call_function_nowait_spec(typing_extensions.Protocol):
+    class ___call_function_nowait_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self, args, kwargs, function_call_invocation_type: int) -> _Invocation: ...
         async def aio(self, args, kwargs, function_call_invocation_type: int) -> _Invocation: ...

-    _call_function_nowait: ___call_function_nowait_spec
+    _call_function_nowait: ___call_function_nowait_spec[typing_extensions.Self]

-    class ___call_generator_spec(typing_extensions.Protocol):
+    class ___call_generator_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self, args, kwargs): ...
         def aio(self, args, kwargs): ...

-    _call_generator: ___call_generator_spec
+    _call_generator: ___call_generator_spec[typing_extensions.Self]

-    class ___call_generator_nowait_spec(typing_extensions.Protocol):
+    class ___call_generator_nowait_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self, args, kwargs): ...
         async def aio(self, args, kwargs): ...

-    _call_generator_nowait: ___call_generator_nowait_spec
+    _call_generator_nowait: ___call_generator_nowait_spec[typing_extensions.Self]

-    class __remote_spec(typing_extensions.Protocol[P_INNER, ReturnType_INNER]):
+    class __remote_spec(typing_extensions.Protocol[ReturnType_INNER, P_INNER, SUPERSELF]):
         def __call__(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> ReturnType_INNER: ...
         async def aio(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> ReturnType_INNER: ...

-    remote: __remote_spec[P, ReturnType]
+    remote: __remote_spec[ReturnType, P, typing_extensions.Self]

-    class __remote_gen_spec(typing_extensions.Protocol):
+    class __remote_gen_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self, *args, **kwargs) -> typing.Generator[typing.Any, None, None]: ...
         def aio(self, *args, **kwargs) -> collections.abc.AsyncGenerator[typing.Any, None]: ...

-    remote_gen: __remote_gen_spec
+    remote_gen: __remote_gen_spec[typing_extensions.Self]

     def _is_local(self): ...
     def _get_info(self) -> modal._utils.function_utils.FunctionInfo: ...
     def _get_obj(self) -> typing.Optional[modal.cls.Obj]: ...
     def local(self, *args: P.args, **kwargs: P.kwargs) -> OriginalReturnType: ...

-    class ___experimental_spawn_spec(typing_extensions.Protocol[P_INNER, ReturnType_INNER]):
+    class ___experimental_spawn_spec(typing_extensions.Protocol[ReturnType_INNER, P_INNER, SUPERSELF]):
         def __call__(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]: ...
         async def aio(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]: ...

-    _experimental_spawn: ___experimental_spawn_spec[P, ReturnType]
+    _experimental_spawn: ___experimental_spawn_spec[ReturnType, P, typing_extensions.Self]

-    class __spawn_spec(typing_extensions.Protocol[P_INNER, ReturnType_INNER]):
+    class __spawn_spec(typing_extensions.Protocol[ReturnType_INNER, P_INNER, SUPERSELF]):
         def __call__(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]: ...
         async def aio(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]: ...

-    spawn: __spawn_spec[P, ReturnType]
+    spawn: __spawn_spec[ReturnType, P, typing_extensions.Self]

-    def get_raw_f(self) -> typing.Callable[..., typing.Any]: ...
+    def get_raw_f(self) -> collections.abc.Callable[..., typing.Any]: ...

-    class __get_current_stats_spec(typing_extensions.Protocol):
+    class __get_current_stats_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self) -> FunctionStats: ...
         async def aio(self) -> FunctionStats: ...

-    get_current_stats: __get_current_stats_spec
+    get_current_stats: __get_current_stats_spec[typing_extensions.Self]

-    class __map_spec(typing_extensions.Protocol):
+    class __map_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(
             self, *input_iterators, kwargs={}, order_outputs: bool = True, return_exceptions: bool = False
         ) -> modal._utils.async_utils.AsyncOrSyncIterable: ...
@@ -511,9 +518,9 @@ class Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object.O
             return_exceptions: bool = False,
         ) -> typing.AsyncGenerator[typing.Any, None]: ...

-    map: __map_spec
+    map: __map_spec[typing_extensions.Self]

-    class __starmap_spec(typing_extensions.Protocol):
+    class __starmap_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(
             self,
             input_iterator: typing.Iterable[typing.Sequence[typing.Any]],
@@ -531,15 +538,15 @@ class Function(typing.Generic[P, ReturnType, OriginalReturnType], modal.object.O
             return_exceptions: bool = False,
         ) -> typing.AsyncIterable[typing.Any]: ...

-    starmap: __starmap_spec
+    starmap: __starmap_spec[typing_extensions.Self]

-    class __for_each_spec(typing_extensions.Protocol):
+    class __for_each_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self, *input_iterators, kwargs={}, ignore_exceptions: bool = False): ...
         async def aio(self, *input_iterators, kwargs={}, ignore_exceptions: bool = False): ...

-    for_each: __for_each_spec
+    for_each: __for_each_spec[typing_extensions.Self]

-class _FunctionCall(typing.Generic[ReturnType], modal.object._Object):
+class _FunctionCall(typing.Generic[ReturnType], modal._object._Object):
     _is_generator: bool

     def _invocation(self): ...
@@ -558,29 +565,29 @@ class FunctionCall(typing.Generic[ReturnType], modal.object.Object):
     def __init__(self, *args, **kwargs): ...
     def _invocation(self): ...

-    class __get_spec(typing_extensions.Protocol[ReturnType_INNER]):
+    class __get_spec(typing_extensions.Protocol[ReturnType_INNER, SUPERSELF]):
         def __call__(self, timeout: typing.Optional[float] = None) -> ReturnType_INNER: ...
         async def aio(self, timeout: typing.Optional[float] = None) -> ReturnType_INNER: ...

-    get: __get_spec[ReturnType]
+    get: __get_spec[ReturnType, typing_extensions.Self]

-    class __get_gen_spec(typing_extensions.Protocol):
+    class __get_gen_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self) -> typing.Generator[typing.Any, None, None]: ...
         def aio(self) -> collections.abc.AsyncGenerator[typing.Any, None]: ...

-    get_gen: __get_gen_spec
+    get_gen: __get_gen_spec[typing_extensions.Self]

-    class __get_call_graph_spec(typing_extensions.Protocol):
+    class __get_call_graph_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self) -> list[modal.call_graph.InputInfo]: ...
         async def aio(self) -> list[modal.call_graph.InputInfo]: ...

-    get_call_graph: __get_call_graph_spec
+    get_call_graph: __get_call_graph_spec[typing_extensions.Self]

-    class __cancel_spec(typing_extensions.Protocol):
+    class __cancel_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self, terminate_containers: bool = False): ...
         async def aio(self, terminate_containers: bool = False): ...

-    cancel: __cancel_spec
+    cancel: __cancel_spec[typing_extensions.Self]

     class __from_id_spec(typing_extensions.Protocol):
         def __call__(
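A recurring change in the regenerated stubs above is that the method-shim Protocols are now parameterized with a covariant SUPERSELF type variable and the corresponding attributes are bound with `typing_extensions.Self`. Below is a minimal standalone sketch of that typing pattern; the class and attribute names are illustrative only, not modal's actual stub classes.

```python
# Sketch of the SUPERSELF/Self stub pattern; names here are hypothetical.
from __future__ import annotations

from typing import Protocol, TypeVar

from typing_extensions import Self

# Covariant "phantom" type variable carried by each method-shim Protocol.
SUPERSELF = TypeVar("SUPERSELF", covariant=True)


class KeepWarmSpec(Protocol[SUPERSELF]):
    # Blocking call signature.
    def __call__(self, warm_pool_size: int) -> None: ...
    # Async variant exposed as .aio(), as in the modal stubs.
    async def aio(self, warm_pool_size: int) -> None: ...


class Function:
    # Parameterizing the attribute with Self ties the callable shim back to
    # the concrete owning class (or subclass) in the eyes of a type checker.
    keep_warm: KeepWarmSpec[Self]
```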
modal/gpu.py CHANGED
@@ -9,8 +9,9 @@ from .exception import InvalidError

 @dataclass(frozen=True)
 class _GPUConfig:
-    type: "api_pb2.GPUType.V"
+    type: "api_pb2.GPUType.V"  # Deprecated, at some point
     count: int
+    gpu_type: str
     memory: int = 0

     def _to_proto(self) -> api_pb2.GPUConfig:
@@ -19,6 +20,7 @@ class _GPUConfig:
             type=self.type,
             count=self.count,
             memory=self.memory,
+            gpu_type=self.gpu_type,
         )


@@ -26,14 +28,14 @@ class T4(_GPUConfig):
     """
     [NVIDIA T4 Tensor Core](https://www.nvidia.com/en-us/data-center/tesla-t4/) GPU class.

-    A low-cost data center GPU based on the Turing architecture, providing 16GiB of GPU memory.
+    A low-cost data center GPU based on the Turing architecture, providing 16GB of GPU memory.
     """

     def __init__(
         self,
         count: int = 1,  # Number of GPUs per container. Defaults to 1.
     ):
-        super().__init__(api_pb2.GPU_TYPE_T4, count, 0)
+        super().__init__(api_pb2.GPU_TYPE_T4, count, "T4")

     def __repr__(self):
         return f"GPU(T4, count={self.count})"
@@ -43,7 +45,7 @@ class L4(_GPUConfig):
     """
     [NVIDIA L4 Tensor Core](https://www.nvidia.com/en-us/data-center/l4/) GPU class.

-    A mid-tier data center GPU based on the Ada Lovelace architecture, providing 24GiB of GPU memory.
+    A mid-tier data center GPU based on the Ada Lovelace architecture, providing 24GB of GPU memory.
     Includes RTX (ray tracing) support.
     """

@@ -51,7 +53,7 @@ class L4(_GPUConfig):
         self,
         count: int = 1,  # Number of GPUs per container. Defaults to 1.
     ):
-        super().__init__(api_pb2.GPU_TYPE_L4, count, 0)
+        super().__init__(api_pb2.GPU_TYPE_L4, count, "L4")

     def __repr__(self):
         return f"GPU(L4, count={self.count})"
@@ -61,30 +63,21 @@ class A100(_GPUConfig):
     """
     [NVIDIA A100 Tensor Core](https://www.nvidia.com/en-us/data-center/a100/) GPU class.

-    The flagship data center GPU of the Ampere architecture. Available in 40GiB and 80GiB GPU memory configurations.
+    The flagship data center GPU of the Ampere architecture. Available in 40GB and 80GB GPU memory configurations.
     """

     def __init__(
         self,
         *,
         count: int = 1,  # Number of GPUs per container. Defaults to 1.
-        size: Union[str, None] = None,  # Select GiB configuration of GPU device: "40GB" or "80GB". Defaults to "40GB".
+        size: Union[str, None] = None,  # Select GB configuration of GPU device: "40GB" or "80GB". Defaults to "40GB".
     ):
-        allowed_size_values = {"40GB", "80GB"}
-
-        if size:
-            if size not in allowed_size_values:
-                raise ValueError(
-                    f"size='{size}' is invalid. A100s can only have memory values of {allowed_size_values}."
-                )
-            memory = int(size.replace("GB", ""))
+        if size == "40GB" or not size:
+            super().__init__(api_pb2.GPU_TYPE_A100, count, "A100-40GB", 40)
+        elif size == "80GB":
+            super().__init__(api_pb2.GPU_TYPE_A100_80GB, count, "A100-80GB", 80)
         else:
-            memory = 40
-
-        if memory == 80:
-            super().__init__(api_pb2.GPU_TYPE_A100_80GB, count, memory)
-        else:
-            super().__init__(api_pb2.GPU_TYPE_A100, count, memory)
+            raise ValueError(f"size='{size}' is invalid. A100s can only have memory values of 40GB or 80GB.")

     def __repr__(self):
         if self.memory == 80:
@@ -97,7 +90,7 @@ class A10G(_GPUConfig):
     """
     [NVIDIA A10G Tensor Core](https://www.nvidia.com/en-us/data-center/products/a10-gpu/) GPU class.

-    A mid-tier data center GPU based on the Ampere architecture, providing 24 GiB of memory.
+    A mid-tier data center GPU based on the Ampere architecture, providing 24 GB of memory.
     A10G GPUs deliver up to 3.3x better ML training performance, 3x better ML inference performance,
     and 3x better graphics performance, in comparison to NVIDIA T4 GPUs.
     """
@@ -109,7 +102,7 @@ class A10G(_GPUConfig):
         # Useful if you have very large models that don't fit on a single GPU.
         count: int = 1,
     ):
-        super().__init__(api_pb2.GPU_TYPE_A10G, count)
+        super().__init__(api_pb2.GPU_TYPE_A10G, count, "A10G")

     def __repr__(self):
         return f"GPU(A10G, count={self.count})"
@@ -131,7 +124,7 @@ class H100(_GPUConfig):
         # Useful if you have very large models that don't fit on a single GPU.
         count: int = 1,
     ):
-        super().__init__(api_pb2.GPU_TYPE_H100, count)
+        super().__init__(api_pb2.GPU_TYPE_H100, count, "H100")

     def __repr__(self):
         return f"GPU(H100, count={self.count})"
@@ -152,7 +145,7 @@ class L40S(_GPUConfig):
         # Useful if you have very large models that don't fit on a single GPU.
         count: int = 1,
     ):
-        super().__init__(api_pb2.GPU_TYPE_L40S, count)
+        super().__init__(api_pb2.GPU_TYPE_L40S, count, "L40S")

     def __repr__(self):
         return f"GPU(L40S, count={self.count})"
@@ -162,7 +155,7 @@ class Any(_GPUConfig):
     """Selects any one of the GPU classes available within Modal, according to availability."""

     def __init__(self, *, count: int = 1):
-        super().__init__(api_pb2.GPU_TYPE_ANY, count)
+        super().__init__(api_pb2.GPU_TYPE_ANY, count, "ANY")

     def __repr__(self):
         return f"GPU(Any, count={self.count})"
modal/image.py CHANGED
@@ -26,6 +26,7 @@ from grpclib.exceptions import GRPCError, StreamTerminatedError

 from modal_proto import api_pb2

+from ._object import _Object, live_method_gen
 from ._resolver import Resolver
 from ._serialization import serialize
 from ._utils.async_utils import synchronize_api
@@ -46,7 +47,6 @@ from .file_pattern_matcher import NON_PYTHON_FILES, FilePatternMatcher, _ignore_
 from .gpu import GPU_T, parse_gpu_config
 from .mount import _Mount, python_standalone_mount_name
 from .network_file_system import _NetworkFileSystem
-from .object import _Object, live_method_gen
 from .output import _get_output_manager
 from .scheduler_placement import SchedulerPlacement
 from .secret import _Secret
@@ -82,6 +82,11 @@ class _AutoDockerIgnoreSentinel:

 AUTO_DOCKERIGNORE = _AutoDockerIgnoreSentinel()

+COPY_DEPRECATION_MESSAGE_PATTERN = """modal.Image.copy_* methods will soon be deprecated.
+
+Use {replacement} instead, which is functionally and performance-wise equivalent.
+"""
+

 def _validate_python_version(
     python_version: Optional[str], builder_version: ImageBuilderVersion, allow_micro_granularity: bool = True
@@ -657,13 +662,16 @@ class _Image(_Object, type_prefix="im"):
         return obj

     def copy_mount(self, mount: _Mount, remote_path: Union[str, Path] = ".") -> "_Image":
-        """Copy the entire contents of a `modal.Mount` into an image.
+        """
+        **Deprecated**: Use image.add_local_dir(..., copy=True) or similar instead.
+
+        Copy the entire contents of a `modal.Mount` into an image.
         Useful when files only available locally are required during the image
         build process.

         **Example**

-        ```python
+        ```python notest
         static_images_dir = "./static"
         # place all static images in root of mount
         mount = modal.Mount.from_local_dir(static_images_dir, remote_path="/")
@@ -736,7 +744,6 @@ class _Image(_Object, type_prefix="im"):
         **Usage:**

         ```python
-        from pathlib import Path
         from modal import FilePatternMatcher

         image = modal.Image.debian_slim().add_local_dir(
@@ -768,7 +775,7 @@ class _Image(_Object, type_prefix="im"):
         image = modal.Image.debian_slim().add_local_dir(
             "~/assets",
             remote_path="/assets",
-            ignore=FilePatternMatcher.from_file(Path("/path/to/ignorefile")),
+            ignore=FilePatternMatcher.from_file("/path/to/ignorefile"),
         )
         ```
         """
@@ -786,7 +793,9 @@ class _Image(_Object, type_prefix="im"):
         This works in a similar way to [`COPY`](https://docs.docker.com/engine/reference/builder/#copy)
         works in a `Dockerfile`.
         """
-        # TODO(elias): add pending deprecation with suggestion to use add_* instead
+        deprecation_warning(
+            (2024, 1, 13), COPY_DEPRECATION_MESSAGE_PATTERN.format(replacement="image.add_local_file"), pending=True
+        )
         basename = str(Path(local_path).name)

         def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
@@ -845,14 +854,17 @@ class _Image(_Object, type_prefix="im"):
         # Which follows dockerignore syntax.
         ignore: Union[Sequence[str], Callable[[Path], bool]] = [],
     ) -> "_Image":
-        """Copy a directory into the image as a part of building the image.
+        """
+        **Deprecated**: Use image.add_local_dir instead
+
+        Copy a directory into the image as a part of building the image.

         This works in a similar way to [`COPY`](https://docs.docker.com/engine/reference/builder/#copy)
         works in a `Dockerfile`.

         **Usage:**

-        ```python
+        ```python notest
         from pathlib import Path
         from modal import FilePatternMatcher

@@ -885,10 +897,13 @@ class _Image(_Object, type_prefix="im"):
         image = modal.Image.debian_slim().copy_local_dir(
             "~/assets",
             remote_path="/assets",
-            ignore=FilePatternMatcher.from_file(Path("/path/to/ignorefile")),
+            ignore=FilePatternMatcher.from_file("/path/to/ignorefile"),
         )
         ```
         """
+        deprecation_warning(
+            (2024, 1, 13), COPY_DEPRECATION_MESSAGE_PATTERN.format(replacement="image.add_local_dir"), pending=True
+        )

         def build_dockerfile(version: ImageBuilderVersion) -> DockerfileSpec:
             return DockerfileSpec(commands=["FROM base", f"COPY . {remote_path}"], context_files={})
@@ -1282,7 +1297,6 @@ class _Image(_Object, type_prefix="im"):
         **Usage:**

         ```python
-        from pathlib import Path
         from modal import FilePatternMatcher

         # By default a .dockerignore file is used if present in the current working directory
@@ -1314,10 +1328,17 @@ class _Image(_Object, type_prefix="im"):
         # You can also read ignore patterns from a file.
         image = modal.Image.debian_slim().dockerfile_commands(
             ["COPY data /data"],
-            ignore=FilePatternMatcher.from_file(Path("/path/to/dockerignore")),
+            ignore=FilePatternMatcher.from_file("/path/to/dockerignore"),
         )
         ```
         """
+        if context_mount is not None:
+            deprecation_warning(
+                (2025, 1, 13),
+                "`context_mount` is deprecated."
+                + " Files are now automatically added to the build context based on the commands.",
+                pending=True,
+            )
         cmds = _flatten_str_args("dockerfile_commands", "dockerfile_commands", dockerfile_commands)
         if not cmds:
             return self
@@ -1506,7 +1527,8 @@ class _Image(_Object, type_prefix="im"):
             "COPY /python/. /usr/local",
             "ENV TERMINFO_DIRS=/etc/terminfo:/lib/terminfo:/usr/share/terminfo:/usr/lib/terminfo",
         ]
-        if add_python < "3.13":
+        python_minor = add_python.split(".")[1]
+        if int(python_minor) < 13:
             # Previous versions did not include the `python` binary, but later ones do.
             # (The important factor is not the Python version itself, but the standalone dist version.)
             # We insert the command in the list at the position it was previously always added
@@ -1712,7 +1734,6 @@ class _Image(_Object, type_prefix="im"):
         **Usage:**

         ```python
-        from pathlib import Path
         from modal import FilePatternMatcher

         # By default a .dockerignore file is used if present in the current working directory
@@ -1750,10 +1771,17 @@ class _Image(_Object, type_prefix="im"):
         image = modal.Image.from_dockerfile(
             "./Dockerfile",
             add_python="3.12",
-            ignore=FilePatternMatcher.from_file(Path("/path/to/dockerignore")),
+            ignore=FilePatternMatcher.from_file("/path/to/dockerignore"),
         )
         ```
         """
+        if context_mount is not None:
+            deprecation_warning(
+                (2025, 1, 13),
+                "`context_mount` is deprecated."
+                + " Files are now automatically added to the build context based on the commands in the Dockerfile.",
+                pending=True,
+            )


         # --- Build the base dockerfile
@@ -2025,11 +2053,11 @@ class _Image(_Object, type_prefix="im"):
         try:
             yield
         except Exception as exc:
-            if self.object_id is None:
-                # Might be initialized later
+            if not self.is_hydrated:
+                # Might be hydrated later
                 self.inside_exceptions.append(exc)
             elif env_image_id == self.object_id:
-                # Image is already initialized (we can remove this case later
+                # Image is already hydrated (we can remove this case later
                 # when we don't hydrate objects so early)
                 raise
             if not isinstance(exc, ImportError):
@@ -2044,9 +2072,9 @@ class _Image(_Object, type_prefix="im"):
         last_entry_id: str = ""

         request = api_pb2.ImageJoinStreamingRequest(
-            image_id=self._object_id, timeout=55, last_entry_id=last_entry_id, include_logs_for_finished=True
+            image_id=self.object_id, timeout=55, last_entry_id=last_entry_id, include_logs_for_finished=True
         )
-        async for response in self._client.stub.ImageJoinStreaming.unary_stream(request):
+        async for response in self.client.stub.ImageJoinStreaming.unary_stream(request):
             if response.result.status:
                 return
             if response.entry_id:
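The image.py changes above add pending deprecation warnings for the `copy_*` methods and point users at the `add_local_*` equivalents. Below is a hedged migration sketch based on those deprecation messages; the local paths are hypothetical and the exact parameter names of `add_local_file` should be checked against the modal documentation.

```python
# Migration sketch: replacing soon-to-be-deprecated copy_* calls, assuming a
# local ./assets directory and ./config.yaml exist in the project.
import modal

image = (
    modal.Image.debian_slim()
    # previously: .copy_local_dir("./assets", remote_path="/assets")
    .add_local_dir("./assets", remote_path="/assets", copy=True)
    # previously: .copy_local_file("./config.yaml", "/config.yaml")
    .add_local_file("./config.yaml", remote_path="/config.yaml", copy=True)
)
```

Per the new docstrings, `copy=True` bakes the files into the image at build time, which is what the old `copy_*` methods did; leaving it off mounts them at runtime instead.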