modal 0.72.32__py3-none-any.whl → 0.72.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
modal/client.pyi CHANGED
@@ -27,7 +27,7 @@ class _Client:
     _snapshotted: bool
 
     def __init__(
-        self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.72.32"
+        self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.72.34"
     ): ...
     def is_closed(self) -> bool: ...
     @property
@@ -83,7 +83,7 @@ class Client:
     _snapshotted: bool
 
     def __init__(
-        self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.72.32"
+        self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.72.34"
     ): ...
     def is_closed(self) -> bool: ...
     @property
modal/gpu.py CHANGED
@@ -9,8 +9,9 @@ from .exception import InvalidError
 
 @dataclass(frozen=True)
 class _GPUConfig:
-    type: "api_pb2.GPUType.V"
+    type: "api_pb2.GPUType.V"  # Deprecated, at some point
     count: int
+    gpu_type: str
     memory: int = 0
 
     def _to_proto(self) -> api_pb2.GPUConfig:
@@ -19,6 +20,7 @@ class _GPUConfig:
             type=self.type,
             count=self.count,
             memory=self.memory,
+            gpu_type=self.gpu_type,
         )
 
 
@@ -33,7 +35,7 @@ class T4(_GPUConfig):
         self,
         count: int = 1,  # Number of GPUs per container. Defaults to 1.
     ):
-        super().__init__(api_pb2.GPU_TYPE_T4, count, 0)
+        super().__init__(api_pb2.GPU_TYPE_T4, count, "T4")
 
     def __repr__(self):
         return f"GPU(T4, count={self.count})"
@@ -51,7 +53,7 @@ class L4(_GPUConfig):
         self,
         count: int = 1,  # Number of GPUs per container. Defaults to 1.
     ):
-        super().__init__(api_pb2.GPU_TYPE_L4, count, 0)
+        super().__init__(api_pb2.GPU_TYPE_L4, count, "L4")
 
     def __repr__(self):
         return f"GPU(L4, count={self.count})"
@@ -70,21 +72,14 @@ class A100(_GPUConfig):
         count: int = 1,  # Number of GPUs per container. Defaults to 1.
         size: Union[str, None] = None,  # Select GiB configuration of GPU device: "40GB" or "80GB". Defaults to "40GB".
     ):
-        allowed_size_values = {"40GB", "80GB"}
-
-        if size:
-            if size not in allowed_size_values:
-                raise ValueError(
-                    f"size='{size}' is invalid. A100s can only have memory values of {allowed_size_values}."
-                )
-            memory = int(size.replace("GB", ""))
+        if size == "40GB" or not size:
+            super().__init__(api_pb2.GPU_TYPE_A100, count, "A100-40GB", 40)
+        elif size == "80GB":
+            super().__init__(api_pb2.GPU_TYPE_A100_80GB, count, "A100-80GB", 80)
         else:
-            memory = 40
-
-        if memory == 80:
-            super().__init__(api_pb2.GPU_TYPE_A100_80GB, count, memory)
-        else:
-            super().__init__(api_pb2.GPU_TYPE_A100, count, memory)
+            raise ValueError(
+                f"size='{size}' is invalid. A100s can only have memory values of 40GB or 80GB."
+            )
 
     def __repr__(self):
         if self.memory == 80:
@@ -109,7 +104,7 @@ class A10G(_GPUConfig):
         # Useful if you have very large models that don't fit on a single GPU.
         count: int = 1,
     ):
-        super().__init__(api_pb2.GPU_TYPE_A10G, count)
+        super().__init__(api_pb2.GPU_TYPE_A10G, count, "A10G")
 
     def __repr__(self):
         return f"GPU(A10G, count={self.count})"
@@ -131,7 +126,7 @@ class H100(_GPUConfig):
         # Useful if you have very large models that don't fit on a single GPU.
         count: int = 1,
     ):
-        super().__init__(api_pb2.GPU_TYPE_H100, count)
+        super().__init__(api_pb2.GPU_TYPE_H100, count, "H100")
 
     def __repr__(self):
         return f"GPU(H100, count={self.count})"
@@ -152,7 +147,7 @@ class L40S(_GPUConfig):
         # Useful if you have very large models that don't fit on a single GPU.
         count: int = 1,
     ):
-        super().__init__(api_pb2.GPU_TYPE_L40S, count)
+        super().__init__(api_pb2.GPU_TYPE_L40S, count, "L40S")
 
     def __repr__(self):
         return f"GPU(L40S, count={self.count})"
@@ -162,7 +157,7 @@ class Any(_GPUConfig):
     """Selects any one of the GPU classes available within Modal, according to availability."""
 
     def __init__(self, *, count: int = 1):
-        super().__init__(api_pb2.GPU_TYPE_ANY, count)
+        super().__init__(api_pb2.GPU_TYPE_ANY, count, "ANY")
 
     def __repr__(self):
         return f"GPU(Any, count={self.count})"
modal/sandbox.py CHANGED
@@ -361,10 +361,12 @@ class _Sandbox(_Object, type_prefix="sb"):
         metadata = resp.image_metadata
 
         async def _load(self: _Image, resolver: Resolver, existing_object_id: Optional[str]):
-            self._hydrate(image_id, resolver.client, metadata)
+            # no need to hydrate again since we do it eagerly below
+            pass
 
         rep = "Image()"
-        image = _Image._from_loader(_load, rep)
+        image = _Image._from_loader(_load, rep, hydrate_lazily=True)
+        image._hydrate(image_id, self._client, metadata)  # hydrating eagerly since we have all of the data
 
         return image
 
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: modal
-Version: 0.72.32
+Version: 0.72.34
 Summary: Python client library for Modal
 Author: Modal Labs
 Author-email: support@modal.com
@@ -20,7 +20,7 @@ modal/app.py,sha256=UOuqlCKlFAjOXCacXmoEMM90FnqFwPRXUhLh0Gi6xzg,45344
 modal/app.pyi,sha256=5D3LkPP6qvTIl2_YgiyhNQ9X4VsOVuxd69a23UmohDg,25477
 modal/call_graph.py,sha256=1g2DGcMIJvRy-xKicuf63IVE98gJSnQsr8R_NVMptNc,2581
 modal/client.py,sha256=8SQawr7P1PNUCq1UmJMUQXG2jIo4Nmdcs311XqrNLRE,15276
-modal/client.pyi,sha256=LZ40IGlaNywJN4_edz-Q6K2uAusz9Q8ohUDUbz8ZGrQ,7326
+modal/client.pyi,sha256=GR95fSkc14RoEkIwaXiI9r8XWn0QPgPDMEHKNRVP9Sw,7326
 modal/cloud_bucket_mount.py,sha256=YOe9nnvSr4ZbeCn587d7_VhE9IioZYRvF9VYQTQux08,5914
 modal/cloud_bucket_mount.pyi,sha256=30T3K1a89l6wzmEJ_J9iWv9SknoGqaZDx59Xs-ZQcmk,1607
 modal/cls.py,sha256=xHgZZAmymplw0I2YZGAA8raBboixdNKKTrnsxQZI7G8,32159
@@ -39,7 +39,7 @@ modal/file_io.pyi,sha256=NrIoB0YjIqZ8MDMe826xAnybT0ww_kxQM3iPLo82REU,8898
 modal/file_pattern_matcher.py,sha256=dSo7BMQGZBAuoBFOX-e_72HxmF3FLzjQlEtnGtJiaD4,6506
 modal/functions.py,sha256=IP-6oHMmt-wUJPBWJ7Y7Vw2vqk2bYFRSQmV38T2STTI,68371
 modal/functions.pyi,sha256=EcGd1uGnJVbnMmdsnANYeDjLzjaL8BREG3XgxaDPcvM,25473
-modal/gpu.py,sha256=MTxj6ql8EpgfBg8YmZ5a1cLznyuZFssX1qXbEX4LKVM,7503
+modal/gpu.py,sha256=rcBwbE-_e2hEUr3VJbr1EgQDRb6aieJKx6G2oQdyBhE,7462
 modal/image.py,sha256=leeY7fLfFjS0IqTi3D4cRxIDOb80BPtb3jsQfqvVJ8c,90912
 modal/image.pyi,sha256=X9vj6cwBdYh8q_2cOd-2RSYNMF49ujcy0lrOXh_v1xc,26049
 modal/io_streams.py,sha256=QkQiizKRzd5bnbKQsap31LJgBYlAnj4-XkV_50xPYX0,15079
@@ -64,7 +64,7 @@ modal/retries.py,sha256=HKR2Q9aNPWkMjQ5nwobqYTuZaSuw0a8lI2zrtY5IW98,5230
 modal/runner.py,sha256=0SCMgKO8lZ9W1C7le1EcgViKERMXpi_-QBd6PF_MH0Q,24450
 modal/runner.pyi,sha256=YmP4EOCNjjkwSIPi2Gl6hF_ji_ytkxz9dw3iB9KXaOI,5275
 modal/running_app.py,sha256=v61mapYNV1-O-Uaho5EfJlryMLvIT9We0amUOSvSGx8,1188
-modal/sandbox.py,sha256=6Z-ull5wI5WDKG6v4JVw-6CHx4Y8cMmpzp9CU3uNrtA,28678
+modal/sandbox.py,sha256=n1_TbX1t6cWL43ynG203eL_AiEWBrL9Cry1NfPBrlTc,28829
 modal/sandbox.pyi,sha256=dCyU848YARwQwoYR19o2A9L6rfB5JFz276VGQ-aZId8,20831
 modal/schedule.py,sha256=0ZFpKs1bOxeo5n3HZjoL7OE2ktsb-_oGtq-WJEPO4tY,2615
 modal/scheduler_placement.py,sha256=BAREdOY5HzHpzSBqt6jDVR6YC_jYfHMVqOzkyqQfngU,1235
@@ -149,10 +149,10 @@ modal_global_objects/mounts/__init__.py,sha256=MIEP8jhXUeGq_eCjYFcqN5b1bxBM4fdk0
 modal_global_objects/mounts/modal_client_package.py,sha256=W0E_yShsRojPzWm6LtIQqNVolapdnrZkm2hVEQuZK_4,767
 modal_global_objects/mounts/python_standalone.py,sha256=pEML5GaV2_0ahci_1vpfc_FnySpsfi2fhYmFF5I7IiQ,1837
 modal_proto/__init__.py,sha256=MIEP8jhXUeGq_eCjYFcqN5b1bxBM4fdk0VESpjWR0fc,28
-modal_proto/api.proto,sha256=rs7gMeYjXS73qzEmgnnzMNyWksh2E8Ju8Im1y9vwzmo,84260
+modal_proto/api.proto,sha256=PkjtUKjKBAfykMqvCtgpR_hzEiu6v-SNMEvwcvw_Rgg,84313
 modal_proto/api_grpc.py,sha256=Bcgo5hmJ0FCAVvWAm7X7Qki2GZ2gvAu3kOtqzRtvoMc,106793
-modal_proto/api_pb2.py,sha256=NSukaVE5IPbBIYP_qkvILrD-qninKmbekqDK5I_IRp8,307200
-modal_proto/api_pb2.pyi,sha256=b1Ya2TTdFOZMxhfiSAy4Y0ut6Y6kjf6N6mxHw9uiFV8,410686
+modal_proto/api_pb2.py,sha256=ZZCw9viOLtXC5dIaqO2WmGeOEdpNp3EY8qzhjcnDjZo,307238
+modal_proto/api_pb2.pyi,sha256=FBNfJ1--D0m9jthiQXAgjkHR1ba4tcMi-q9phWQWI8E,410852
 modal_proto/api_pb2_grpc.py,sha256=OTPQgFBajKwy7fhAFEewDjY6lK49C60Z9vTqUpQy_oY,230822
 modal_proto/api_pb2_grpc.pyi,sha256=mwP2mw6UE46CHlraqicXHsgyGPtV0mCy96LwnFMz74s,53787
 modal_proto/modal_api_grpc.py,sha256=C5QcdsBWEvF9ufbEjdyPQ5IDDexMdAI2_CQ_U2nEYuc,14258
@@ -166,10 +166,10 @@ modal_proto/options_pb2_grpc.pyi,sha256=CImmhxHsYnF09iENPoe8S4J-n93jtgUYD2JPAc0y
 modal_proto/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 modal_version/__init__.py,sha256=kGya2ZlItX2zB7oHORs-wvP4PG8lg_mtbi1QIK3G6SQ,470
 modal_version/__main__.py,sha256=2FO0yYQQwDTh6udt1h-cBnGd1c4ZyHnHSI4BksxzVac,105
-modal_version/_version_generated.py,sha256=R8c_NTHn5eYNnmvpkX2Xp_1HthfznRGV4WH4gVCQkUo,149
-modal-0.72.32.dist-info/LICENSE,sha256=psuoW8kuDP96RQsdhzwOqi6fyWv0ct8CR6Jr7He_P_k,10173
-modal-0.72.32.dist-info/METADATA,sha256=-RkbYuuOQfEBaw_p05aCo9Lf4ofVv3N4PbVU5kAzMK4,2329
-modal-0.72.32.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
-modal-0.72.32.dist-info/entry_points.txt,sha256=An-wYgeEUnm6xzrAP9_NTSTSciYvvEWsMZILtYrvpAI,46
-modal-0.72.32.dist-info/top_level.txt,sha256=1nvYbOSIKcmU50fNrpnQnrrOpj269ei3LzgB6j9xGqg,64
-modal-0.72.32.dist-info/RECORD,,
+modal_version/_version_generated.py,sha256=Ixznspk9sSoaEl-TqOpNARQzukOb53UQZzOi2jNgAW4,149
+modal-0.72.34.dist-info/LICENSE,sha256=psuoW8kuDP96RQsdhzwOqi6fyWv0ct8CR6Jr7He_P_k,10173
+modal-0.72.34.dist-info/METADATA,sha256=UiVI4xHiCsI7eYFYAXqLGvLIfRncw3d5T_uDtTVeRHw,2329
+modal-0.72.34.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+modal-0.72.34.dist-info/entry_points.txt,sha256=An-wYgeEUnm6xzrAP9_NTSTSciYvvEWsMZILtYrvpAI,46
+modal-0.72.34.dist-info/top_level.txt,sha256=1nvYbOSIKcmU50fNrpnQnrrOpj269ei3LzgB6j9xGqg,64
+modal-0.72.34.dist-info/RECORD,,
modal_proto/api.proto CHANGED
@@ -1667,9 +1667,10 @@ message FunctionUpdateSchedulingParamsRequest {
 message FunctionUpdateSchedulingParamsResponse {}
 
 message GPUConfig {
-  GPUType type = 1;
+  GPUType type = 1; // Deprecated, at some point
   uint32 count = 2;
   uint32 memory = 3;
+  string gpu_type = 4;
 }
 
 message GeneratorDone { // Sent as the output when a generator finishes running.
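
The proto change mirrors the client-side change in modal/gpu.py: `GPUConfig` gains a `string gpu_type = 4` field while the existing `GPUType` enum field is marked deprecated. A hedged sketch using the generated bindings shipped in this wheel (`modal_proto.api_pb2`); the field values below are illustrative only:

```python
from modal_proto import api_pb2

# Both the deprecated enum and the new string identifier can be set;
# as of 0.72.34 the client code in modal/gpu.py populates both.
config = api_pb2.GPUConfig(
    type=api_pb2.GPU_TYPE_L40S,  # field 1, deprecated
    count=1,                     # field 2
    gpu_type="L40S",             # field 4, new in this release
)
print(config.gpu_type)  # "L40S"
```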