modal 0.73.18__py3-none-any.whl → 0.73.20__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported public registry, and is provided for informational purposes only.
- modal/client.pyi +2 -2
- modal/functions.pyi +6 -6
- modal/gpu.py +10 -12
- {modal-0.73.18.dist-info → modal-0.73.20.dist-info}/METADATA +1 -1
- {modal-0.73.18.dist-info → modal-0.73.20.dist-info}/RECORD +13 -13
- modal_proto/api.proto +19 -0
- modal_proto/api_pb2.py +743 -733
- modal_proto/api_pb2.pyi +58 -1
- modal_version/_version_generated.py +1 -1
- {modal-0.73.18.dist-info → modal-0.73.20.dist-info}/LICENSE +0 -0
- {modal-0.73.18.dist-info → modal-0.73.20.dist-info}/WHEEL +0 -0
- {modal-0.73.18.dist-info → modal-0.73.20.dist-info}/entry_points.txt +0 -0
- {modal-0.73.18.dist-info → modal-0.73.20.dist-info}/top_level.txt +0 -0
modal/client.pyi
CHANGED
@@ -27,7 +27,7 @@ class _Client:
     _snapshotted: bool

     def __init__(
-        self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.73.18"
+        self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.73.20"
     ): ...
     def is_closed(self) -> bool: ...
     @property
@@ -85,7 +85,7 @@ class Client:
     _snapshotted: bool

     def __init__(
-        self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.73.18"
+        self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.73.20"
     ): ...
     def is_closed(self) -> bool: ...
     @property
modal/functions.pyi
CHANGED
@@ -200,11 +200,11 @@ class Function(

     _call_generator_nowait: ___call_generator_nowait_spec[typing_extensions.Self]

-    class __remote_spec(typing_extensions.Protocol[
+    class __remote_spec(typing_extensions.Protocol[P_INNER, ReturnType_INNER, SUPERSELF]):
         def __call__(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> ReturnType_INNER: ...
         async def aio(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> ReturnType_INNER: ...

-    remote: __remote_spec[modal._functions.
+    remote: __remote_spec[modal._functions.P, modal._functions.ReturnType, typing_extensions.Self]

     class __remote_gen_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self, *args, **kwargs) -> typing.Generator[typing.Any, None, None]: ...
@@ -219,19 +219,19 @@ class Function(
         self, *args: modal._functions.P.args, **kwargs: modal._functions.P.kwargs
     ) -> modal._functions.OriginalReturnType: ...

-    class ___experimental_spawn_spec(typing_extensions.Protocol[
+    class ___experimental_spawn_spec(typing_extensions.Protocol[P_INNER, ReturnType_INNER, SUPERSELF]):
         def __call__(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]: ...
         async def aio(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]: ...

     _experimental_spawn: ___experimental_spawn_spec[
-        modal._functions.
+        modal._functions.P, modal._functions.ReturnType, typing_extensions.Self
     ]

-    class __spawn_spec(typing_extensions.Protocol[
+    class __spawn_spec(typing_extensions.Protocol[P_INNER, ReturnType_INNER, SUPERSELF]):
         def __call__(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]: ...
         async def aio(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]: ...

-    spawn: __spawn_spec[modal._functions.
+    spawn: __spawn_spec[modal._functions.P, modal._functions.ReturnType, typing_extensions.Self]

     def get_raw_f(self) -> collections.abc.Callable[..., typing.Any]: ...
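All four stub fixes above complete the same device: each .remote / .spawn accessor is typed through a callable Protocol that is generic over a ParamSpec (P_INNER), a return type (ReturnType_INNER), and the bound instance type (SUPERSELF), so a type checker can match the arguments of f.remote(...) against f's own signature. A minimal self-contained sketch of that pattern (illustrative names, assuming Python 3.10+ with typing_extensions installed; this is not modal's actual stub):

from __future__ import annotations

import typing
import typing_extensions

P = typing_extensions.ParamSpec("P")
R = typing.TypeVar("R", covariant=True)

class RemoteSpec(typing_extensions.Protocol[P, R]):
    # Synchronous call-through that keeps the wrapped function's signature.
    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R: ...
    # Async variant, mirroring the `aio` methods in the stub above.
    async def aio(self, *args: P.args, **kwargs: P.kwargs) -> R: ...

def takes_remote(remote: RemoteSpec[[int, str], bool]) -> bool:
    return remote(1, "x")  # a type checker would reject remote("x", 1)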
modal/gpu.py
CHANGED
@@ -9,16 +9,14 @@ from .exception import InvalidError

 @dataclass(frozen=True)
 class _GPUConfig:
-    type: "api_pb2.GPUType.V"  # Deprecated, at some point
-    count: int
     gpu_type: str
+    count: int

     def _to_proto(self) -> api_pb2.GPUConfig:
         """Convert this GPU config to an internal protobuf representation."""
         return api_pb2.GPUConfig(
-            type=self.type,
-            count=self.count,
             gpu_type=self.gpu_type,
+            count=self.count,
         )

@@ -33,7 +31,7 @@ class T4(_GPUConfig):
         self,
         count: int = 1,  # Number of GPUs per container. Defaults to 1.
     ):
-        super().__init__(
+        super().__init__("T4", count)

     def __repr__(self):
         return f"GPU(T4, count={self.count})"
@@ -51,7 +49,7 @@ class L4(_GPUConfig):
         self,
         count: int = 1,  # Number of GPUs per container. Defaults to 1.
     ):
-        super().__init__(
+        super().__init__("L4", count)

     def __repr__(self):
         return f"GPU(L4, count={self.count})"
@@ -71,9 +69,9 @@ class A100(_GPUConfig):
         size: Union[str, None] = None,  # Select GB configuration of GPU device: "40GB" or "80GB". Defaults to "40GB".
     ):
         if size == "40GB" or not size:
-            super().__init__(
+            super().__init__("A100-40GB", count)
         elif size == "80GB":
-            super().__init__(
+            super().__init__("A100-80GB", count)
         else:
             raise ValueError(f"size='{size}' is invalid. A100s can only have memory values of 40GB or 80GB.")

@@ -97,7 +95,7 @@ class A10G(_GPUConfig):
         # Useful if you have very large models that don't fit on a single GPU.
         count: int = 1,
     ):
-        super().__init__(
+        super().__init__("A10G", count)

     def __repr__(self):
         return f"GPU(A10G, count={self.count})"
@@ -119,7 +117,7 @@ class H100(_GPUConfig):
         # Useful if you have very large models that don't fit on a single GPU.
         count: int = 1,
     ):
-        super().__init__(
+        super().__init__("H100", count)

     def __repr__(self):
         return f"GPU(H100, count={self.count})"
@@ -140,7 +138,7 @@ class L40S(_GPUConfig):
         # Useful if you have very large models that don't fit on a single GPU.
         count: int = 1,
     ):
-        super().__init__(
+        super().__init__("L40S", count)

     def __repr__(self):
         return f"GPU(L40S, count={self.count})"
@@ -150,7 +148,7 @@ class Any(_GPUConfig):
     """Selects any one of the GPU classes available within Modal, according to availability."""

     def __init__(self, *, count: int = 1):
-        super().__init__(
+        super().__init__("ANY", count)

     def __repr__(self):
         return f"GPU(Any, count={self.count})"
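Taken together, the gpu.py changes drop the deprecated api_pb2.GPUType enum field and make the gpu_type string the sole type identifier, with each GPU class passing its type name positionally to super().__init__. A runnable sketch of the resulting pattern, reduced to two of the classes and with the proto conversion omitted (it needs modal_proto):

from dataclasses import dataclass
from typing import Union

@dataclass(frozen=True)
class _GPUConfig:
    gpu_type: str  # e.g. "T4" or "A100-80GB"; replaces the deprecated GPUType enum field
    count: int

class T4(_GPUConfig):
    def __init__(self, count: int = 1):
        super().__init__("T4", count)  # the frozen-dataclass __init__ sets both fields

class A100(_GPUConfig):
    def __init__(self, *, count: int = 1, size: Union[str, None] = None):
        if size == "40GB" or not size:
            super().__init__("A100-40GB", count)
        elif size == "80GB":
            super().__init__("A100-80GB", count)
        else:
            raise ValueError(f"size='{size}' is invalid. A100s can only have memory values of 40GB or 80GB.")

print(T4(count=2))                 # T4(gpu_type='T4', count=2) — inherited dataclass repr
print(A100(size="80GB").gpu_type)  # A100-80GB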
{modal-0.73.18.dist-info → modal-0.73.20.dist-info}/RECORD
CHANGED
@@ -21,7 +21,7 @@ modal/app.py,sha256=MaWCYgNx8y2GQhmaXQBMKKAAfCYfdxrdYs6zCBoJzwI,44628
 modal/app.pyi,sha256=lxiuWzE_OLb3WHg-H7Pek9DGBuCUzZ55P594VhJL5LA,26113
 modal/call_graph.py,sha256=1g2DGcMIJvRy-xKicuf63IVE98gJSnQsr8R_NVMptNc,2581
 modal/client.py,sha256=8SQawr7P1PNUCq1UmJMUQXG2jIo4Nmdcs311XqrNLRE,15276
-modal/client.pyi,sha256=
+modal/client.pyi,sha256=oAysJP9ztH2WmXk_Mb2HGpOw1eCXoBB4Ll4DJeg4og0,7593
 modal/cloud_bucket_mount.py,sha256=YOe9nnvSr4ZbeCn587d7_VhE9IioZYRvF9VYQTQux08,5914
 modal/cloud_bucket_mount.pyi,sha256=30T3K1a89l6wzmEJ_J9iWv9SknoGqaZDx59Xs-ZQcmk,1607
 modal/cls.py,sha256=agxclIXZbzBbgcI5PPVD7IfOiHzv-B82xaaXtw9cpv8,31126
@@ -40,8 +40,8 @@ modal/file_io.py,sha256=lcMs_E9Xfm0YX1t9U2wNIBPnqHRxmImqjLW1GHqVmyg,20945
 modal/file_io.pyi,sha256=NTRft1tbPSWf9TlWVeZmTlgB5AZ_Zhu2srWIrWr7brk,9445
 modal/file_pattern_matcher.py,sha256=1cZ4V2wSLiaXqAqStETSwp3bzDD6QZOt6pmmjk3Okz4,6505
 modal/functions.py,sha256=kcNHvqeGBxPI7Cgd57NIBBghkfbeFJzXO44WW0jSmao,325
-modal/functions.pyi,sha256=
-modal/gpu.py,sha256=
+modal/functions.pyi,sha256=avRIY0KOFky6tDRI5_SvnLXz7PUnG2H0hQA385cRtb0,14289
+modal/gpu.py,sha256=5vJiYFAv7Ai9zeGf_lv31rJyQn1atQlCYAJv1bF1_BQ,6996
 modal/image.py,sha256=ekE2693foy30Xi1LM3swKZPW6HuaACj-OBvfspVSyIE,91509
 modal/image.pyi,sha256=kdJzy1eaxNPZeCpE0TMYYLhJ6UWmkfRDeb_vzngJUoQ,26462
 modal/io_streams.py,sha256=QkQiizKRzd5bnbKQsap31LJgBYlAnj4-XkV_50xPYX0,15079
@@ -154,10 +154,10 @@ modal_global_objects/mounts/__init__.py,sha256=MIEP8jhXUeGq_eCjYFcqN5b1bxBM4fdk0
 modal_global_objects/mounts/modal_client_package.py,sha256=W0E_yShsRojPzWm6LtIQqNVolapdnrZkm2hVEQuZK_4,767
 modal_global_objects/mounts/python_standalone.py,sha256=EsC-hdPtiAPOwgW9emHN6muNUkrJwR8dYxroVArxHxM,1841
 modal_proto/__init__.py,sha256=MIEP8jhXUeGq_eCjYFcqN5b1bxBM4fdk0VESpjWR0fc,28
-modal_proto/api.proto,sha256=
+modal_proto/api.proto,sha256=CbX6h_iAClnIpfKWPmOot1CLsPwMuHF3diKQTC9AhwY,86314
 modal_proto/api_grpc.py,sha256=FYGqDegM_w_qxdtlxum8k31mDibKoMvmNxv_p9cKdKs,109056
-modal_proto/api_pb2.py,sha256=
-modal_proto/api_pb2.pyi,sha256=
+modal_proto/api_pb2.py,sha256=u8srl5hXYKKVegDznRpyKowoY5LLp3oDreH1XnCSO9M,312167
+modal_proto/api_pb2.pyi,sha256=sdKfZDNfwalRwlyUvEteLxhV1Ska9l8EddXL6dYVjYk,419504
 modal_proto/api_pb2_grpc.py,sha256=DNp0Et5i_Ey4dKx_1o1LRtYhyWYyT0NzTcAY4EcHn-c,235765
 modal_proto/api_pb2_grpc.pyi,sha256=RI6tWC3L8EIN4-izFSEGPPJl5Ta0lXPNuHUJaWAr35s,54892
 modal_proto/modal_api_grpc.py,sha256=UG8WJU81afrWPwItWB4Ag64E9EpyREMpBbAVGVEYJiM,14550
@@ -171,10 +171,10 @@ modal_proto/options_pb2_grpc.pyi,sha256=CImmhxHsYnF09iENPoe8S4J-n93jtgUYD2JPAc0y
 modal_proto/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 modal_version/__init__.py,sha256=wiJQ53c-OMs0Xf1UeXOxQ7FwlV1VzIjnX6o-pRYZ_Pk,470
 modal_version/__main__.py,sha256=2FO0yYQQwDTh6udt1h-cBnGd1c4ZyHnHSI4BksxzVac,105
-modal_version/_version_generated.py,sha256=
-modal-0.73.
-modal-0.73.
-modal-0.73.
-modal-0.73.
-modal-0.73.
-modal-0.73.
+modal_version/_version_generated.py,sha256=qj_R3TAiMeXVSkkZO5YMamEbKgXNxbNW0fPWCapDKCA,149
+modal-0.73.20.dist-info/LICENSE,sha256=psuoW8kuDP96RQsdhzwOqi6fyWv0ct8CR6Jr7He_P_k,10173
+modal-0.73.20.dist-info/METADATA,sha256=RTzyI5Ia8n7X6q-lTi7DNTx11ZhjrRVVMRQHlIMuBR4,2330
+modal-0.73.20.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+modal-0.73.20.dist-info/entry_points.txt,sha256=An-wYgeEUnm6xzrAP9_NTSTSciYvvEWsMZILtYrvpAI,46
+modal-0.73.20.dist-info/top_level.txt,sha256=1nvYbOSIKcmU50fNrpnQnrrOpj269ei3LzgB6j9xGqg,64
+modal-0.73.20.dist-info/RECORD,,
modal_proto/api.proto
CHANGED
@@ -143,6 +143,8 @@ enum FunctionCallType {
 }

 enum GPUType {
+  // Note: this enum is no longer used by current clients - don't add new types
+  // Old clients still send it, so we use it server-side for compatibility
   GPU_TYPE_UNSPECIFIED = 0;
   GPU_TYPE_T4 = 1;
   GPU_TYPE_A100 = 2;
@@ -561,6 +563,22 @@ message Asgi {
   }
 }

+message AutoscalerSettings {
+  // A collection of user-configurable settings for Function autoscaling
+  // These are used for static configuration and for dynamic autoscaler updates
+
+  // Minimum containers when scale-to-zero is not desired; pka "keep_warm" or "warm_pool_size"
+  optional uint32 min_containers = 1;
+  // Limit on the number of containers that can be running for each Function; pka "concurrency_limit"
+  optional uint32 max_containers = 2;
+  // Additional containers to spin up when the Function is active
+  optional uint32 buffer_containers = 3;
+  // Currently unused; a placeholder in case we decide to expose scaleup control to users
+  optional uint32 scaleup_window = 4;
+  // Maximum amount of time a container can be idle before being scaled down, in seconds; pka "container_idle_timeout"
+  optional uint32 scaledown_window = 5;
+}
+
 message BaseImage {
   string image_id = 1;
   string docker_tag = 2;
@@ -1673,6 +1691,7 @@ message FunctionStats {
 message FunctionUpdateSchedulingParamsRequest {
   string function_id = 1;
   uint32 warm_pool_size_override = 2;
+  AutoscalerSettings settings = 3;
 }

 message FunctionUpdateSchedulingParamsResponse {}