modal 0.73.75__py3-none-any.whl → 0.73.77__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- modal/_functions.py +55 -26
- modal/_utils/deprecation.py +35 -0
- modal/app.py +36 -26
- modal/app.pyi +32 -16
- modal/cli/programs/vscode.py +1 -1
- modal/client.pyi +2 -2
- modal/cls.py +29 -22
- modal/cls.pyi +37 -16
- modal/functions.pyi +12 -12
- {modal-0.73.75.dist-info → modal-0.73.77.dist-info}/METADATA +1 -1
- {modal-0.73.75.dist-info → modal-0.73.77.dist-info}/RECORD +16 -16
- modal_version/_version_generated.py +1 -1
- {modal-0.73.75.dist-info → modal-0.73.77.dist-info}/LICENSE +0 -0
- {modal-0.73.75.dist-info → modal-0.73.77.dist-info}/WHEEL +0 -0
- {modal-0.73.75.dist-info → modal-0.73.77.dist-info}/entry_points.txt +0 -0
- {modal-0.73.75.dist-info → modal-0.73.77.dist-info}/top_level.txt +0 -0
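The substance of this release is a rename of the autoscaler-related parameters: `keep_warm` → `min_containers`, `concurrency_limit` → `max_containers`, `container_idle_timeout` → `scaledown_window`, and `_experimental_buffer_containers` → `buffer_containers` (now stable). A minimal before/after sketch of user code affected by the rename; the app and function names are illustrative, and per the diffs below the old spellings keep working for now with a pending deprecation warning:

```python
import modal

app = modal.App("autoscaler-rename-demo")  # illustrative app name

# 0.73.75-style spellings (now deprecated):
@app.function(keep_warm=1, concurrency_limit=10, container_idle_timeout=300)
def old_style():
    ...

# 0.73.77 spellings introduced in this release:
@app.function(min_containers=1, max_containers=10, scaledown_window=300)
def new_style():
    ...
```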
modal/_functions.py
CHANGED
@@ -417,18 +417,18 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
         allow_cross_region_volumes: bool = False,
         volumes: dict[Union[str, PurePosixPath], Union[_Volume, _CloudBucketMount]] = {},
         webhook_config: Optional[api_pb2.WebhookConfig] = None,
+        cpu: Optional[Union[float, tuple[float, float]]] = None,
         memory: Optional[Union[int, tuple[int, int]]] = None,
         proxy: Optional[_Proxy] = None,
         retries: Optional[Union[int, Retries]] = None,
         timeout: Optional[int] = None,
-
+        min_containers: Optional[int] = None,
+        max_containers: Optional[int] = None,
+        buffer_containers: Optional[int] = None,
+        scaledown_window: Optional[int] = None,
         allow_concurrent_inputs: Optional[int] = None,
         batch_max_size: Optional[int] = None,
         batch_wait_ms: Optional[int] = None,
-        container_idle_timeout: Optional[int] = None,
-        cpu: Optional[Union[float, tuple[float, float]]] = None,
-        # keep_warm=True is equivalent to keep_warm=1
-        keep_warm: Optional[int] = None,
         cloud: Optional[str] = None,
         scheduler_placement: Optional[SchedulerPlacement] = None,
         is_builder_function: bool = False,
@@ -442,7 +442,6 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
         ephemeral_disk: Optional[int] = None,
         # current default: first-party, future default: main-package
         include_source: Optional[bool] = None,
-        _experimental_buffer_containers: Optional[int] = None,
         _experimental_proxy_ip: Optional[str] = None,
         _experimental_custom_scaling_factor: Optional[float] = None,
         _experimental_enable_gpu_snapshot: bool = False,
@@ -564,20 +563,21 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
             force_build=image.force_build or pf.force_build,
         )
 
-
-
-
-        if
+        # Note that we also do these checks in FunctionCreate; could drop them here
+        if min_containers is not None and not isinstance(min_containers, int):
+            raise InvalidError(f"`min_containers` must be an int, not {type(min_containers).__name__}")
+        if min_containers is not None and max_containers is not None and max_containers < min_containers:
             raise InvalidError(
-                f"
-                f"strictly less than its `{keep_warm=}` parameter."
+                f"`min_containers` ({min_containers}) cannot be greater than `max_containers` ({max_containers})"
             )
+        if scaledown_window is not None and scaledown_window <= 0:
+            raise InvalidError("`scaledown_window` must be > 0")
 
         autoscaler_settings = api_pb2.AutoscalerSettings(
-
-
-            buffer_containers=
-            scaledown_window=
+            min_containers=min_containers,
+            max_containers=max_containers,
+            buffer_containers=buffer_containers,
+            scaledown_window=scaledown_window,
         )
 
         if _experimental_custom_scaling_factor is not None and (
@@ -605,9 +605,6 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
                 if arg.default is not inspect.Parameter.empty:
                     raise InvalidError(f"Modal batched function {func_name} does not accept default arguments.")
 
-        if container_idle_timeout is not None and container_idle_timeout <= 0:
-            raise InvalidError("`container_idle_timeout` must be > 0")
-
         if max_inputs is not None:
             if not isinstance(max_inputs, int):
                 raise InvalidError(f"`max_inputs` must be an int, not {type(max_inputs).__name__}")
@@ -773,11 +770,8 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
             proxy_id=(proxy.object_id if proxy else None),
             retry_policy=retry_policy,
             timeout_secs=timeout_secs or 0,
-            task_idle_timeout_secs=container_idle_timeout or 0,
-            concurrency_limit=concurrency_limit or 0,
             pty_info=pty_info,
             cloud_provider_str=cloud if cloud else "",
-            warm_pool_size=keep_warm or 0,
             runtime=config.get("function_runtime"),
             runtime_debug=config.get("function_runtime_debug"),
             runtime_perf_record=config.get("runtime_perf_record"),
@@ -802,10 +796,15 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
             snapshot_debug=config.get("snapshot_debug"),
             _experimental_group_size=cluster_size or 0,  # Experimental: Clustered functions
             _experimental_concurrent_cancellations=True,
-            _experimental_buffer_containers=_experimental_buffer_containers or 0,
             _experimental_proxy_ip=_experimental_proxy_ip,
             _experimental_custom_scaling=_experimental_custom_scaling_factor is not None,
             _experimental_enable_gpu_snapshot=_experimental_enable_gpu_snapshot,
+            # --- These are deprecated in favor of autoscaler_settings
+            warm_pool_size=min_containers or 0,
+            concurrency_limit=max_containers or 0,
+            _experimental_buffer_containers=buffer_containers or 0,
+            task_idle_timeout_secs=scaledown_window or 0,
+            # ---
         )
 
         if isinstance(gpu, list):
@@ -928,7 +927,7 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
     def _bind_parameters(
         self,
         obj: "modal.cls._Obj",
-        options: Optional[
+        options: Optional["modal.cls._ServiceOptions"],
         args: Sized,
         kwargs: dict[str, Any],
     ) -> "_Function":
@@ -979,10 +978,35 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
 
             environment_name = _get_environment_name(None, resolver)
             assert parent is not None and parent.is_hydrated
+
+            if options:
+                volume_mounts = [
+                    api_pb2.VolumeMount(
+                        mount_path=path,
+                        volume_id=volume.object_id,
+                        allow_background_commits=True,
+                    )
+                    for path, volume in options.validated_volumes
+                ]
+                options_pb = api_pb2.FunctionOptions(
+                    secret_ids=[s.object_id for s in options.secrets],
+                    replace_secret_ids=bool(options.secrets),
+                    resources=options.resources,
+                    retry_policy=options.retry_policy,
+                    concurrency_limit=options.concurrency_limit,
+                    timeout_secs=options.timeout_secs,
+                    task_idle_timeout_secs=options.task_idle_timeout_secs,
+                    replace_volume_mounts=len(volume_mounts) > 0,
+                    volume_mounts=volume_mounts,
+                    target_concurrent_inputs=options.target_concurrent_inputs,
+                )
+            else:
+                options_pb = None
+
             req = api_pb2.FunctionBindParamsRequest(
                 function_id=parent.object_id,
                 serialized_params=serialized_params,
-                function_options=
+                function_options=options_pb,
                 environment_name=environment_name
                 or "",  # TODO: investigate shouldn't environment name always be specified here?
             )
@@ -990,7 +1014,12 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
             response = await retry_transient_errors(parent._client.stub.FunctionBindParams, req)
             param_bound_func._hydrate(response.bound_function_id, parent._client, response.handle_metadata)
 
-
+        def _deps():
+            if options:
+                return [v for _, v in options.validated_volumes] + list(options.secrets)
+            return []
+
+        fun: _Function = _Function._from_loader(_load, "Function(parametrized)", hydrate_lazily=True, deps=_deps)
 
         fun._info = self._info
         fun._obj = obj
modal/_utils/deprecation.py
CHANGED
@@ -87,3 +87,38 @@ def renamed_parameter(
         return wrapper
 
     return decorator
+
+
+def warn_on_renamed_autoscaler_settings(func: Callable[P, R]) -> Callable[P, R]:
+    name_map = {
+        "keep_warm": "min_containers",
+        "concurrency_limit": "max_containers",
+        "_experimental_buffer_containers": "buffer_containers",
+        "container_idle_timeout": "scaledown_window",
+    }
+
+    @functools.wraps(func)
+    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
+        mut_kwargs: dict[str, Any] = locals()["kwargs"]  # Avoid referencing kwargs directly due to bug in sigtools
+
+        substitutions = []
+        old_params_used = name_map.keys() & mut_kwargs.keys()
+        for old_param, new_param in name_map.items():
+            if old_param in old_params_used:
+                new_param = name_map[old_param]
+                mut_kwargs[new_param] = mut_kwargs.pop(old_param)
+                substitutions.append(f"- {old_param} -> {new_param}")
+
+        if substitutions:
+            substitution_string = "\n".join(substitutions)
+            message = (
+                "We have renamed several parameters related to autoscaling."
+                " Please update your code to use the following new names:"
+                f"\n\n{substitution_string}"
+                "\n\nSee https://modal.com/docs/guide/modal-1-0-migration for more details."
+            )
+            deprecation_warning((2025, 2, 24), message, pending=True, show_source=True)
+
+        return func(*args, **kwargs)
+
+    return wrapper
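A sketch of how the new `warn_on_renamed_autoscaler_settings` decorator behaves: old keyword names are rewritten to the new ones before the wrapped callable runs, and a pending deprecation warning is emitted. The `configure` function below is hypothetical; only the decorator and the name mapping come from the diff above:

```python
from modal._utils.deprecation import warn_on_renamed_autoscaler_settings

@warn_on_renamed_autoscaler_settings
def configure(*, min_containers=None, max_containers=None, scaledown_window=None, buffer_containers=None):
    # Hypothetical helper: just report what it actually received.
    return {"min_containers": min_containers, "max_containers": max_containers}

# Passing the old spelling is translated to the new keyword and warns:
#   - keep_warm -> min_containers
print(configure(keep_warm=2))  # {'min_containers': 2, 'max_containers': None}
```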
modal/app.py
CHANGED
@@ -29,7 +29,12 @@ from ._partial_function import (
     _PartialFunctionFlags,
 )
 from ._utils.async_utils import synchronize_api
-from ._utils.deprecation import
+from ._utils.deprecation import (
+    deprecation_error,
+    deprecation_warning,
+    renamed_parameter,
+    warn_on_renamed_autoscaler_settings,
+)
 from ._utils.function_utils import FunctionInfo, is_global_object, is_method_fn
 from ._utils.grpc_utils import retry_transient_errors
 from ._utils.mount_utils import validate_volumes
@@ -559,6 +564,7 @@ class _App:
 
         return wrapped
 
+    @warn_on_renamed_autoscaler_settings
     def function(
         self,
         _warn_parentheses_missing: Any = None,
@@ -586,17 +592,14 @@ class _App:
         # Or, pass (request, limit) to additionally specify a hard limit in MiB.
         memory: Optional[Union[int, tuple[int, int]]] = None,
         ephemeral_disk: Optional[int] = None,  # Specify, in MiB, the ephemeral disk size for the Function.
+        min_containers: Optional[int] = None,  # Minimum number of containers to keep warm, even when Function is idle.
+        max_containers: Optional[int] = None,  # Limit on the number of containers that can be concurrently running.
+        buffer_containers: Optional[int] = None,  # Number of additional idle containers to maintain under active load.
+        scaledown_window: Optional[int] = None,  # Max amount of time a container can remain idle before scaling down.
         proxy: Optional[_Proxy] = None,  # Reference to a Modal Proxy to use in front of this function.
         retries: Optional[Union[int, Retries]] = None,  # Number of times to retry each input in case of failure.
-        concurrency_limit: Optional[
-            int
-        ] = None,  # An optional maximum number of concurrent containers running the function (keep_warm sets minimum).
         allow_concurrent_inputs: Optional[int] = None,  # Number of inputs the container may fetch to run concurrently.
-        container_idle_timeout: Optional[int] = None,  # Timeout for idle containers waiting for inputs to shut down.
         timeout: Optional[int] = None,  # Maximum execution time of the function in seconds.
-        keep_warm: Optional[
-            int
-        ] = None,  # An optional minimum number of containers to always keep warm (use concurrency_limit for maximum).
         name: Optional[str] = None,  # Sets the Modal name of the function within the app
         is_generator: Optional[
             bool
@@ -615,10 +618,14 @@ class _App:
         _experimental_scheduler_placement: Optional[
             SchedulerPlacement
         ] = None,  # Experimental controls over fine-grained scheduling (alpha).
-        _experimental_buffer_containers: Optional[int] = None,  # Number of additional, idle containers to keep around.
         _experimental_proxy_ip: Optional[str] = None,  # IP address of proxy
         _experimental_custom_scaling_factor: Optional[float] = None,  # Custom scaling factor
         _experimental_enable_gpu_snapshot: bool = False,  # Experimentally enable GPU memory snapshots.
+        # Parameters below here are deprecated. Please update your code as suggested
+        keep_warm: Optional[int] = None,  # Replaced with `min_containers`
+        concurrency_limit: Optional[int] = None,  # Replaced with `max_containers`
+        container_idle_timeout: Optional[int] = None,  # Replaced with `scaledown_window`
+        _experimental_buffer_containers: Optional[int] = None,  # Now stable API with `buffer_containers`
     ) -> _FunctionDecoratorType:
         """Decorator to register a new Modal [Function](/docs/reference/modal.Function) with this App."""
         if isinstance(_warn_parentheses_missing, _Image):
@@ -635,7 +642,7 @@ class _App:
         def wrapped(
             f: Union[_PartialFunction, Callable[..., Any], None],
         ) -> _Function:
-            nonlocal
+            nonlocal is_generator, cloud, serialized
 
             # Check if the decorated object is a class
             if inspect.isclass(f):
@@ -669,7 +676,6 @@ class _App:
                 raw_f = f.raw_f
                 webhook_config = f.webhook_config
                 is_generator = f.is_generator
-                keep_warm = f.keep_warm or keep_warm
                 batch_max_size = f.batch_max_size
                 batch_wait_ms = f.batch_wait_ms
             else:
@@ -743,20 +749,20 @@ class _App:
                 ephemeral_disk=ephemeral_disk,
                 proxy=proxy,
                 retries=retries,
-
+                min_containers=min_containers,
+                max_containers=max_containers,
+                buffer_containers=buffer_containers,
+                scaledown_window=scaledown_window,
                 allow_concurrent_inputs=allow_concurrent_inputs,
                 batch_max_size=batch_max_size,
                 batch_wait_ms=batch_wait_ms,
-                container_idle_timeout=container_idle_timeout,
                 timeout=timeout,
-                keep_warm=keep_warm,
                 cloud=cloud,
                 webhook_config=webhook_config,
                 enable_memory_snapshot=enable_memory_snapshot,
                 block_network=block_network,
                 max_inputs=max_inputs,
                 scheduler_placement=scheduler_placement,
-                _experimental_buffer_containers=_experimental_buffer_containers,
                 _experimental_proxy_ip=_experimental_proxy_ip,
                 i6pn_enabled=i6pn_enabled,
                 cluster_size=cluster_size,  # Experimental: Clustered functions
@@ -771,6 +777,7 @@ class _App:
         return wrapped
 
     @typing_extensions.dataclass_transform(field_specifiers=(parameter,), kw_only_default=True)
+    @warn_on_renamed_autoscaler_settings
     def cls(
         self,
         _warn_parentheses_missing: Optional[bool] = None,
@@ -797,13 +804,14 @@ class _App:
         # Or, pass (request, limit) to additionally specify a hard limit in MiB.
         memory: Optional[Union[int, tuple[int, int]]] = None,
         ephemeral_disk: Optional[int] = None,  # Specify, in MiB, the ephemeral disk size for the Function.
+        min_containers: Optional[int] = None,  # Minimum number of containers to keep warm, even when Function is idle.
+        max_containers: Optional[int] = None,  # Limit on the number of containers that can be concurrently running.
+        buffer_containers: Optional[int] = None,  # Number of additional idle containers to maintain under active load.
+        scaledown_window: Optional[int] = None,  # Max amount of time a container can remain idle before scaling down.
         proxy: Optional[_Proxy] = None,  # Reference to a Modal Proxy to use in front of this function.
         retries: Optional[Union[int, Retries]] = None,  # Number of times to retry each input in case of failure.
-        concurrency_limit: Optional[int] = None,  # Limit for max concurrent containers running the function.
         allow_concurrent_inputs: Optional[int] = None,  # Number of inputs the container may fetch to run concurrently.
-        container_idle_timeout: Optional[int] = None,  # Timeout for idle containers waiting for inputs to shut down.
         timeout: Optional[int] = None,  # Maximum execution time of the function in seconds.
-        keep_warm: Optional[int] = None,  # An optional number of containers to always keep warm.
         cloud: Optional[str] = None,  # Cloud provider to run the function on. Possible values are aws, gcp, oci, auto.
         region: Optional[Union[str, Sequence[str]]] = None,  # Region or regions to run the function on.
         enable_memory_snapshot: bool = False,  # Enable memory checkpointing for faster cold starts.
@@ -816,10 +824,14 @@ class _App:
         _experimental_scheduler_placement: Optional[
             SchedulerPlacement
         ] = None,  # Experimental controls over fine-grained scheduling (alpha).
-        _experimental_buffer_containers: Optional[int] = None,  # Number of additional, idle containers to keep around.
         _experimental_proxy_ip: Optional[str] = None,  # IP address of proxy
         _experimental_custom_scaling_factor: Optional[float] = None,  # Custom scaling factor
         _experimental_enable_gpu_snapshot: bool = False,  # Experimentally enable GPU memory snapshots.
+        # Parameters below here are deprecated. Please update your code as suggested
+        keep_warm: Optional[int] = None,  # Replaced with `min_containers`
+        concurrency_limit: Optional[int] = None,  # Replaced with `max_containers`
+        container_idle_timeout: Optional[int] = None,  # Replaced with `scaledown_window`
+        _experimental_buffer_containers: Optional[int] = None,  # Now stable API with `buffer_containers`
     ) -> Callable[[CLS_T], CLS_T]:
         """
         Decorator to register a new Modal [Cls](/docs/reference/modal.Cls) with this App.
@@ -834,8 +846,6 @@ class _App:
             scheduler_placement = SchedulerPlacement(region=region)
 
         def wrapper(user_cls: CLS_T) -> CLS_T:
-            nonlocal keep_warm
-
             # Check if the decorated object is a class
             if not inspect.isclass(user_cls):
                 raise TypeError("The @app.cls decorator must be used on a class.")
@@ -873,25 +883,25 @@ class _App:
                 network_file_systems=network_file_systems,
                 allow_cross_region_volumes=allow_cross_region_volumes,
                 volumes={**self._volumes, **volumes},
+                cpu=cpu,
                 memory=memory,
                 ephemeral_disk=ephemeral_disk,
+                min_containers=min_containers,
+                max_containers=max_containers,
+                buffer_containers=buffer_containers,
+                scaledown_window=scaledown_window,
                 proxy=proxy,
                 retries=retries,
-                concurrency_limit=concurrency_limit,
                 allow_concurrent_inputs=allow_concurrent_inputs,
                 batch_max_size=batch_max_size,
                 batch_wait_ms=batch_wait_ms,
-                container_idle_timeout=container_idle_timeout,
                 timeout=timeout,
-                cpu=cpu,
-                keep_warm=keep_warm,
                 cloud=cloud,
                 enable_memory_snapshot=enable_memory_snapshot,
                 block_network=block_network,
                 max_inputs=max_inputs,
                 scheduler_placement=scheduler_placement,
                 include_source=include_source if include_source is not None else self._include_source_default,
-                _experimental_buffer_containers=_experimental_buffer_containers,
                 _experimental_proxy_ip=_experimental_proxy_ip,
                 _experimental_custom_scaling_factor=_experimental_custom_scaling_factor,
                 _experimental_enable_gpu_snapshot=_experimental_enable_gpu_snapshot,
modal/app.pyi
CHANGED
@@ -175,13 +175,14 @@ class _App:
         cpu: typing.Union[float, tuple[float, float], None] = None,
         memory: typing.Union[int, tuple[int, int], None] = None,
         ephemeral_disk: typing.Optional[int] = None,
+        min_containers: typing.Optional[int] = None,
+        max_containers: typing.Optional[int] = None,
+        buffer_containers: typing.Optional[int] = None,
+        scaledown_window: typing.Optional[int] = None,
         proxy: typing.Optional[modal.proxy._Proxy] = None,
         retries: typing.Union[int, modal.retries.Retries, None] = None,
-        concurrency_limit: typing.Optional[int] = None,
         allow_concurrent_inputs: typing.Optional[int] = None,
-        container_idle_timeout: typing.Optional[int] = None,
         timeout: typing.Optional[int] = None,
-        keep_warm: typing.Optional[int] = None,
         name: typing.Optional[str] = None,
         is_generator: typing.Optional[bool] = None,
         cloud: typing.Optional[str] = None,
@@ -192,10 +193,13 @@ class _App:
         i6pn: typing.Optional[bool] = None,
         include_source: typing.Optional[bool] = None,
         _experimental_scheduler_placement: typing.Optional[modal.scheduler_placement.SchedulerPlacement] = None,
-        _experimental_buffer_containers: typing.Optional[int] = None,
         _experimental_proxy_ip: typing.Optional[str] = None,
         _experimental_custom_scaling_factor: typing.Optional[float] = None,
         _experimental_enable_gpu_snapshot: bool = False,
+        keep_warm: typing.Optional[int] = None,
+        concurrency_limit: typing.Optional[int] = None,
+        container_idle_timeout: typing.Optional[int] = None,
+        _experimental_buffer_containers: typing.Optional[int] = None,
     ) -> _FunctionDecoratorType: ...
     @typing_extensions.dataclass_transform(
         field_specifiers=(modal.cls.parameter,),
@@ -221,13 +225,14 @@ class _App:
         cpu: typing.Union[float, tuple[float, float], None] = None,
         memory: typing.Union[int, tuple[int, int], None] = None,
         ephemeral_disk: typing.Optional[int] = None,
+        min_containers: typing.Optional[int] = None,
+        max_containers: typing.Optional[int] = None,
+        buffer_containers: typing.Optional[int] = None,
+        scaledown_window: typing.Optional[int] = None,
         proxy: typing.Optional[modal.proxy._Proxy] = None,
         retries: typing.Union[int, modal.retries.Retries, None] = None,
-        concurrency_limit: typing.Optional[int] = None,
         allow_concurrent_inputs: typing.Optional[int] = None,
-        container_idle_timeout: typing.Optional[int] = None,
         timeout: typing.Optional[int] = None,
-        keep_warm: typing.Optional[int] = None,
         cloud: typing.Optional[str] = None,
         region: typing.Union[str, collections.abc.Sequence[str], None] = None,
         enable_memory_snapshot: bool = False,
@@ -235,10 +240,13 @@ class _App:
         max_inputs: typing.Optional[int] = None,
         include_source: typing.Optional[bool] = None,
         _experimental_scheduler_placement: typing.Optional[modal.scheduler_placement.SchedulerPlacement] = None,
-        _experimental_buffer_containers: typing.Optional[int] = None,
         _experimental_proxy_ip: typing.Optional[str] = None,
         _experimental_custom_scaling_factor: typing.Optional[float] = None,
         _experimental_enable_gpu_snapshot: bool = False,
+        keep_warm: typing.Optional[int] = None,
+        concurrency_limit: typing.Optional[int] = None,
+        container_idle_timeout: typing.Optional[int] = None,
+        _experimental_buffer_containers: typing.Optional[int] = None,
     ) -> collections.abc.Callable[[CLS_T], CLS_T]: ...
     async def spawn_sandbox(
         self,
@@ -407,13 +415,14 @@ class App:
         cpu: typing.Union[float, tuple[float, float], None] = None,
         memory: typing.Union[int, tuple[int, int], None] = None,
         ephemeral_disk: typing.Optional[int] = None,
+        min_containers: typing.Optional[int] = None,
+        max_containers: typing.Optional[int] = None,
+        buffer_containers: typing.Optional[int] = None,
+        scaledown_window: typing.Optional[int] = None,
         proxy: typing.Optional[modal.proxy.Proxy] = None,
         retries: typing.Union[int, modal.retries.Retries, None] = None,
-        concurrency_limit: typing.Optional[int] = None,
         allow_concurrent_inputs: typing.Optional[int] = None,
-        container_idle_timeout: typing.Optional[int] = None,
         timeout: typing.Optional[int] = None,
-        keep_warm: typing.Optional[int] = None,
         name: typing.Optional[str] = None,
         is_generator: typing.Optional[bool] = None,
         cloud: typing.Optional[str] = None,
@@ -424,10 +433,13 @@ class App:
         i6pn: typing.Optional[bool] = None,
         include_source: typing.Optional[bool] = None,
         _experimental_scheduler_placement: typing.Optional[modal.scheduler_placement.SchedulerPlacement] = None,
-        _experimental_buffer_containers: typing.Optional[int] = None,
         _experimental_proxy_ip: typing.Optional[str] = None,
         _experimental_custom_scaling_factor: typing.Optional[float] = None,
         _experimental_enable_gpu_snapshot: bool = False,
+        keep_warm: typing.Optional[int] = None,
+        concurrency_limit: typing.Optional[int] = None,
+        container_idle_timeout: typing.Optional[int] = None,
+        _experimental_buffer_containers: typing.Optional[int] = None,
     ) -> _FunctionDecoratorType: ...
     @typing_extensions.dataclass_transform(
         field_specifiers=(modal.cls.parameter,),
@@ -453,13 +465,14 @@ class App:
         cpu: typing.Union[float, tuple[float, float], None] = None,
         memory: typing.Union[int, tuple[int, int], None] = None,
         ephemeral_disk: typing.Optional[int] = None,
+        min_containers: typing.Optional[int] = None,
+        max_containers: typing.Optional[int] = None,
+        buffer_containers: typing.Optional[int] = None,
+        scaledown_window: typing.Optional[int] = None,
         proxy: typing.Optional[modal.proxy.Proxy] = None,
         retries: typing.Union[int, modal.retries.Retries, None] = None,
-        concurrency_limit: typing.Optional[int] = None,
         allow_concurrent_inputs: typing.Optional[int] = None,
-        container_idle_timeout: typing.Optional[int] = None,
         timeout: typing.Optional[int] = None,
-        keep_warm: typing.Optional[int] = None,
         cloud: typing.Optional[str] = None,
         region: typing.Union[str, collections.abc.Sequence[str], None] = None,
         enable_memory_snapshot: bool = False,
@@ -467,10 +480,13 @@ class App:
         max_inputs: typing.Optional[int] = None,
         include_source: typing.Optional[bool] = None,
         _experimental_scheduler_placement: typing.Optional[modal.scheduler_placement.SchedulerPlacement] = None,
-        _experimental_buffer_containers: typing.Optional[int] = None,
         _experimental_proxy_ip: typing.Optional[str] = None,
         _experimental_custom_scaling_factor: typing.Optional[float] = None,
         _experimental_enable_gpu_snapshot: bool = False,
+        keep_warm: typing.Optional[int] = None,
+        concurrency_limit: typing.Optional[int] = None,
+        container_idle_timeout: typing.Optional[int] = None,
+        _experimental_buffer_containers: typing.Optional[int] = None,
     ) -> collections.abc.Callable[[CLS_T], CLS_T]: ...
 
     class __spawn_sandbox_spec(typing_extensions.Protocol[SUPERSELF]):
modal/cli/programs/vscode.py
CHANGED
@@ -82,7 +82,7 @@ def wait_for_port(data: tuple[str, str], q: Queue):
     timeout=args.get("timeout"),
     secrets=[Secret.from_dict({"MODAL_LAUNCH_ARGS": json.dumps(args)})],
     volumes=volumes,
-
+    max_containers=1 if volume else None,
 )
 def run_vscode(q: Queue):
     os.chdir("/home/coder")
modal/client.pyi
CHANGED
@@ -27,7 +27,7 @@ class _Client:
     _snapshotted: bool
 
     def __init__(
-        self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.73.
+        self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.73.77"
     ): ...
     def is_closed(self) -> bool: ...
     @property
@@ -85,7 +85,7 @@ class Client:
     _snapshotted: bool
 
    def __init__(
-        self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.73.
+        self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.73.77"
     ): ...
     def is_closed(self) -> bool: ...
     @property
modal/cls.py
CHANGED
@@ -1,4 +1,5 @@
 # Copyright Modal Labs 2022
+import dataclasses
 import inspect
 import os
 import typing
@@ -24,7 +25,7 @@ from ._resources import convert_fn_config_to_resources_config
 from ._serialization import check_valid_cls_constructor_arg
 from ._traceback import print_server_warnings
 from ._utils.async_utils import synchronize_api, synchronizer
-from ._utils.deprecation import deprecation_warning, renamed_parameter
+from ._utils.deprecation import deprecation_warning, renamed_parameter, warn_on_renamed_autoscaler_settings
 from ._utils.grpc_utils import retry_transient_errors
 from ._utils.mount_utils import validate_volumes
 from .client import _Client
@@ -72,6 +73,18 @@ def _get_class_constructor_signature(user_cls: type) -> inspect.Signature:
     return inspect.Signature(constructor_parameters)
 
 
+@dataclasses.dataclass()
+class _ServiceOptions:
+    secrets: typing.Collection[_Secret]
+    resources: Optional[api_pb2.Resources]
+    retry_policy: Optional[api_pb2.FunctionRetryPolicy]
+    concurrency_limit: Optional[int]
+    timeout_secs: Optional[int]
+    task_idle_timeout_secs: Optional[int]
+    validated_volumes: typing.Sequence[tuple[str, _Volume]]
+    target_concurrent_inputs: Optional[int]
+
+
 def _bind_instance_method(cls: "_Cls", service_function: _Function, method_name: str):
     """Binds an "instance service function" to a specific method using metadata for that method
 
@@ -144,12 +157,13 @@ class _Obj:
     _kwargs: dict[str, Any]
 
     _instance_service_function: Optional[_Function] = None  # this gets set lazily
+    _options: Optional[_ServiceOptions]
 
     def __init__(
         self,
         cls: "_Cls",
         user_cls: Optional[type],  # this would be None in case of lookups
-        options: Optional[
+        options: Optional[_ServiceOptions],
         args,
         kwargs,
     ):
@@ -354,7 +368,7 @@ class _Cls(_Object, type_prefix="cs"):
     """
 
     _class_service_function: Optional[_Function]  # The _Function (read "service") serving *all* methods of the class
-    _options: Optional[
+    _options: Optional[_ServiceOptions]  # TODO: typed dict/dataclass?
 
     _app: Optional["modal.app._App"] = None  # not set for lookups
     _name: Optional[str]
@@ -559,6 +573,7 @@ class _Cls(_Object, type_prefix="cs"):
         cls._name = name
         return cls
 
+    @warn_on_renamed_autoscaler_settings
     def with_options(
         self: "_Cls",
         cpu: Optional[Union[float, tuple[float, float]]] = None,
@@ -567,10 +582,13 @@ class _Cls(_Object, type_prefix="cs"):
         secrets: Collection[_Secret] = (),
         volumes: dict[Union[str, os.PathLike], _Volume] = {},
         retries: Optional[Union[int, Retries]] = None,
+        max_containers: Optional[int] = None,  # Limit on the number of containers that can be concurrently running.
+        scaledown_window: Optional[int] = None,  # Max amount of time a container can remain idle before scaling down.
         timeout: Optional[int] = None,
-        concurrency_limit: Optional[int] = None,
         allow_concurrent_inputs: Optional[int] = None,
-
+        # The following parameters are deprecated
+        concurrency_limit: Optional[int] = None,  # Now called `max_containers`
+        container_idle_timeout: Optional[int] = None,  # Now called `scaledown_window`
     ) -> "_Cls":
         """
         **Beta:** Allows for the runtime modification of a modal.Cls's configuration.
@@ -591,27 +609,16 @@ class _Cls(_Object, type_prefix="cs"):
         else:
             resources = None
 
-        volume_mounts = [
-            api_pb2.VolumeMount(
-                mount_path=path,
-                volume_id=volume.object_id,
-                allow_background_commits=True,
-            )
-            for path, volume in validate_volumes(volumes)
-        ]
-        replace_volume_mounts = len(volume_mounts) > 0
-
         cls = self.clone()
-        cls._options =
-
-            secret_ids=[secret.object_id for secret in secrets],
+        cls._options = _ServiceOptions(
+            secrets=secrets,
             resources=resources,
             retry_policy=retry_policy,
-
+            # TODO(michael) Update the protos to use the new terminology
+            concurrency_limit=max_containers,
+            task_idle_timeout_secs=scaledown_window,
             timeout_secs=timeout,
-
-            replace_volume_mounts=replace_volume_mounts,
-            volume_mounts=volume_mounts,
+            validated_volumes=validate_volumes(volumes),
             target_concurrent_inputs=allow_concurrent_inputs,
         )
 
modal/cls.pyi
CHANGED
@@ -22,6 +22,31 @@ T = typing.TypeVar("T")
 
 def _use_annotation_parameters(user_cls: type) -> bool: ...
 def _get_class_constructor_signature(user_cls: type) -> inspect.Signature: ...
+
+class _ServiceOptions:
+    secrets: typing.Collection[modal.secret._Secret]
+    resources: typing.Optional[modal_proto.api_pb2.Resources]
+    retry_policy: typing.Optional[modal_proto.api_pb2.FunctionRetryPolicy]
+    concurrency_limit: typing.Optional[int]
+    timeout_secs: typing.Optional[int]
+    task_idle_timeout_secs: typing.Optional[int]
+    validated_volumes: typing.Sequence[tuple[str, modal.volume._Volume]]
+    target_concurrent_inputs: typing.Optional[int]
+
+    def __init__(
+        self,
+        secrets: typing.Collection[modal.secret._Secret],
+        resources: typing.Optional[modal_proto.api_pb2.Resources],
+        retry_policy: typing.Optional[modal_proto.api_pb2.FunctionRetryPolicy],
+        concurrency_limit: typing.Optional[int],
+        timeout_secs: typing.Optional[int],
+        task_idle_timeout_secs: typing.Optional[int],
+        validated_volumes: typing.Sequence[tuple[str, modal.volume._Volume]],
+        target_concurrent_inputs: typing.Optional[int],
+    ) -> None: ...
+    def __repr__(self): ...
+    def __eq__(self, other): ...
+
 def _bind_instance_method(cls: _Cls, service_function: modal._functions._Function, method_name: str): ...
 
 class _Obj:
@@ -32,14 +57,10 @@ class _Obj:
     _args: tuple[typing.Any, ...]
     _kwargs: dict[str, typing.Any]
     _instance_service_function: typing.Optional[modal._functions._Function]
+    _options: typing.Optional[_ServiceOptions]
 
     def __init__(
-        self,
-        cls: _Cls,
-        user_cls: typing.Optional[type],
-        options: typing.Optional[modal_proto.api_pb2.FunctionOptions],
-        args,
-        kwargs,
+        self, cls: _Cls, user_cls: typing.Optional[type], options: typing.Optional[_ServiceOptions], args, kwargs
     ): ...
     def _cached_service_function(self) -> modal._functions._Function: ...
     def _get_parameter_values(self) -> dict[str, typing.Any]: ...
@@ -64,14 +85,10 @@ class Obj:
     _args: tuple[typing.Any, ...]
     _kwargs: dict[str, typing.Any]
     _instance_service_function: typing.Optional[modal.functions.Function]
+    _options: typing.Optional[_ServiceOptions]
 
     def __init__(
-        self,
-        cls: Cls,
-        user_cls: typing.Optional[type],
-        options: typing.Optional[modal_proto.api_pb2.FunctionOptions],
-        args,
-        kwargs,
+        self, cls: Cls, user_cls: typing.Optional[type], options: typing.Optional[_ServiceOptions], args, kwargs
     ): ...
     def _cached_service_function(self) -> modal.functions.Function: ...
     def _get_parameter_values(self) -> dict[str, typing.Any]: ...
@@ -94,7 +111,7 @@ class Obj:
 
 class _Cls(modal._object._Object):
     _class_service_function: typing.Optional[modal._functions._Function]
-    _options: typing.Optional[
+    _options: typing.Optional[_ServiceOptions]
     _app: typing.Optional[modal.app._App]
     _name: typing.Optional[str]
     _method_metadata: typing.Optional[dict[str, modal_proto.api_pb2.FunctionHandleMetadata]]
@@ -132,9 +149,11 @@ class _Cls(modal._object._Object):
         secrets: collections.abc.Collection[modal.secret._Secret] = (),
         volumes: dict[typing.Union[str, os.PathLike], modal.volume._Volume] = {},
         retries: typing.Union[int, modal.retries.Retries, None] = None,
+        max_containers: typing.Optional[int] = None,
+        scaledown_window: typing.Optional[int] = None,
         timeout: typing.Optional[int] = None,
-        concurrency_limit: typing.Optional[int] = None,
         allow_concurrent_inputs: typing.Optional[int] = None,
+        concurrency_limit: typing.Optional[int] = None,
         container_idle_timeout: typing.Optional[int] = None,
     ) -> _Cls: ...
     @staticmethod
@@ -152,7 +171,7 @@ class _Cls(modal._object._Object):
 
 class Cls(modal.object.Object):
     _class_service_function: typing.Optional[modal.functions.Function]
-    _options: typing.Optional[
+    _options: typing.Optional[_ServiceOptions]
     _app: typing.Optional[modal.app.App]
     _name: typing.Optional[str]
     _method_metadata: typing.Optional[dict[str, modal_proto.api_pb2.FunctionHandleMetadata]]
@@ -191,9 +210,11 @@ class Cls(modal.object.Object):
         secrets: collections.abc.Collection[modal.secret.Secret] = (),
         volumes: dict[typing.Union[str, os.PathLike], modal.volume.Volume] = {},
         retries: typing.Union[int, modal.retries.Retries, None] = None,
+        max_containers: typing.Optional[int] = None,
+        scaledown_window: typing.Optional[int] = None,
         timeout: typing.Optional[int] = None,
-        concurrency_limit: typing.Optional[int] = None,
         allow_concurrent_inputs: typing.Optional[int] = None,
+        concurrency_limit: typing.Optional[int] = None,
         container_idle_timeout: typing.Optional[int] = None,
     ) -> Cls: ...
 
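`Cls.with_options` now accepts `max_containers` and `scaledown_window` directly and stores the overrides in the new `_ServiceOptions` dataclass instead of building a raw `FunctionOptions` proto up front. A hedged usage sketch; the deployment and class names are illustrative:

```python
import modal

# Look up a deployed class and override its autoscaling at call time.
Worker = modal.Cls.from_name("my-deployed-app", "Worker")  # illustrative names
TunedWorker = Worker.with_options(max_containers=20, scaledown_window=60)

# Instances created from TunedWorker run with the overridden settings.
instance = TunedWorker()
```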
modal/functions.pyi
CHANGED
@@ -73,17 +73,18 @@ class Function(
             typing.Union[modal.volume.Volume, modal.cloud_bucket_mount.CloudBucketMount],
         ] = {},
         webhook_config: typing.Optional[modal_proto.api_pb2.WebhookConfig] = None,
+        cpu: typing.Union[float, tuple[float, float], None] = None,
         memory: typing.Union[int, tuple[int, int], None] = None,
         proxy: typing.Optional[modal.proxy.Proxy] = None,
         retries: typing.Union[int, modal.retries.Retries, None] = None,
         timeout: typing.Optional[int] = None,
-
+        min_containers: typing.Optional[int] = None,
+        max_containers: typing.Optional[int] = None,
+        buffer_containers: typing.Optional[int] = None,
+        scaledown_window: typing.Optional[int] = None,
         allow_concurrent_inputs: typing.Optional[int] = None,
         batch_max_size: typing.Optional[int] = None,
         batch_wait_ms: typing.Optional[int] = None,
-        container_idle_timeout: typing.Optional[int] = None,
-        cpu: typing.Union[float, tuple[float, float], None] = None,
-        keep_warm: typing.Optional[int] = None,
         cloud: typing.Optional[str] = None,
         scheduler_placement: typing.Optional[modal.scheduler_placement.SchedulerPlacement] = None,
         is_builder_function: bool = False,
@@ -95,7 +96,6 @@ class Function(
         max_inputs: typing.Optional[int] = None,
         ephemeral_disk: typing.Optional[int] = None,
         include_source: typing.Optional[bool] = None,
-        _experimental_buffer_containers: typing.Optional[int] = None,
         _experimental_proxy_ip: typing.Optional[str] = None,
         _experimental_custom_scaling_factor: typing.Optional[float] = None,
         _experimental_enable_gpu_snapshot: bool = False,
@@ -103,7 +103,7 @@ class Function(
     def _bind_parameters(
         self,
         obj: modal.cls.Obj,
-        options: typing.Optional[
+        options: typing.Optional[modal.cls._ServiceOptions],
         args: collections.abc.Sized,
         kwargs: dict[str, typing.Any],
     ) -> Function: ...
@@ -198,11 +198,11 @@ class Function(
 
     _call_generator_nowait: ___call_generator_nowait_spec[typing_extensions.Self]
 
-    class __remote_spec(typing_extensions.Protocol[
+    class __remote_spec(typing_extensions.Protocol[ReturnType_INNER, P_INNER, SUPERSELF]):
         def __call__(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> ReturnType_INNER: ...
         async def aio(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> ReturnType_INNER: ...
 
-    remote: __remote_spec[modal._functions.
+    remote: __remote_spec[modal._functions.ReturnType, modal._functions.P, typing_extensions.Self]
 
     class __remote_gen_spec(typing_extensions.Protocol[SUPERSELF]):
         def __call__(self, *args, **kwargs) -> typing.Generator[typing.Any, None, None]: ...
@@ -217,19 +217,19 @@ class Function(
         self, *args: modal._functions.P.args, **kwargs: modal._functions.P.kwargs
     ) -> modal._functions.OriginalReturnType: ...
 
-    class ___experimental_spawn_spec(typing_extensions.Protocol[
+    class ___experimental_spawn_spec(typing_extensions.Protocol[ReturnType_INNER, P_INNER, SUPERSELF]):
         def __call__(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]: ...
         async def aio(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]: ...
 
     _experimental_spawn: ___experimental_spawn_spec[
-        modal._functions.
+        modal._functions.ReturnType, modal._functions.P, typing_extensions.Self
     ]
 
-    class __spawn_spec(typing_extensions.Protocol[
+    class __spawn_spec(typing_extensions.Protocol[ReturnType_INNER, P_INNER, SUPERSELF]):
         def __call__(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]: ...
         async def aio(self, *args: P_INNER.args, **kwargs: P_INNER.kwargs) -> FunctionCall[ReturnType_INNER]: ...
 
-    spawn: __spawn_spec[modal._functions.
+    spawn: __spawn_spec[modal._functions.ReturnType, modal._functions.P, typing_extensions.Self]
 
     def get_raw_f(self) -> collections.abc.Callable[..., typing.Any]: ...
 
{modal-0.73.75.dist-info → modal-0.73.77.dist-info}/RECORD
CHANGED
@@ -3,7 +3,7 @@ modal/__main__.py,sha256=CgIjP8m1xJjjd4AXc-delmR6LdBCZclw2A_V38CFIio,2870
 modal/_clustered_functions.py,sha256=kTf-9YBXY88NutC1akI-gCbvf01RhMPCw-zoOI_YIUE,2700
 modal/_clustered_functions.pyi,sha256=vllkegc99A0jrUOWa8mdlSbdp6uz36TsHhGxysAOpaQ,771
 modal/_container_entrypoint.py,sha256=arhkIoF8nQNfa4iwYGSoqN3QMDg5M38QNAODXC8TlKc,29301
-modal/_functions.py,sha256=
+modal/_functions.py,sha256=OR76wIl-Hq5Q-mId_FzcEhpUdU6Lq40FDvnMLvAzgUM,72740
 modal/_ipython.py,sha256=TW1fkVOmZL3YYqdS2YlM1hqpf654Yf8ZyybHdBnlhSw,301
 modal/_location.py,sha256=joiX-0ZeutEUDTrrqLF1GHXCdVLF-rHzstocbMcd_-k,366
 modal/_object.py,sha256=ItQcsMNkz9Y3kdTsvfNarbW-paJ2qabDyQ7njaqY0XI,11359
@@ -18,15 +18,15 @@ modal/_traceback.py,sha256=IZQzB3fVlUfMHOSyKUgw0H6qv4yHnpyq-XVCNZKfUdA,5023
 modal/_tunnel.py,sha256=zTBxBiuH1O22tS1OliAJdIsSmaZS8PlnifS_6S5z-mk,6320
 modal/_tunnel.pyi,sha256=JmmDYAy9F1FpgJ_hWx0xkom2nTOFQjn4mTPYlU3PFo4,1245
 modal/_watcher.py,sha256=K6LYnlmSGQB4tWWI9JADv-tvSvQ1j522FwT71B51CX8,3584
-modal/app.py,sha256=
-modal/app.pyi,sha256=
+modal/app.py,sha256=kF3frIt4eRKVYYCjusMMhKJpO_lDdm2z37HOXPwpjT8,45506
+modal/app.pyi,sha256=tZFbcsu20SuvfB2puxCyuXLFNJ9bQulzag55rVpgZmc,26827
 modal/call_graph.py,sha256=1g2DGcMIJvRy-xKicuf63IVE98gJSnQsr8R_NVMptNc,2581
 modal/client.py,sha256=8SQawr7P1PNUCq1UmJMUQXG2jIo4Nmdcs311XqrNLRE,15276
-modal/client.pyi,sha256=
+modal/client.pyi,sha256=Pz702Nbzk12XDU_wdlGt1PTWzNgS5-BL6Rj-Lf5Z1bE,7593
 modal/cloud_bucket_mount.py,sha256=YOe9nnvSr4ZbeCn587d7_VhE9IioZYRvF9VYQTQux08,5914
 modal/cloud_bucket_mount.pyi,sha256=30T3K1a89l6wzmEJ_J9iWv9SknoGqaZDx59Xs-ZQcmk,1607
-modal/cls.py,sha256=
-modal/cls.pyi,sha256=
+modal/cls.py,sha256=0LKIHboBFRY291VsG0ick_6KZk2ArK1RLus9mlTZtJ8,30726
+modal/cls.pyi,sha256=ZJUwtRaQBGlM6tphvnv49FHBVDSgttMdD_LnYyRSKJM,10302
 modal/config.py,sha256=Boz1bPzaG-k5Grjq6y6fAELH1N_gTuYDnpB6FODzCPo,11710
 modal/container_process.py,sha256=WTqLn01dJPVkPpwR_0w_JH96ceN5mV4TGtiu1ZR2RRA,6108
 modal/container_process.pyi,sha256=Hf0J5JyDdCCXBJSKx6gvkPOo0XrztCm78xzxamtzUjQ,2828
@@ -41,7 +41,7 @@ modal/file_io.py,sha256=lcMs_E9Xfm0YX1t9U2wNIBPnqHRxmImqjLW1GHqVmyg,20945
 modal/file_io.pyi,sha256=NTRft1tbPSWf9TlWVeZmTlgB5AZ_Zhu2srWIrWr7brk,9445
 modal/file_pattern_matcher.py,sha256=trosX-Bp7dOubudN1bLLhRAoidWy1TcoaR4Pv8CedWw,6497
 modal/functions.py,sha256=kcNHvqeGBxPI7Cgd57NIBBghkfbeFJzXO44WW0jSmao,325
-modal/functions.pyi,sha256=
+modal/functions.pyi,sha256=D-PDJfSbwqMDXdq7Bxu2ErZRENo-tRgu_zPoB-jl0OU,14377
 modal/gpu.py,sha256=Kbhs_u49FaC2Zi0TjCdrpstpRtT5eZgecynmQi5IZVE,6752
 modal/image.py,sha256=adMUpS7WrCu-M78BWslz2r6GPviy4qPvd5Dh-dBIrrk,90257
 modal/image.pyi,sha256=L7aZUOElSGtNHmFHz1RgKP1cG5paiXt_EzylrwBwzVk,25004
@@ -96,7 +96,7 @@ modal/_utils/app_utils.py,sha256=88BT4TPLWfYAQwKTHcyzNQRHg8n9B-QE2UyJs96iV-0,108
 modal/_utils/async_utils.py,sha256=5PdDuI1aSwPOI4a3dIvW0DkPqGw6KZN6RtWE18Dzv1E,25079
 modal/_utils/blob_utils.py,sha256=RB1G6T7eC1Poe-O45qYLaxwCr2jkM-Q6Nexk1J3wk_w,14505
 modal/_utils/bytes_io_segment_payload.py,sha256=uunxVJS4PE1LojF_UpURMzVK9GuvmYWRqQo_bxEj5TU,3385
-modal/_utils/deprecation.py,sha256=
+modal/_utils/deprecation.py,sha256=rgCGTrk-u_uaDXNDTAW9FM8GP8N3ErlDfr2wXhKYLVw,4870
 modal/_utils/docker_utils.py,sha256=h1uETghR40mp_y3fSWuZAfbIASH1HMzuphJHghAL6DU,3722
 modal/_utils/function_utils.py,sha256=Rmz8GJDie-RW_q2RcTwholEWixS2IQDPBsRBJ3f3ZvU,27302
 modal/_utils/grpc_testing.py,sha256=H1zHqthv19eGPJz2HKXDyWXWGSqO4BRsxah3L5Xaa8A,8619
@@ -135,7 +135,7 @@ modal/cli/utils.py,sha256=hZmjyzcPjDnQSkLvycZD2LhGdcsfdZshs_rOU78EpvI,3717
 modal/cli/volume.py,sha256=c2IuVNO2yJVaXmZkRh3xwQmznlRTgFoJr_BIzzqtVv0,10251
 modal/cli/programs/__init__.py,sha256=svYKtV8HDwDCN86zbdWqyq5T8sMdGDj0PVlzc2tIxDM,28
 modal/cli/programs/run_jupyter.py,sha256=MX6YQ6zRyRk1xo8tYZFiGam0p5KETwax81L6TpaS9I0,2778
-modal/cli/programs/vscode.py,sha256=
+modal/cli/programs/vscode.py,sha256=kfvhZQ4bJwtVm3MgC1V7AlygZOlKT1a33alr_uwrewA,3473
 modal/extensions/__init__.py,sha256=waLjl5c6IPDhSsdWAm9Bji4e2PVxamYABKAze6CHVXY,28
 modal/extensions/ipython.py,sha256=Xvzy-A7cvwMSDa9p4c4CEMLOX2_Xsg9DkM1J9uyu7jc,983
 modal/requirements/2023.12.312.txt,sha256=zWWUVgVQ92GXBKNYYr2-5vn9rlnXcmkqlwlX5u1eTYw,400
@@ -168,10 +168,10 @@ modal_proto/options_pb2_grpc.pyi,sha256=CImmhxHsYnF09iENPoe8S4J-n93jtgUYD2JPAc0y
 modal_proto/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 modal_version/__init__.py,sha256=wiJQ53c-OMs0Xf1UeXOxQ7FwlV1VzIjnX6o-pRYZ_Pk,470
 modal_version/__main__.py,sha256=2FO0yYQQwDTh6udt1h-cBnGd1c4ZyHnHSI4BksxzVac,105
-modal_version/_version_generated.py,sha256=
-modal-0.73.
-modal-0.73.
-modal-0.73.
-modal-0.73.
-modal-0.73.
-modal-0.73.
+modal_version/_version_generated.py,sha256=3PMwQ2aqnXHX71h9sw3Oe5RJdzScfCrLIwFoBDCVDqc,149
+modal-0.73.77.dist-info/LICENSE,sha256=psuoW8kuDP96RQsdhzwOqi6fyWv0ct8CR6Jr7He_P_k,10173
+modal-0.73.77.dist-info/METADATA,sha256=Qw57v7B3T8x4RkVU1V0YxSGksoaZbwXDKtiE_IYMOwU,2452
+modal-0.73.77.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
+modal-0.73.77.dist-info/entry_points.txt,sha256=An-wYgeEUnm6xzrAP9_NTSTSciYvvEWsMZILtYrvpAI,46
+modal-0.73.77.dist-info/top_level.txt,sha256=4BWzoKYREKUZ5iyPzZpjqx4G8uB5TWxXPDwibLcVa7k,43
+modal-0.73.77.dist-info/RECORD,,
{modal-0.73.75.dist-info → modal-0.73.77.dist-info}/LICENSE
File without changes
{modal-0.73.75.dist-info → modal-0.73.77.dist-info}/WHEEL
File without changes
{modal-0.73.75.dist-info → modal-0.73.77.dist-info}/entry_points.txt
File without changes
{modal-0.73.75.dist-info → modal-0.73.77.dist-info}/top_level.txt
File without changes