modal 0.73.74__py3-none-any.whl → 0.73.76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
modal/_functions.py CHANGED
@@ -417,18 +417,18 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
417
417
  allow_cross_region_volumes: bool = False,
418
418
  volumes: dict[Union[str, PurePosixPath], Union[_Volume, _CloudBucketMount]] = {},
419
419
  webhook_config: Optional[api_pb2.WebhookConfig] = None,
420
+ cpu: Optional[Union[float, tuple[float, float]]] = None,
420
421
  memory: Optional[Union[int, tuple[int, int]]] = None,
421
422
  proxy: Optional[_Proxy] = None,
422
423
  retries: Optional[Union[int, Retries]] = None,
423
424
  timeout: Optional[int] = None,
424
- concurrency_limit: Optional[int] = None,
425
+ min_containers: Optional[int] = None,
426
+ max_containers: Optional[int] = None,
427
+ buffer_containers: Optional[int] = None,
428
+ scaledown_window: Optional[int] = None,
425
429
  allow_concurrent_inputs: Optional[int] = None,
426
430
  batch_max_size: Optional[int] = None,
427
431
  batch_wait_ms: Optional[int] = None,
428
- container_idle_timeout: Optional[int] = None,
429
- cpu: Optional[Union[float, tuple[float, float]]] = None,
430
- # keep_warm=True is equivalent to keep_warm=1
431
- keep_warm: Optional[int] = None,
432
432
  cloud: Optional[str] = None,
433
433
  scheduler_placement: Optional[SchedulerPlacement] = None,
434
434
  is_builder_function: bool = False,
@@ -442,7 +442,6 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
442
442
  ephemeral_disk: Optional[int] = None,
443
443
  # current default: first-party, future default: main-package
444
444
  include_source: Optional[bool] = None,
445
- _experimental_buffer_containers: Optional[int] = None,
446
445
  _experimental_proxy_ip: Optional[str] = None,
447
446
  _experimental_custom_scaling_factor: Optional[float] = None,
448
447
  _experimental_enable_gpu_snapshot: bool = False,
@@ -564,20 +563,21 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
564
563
  force_build=image.force_build or pf.force_build,
565
564
  )
566
565
 
567
- if keep_warm is not None and not isinstance(keep_warm, int):
568
- raise TypeError(f"`keep_warm` must be an int or bool, not {type(keep_warm).__name__}")
569
-
570
- if (keep_warm is not None) and (concurrency_limit is not None) and concurrency_limit < keep_warm:
566
+ # Note that we also do these checks in FunctionCreate; could drop them here
567
+ if min_containers is not None and not isinstance(min_containers, int):
568
+ raise InvalidError(f"`min_containers` must be an int, not {type(min_containers).__name__}")
569
+ if min_containers is not None and max_containers is not None and max_containers < min_containers:
571
570
  raise InvalidError(
572
- f"Function `{info.function_name}` has `{concurrency_limit=}`, "
573
- f"strictly less than its `{keep_warm=}` parameter."
571
+ f"`min_containers` ({min_containers}) cannot be greater than `max_containers` ({max_containers})"
574
572
  )
573
+ if scaledown_window is not None and scaledown_window <= 0:
574
+ raise InvalidError("`scaledown_window` must be > 0")
575
575
 
576
576
  autoscaler_settings = api_pb2.AutoscalerSettings(
577
- max_containers=concurrency_limit,
578
- min_containers=keep_warm,
579
- buffer_containers=_experimental_buffer_containers,
580
- scaledown_window=container_idle_timeout,
577
+ min_containers=min_containers,
578
+ max_containers=max_containers,
579
+ buffer_containers=buffer_containers,
580
+ scaledown_window=scaledown_window,
581
581
  )
582
582
 
583
583
  if _experimental_custom_scaling_factor is not None and (
@@ -605,9 +605,6 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
605
605
  if arg.default is not inspect.Parameter.empty:
606
606
  raise InvalidError(f"Modal batched function {func_name} does not accept default arguments.")
607
607
 
608
- if container_idle_timeout is not None and container_idle_timeout <= 0:
609
- raise InvalidError("`container_idle_timeout` must be > 0")
610
-
611
608
  if max_inputs is not None:
612
609
  if not isinstance(max_inputs, int):
613
610
  raise InvalidError(f"`max_inputs` must be an int, not {type(max_inputs).__name__}")
@@ -773,11 +770,8 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
773
770
  proxy_id=(proxy.object_id if proxy else None),
774
771
  retry_policy=retry_policy,
775
772
  timeout_secs=timeout_secs or 0,
776
- task_idle_timeout_secs=container_idle_timeout or 0,
777
- concurrency_limit=concurrency_limit or 0,
778
773
  pty_info=pty_info,
779
774
  cloud_provider_str=cloud if cloud else "",
780
- warm_pool_size=keep_warm or 0,
781
775
  runtime=config.get("function_runtime"),
782
776
  runtime_debug=config.get("function_runtime_debug"),
783
777
  runtime_perf_record=config.get("runtime_perf_record"),
@@ -802,10 +796,15 @@ class _Function(typing.Generic[P, ReturnType, OriginalReturnType], _Object, type
802
796
  snapshot_debug=config.get("snapshot_debug"),
803
797
  _experimental_group_size=cluster_size or 0, # Experimental: Clustered functions
804
798
  _experimental_concurrent_cancellations=True,
805
- _experimental_buffer_containers=_experimental_buffer_containers or 0,
806
799
  _experimental_proxy_ip=_experimental_proxy_ip,
807
800
  _experimental_custom_scaling=_experimental_custom_scaling_factor is not None,
808
801
  _experimental_enable_gpu_snapshot=_experimental_enable_gpu_snapshot,
802
+ # --- These are deprecated in favor of autoscaler_settings
803
+ warm_pool_size=min_containers or 0,
804
+ concurrency_limit=max_containers or 0,
805
+ _experimental_buffer_containers=buffer_containers or 0,
806
+ task_idle_timeout_secs=scaledown_window or 0,
807
+ # ---
809
808
  )
810
809
 
811
810
  if isinstance(gpu, list):
@@ -87,3 +87,38 @@ def renamed_parameter(
87
87
  return wrapper
88
88
 
89
89
  return decorator
90
+
91
+
92
def warn_on_renamed_autoscaler_settings(func: Callable[P, R]) -> Callable[P, R]:
    """Remap deprecated autoscaler keyword arguments to their new names.

    Wraps *func* so that calls using the old parameter names
    (``keep_warm``, ``concurrency_limit``, ``_experimental_buffer_containers``,
    ``container_idle_timeout``) are transparently translated to the new names
    (``min_containers``, ``max_containers``, ``buffer_containers``,
    ``scaledown_window``). When any translation happens, a single pending
    deprecation warning is emitted listing every substitution that was made.

    The wrapped function's signature and return value are otherwise unchanged.
    """
    name_map = {
        "keep_warm": "min_containers",
        "concurrency_limit": "max_containers",
        "_experimental_buffer_containers": "buffer_containers",
        "container_idle_timeout": "scaledown_window",
    }

    @functools.wraps(func)
    def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        # Avoid referencing kwargs directly due to bug in sigtools
        mut_kwargs: dict[str, Any] = locals()["kwargs"]

        substitutions = []
        old_params_used = name_map.keys() & mut_kwargs.keys()
        # Iterate name_map (not old_params_used) so the warning lists renames in
        # a stable, documented order. Note: `new_param` is already bound by the
        # tuple unpacking; the previous redundant `new_param = name_map[old_param]`
        # lookup was dead code and has been removed.
        for old_param, new_param in name_map.items():
            if old_param in old_params_used:
                mut_kwargs[new_param] = mut_kwargs.pop(old_param)
                substitutions.append(f"- {old_param} -> {new_param}")

        if substitutions:
            substitution_string = "\n".join(substitutions)
            message = (
                "We have renamed several parameters related to autoscaling."
                " Please update your code to use the following new names:"
                f"\n\n{substitution_string}"
                "\n\nSee https://modal.com/docs/guide/modal-1-0-migration for more details."
            )
            deprecation_warning((2025, 2, 24), message, pending=True, show_source=True)

        return func(*args, **kwargs)

    return wrapper
modal/app.py CHANGED
@@ -29,7 +29,12 @@ from ._partial_function import (
29
29
  _PartialFunctionFlags,
30
30
  )
31
31
  from ._utils.async_utils import synchronize_api
32
- from ._utils.deprecation import deprecation_error, deprecation_warning, renamed_parameter
32
+ from ._utils.deprecation import (
33
+ deprecation_error,
34
+ deprecation_warning,
35
+ renamed_parameter,
36
+ warn_on_renamed_autoscaler_settings,
37
+ )
33
38
  from ._utils.function_utils import FunctionInfo, is_global_object, is_method_fn
34
39
  from ._utils.grpc_utils import retry_transient_errors
35
40
  from ._utils.mount_utils import validate_volumes
@@ -559,6 +564,7 @@ class _App:
559
564
 
560
565
  return wrapped
561
566
 
567
+ @warn_on_renamed_autoscaler_settings
562
568
  def function(
563
569
  self,
564
570
  _warn_parentheses_missing: Any = None,
@@ -586,17 +592,14 @@ class _App:
586
592
  # Or, pass (request, limit) to additionally specify a hard limit in MiB.
587
593
  memory: Optional[Union[int, tuple[int, int]]] = None,
588
594
  ephemeral_disk: Optional[int] = None, # Specify, in MiB, the ephemeral disk size for the Function.
595
+ min_containers: Optional[int] = None, # Minimum number of containers to keep warm, even when Function is idle.
596
+ max_containers: Optional[int] = None, # Limit on the number of containers that can be concurrently running.
597
+ buffer_containers: Optional[int] = None, # Number of additional idle containers to maintain under active load.
598
+ scaledown_window: Optional[int] = None, # Max amount of time a container can remain idle before scaling down.
589
599
  proxy: Optional[_Proxy] = None, # Reference to a Modal Proxy to use in front of this function.
590
600
  retries: Optional[Union[int, Retries]] = None, # Number of times to retry each input in case of failure.
591
- concurrency_limit: Optional[
592
- int
593
- ] = None, # An optional maximum number of concurrent containers running the function (keep_warm sets minimum).
594
601
  allow_concurrent_inputs: Optional[int] = None, # Number of inputs the container may fetch to run concurrently.
595
- container_idle_timeout: Optional[int] = None, # Timeout for idle containers waiting for inputs to shut down.
596
602
  timeout: Optional[int] = None, # Maximum execution time of the function in seconds.
597
- keep_warm: Optional[
598
- int
599
- ] = None, # An optional minimum number of containers to always keep warm (use concurrency_limit for maximum).
600
603
  name: Optional[str] = None, # Sets the Modal name of the function within the app
601
604
  is_generator: Optional[
602
605
  bool
@@ -615,10 +618,14 @@ class _App:
615
618
  _experimental_scheduler_placement: Optional[
616
619
  SchedulerPlacement
617
620
  ] = None, # Experimental controls over fine-grained scheduling (alpha).
618
- _experimental_buffer_containers: Optional[int] = None, # Number of additional, idle containers to keep around.
619
621
  _experimental_proxy_ip: Optional[str] = None, # IP address of proxy
620
622
  _experimental_custom_scaling_factor: Optional[float] = None, # Custom scaling factor
621
623
  _experimental_enable_gpu_snapshot: bool = False, # Experimentally enable GPU memory snapshots.
624
+ # Parameters below here are deprecated. Please update your code as suggested
625
+ keep_warm: Optional[int] = None, # Replaced with `min_containers`
626
+ concurrency_limit: Optional[int] = None, # Replaced with `max_containers`
627
+ container_idle_timeout: Optional[int] = None, # Replaced with `scaledown_window`
628
+ _experimental_buffer_containers: Optional[int] = None, # Now stable API with `buffer_containers`
622
629
  ) -> _FunctionDecoratorType:
623
630
  """Decorator to register a new Modal [Function](/docs/reference/modal.Function) with this App."""
624
631
  if isinstance(_warn_parentheses_missing, _Image):
@@ -635,7 +642,7 @@ class _App:
635
642
  def wrapped(
636
643
  f: Union[_PartialFunction, Callable[..., Any], None],
637
644
  ) -> _Function:
638
- nonlocal keep_warm, is_generator, cloud, serialized
645
+ nonlocal is_generator, cloud, serialized
639
646
 
640
647
  # Check if the decorated object is a class
641
648
  if inspect.isclass(f):
@@ -669,7 +676,6 @@ class _App:
669
676
  raw_f = f.raw_f
670
677
  webhook_config = f.webhook_config
671
678
  is_generator = f.is_generator
672
- keep_warm = f.keep_warm or keep_warm
673
679
  batch_max_size = f.batch_max_size
674
680
  batch_wait_ms = f.batch_wait_ms
675
681
  else:
@@ -743,20 +749,20 @@ class _App:
743
749
  ephemeral_disk=ephemeral_disk,
744
750
  proxy=proxy,
745
751
  retries=retries,
746
- concurrency_limit=concurrency_limit,
752
+ min_containers=min_containers,
753
+ max_containers=max_containers,
754
+ buffer_containers=buffer_containers,
755
+ scaledown_window=scaledown_window,
747
756
  allow_concurrent_inputs=allow_concurrent_inputs,
748
757
  batch_max_size=batch_max_size,
749
758
  batch_wait_ms=batch_wait_ms,
750
- container_idle_timeout=container_idle_timeout,
751
759
  timeout=timeout,
752
- keep_warm=keep_warm,
753
760
  cloud=cloud,
754
761
  webhook_config=webhook_config,
755
762
  enable_memory_snapshot=enable_memory_snapshot,
756
763
  block_network=block_network,
757
764
  max_inputs=max_inputs,
758
765
  scheduler_placement=scheduler_placement,
759
- _experimental_buffer_containers=_experimental_buffer_containers,
760
766
  _experimental_proxy_ip=_experimental_proxy_ip,
761
767
  i6pn_enabled=i6pn_enabled,
762
768
  cluster_size=cluster_size, # Experimental: Clustered functions
@@ -771,6 +777,7 @@ class _App:
771
777
  return wrapped
772
778
 
773
779
  @typing_extensions.dataclass_transform(field_specifiers=(parameter,), kw_only_default=True)
780
+ @warn_on_renamed_autoscaler_settings
774
781
  def cls(
775
782
  self,
776
783
  _warn_parentheses_missing: Optional[bool] = None,
@@ -797,13 +804,14 @@ class _App:
797
804
  # Or, pass (request, limit) to additionally specify a hard limit in MiB.
798
805
  memory: Optional[Union[int, tuple[int, int]]] = None,
799
806
  ephemeral_disk: Optional[int] = None, # Specify, in MiB, the ephemeral disk size for the Function.
807
+ min_containers: Optional[int] = None, # Minimum number of containers to keep warm, even when Function is idle.
808
+ max_containers: Optional[int] = None, # Limit on the number of containers that can be concurrently running.
809
+ buffer_containers: Optional[int] = None, # Number of additional idle containers to maintain under active load.
810
+ scaledown_window: Optional[int] = None, # Max amount of time a container can remain idle before scaling down.
800
811
  proxy: Optional[_Proxy] = None, # Reference to a Modal Proxy to use in front of this function.
801
812
  retries: Optional[Union[int, Retries]] = None, # Number of times to retry each input in case of failure.
802
- concurrency_limit: Optional[int] = None, # Limit for max concurrent containers running the function.
803
813
  allow_concurrent_inputs: Optional[int] = None, # Number of inputs the container may fetch to run concurrently.
804
- container_idle_timeout: Optional[int] = None, # Timeout for idle containers waiting for inputs to shut down.
805
814
  timeout: Optional[int] = None, # Maximum execution time of the function in seconds.
806
- keep_warm: Optional[int] = None, # An optional number of containers to always keep warm.
807
815
  cloud: Optional[str] = None, # Cloud provider to run the function on. Possible values are aws, gcp, oci, auto.
808
816
  region: Optional[Union[str, Sequence[str]]] = None, # Region or regions to run the function on.
809
817
  enable_memory_snapshot: bool = False, # Enable memory checkpointing for faster cold starts.
@@ -816,10 +824,14 @@ class _App:
816
824
  _experimental_scheduler_placement: Optional[
817
825
  SchedulerPlacement
818
826
  ] = None, # Experimental controls over fine-grained scheduling (alpha).
819
- _experimental_buffer_containers: Optional[int] = None, # Number of additional, idle containers to keep around.
820
827
  _experimental_proxy_ip: Optional[str] = None, # IP address of proxy
821
828
  _experimental_custom_scaling_factor: Optional[float] = None, # Custom scaling factor
822
829
  _experimental_enable_gpu_snapshot: bool = False, # Experimentally enable GPU memory snapshots.
830
+ # Parameters below here are deprecated. Please update your code as suggested
831
+ keep_warm: Optional[int] = None, # Replaced with `min_containers`
832
+ concurrency_limit: Optional[int] = None, # Replaced with `max_containers`
833
+ container_idle_timeout: Optional[int] = None, # Replaced with `scaledown_window`
834
+ _experimental_buffer_containers: Optional[int] = None, # Now stable API with `buffer_containers`
823
835
  ) -> Callable[[CLS_T], CLS_T]:
824
836
  """
825
837
  Decorator to register a new Modal [Cls](/docs/reference/modal.Cls) with this App.
@@ -834,8 +846,6 @@ class _App:
834
846
  scheduler_placement = SchedulerPlacement(region=region)
835
847
 
836
848
  def wrapper(user_cls: CLS_T) -> CLS_T:
837
- nonlocal keep_warm
838
-
839
849
  # Check if the decorated object is a class
840
850
  if not inspect.isclass(user_cls):
841
851
  raise TypeError("The @app.cls decorator must be used on a class.")
@@ -873,25 +883,25 @@ class _App:
873
883
  network_file_systems=network_file_systems,
874
884
  allow_cross_region_volumes=allow_cross_region_volumes,
875
885
  volumes={**self._volumes, **volumes},
886
+ cpu=cpu,
876
887
  memory=memory,
877
888
  ephemeral_disk=ephemeral_disk,
889
+ min_containers=min_containers,
890
+ max_containers=max_containers,
891
+ buffer_containers=buffer_containers,
892
+ scaledown_window=scaledown_window,
878
893
  proxy=proxy,
879
894
  retries=retries,
880
- concurrency_limit=concurrency_limit,
881
895
  allow_concurrent_inputs=allow_concurrent_inputs,
882
896
  batch_max_size=batch_max_size,
883
897
  batch_wait_ms=batch_wait_ms,
884
- container_idle_timeout=container_idle_timeout,
885
898
  timeout=timeout,
886
- cpu=cpu,
887
- keep_warm=keep_warm,
888
899
  cloud=cloud,
889
900
  enable_memory_snapshot=enable_memory_snapshot,
890
901
  block_network=block_network,
891
902
  max_inputs=max_inputs,
892
903
  scheduler_placement=scheduler_placement,
893
904
  include_source=include_source if include_source is not None else self._include_source_default,
894
- _experimental_buffer_containers=_experimental_buffer_containers,
895
905
  _experimental_proxy_ip=_experimental_proxy_ip,
896
906
  _experimental_custom_scaling_factor=_experimental_custom_scaling_factor,
897
907
  _experimental_enable_gpu_snapshot=_experimental_enable_gpu_snapshot,
modal/app.pyi CHANGED
@@ -175,13 +175,14 @@ class _App:
175
175
  cpu: typing.Union[float, tuple[float, float], None] = None,
176
176
  memory: typing.Union[int, tuple[int, int], None] = None,
177
177
  ephemeral_disk: typing.Optional[int] = None,
178
+ min_containers: typing.Optional[int] = None,
179
+ max_containers: typing.Optional[int] = None,
180
+ buffer_containers: typing.Optional[int] = None,
181
+ scaledown_window: typing.Optional[int] = None,
178
182
  proxy: typing.Optional[modal.proxy._Proxy] = None,
179
183
  retries: typing.Union[int, modal.retries.Retries, None] = None,
180
- concurrency_limit: typing.Optional[int] = None,
181
184
  allow_concurrent_inputs: typing.Optional[int] = None,
182
- container_idle_timeout: typing.Optional[int] = None,
183
185
  timeout: typing.Optional[int] = None,
184
- keep_warm: typing.Optional[int] = None,
185
186
  name: typing.Optional[str] = None,
186
187
  is_generator: typing.Optional[bool] = None,
187
188
  cloud: typing.Optional[str] = None,
@@ -192,10 +193,13 @@ class _App:
192
193
  i6pn: typing.Optional[bool] = None,
193
194
  include_source: typing.Optional[bool] = None,
194
195
  _experimental_scheduler_placement: typing.Optional[modal.scheduler_placement.SchedulerPlacement] = None,
195
- _experimental_buffer_containers: typing.Optional[int] = None,
196
196
  _experimental_proxy_ip: typing.Optional[str] = None,
197
197
  _experimental_custom_scaling_factor: typing.Optional[float] = None,
198
198
  _experimental_enable_gpu_snapshot: bool = False,
199
+ keep_warm: typing.Optional[int] = None,
200
+ concurrency_limit: typing.Optional[int] = None,
201
+ container_idle_timeout: typing.Optional[int] = None,
202
+ _experimental_buffer_containers: typing.Optional[int] = None,
199
203
  ) -> _FunctionDecoratorType: ...
200
204
  @typing_extensions.dataclass_transform(
201
205
  field_specifiers=(modal.cls.parameter,),
@@ -221,13 +225,14 @@ class _App:
221
225
  cpu: typing.Union[float, tuple[float, float], None] = None,
222
226
  memory: typing.Union[int, tuple[int, int], None] = None,
223
227
  ephemeral_disk: typing.Optional[int] = None,
228
+ min_containers: typing.Optional[int] = None,
229
+ max_containers: typing.Optional[int] = None,
230
+ buffer_containers: typing.Optional[int] = None,
231
+ scaledown_window: typing.Optional[int] = None,
224
232
  proxy: typing.Optional[modal.proxy._Proxy] = None,
225
233
  retries: typing.Union[int, modal.retries.Retries, None] = None,
226
- concurrency_limit: typing.Optional[int] = None,
227
234
  allow_concurrent_inputs: typing.Optional[int] = None,
228
- container_idle_timeout: typing.Optional[int] = None,
229
235
  timeout: typing.Optional[int] = None,
230
- keep_warm: typing.Optional[int] = None,
231
236
  cloud: typing.Optional[str] = None,
232
237
  region: typing.Union[str, collections.abc.Sequence[str], None] = None,
233
238
  enable_memory_snapshot: bool = False,
@@ -235,10 +240,13 @@ class _App:
235
240
  max_inputs: typing.Optional[int] = None,
236
241
  include_source: typing.Optional[bool] = None,
237
242
  _experimental_scheduler_placement: typing.Optional[modal.scheduler_placement.SchedulerPlacement] = None,
238
- _experimental_buffer_containers: typing.Optional[int] = None,
239
243
  _experimental_proxy_ip: typing.Optional[str] = None,
240
244
  _experimental_custom_scaling_factor: typing.Optional[float] = None,
241
245
  _experimental_enable_gpu_snapshot: bool = False,
246
+ keep_warm: typing.Optional[int] = None,
247
+ concurrency_limit: typing.Optional[int] = None,
248
+ container_idle_timeout: typing.Optional[int] = None,
249
+ _experimental_buffer_containers: typing.Optional[int] = None,
242
250
  ) -> collections.abc.Callable[[CLS_T], CLS_T]: ...
243
251
  async def spawn_sandbox(
244
252
  self,
@@ -407,13 +415,14 @@ class App:
407
415
  cpu: typing.Union[float, tuple[float, float], None] = None,
408
416
  memory: typing.Union[int, tuple[int, int], None] = None,
409
417
  ephemeral_disk: typing.Optional[int] = None,
418
+ min_containers: typing.Optional[int] = None,
419
+ max_containers: typing.Optional[int] = None,
420
+ buffer_containers: typing.Optional[int] = None,
421
+ scaledown_window: typing.Optional[int] = None,
410
422
  proxy: typing.Optional[modal.proxy.Proxy] = None,
411
423
  retries: typing.Union[int, modal.retries.Retries, None] = None,
412
- concurrency_limit: typing.Optional[int] = None,
413
424
  allow_concurrent_inputs: typing.Optional[int] = None,
414
- container_idle_timeout: typing.Optional[int] = None,
415
425
  timeout: typing.Optional[int] = None,
416
- keep_warm: typing.Optional[int] = None,
417
426
  name: typing.Optional[str] = None,
418
427
  is_generator: typing.Optional[bool] = None,
419
428
  cloud: typing.Optional[str] = None,
@@ -424,10 +433,13 @@ class App:
424
433
  i6pn: typing.Optional[bool] = None,
425
434
  include_source: typing.Optional[bool] = None,
426
435
  _experimental_scheduler_placement: typing.Optional[modal.scheduler_placement.SchedulerPlacement] = None,
427
- _experimental_buffer_containers: typing.Optional[int] = None,
428
436
  _experimental_proxy_ip: typing.Optional[str] = None,
429
437
  _experimental_custom_scaling_factor: typing.Optional[float] = None,
430
438
  _experimental_enable_gpu_snapshot: bool = False,
439
+ keep_warm: typing.Optional[int] = None,
440
+ concurrency_limit: typing.Optional[int] = None,
441
+ container_idle_timeout: typing.Optional[int] = None,
442
+ _experimental_buffer_containers: typing.Optional[int] = None,
431
443
  ) -> _FunctionDecoratorType: ...
432
444
  @typing_extensions.dataclass_transform(
433
445
  field_specifiers=(modal.cls.parameter,),
@@ -453,13 +465,14 @@ class App:
453
465
  cpu: typing.Union[float, tuple[float, float], None] = None,
454
466
  memory: typing.Union[int, tuple[int, int], None] = None,
455
467
  ephemeral_disk: typing.Optional[int] = None,
468
+ min_containers: typing.Optional[int] = None,
469
+ max_containers: typing.Optional[int] = None,
470
+ buffer_containers: typing.Optional[int] = None,
471
+ scaledown_window: typing.Optional[int] = None,
456
472
  proxy: typing.Optional[modal.proxy.Proxy] = None,
457
473
  retries: typing.Union[int, modal.retries.Retries, None] = None,
458
- concurrency_limit: typing.Optional[int] = None,
459
474
  allow_concurrent_inputs: typing.Optional[int] = None,
460
- container_idle_timeout: typing.Optional[int] = None,
461
475
  timeout: typing.Optional[int] = None,
462
- keep_warm: typing.Optional[int] = None,
463
476
  cloud: typing.Optional[str] = None,
464
477
  region: typing.Union[str, collections.abc.Sequence[str], None] = None,
465
478
  enable_memory_snapshot: bool = False,
@@ -467,10 +480,13 @@ class App:
467
480
  max_inputs: typing.Optional[int] = None,
468
481
  include_source: typing.Optional[bool] = None,
469
482
  _experimental_scheduler_placement: typing.Optional[modal.scheduler_placement.SchedulerPlacement] = None,
470
- _experimental_buffer_containers: typing.Optional[int] = None,
471
483
  _experimental_proxy_ip: typing.Optional[str] = None,
472
484
  _experimental_custom_scaling_factor: typing.Optional[float] = None,
473
485
  _experimental_enable_gpu_snapshot: bool = False,
486
+ keep_warm: typing.Optional[int] = None,
487
+ concurrency_limit: typing.Optional[int] = None,
488
+ container_idle_timeout: typing.Optional[int] = None,
489
+ _experimental_buffer_containers: typing.Optional[int] = None,
474
490
  ) -> collections.abc.Callable[[CLS_T], CLS_T]: ...
475
491
 
476
492
  class __spawn_sandbox_spec(typing_extensions.Protocol[SUPERSELF]):
@@ -82,7 +82,7 @@ def wait_for_port(data: tuple[str, str], q: Queue):
82
82
  timeout=args.get("timeout"),
83
83
  secrets=[Secret.from_dict({"MODAL_LAUNCH_ARGS": json.dumps(args)})],
84
84
  volumes=volumes,
85
- concurrency_limit=1 if volume else None,
85
+ max_containers=1 if volume else None,
86
86
  )
87
87
  def run_vscode(q: Queue):
88
88
  os.chdir("/home/coder")
modal/client.pyi CHANGED
@@ -27,7 +27,7 @@ class _Client:
27
27
  _snapshotted: bool
28
28
 
29
29
  def __init__(
30
- self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.73.74"
30
+ self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.73.76"
31
31
  ): ...
32
32
  def is_closed(self) -> bool: ...
33
33
  @property
@@ -85,7 +85,7 @@ class Client:
85
85
  _snapshotted: bool
86
86
 
87
87
  def __init__(
88
- self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.73.74"
88
+ self, server_url: str, client_type: int, credentials: typing.Optional[tuple[str, str]], version: str = "0.73.76"
89
89
  ): ...
90
90
  def is_closed(self) -> bool: ...
91
91
  @property
modal/cls.py CHANGED
@@ -24,7 +24,7 @@ from ._resources import convert_fn_config_to_resources_config
24
24
  from ._serialization import check_valid_cls_constructor_arg
25
25
  from ._traceback import print_server_warnings
26
26
  from ._utils.async_utils import synchronize_api, synchronizer
27
- from ._utils.deprecation import deprecation_warning, renamed_parameter
27
+ from ._utils.deprecation import deprecation_warning, renamed_parameter, warn_on_renamed_autoscaler_settings
28
28
  from ._utils.grpc_utils import retry_transient_errors
29
29
  from ._utils.mount_utils import validate_volumes
30
30
  from .client import _Client
@@ -559,6 +559,7 @@ class _Cls(_Object, type_prefix="cs"):
559
559
  cls._name = name
560
560
  return cls
561
561
 
562
+ @warn_on_renamed_autoscaler_settings
562
563
  def with_options(
563
564
  self: "_Cls",
564
565
  cpu: Optional[Union[float, tuple[float, float]]] = None,
@@ -567,10 +568,13 @@ class _Cls(_Object, type_prefix="cs"):
567
568
  secrets: Collection[_Secret] = (),
568
569
  volumes: dict[Union[str, os.PathLike], _Volume] = {},
569
570
  retries: Optional[Union[int, Retries]] = None,
571
+ max_containers: Optional[int] = None, # Limit on the number of containers that can be concurrently running.
572
+ scaledown_window: Optional[int] = None, # Max amount of time a container can remain idle before scaling down.
570
573
  timeout: Optional[int] = None,
571
- concurrency_limit: Optional[int] = None,
572
574
  allow_concurrent_inputs: Optional[int] = None,
573
- container_idle_timeout: Optional[int] = None,
575
+ # The following parameters are deprecated
576
+ concurrency_limit: Optional[int] = None, # Now called `max_containers`
577
+ container_idle_timeout: Optional[int] = None, # Now called `scaledown_window`
574
578
  ) -> "_Cls":
575
579
  """
576
580
  **Beta:** Allows for the runtime modification of a modal.Cls's configuration.
@@ -607,9 +611,10 @@ class _Cls(_Object, type_prefix="cs"):
607
611
  secret_ids=[secret.object_id for secret in secrets],
608
612
  resources=resources,
609
613
  retry_policy=retry_policy,
610
- concurrency_limit=concurrency_limit,
614
+ # TODO(michael) Update the protos to use the new terminology
615
+ concurrency_limit=max_containers,
616
+ task_idle_timeout_secs=scaledown_window,
611
617
  timeout_secs=timeout,
612
- task_idle_timeout_secs=container_idle_timeout,
613
618
  replace_volume_mounts=replace_volume_mounts,
614
619
  volume_mounts=volume_mounts,
615
620
  target_concurrent_inputs=allow_concurrent_inputs,
modal/cls.pyi CHANGED
@@ -132,9 +132,11 @@ class _Cls(modal._object._Object):
132
132
  secrets: collections.abc.Collection[modal.secret._Secret] = (),
133
133
  volumes: dict[typing.Union[str, os.PathLike], modal.volume._Volume] = {},
134
134
  retries: typing.Union[int, modal.retries.Retries, None] = None,
135
+ max_containers: typing.Optional[int] = None,
136
+ scaledown_window: typing.Optional[int] = None,
135
137
  timeout: typing.Optional[int] = None,
136
- concurrency_limit: typing.Optional[int] = None,
137
138
  allow_concurrent_inputs: typing.Optional[int] = None,
139
+ concurrency_limit: typing.Optional[int] = None,
138
140
  container_idle_timeout: typing.Optional[int] = None,
139
141
  ) -> _Cls: ...
140
142
  @staticmethod
@@ -191,9 +193,11 @@ class Cls(modal.object.Object):
191
193
  secrets: collections.abc.Collection[modal.secret.Secret] = (),
192
194
  volumes: dict[typing.Union[str, os.PathLike], modal.volume.Volume] = {},
193
195
  retries: typing.Union[int, modal.retries.Retries, None] = None,
196
+ max_containers: typing.Optional[int] = None,
197
+ scaledown_window: typing.Optional[int] = None,
194
198
  timeout: typing.Optional[int] = None,
195
- concurrency_limit: typing.Optional[int] = None,
196
199
  allow_concurrent_inputs: typing.Optional[int] = None,
200
+ concurrency_limit: typing.Optional[int] = None,
197
201
  container_idle_timeout: typing.Optional[int] = None,
198
202
  ) -> Cls: ...
199
203
 
modal/config.py CHANGED
@@ -60,6 +60,13 @@ Other possible configuration options are:
60
60
  When set, ignores the Image cache and builds all Image layers. Note that this
61
61
  will break the cache for all images based on the rebuilt layers, so other images
62
62
  may rebuild on subsequent runs / deploys even if the config is reverted.
63
+ * `ignore_cache` (in the .toml file) / `MODAL_IGNORE_CACHE` (as an env var).
64
+ Defaults to False.
65
+ When set, ignores the Image cache and builds all Image layers. Unlike `force_build`,
66
+ this will not overwrite the cache for other images that have the same recipe.
67
+ Subsequent runs that do not use this option will pull the *previous* Image from
68
+ the cache, if one exists. It can be useful for testing an App's robustness to
69
+ Image rebuilds without clobbering Images used by other Apps.
63
70
  * `traceback` (in the .toml file) / `MODAL_TRACEBACK` (as an env var).
64
71
  Defaults to False. Enables printing full tracebacks on unexpected CLI
65
72
  errors, which can be useful for debugging client issues.
@@ -218,6 +225,7 @@ _SETTINGS = {
218
225
  "worker_id": _Setting(), # For internal debugging use.
219
226
  "restore_state_path": _Setting("/__modal/restore-state.json"),
220
227
  "force_build": _Setting(False, transform=_to_boolean),
228
+ "ignore_cache": _Setting(False, transform=_to_boolean),
221
229
  "traceback": _Setting(False, transform=_to_boolean),
222
230
  "image_builder_version": _Setting(),
223
231
  "strict_parameters": _Setting(False, transform=_to_boolean), # For internal/experimental use
modal/functions.pyi CHANGED
@@ -73,17 +73,18 @@ class Function(
73
73
  typing.Union[modal.volume.Volume, modal.cloud_bucket_mount.CloudBucketMount],
74
74
  ] = {},
75
75
  webhook_config: typing.Optional[modal_proto.api_pb2.WebhookConfig] = None,
76
+ cpu: typing.Union[float, tuple[float, float], None] = None,
76
77
  memory: typing.Union[int, tuple[int, int], None] = None,
77
78
  proxy: typing.Optional[modal.proxy.Proxy] = None,
78
79
  retries: typing.Union[int, modal.retries.Retries, None] = None,
79
80
  timeout: typing.Optional[int] = None,
80
- concurrency_limit: typing.Optional[int] = None,
81
+ min_containers: typing.Optional[int] = None,
82
+ max_containers: typing.Optional[int] = None,
83
+ buffer_containers: typing.Optional[int] = None,
84
+ scaledown_window: typing.Optional[int] = None,
81
85
  allow_concurrent_inputs: typing.Optional[int] = None,
82
86
  batch_max_size: typing.Optional[int] = None,
83
87
  batch_wait_ms: typing.Optional[int] = None,
84
- container_idle_timeout: typing.Optional[int] = None,
85
- cpu: typing.Union[float, tuple[float, float], None] = None,
86
- keep_warm: typing.Optional[int] = None,
87
88
  cloud: typing.Optional[str] = None,
88
89
  scheduler_placement: typing.Optional[modal.scheduler_placement.SchedulerPlacement] = None,
89
90
  is_builder_function: bool = False,
@@ -95,7 +96,6 @@ class Function(
95
96
  max_inputs: typing.Optional[int] = None,
96
97
  ephemeral_disk: typing.Optional[int] = None,
97
98
  include_source: typing.Optional[bool] = None,
98
- _experimental_buffer_containers: typing.Optional[int] = None,
99
99
  _experimental_proxy_ip: typing.Optional[str] = None,
100
100
  _experimental_custom_scaling_factor: typing.Optional[float] = None,
101
101
  _experimental_enable_gpu_snapshot: bool = False,
modal/image.py CHANGED
@@ -622,6 +622,7 @@ class _Image(_Object, type_prefix="im"):
622
622
  # Failsafe mechanism to prevent inadvertant updates to the global images.
623
623
  # Only admins can publish to the global namespace, but they have to additionally request it.
624
624
  allow_global_deployment=os.environ.get("MODAL_IMAGE_ALLOW_GLOBAL_DEPLOYMENT", "0") == "1",
625
+ ignore_cache=config.get("ignore_cache"),
625
626
  )
626
627
  resp = await retry_transient_errors(resolver.client.stub.ImageGetOrCreate, req)
627
628
  image_id = resp.image_id
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: modal
3
- Version: 0.73.74
3
+ Version: 0.73.76
4
4
  Summary: Python client library for Modal
5
5
  Author-email: Modal Labs <support@modal.com>
6
6
  License: Apache-2.0
@@ -3,7 +3,7 @@ modal/__main__.py,sha256=CgIjP8m1xJjjd4AXc-delmR6LdBCZclw2A_V38CFIio,2870
3
3
  modal/_clustered_functions.py,sha256=kTf-9YBXY88NutC1akI-gCbvf01RhMPCw-zoOI_YIUE,2700
4
4
  modal/_clustered_functions.pyi,sha256=vllkegc99A0jrUOWa8mdlSbdp6uz36TsHhGxysAOpaQ,771
5
5
  modal/_container_entrypoint.py,sha256=arhkIoF8nQNfa4iwYGSoqN3QMDg5M38QNAODXC8TlKc,29301
6
- modal/_functions.py,sha256=34kHxFM4k1oSb0OV2Op6uDuiT-jEBG8cbo3NfqhAgew,71417
6
+ modal/_functions.py,sha256=LkzRfcex420bHWN2s5Aqbtm9cg7PaAcuG-H2jNz1JfE,71459
7
7
  modal/_ipython.py,sha256=TW1fkVOmZL3YYqdS2YlM1hqpf654Yf8ZyybHdBnlhSw,301
8
8
  modal/_location.py,sha256=joiX-0ZeutEUDTrrqLF1GHXCdVLF-rHzstocbMcd_-k,366
9
9
  modal/_object.py,sha256=ItQcsMNkz9Y3kdTsvfNarbW-paJ2qabDyQ7njaqY0XI,11359
@@ -18,16 +18,16 @@ modal/_traceback.py,sha256=IZQzB3fVlUfMHOSyKUgw0H6qv4yHnpyq-XVCNZKfUdA,5023
18
18
  modal/_tunnel.py,sha256=zTBxBiuH1O22tS1OliAJdIsSmaZS8PlnifS_6S5z-mk,6320
19
19
  modal/_tunnel.pyi,sha256=JmmDYAy9F1FpgJ_hWx0xkom2nTOFQjn4mTPYlU3PFo4,1245
20
20
  modal/_watcher.py,sha256=K6LYnlmSGQB4tWWI9JADv-tvSvQ1j522FwT71B51CX8,3584
21
- modal/app.py,sha256=o5mHoHtn41nkvskX_ekJkyfG6MXwj5rqerRi_nnPd0w,44725
22
- modal/app.pyi,sha256=0MMCgskIL4r3eq8oBcfm2lLyeao2gXjS3iXaIfmaJ-o,25959
21
+ modal/app.py,sha256=kF3frIt4eRKVYYCjusMMhKJpO_lDdm2z37HOXPwpjT8,45506
22
+ modal/app.pyi,sha256=tZFbcsu20SuvfB2puxCyuXLFNJ9bQulzag55rVpgZmc,26827
23
23
  modal/call_graph.py,sha256=1g2DGcMIJvRy-xKicuf63IVE98gJSnQsr8R_NVMptNc,2581
24
24
  modal/client.py,sha256=8SQawr7P1PNUCq1UmJMUQXG2jIo4Nmdcs311XqrNLRE,15276
25
- modal/client.pyi,sha256=PwDnBFBHB369Q6eY1uz7_LblIn1IN9DqaTm0pAhpEGU,7593
25
+ modal/client.pyi,sha256=BY_85JYs5yMmzZbJ4XMfajA48crezjBUTGtZ8RUi-gc,7593
26
26
  modal/cloud_bucket_mount.py,sha256=YOe9nnvSr4ZbeCn587d7_VhE9IioZYRvF9VYQTQux08,5914
27
27
  modal/cloud_bucket_mount.pyi,sha256=30T3K1a89l6wzmEJ_J9iWv9SknoGqaZDx59Xs-ZQcmk,1607
28
- modal/cls.py,sha256=nx3mFniMw_b0tKqmY5smbi39xSpwmu_Mnb6_h1OFIBM,30208
29
- modal/cls.pyi,sha256=4Ms1i4Wty1qe49Dh_wsGhJDCiJz7t-XGqXLcpzwhUqs,9084
30
- modal/config.py,sha256=E1bCdwNziBorqXal6Cble7gfLNzGqSuEgprRJwzYqg4,11160
28
+ modal/cls.py,sha256=pQqLU_vlgHD2aglDQAoVuzt0BS3pUkRfC7oHG_fsbV4,30700
29
+ modal/cls.pyi,sha256=MAx5J72UhD5l2qbH_sTpZDP34PlI4P71OBLSWOqvE68,9300
30
+ modal/config.py,sha256=Boz1bPzaG-k5Grjq6y6fAELH1N_gTuYDnpB6FODzCPo,11710
31
31
  modal/container_process.py,sha256=WTqLn01dJPVkPpwR_0w_JH96ceN5mV4TGtiu1ZR2RRA,6108
32
32
  modal/container_process.pyi,sha256=Hf0J5JyDdCCXBJSKx6gvkPOo0XrztCm78xzxamtzUjQ,2828
33
33
  modal/dict.py,sha256=vc5lQVqzeDUCb4fRjnOlqYK2GmBb0fIhZmvB0xIBG0U,12921
@@ -41,9 +41,9 @@ modal/file_io.py,sha256=lcMs_E9Xfm0YX1t9U2wNIBPnqHRxmImqjLW1GHqVmyg,20945
41
41
  modal/file_io.pyi,sha256=NTRft1tbPSWf9TlWVeZmTlgB5AZ_Zhu2srWIrWr7brk,9445
42
42
  modal/file_pattern_matcher.py,sha256=trosX-Bp7dOubudN1bLLhRAoidWy1TcoaR4Pv8CedWw,6497
43
43
  modal/functions.py,sha256=kcNHvqeGBxPI7Cgd57NIBBghkfbeFJzXO44WW0jSmao,325
44
- modal/functions.pyi,sha256=2z5Vt2U3KfmTWyC55hPVEgc4i83BiqAK0faJgumpe04,14405
44
+ modal/functions.pyi,sha256=7m3qSR7dKUPcSZycZ4Y1VaySjdBZI0_MmR8pRrbHzxE,14387
45
45
  modal/gpu.py,sha256=Kbhs_u49FaC2Zi0TjCdrpstpRtT5eZgecynmQi5IZVE,6752
46
- modal/image.py,sha256=Bs1ND2WkLr9CBtj0heO7e3w9uGCnijKU8owiQSRQXv0,90200
46
+ modal/image.py,sha256=adMUpS7WrCu-M78BWslz2r6GPviy4qPvd5Dh-dBIrrk,90257
47
47
  modal/image.pyi,sha256=L7aZUOElSGtNHmFHz1RgKP1cG5paiXt_EzylrwBwzVk,25004
48
48
  modal/io_streams.py,sha256=QkQiizKRzd5bnbKQsap31LJgBYlAnj4-XkV_50xPYX0,15079
49
49
  modal/io_streams.pyi,sha256=bJ7ZLmSmJ0nKoa6r4FJpbqvzdUVa0lEe0Fa-MMpMezU,5071
@@ -96,7 +96,7 @@ modal/_utils/app_utils.py,sha256=88BT4TPLWfYAQwKTHcyzNQRHg8n9B-QE2UyJs96iV-0,108
96
96
  modal/_utils/async_utils.py,sha256=5PdDuI1aSwPOI4a3dIvW0DkPqGw6KZN6RtWE18Dzv1E,25079
97
97
  modal/_utils/blob_utils.py,sha256=RB1G6T7eC1Poe-O45qYLaxwCr2jkM-Q6Nexk1J3wk_w,14505
98
98
  modal/_utils/bytes_io_segment_payload.py,sha256=uunxVJS4PE1LojF_UpURMzVK9GuvmYWRqQo_bxEj5TU,3385
99
- modal/_utils/deprecation.py,sha256=dycySRBxyZf3ITzEqPNM6MxXTk9-0VVLA8oCPQ5j_Os,3426
99
+ modal/_utils/deprecation.py,sha256=rgCGTrk-u_uaDXNDTAW9FM8GP8N3ErlDfr2wXhKYLVw,4870
100
100
  modal/_utils/docker_utils.py,sha256=h1uETghR40mp_y3fSWuZAfbIASH1HMzuphJHghAL6DU,3722
101
101
  modal/_utils/function_utils.py,sha256=Rmz8GJDie-RW_q2RcTwholEWixS2IQDPBsRBJ3f3ZvU,27302
102
102
  modal/_utils/grpc_testing.py,sha256=H1zHqthv19eGPJz2HKXDyWXWGSqO4BRsxah3L5Xaa8A,8619
@@ -135,7 +135,7 @@ modal/cli/utils.py,sha256=hZmjyzcPjDnQSkLvycZD2LhGdcsfdZshs_rOU78EpvI,3717
135
135
  modal/cli/volume.py,sha256=c2IuVNO2yJVaXmZkRh3xwQmznlRTgFoJr_BIzzqtVv0,10251
136
136
  modal/cli/programs/__init__.py,sha256=svYKtV8HDwDCN86zbdWqyq5T8sMdGDj0PVlzc2tIxDM,28
137
137
  modal/cli/programs/run_jupyter.py,sha256=MX6YQ6zRyRk1xo8tYZFiGam0p5KETwax81L6TpaS9I0,2778
138
- modal/cli/programs/vscode.py,sha256=Q0FA0PBrGf8SVlokenFhcGg-Viaj1oKSuhHV7m35CwQ,3476
138
+ modal/cli/programs/vscode.py,sha256=kfvhZQ4bJwtVm3MgC1V7AlygZOlKT1a33alr_uwrewA,3473
139
139
  modal/extensions/__init__.py,sha256=waLjl5c6IPDhSsdWAm9Bji4e2PVxamYABKAze6CHVXY,28
140
140
  modal/extensions/ipython.py,sha256=Xvzy-A7cvwMSDa9p4c4CEMLOX2_Xsg9DkM1J9uyu7jc,983
141
141
  modal/requirements/2023.12.312.txt,sha256=zWWUVgVQ92GXBKNYYr2-5vn9rlnXcmkqlwlX5u1eTYw,400
@@ -168,10 +168,10 @@ modal_proto/options_pb2_grpc.pyi,sha256=CImmhxHsYnF09iENPoe8S4J-n93jtgUYD2JPAc0y
168
168
  modal_proto/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
169
169
  modal_version/__init__.py,sha256=wiJQ53c-OMs0Xf1UeXOxQ7FwlV1VzIjnX6o-pRYZ_Pk,470
170
170
  modal_version/__main__.py,sha256=2FO0yYQQwDTh6udt1h-cBnGd1c4ZyHnHSI4BksxzVac,105
171
- modal_version/_version_generated.py,sha256=EHhRz20OtH8bXhxnn9hwOhBRPymrrkxTY3fwiBGngnk,149
172
- modal-0.73.74.dist-info/LICENSE,sha256=psuoW8kuDP96RQsdhzwOqi6fyWv0ct8CR6Jr7He_P_k,10173
173
- modal-0.73.74.dist-info/METADATA,sha256=Wvg_qdimqEI33t3EKm1VaW419bcX8Fwkith8J7nnCnA,2452
174
- modal-0.73.74.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
175
- modal-0.73.74.dist-info/entry_points.txt,sha256=An-wYgeEUnm6xzrAP9_NTSTSciYvvEWsMZILtYrvpAI,46
176
- modal-0.73.74.dist-info/top_level.txt,sha256=4BWzoKYREKUZ5iyPzZpjqx4G8uB5TWxXPDwibLcVa7k,43
177
- modal-0.73.74.dist-info/RECORD,,
171
+ modal_version/_version_generated.py,sha256=jmVn-W1r2Lr0q91iDTY2ZmvlYDwd2coi9KKtw_i03Jk,149
172
+ modal-0.73.76.dist-info/LICENSE,sha256=psuoW8kuDP96RQsdhzwOqi6fyWv0ct8CR6Jr7He_P_k,10173
173
+ modal-0.73.76.dist-info/METADATA,sha256=XZV1Q5CvoLdg2NmRCtLXu5CfAd8SISK_6cRFO_bySec,2452
174
+ modal-0.73.76.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
175
+ modal-0.73.76.dist-info/entry_points.txt,sha256=An-wYgeEUnm6xzrAP9_NTSTSciYvvEWsMZILtYrvpAI,46
176
+ modal-0.73.76.dist-info/top_level.txt,sha256=4BWzoKYREKUZ5iyPzZpjqx4G8uB5TWxXPDwibLcVa7k,43
177
+ modal-0.73.76.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  # Copyright Modal Labs 2025
2
2
 
3
3
  # Note: Reset this value to -1 whenever you make a minor `0.X` release of the client.
4
- build_number = 74 # git: bdcac62
4
+ build_number = 76 # git: d55968d