modal 1.0.5.dev1__py3-none-any.whl → 1.0.5.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3,12 +3,22 @@ import typing
3
3
  import typing_extensions
4
4
 
5
5
  class ClusterInfo:
6
+ """ClusterInfo(rank: int, container_ips: list[str])"""
7
+
6
8
  rank: int
7
9
  container_ips: list[str]
8
10
 
9
- def __init__(self, rank: int, container_ips: list[str]) -> None: ...
10
- def __repr__(self): ...
11
- def __eq__(self, other): ...
11
+ def __init__(self, rank: int, container_ips: list[str]) -> None:
12
+ """Initialize self. See help(type(self)) for accurate signature."""
13
+ ...
14
+
15
+ def __repr__(self):
16
+ """Return repr(self)."""
17
+ ...
18
+
19
+ def __eq__(self, other):
20
+ """Return self==value."""
21
+ ...
12
22
 
13
23
  def get_cluster_info() -> ClusterInfo: ...
14
24
  async def _initialize_clustered_function(client: modal.client._Client, task_id: str, world_size: int): ...
modal/_functions.py CHANGED
@@ -15,7 +15,6 @@ import typing_extensions
15
15
  from google.protobuf.message import Message
16
16
  from grpclib import GRPCError, Status
17
17
  from synchronicity.combined_types import MethodWithAio
18
- from synchronicity.exceptions import UserCodeException
19
18
 
20
19
  from modal_proto import api_pb2
21
20
  from modal_proto.modal_api_grpc import ModalClientModal
@@ -63,8 +62,6 @@ from .cloud_bucket_mount import _CloudBucketMount, cloud_bucket_mounts_to_proto
63
62
  from .config import config
64
63
  from .exception import (
65
64
  ExecutionError,
66
- FunctionTimeoutError,
67
- InternalFailure,
68
65
  InvalidError,
69
66
  NotFoundError,
70
67
  OutputExpiredError,
@@ -257,7 +254,7 @@ class _Invocation:
257
254
  request,
258
255
  )
259
256
 
260
- async def _get_single_output(self, expected_jwt: Optional[str] = None) -> Any:
257
+ async def _get_single_output(self, expected_jwt: Optional[str] = None) -> api_pb2.FunctionGetOutputsItem:
261
258
  # waits indefinitely for a single result for the function, and clear the outputs buffer after
262
259
  item: api_pb2.FunctionGetOutputsItem = (
263
260
  await self.pop_function_call_outputs(
@@ -266,7 +263,7 @@ class _Invocation:
266
263
  input_jwts=[expected_jwt] if expected_jwt else None,
267
264
  )
268
265
  ).outputs[0]
269
- return await _process_result(item.result, item.data_format, self.stub, self.client)
266
+ return item
270
267
 
271
268
  async def run_function(self) -> Any:
272
269
  # Use retry logic only if retry policy is specified and
@@ -278,23 +275,30 @@ class _Invocation:
278
275
  or ctx.function_call_invocation_type != api_pb2.FUNCTION_CALL_INVOCATION_TYPE_SYNC
279
276
  or not ctx.sync_client_retries_enabled
280
277
  ):
281
- return await self._get_single_output()
278
+ item = await self._get_single_output()
279
+ return await _process_result(item.result, item.data_format, self.stub, self.client)
282
280
 
283
281
  # User errors including timeouts are managed by the user specified retry policy.
284
282
  user_retry_manager = RetryManager(ctx.retry_policy)
285
283
 
286
284
  while True:
287
- try:
288
- return await self._get_single_output(ctx.input_jwt)
289
- except (UserCodeException, FunctionTimeoutError) as exc:
285
+ item = await self._get_single_output(ctx.input_jwt)
286
+ if item.result.status in (
287
+ api_pb2.GenericResult.GENERIC_STATUS_SUCCESS,
288
+ api_pb2.GenericResult.GENERIC_STATUS_TERMINATED,
289
+ ):
290
+ # success or cancellations are "final" results
291
+ return await _process_result(item.result, item.data_format, self.stub, self.client)
292
+
293
+ if item.result.status != api_pb2.GenericResult.GENERIC_STATUS_INTERNAL_FAILURE:
294
+ # non-internal failures get a delay before retrying
290
295
  delay_ms = user_retry_manager.get_delay_ms()
291
296
  if delay_ms is None:
292
- raise exc
297
+ # no more retries, this should raise an error when the non-success status is converted
298
+ # to an exception:
299
+ return await _process_result(item.result, item.data_format, self.stub, self.client)
293
300
  await asyncio.sleep(delay_ms / 1000)
294
- except InternalFailure:
295
- # For system failures on the server, we retry immediately,
296
- # and the failure does not count towards the retry policy.
297
- pass
301
+
298
302
  await self._retry_input()
299
303
 
300
304
  async def poll_function(self, timeout: Optional[float] = None):
@@ -399,27 +403,27 @@ class _InputPlaneInvocation:
399
403
  attempt_timeout=OUTPUTS_TIMEOUT + ATTEMPT_TIMEOUT_GRACE_PERIOD,
400
404
  )
401
405
 
402
- try:
403
- if await_response.HasField("output"):
404
- return await _process_result(
405
- await_response.output.result, await_response.output.data_format, self.stub, self.client
406
- )
407
- except InternalFailure as e:
408
- internal_failure_count += 1
409
- # Limit the number of times we retry
410
- if internal_failure_count >= MAX_INTERNAL_FAILURE_COUNT:
411
- raise e
412
- # For system failures on the server, we retry immediately,
413
- # and the failure does not count towards the retry policy.
414
- retry_request = api_pb2.AttemptRetryRequest(
415
- function_id=self.function_id,
416
- parent_input_id=current_input_id() or "",
417
- input=self.input_item,
418
- attempt_token=self.attempt_token,
406
+ if await_response.HasField("output"):
407
+ if await_response.output.result.status == api_pb2.GenericResult.GENERIC_STATUS_INTERNAL_FAILURE:
408
+ internal_failure_count += 1
409
+ # Limit the number of times we retry
410
+ if internal_failure_count < MAX_INTERNAL_FAILURE_COUNT:
411
+ # For system failures on the server, we retry immediately,
412
+ # and the failure does not count towards the retry policy.
413
+ retry_request = api_pb2.AttemptRetryRequest(
414
+ function_id=self.function_id,
415
+ parent_input_id=current_input_id() or "",
416
+ input=self.input_item,
417
+ attempt_token=self.attempt_token,
418
+ )
419
+ # TODO(ryan): Add exponential backoff?
420
+ retry_response = await retry_transient_errors(self.stub.AttemptRetry, retry_request)
421
+ self.attempt_token = retry_response.attempt_token
422
+ continue
423
+
424
+ return await _process_result(
425
+ await_response.output.result, await_response.output.data_format, self.stub, self.client
419
426
  )
420
- # TODO(ryan): Add exponential backoff?
421
- retry_response = await retry_transient_errors(self.stub.AttemptRetry, retry_request)
422
- self.attempt_token = retry_response.attempt_token
423
427
 
424
428
 
425
429
  # Wrapper type for api_pb2.FunctionStats
@@ -1438,7 +1442,11 @@ Use the `Function.get_web_url()` method instead.
1438
1442
 
1439
1443
  @live_method_gen
1440
1444
  async def _map(
1441
- self, input_queue: _SynchronizedQueue, order_outputs: bool, return_exceptions: bool
1445
+ self,
1446
+ input_queue: _SynchronizedQueue,
1447
+ order_outputs: bool,
1448
+ return_exceptions: bool,
1449
+ wrap_returned_exceptions: bool,
1442
1450
  ) -> AsyncGenerator[Any, None]:
1443
1451
  """mdmd:hidden
1444
1452
 
@@ -1466,6 +1474,7 @@ Use the `Function.get_web_url()` method instead.
1466
1474
  self.client,
1467
1475
  order_outputs,
1468
1476
  return_exceptions,
1477
+ wrap_returned_exceptions,
1469
1478
  count_update_callback,
1470
1479
  api_pb2.FUNCTION_CALL_INVOCATION_TYPE_SYNC,
1471
1480
  )
@@ -9,10 +9,21 @@ import synchronicity.combined_types
9
9
  import typing
10
10
  import typing_extensions
11
11
 
12
- class UserException(Exception): ...
13
- class Sentinel: ...
12
+ class UserException(Exception):
13
+ """Used to shut down the task gracefully."""
14
+
15
+ ...
16
+
17
+ class Sentinel:
18
+ """Used to get type-stubs to work with this object."""
19
+
20
+ ...
14
21
 
15
22
  class IOContext:
23
+ """Context object for managing input, function calls, and function executions
24
+ in a batched or single input context.
25
+ """
26
+
16
27
  input_ids: list[str]
17
28
  retry_counts: list[int]
18
29
  function_call_ids: list[str]
@@ -30,7 +41,10 @@ class IOContext:
30
41
  function_inputs: list[modal_proto.api_pb2.FunctionInput],
31
42
  is_batched: bool,
32
43
  client: modal.client._Client,
33
- ): ...
44
+ ):
45
+ """Initialize self. See help(type(self)) for accurate signature."""
46
+ ...
47
+
34
48
  @classmethod
35
49
  async def create(
36
50
  cls,
@@ -46,12 +60,17 @@ class IOContext:
46
60
  def validate_output_data(self, data: typing.Any) -> list[typing.Any]: ...
47
61
 
48
62
  class InputSlots:
63
+ """A semaphore that allows dynamically adjusting the concurrency."""
64
+
49
65
  active: int
50
66
  value: int
51
67
  waiter: typing.Optional[asyncio.Future]
52
68
  closed: bool
53
69
 
54
- def __init__(self, value: int) -> None: ...
70
+ def __init__(self, value: int) -> None:
71
+ """Initialize self. See help(type(self)) for accurate signature."""
72
+ ...
73
+
55
74
  async def acquire(self) -> None: ...
56
75
  def _wake_waiter(self) -> None: ...
57
76
  def release(self) -> None: ...
@@ -59,6 +78,12 @@ class InputSlots:
59
78
  async def close(self) -> None: ...
60
79
 
61
80
  class _ContainerIOManager:
81
+ """Synchronizes all RPC calls and network operations for a running container.
82
+
83
+ TODO: maybe we shouldn't synchronize the whole class.
84
+ Then we could potentially move a bunch of the global functions onto it.
85
+ """
86
+
62
87
  task_id: str
63
88
  function_id: str
64
89
  app_id: str
@@ -90,9 +115,15 @@ class _ContainerIOManager:
90
115
  @staticmethod
91
116
  def __new__(
92
117
  cls, container_args: modal_proto.api_pb2.ContainerArguments, client: modal.client._Client
93
- ) -> _ContainerIOManager: ...
118
+ ) -> _ContainerIOManager:
119
+ """Create and return a new object. See help(type) for accurate signature."""
120
+ ...
121
+
94
122
  @classmethod
95
- def _reset_singleton(cls): ...
123
+ def _reset_singleton(cls):
124
+ """Only used for tests."""
125
+ ...
126
+
96
127
  async def hello(self): ...
97
128
  async def _run_heartbeat_loop(self): ...
98
129
  async def _heartbeat_handle_cancellations(self) -> bool: ...
@@ -102,15 +133,35 @@ class _ContainerIOManager:
102
133
  async def _dynamic_concurrency_loop(self): ...
103
134
  def serialize_data_format(self, obj: typing.Any, data_format: int) -> bytes: ...
104
135
  async def format_blob_data(self, data: bytes) -> dict[str, typing.Any]: ...
105
- def get_data_in(self, function_call_id: str) -> collections.abc.AsyncIterator[typing.Any]: ...
136
+ def get_data_in(self, function_call_id: str) -> collections.abc.AsyncIterator[typing.Any]:
137
+ """Read from the `data_in` stream of a function call."""
138
+ ...
139
+
106
140
  async def put_data_out(
107
141
  self, function_call_id: str, start_index: int, data_format: int, serialized_messages: list[typing.Any]
108
- ) -> None: ...
142
+ ) -> None:
143
+ """Put data onto the `data_out` stream of a function call.
144
+
145
+ This is used for generator outputs, which includes web endpoint responses. Note that this
146
+ was introduced as a performance optimization in client version 0.57, so older clients will
147
+ still use the previous Postgres-backed system based on `FunctionPutOutputs()`.
148
+ """
149
+ ...
150
+
109
151
  async def generator_output_task(
110
152
  self, function_call_id: str, data_format: int, message_rx: asyncio.queues.Queue
111
- ) -> None: ...
112
- async def _queue_create(self, size: int) -> asyncio.queues.Queue: ...
113
- async def _queue_put(self, queue: asyncio.queues.Queue, value: typing.Any) -> None: ...
153
+ ) -> None:
154
+ """Task that feeds generator outputs into a function call's `data_out` stream."""
155
+ ...
156
+
157
+ async def _queue_create(self, size: int) -> asyncio.queues.Queue:
158
+ """Create a queue, on the synchronicity event loop (needed on Python 3.8 and 3.9)."""
159
+ ...
160
+
161
+ async def _queue_put(self, queue: asyncio.queues.Queue, value: typing.Any) -> None:
162
+ """Put a value onto a queue, using the synchronicity event loop."""
163
+ ...
164
+
114
165
  def get_average_call_time(self) -> float: ...
115
166
  def get_max_inputs_to_fetch(self): ...
116
167
  def _generate_inputs(
@@ -131,15 +182,33 @@ class _ContainerIOManager:
131
182
  ) -> None: ...
132
183
  def serialize_exception(self, exc: BaseException) -> bytes: ...
133
184
  def serialize_traceback(self, exc: BaseException) -> tuple[typing.Optional[bytes], typing.Optional[bytes]]: ...
134
- def handle_user_exception(self) -> typing.AsyncContextManager[None]: ...
135
- def handle_input_exception(self, io_context: IOContext, started_at: float) -> typing.AsyncContextManager[None]: ...
185
+ def handle_user_exception(self) -> typing.AsyncContextManager[None]:
186
+ """Sets the task as failed in a way where it's not retried.
187
+
188
+ Used for handling exceptions from container lifecycle methods at the moment, which should
189
+ trigger a task failure state.
190
+ """
191
+ ...
192
+
193
+ def handle_input_exception(self, io_context: IOContext, started_at: float) -> typing.AsyncContextManager[None]:
194
+ """Handle an exception while processing a function input."""
195
+ ...
196
+
136
197
  def exit_context(self, started_at, input_ids: list[str]): ...
137
198
  async def push_outputs(
138
199
  self, io_context: IOContext, started_at: float, data: typing.Any, data_format: int
139
200
  ) -> None: ...
140
201
  async def memory_restore(self) -> None: ...
141
- async def memory_snapshot(self) -> None: ...
142
- async def volume_commit(self, volume_ids: list[str]) -> None: ...
202
+ async def memory_snapshot(self) -> None:
203
+ """Message server indicating that function is ready to be checkpointed."""
204
+ ...
205
+
206
+ async def volume_commit(self, volume_ids: list[str]) -> None:
207
+ """Perform volume commit for given `volume_ids`.
208
+ Only used on container exit to persist uncommitted changes on behalf of user.
209
+ """
210
+ ...
211
+
143
212
  async def interact(self, from_breakpoint: bool = False): ...
144
213
  @property
145
214
  def target_concurrency(self) -> int: ...
@@ -148,15 +217,35 @@ class _ContainerIOManager:
148
217
  @property
149
218
  def input_concurrency_enabled(self) -> int: ...
150
219
  @classmethod
151
- def get_input_concurrency(cls) -> int: ...
220
+ def get_input_concurrency(cls) -> int:
221
+ """Returns the number of usable input slots.
222
+
223
+ If concurrency is reduced, active slots can exceed allotted slots. Returns the larger value
224
+ in this case.
225
+ """
226
+ ...
227
+
152
228
  @classmethod
153
- def set_input_concurrency(cls, concurrency: int): ...
229
+ def set_input_concurrency(cls, concurrency: int):
230
+ """Edit the number of input slots.
231
+
232
+ This disables the background loop which automatically adjusts concurrency
233
+ within [target_concurrency, max_concurrency].
234
+ """
235
+ ...
236
+
154
237
  @classmethod
155
238
  def stop_fetching_inputs(cls): ...
156
239
 
157
240
  SUPERSELF = typing.TypeVar("SUPERSELF", covariant=True)
158
241
 
159
242
  class ContainerIOManager:
243
+ """Synchronizes all RPC calls and network operations for a running container.
244
+
245
+ TODO: maybe we shouldn't synchronize the whole class.
246
+ Then we could potentially move a bunch of the global functions onto it.
247
+ """
248
+
160
249
  task_id: str
161
250
  function_id: str
162
251
  app_id: str
@@ -182,12 +271,17 @@ class ContainerIOManager:
182
271
  _GENERATOR_STOP_SENTINEL: typing.ClassVar[Sentinel]
183
272
  _singleton: typing.ClassVar[typing.Optional[ContainerIOManager]]
184
273
 
185
- def __init__(self, /, *args, **kwargs): ...
274
+ def __init__(self, /, *args, **kwargs):
275
+ """Initialize self. See help(type(self)) for accurate signature."""
276
+ ...
277
+
186
278
  def _init(self, container_args: modal_proto.api_pb2.ContainerArguments, client: modal.client.Client): ...
187
279
  @property
188
280
  def heartbeat_condition(self) -> asyncio.locks.Condition: ...
189
281
  @classmethod
190
- def _reset_singleton(cls): ...
282
+ def _reset_singleton(cls):
283
+ """Only used for tests."""
284
+ ...
191
285
 
192
286
  class __hello_spec(typing_extensions.Protocol[SUPERSELF]):
193
287
  def __call__(self, /): ...
@@ -238,36 +332,71 @@ class ContainerIOManager:
238
332
  format_blob_data: __format_blob_data_spec[typing_extensions.Self]
239
333
 
240
334
  class __get_data_in_spec(typing_extensions.Protocol[SUPERSELF]):
241
- def __call__(self, /, function_call_id: str) -> typing.Iterator[typing.Any]: ...
242
- def aio(self, /, function_call_id: str) -> collections.abc.AsyncIterator[typing.Any]: ...
335
+ def __call__(self, /, function_call_id: str) -> typing.Iterator[typing.Any]:
336
+ """Read from the `data_in` stream of a function call."""
337
+ ...
338
+
339
+ def aio(self, /, function_call_id: str) -> collections.abc.AsyncIterator[typing.Any]:
340
+ """Read from the `data_in` stream of a function call."""
341
+ ...
243
342
 
244
343
  get_data_in: __get_data_in_spec[typing_extensions.Self]
245
344
 
246
345
  class __put_data_out_spec(typing_extensions.Protocol[SUPERSELF]):
247
346
  def __call__(
248
347
  self, /, function_call_id: str, start_index: int, data_format: int, serialized_messages: list[typing.Any]
249
- ) -> None: ...
348
+ ) -> None:
349
+ """Put data onto the `data_out` stream of a function call.
350
+
351
+ This is used for generator outputs, which includes web endpoint responses. Note that this
352
+ was introduced as a performance optimization in client version 0.57, so older clients will
353
+ still use the previous Postgres-backed system based on `FunctionPutOutputs()`.
354
+ """
355
+ ...
356
+
250
357
  async def aio(
251
358
  self, /, function_call_id: str, start_index: int, data_format: int, serialized_messages: list[typing.Any]
252
- ) -> None: ...
359
+ ) -> None:
360
+ """Put data onto the `data_out` stream of a function call.
361
+
362
+ This is used for generator outputs, which includes web endpoint responses. Note that this
363
+ was introduced as a performance optimization in client version 0.57, so older clients will
364
+ still use the previous Postgres-backed system based on `FunctionPutOutputs()`.
365
+ """
366
+ ...
253
367
 
254
368
  put_data_out: __put_data_out_spec[typing_extensions.Self]
255
369
 
256
370
  class __generator_output_task_spec(typing_extensions.Protocol[SUPERSELF]):
257
- def __call__(self, /, function_call_id: str, data_format: int, message_rx: asyncio.queues.Queue) -> None: ...
258
- async def aio(self, /, function_call_id: str, data_format: int, message_rx: asyncio.queues.Queue) -> None: ...
371
+ def __call__(self, /, function_call_id: str, data_format: int, message_rx: asyncio.queues.Queue) -> None:
372
+ """Task that feeds generator outputs into a function call's `data_out` stream."""
373
+ ...
374
+
375
+ async def aio(self, /, function_call_id: str, data_format: int, message_rx: asyncio.queues.Queue) -> None:
376
+ """Task that feeds generator outputs into a function call's `data_out` stream."""
377
+ ...
259
378
 
260
379
  generator_output_task: __generator_output_task_spec[typing_extensions.Self]
261
380
 
262
381
  class ___queue_create_spec(typing_extensions.Protocol[SUPERSELF]):
263
- def __call__(self, /, size: int) -> asyncio.queues.Queue: ...
264
- async def aio(self, /, size: int) -> asyncio.queues.Queue: ...
382
+ def __call__(self, /, size: int) -> asyncio.queues.Queue:
383
+ """Create a queue, on the synchronicity event loop (needed on Python 3.8 and 3.9)."""
384
+ ...
385
+
386
+ async def aio(self, /, size: int) -> asyncio.queues.Queue:
387
+ """Create a queue, on the synchronicity event loop (needed on Python 3.8 and 3.9)."""
388
+ ...
265
389
 
266
390
  _queue_create: ___queue_create_spec[typing_extensions.Self]
267
391
 
268
392
  class ___queue_put_spec(typing_extensions.Protocol[SUPERSELF]):
269
- def __call__(self, /, queue: asyncio.queues.Queue, value: typing.Any) -> None: ...
270
- async def aio(self, /, queue: asyncio.queues.Queue, value: typing.Any) -> None: ...
393
+ def __call__(self, /, queue: asyncio.queues.Queue, value: typing.Any) -> None:
394
+ """Put a value onto a queue, using the synchronicity event loop."""
395
+ ...
396
+
397
+ async def aio(self, /, queue: asyncio.queues.Queue, value: typing.Any) -> None:
398
+ """Put a value onto a queue, using the synchronicity event loop."""
399
+ ...
271
400
 
272
401
  _queue_put: ___queue_put_spec[typing_extensions.Self]
273
402
 
@@ -326,16 +455,34 @@ class ContainerIOManager:
326
455
  def serialize_traceback(self, exc: BaseException) -> tuple[typing.Optional[bytes], typing.Optional[bytes]]: ...
327
456
 
328
457
  class __handle_user_exception_spec(typing_extensions.Protocol[SUPERSELF]):
329
- def __call__(self, /) -> synchronicity.combined_types.AsyncAndBlockingContextManager[None]: ...
330
- def aio(self, /) -> typing.AsyncContextManager[None]: ...
458
+ def __call__(self, /) -> synchronicity.combined_types.AsyncAndBlockingContextManager[None]:
459
+ """Sets the task as failed in a way where it's not retried.
460
+
461
+ Used for handling exceptions from container lifecycle methods at the moment, which should
462
+ trigger a task failure state.
463
+ """
464
+ ...
465
+
466
+ def aio(self, /) -> typing.AsyncContextManager[None]:
467
+ """Sets the task as failed in a way where it's not retried.
468
+
469
+ Used for handling exceptions from container lifecycle methods at the moment, which should
470
+ trigger a task failure state.
471
+ """
472
+ ...
331
473
 
332
474
  handle_user_exception: __handle_user_exception_spec[typing_extensions.Self]
333
475
 
334
476
  class __handle_input_exception_spec(typing_extensions.Protocol[SUPERSELF]):
335
477
  def __call__(
336
478
  self, /, io_context: IOContext, started_at: float
337
- ) -> synchronicity.combined_types.AsyncAndBlockingContextManager[None]: ...
338
- def aio(self, /, io_context: IOContext, started_at: float) -> typing.AsyncContextManager[None]: ...
479
+ ) -> synchronicity.combined_types.AsyncAndBlockingContextManager[None]:
480
+ """Handle an exception while processing a function input."""
481
+ ...
482
+
483
+ def aio(self, /, io_context: IOContext, started_at: float) -> typing.AsyncContextManager[None]:
484
+ """Handle an exception while processing a function input."""
485
+ ...
339
486
 
340
487
  handle_input_exception: __handle_input_exception_spec[typing_extensions.Self]
341
488
 
@@ -356,14 +503,28 @@ class ContainerIOManager:
356
503
  memory_restore: __memory_restore_spec[typing_extensions.Self]
357
504
 
358
505
  class __memory_snapshot_spec(typing_extensions.Protocol[SUPERSELF]):
359
- def __call__(self, /) -> None: ...
360
- async def aio(self, /) -> None: ...
506
+ def __call__(self, /) -> None:
507
+ """Message server indicating that function is ready to be checkpointed."""
508
+ ...
509
+
510
+ async def aio(self, /) -> None:
511
+ """Message server indicating that function is ready to be checkpointed."""
512
+ ...
361
513
 
362
514
  memory_snapshot: __memory_snapshot_spec[typing_extensions.Self]
363
515
 
364
516
  class __volume_commit_spec(typing_extensions.Protocol[SUPERSELF]):
365
- def __call__(self, /, volume_ids: list[str]) -> None: ...
366
- async def aio(self, /, volume_ids: list[str]) -> None: ...
517
+ def __call__(self, /, volume_ids: list[str]) -> None:
518
+ """Perform volume commit for given `volume_ids`.
519
+ Only used on container exit to persist uncommitted changes on behalf of user.
520
+ """
521
+ ...
522
+
523
+ async def aio(self, /, volume_ids: list[str]) -> None:
524
+ """Perform volume commit for given `volume_ids`.
525
+ Only used on container exit to persist uncommitted changes on behalf of user.
526
+ """
527
+ ...
367
528
 
368
529
  volume_commit: __volume_commit_spec[typing_extensions.Self]
369
530
 
@@ -380,13 +541,34 @@ class ContainerIOManager:
380
541
  @property
381
542
  def input_concurrency_enabled(self) -> int: ...
382
543
  @classmethod
383
- def get_input_concurrency(cls) -> int: ...
544
+ def get_input_concurrency(cls) -> int:
545
+ """Returns the number of usable input slots.
546
+
547
+ If concurrency is reduced, active slots can exceed allotted slots. Returns the larger value
548
+ in this case.
549
+ """
550
+ ...
551
+
384
552
  @classmethod
385
- def set_input_concurrency(cls, concurrency: int): ...
553
+ def set_input_concurrency(cls, concurrency: int):
554
+ """Edit the number of input slots.
555
+
556
+ This disables the background loop which automatically adjusts concurrency
557
+ within [target_concurrency, max_concurrency].
558
+ """
559
+ ...
560
+
386
561
  @classmethod
387
562
  def stop_fetching_inputs(cls): ...
388
563
 
389
- def check_fastapi_pydantic_compatibility(exc: ImportError) -> None: ...
564
+ def check_fastapi_pydantic_compatibility(exc: ImportError) -> None:
565
+ """Add a helpful note to an exception that is likely caused by a pydantic<>fastapi version incompatibility.
566
+
567
+ We need this because the legacy set of container requirements (image_builder_version=2023.12) contains a
568
+ version of fastapi that is not forwards-compatible with pydantic 2.0+, and users commonly run into issues
569
+ building an image that specifies a more recent version only for pydantic.
570
+ """
571
+ ...
390
572
 
391
573
  MAX_OUTPUT_BATCH_SIZE: int
392
574
 
@@ -3,17 +3,71 @@ import contextvars
3
3
  import typing
4
4
  import typing_extensions
5
5
 
6
- def is_local() -> bool: ...
7
- async def _interact() -> None: ...
6
+ def is_local() -> bool:
7
+ """Returns if we are currently on the machine launching/deploying a Modal app
8
+
9
+ Returns `True` when executed locally on the user's machine.
10
+ Returns `False` when executed from a Modal container in the cloud.
11
+ """
12
+ ...
13
+
14
+ async def _interact() -> None:
15
+ """Enable interactivity with user input inside a Modal container.
16
+
17
+ See the [interactivity guide](https://modal.com/docs/guide/developing-debugging#interactivity)
18
+ for more information on how to use this function.
19
+ """
20
+ ...
8
21
 
9
22
  class __interact_spec(typing_extensions.Protocol):
10
- def __call__(self, /) -> None: ...
11
- async def aio(self, /) -> None: ...
23
+ def __call__(self, /) -> None:
24
+ """Enable interactivity with user input inside a Modal container.
25
+
26
+ See the [interactivity guide](https://modal.com/docs/guide/developing-debugging#interactivity)
27
+ for more information on how to use this function.
28
+ """
29
+ ...
30
+
31
+ async def aio(self, /) -> None:
32
+ """Enable interactivity with user input inside a Modal container.
33
+
34
+ See the [interactivity guide](https://modal.com/docs/guide/developing-debugging#interactivity)
35
+ for more information on how to use this function.
36
+ """
37
+ ...
12
38
 
13
39
  interact: __interact_spec
14
40
 
15
- def current_input_id() -> typing.Optional[str]: ...
16
- def current_function_call_id() -> typing.Optional[str]: ...
41
+ def current_input_id() -> typing.Optional[str]:
42
+ """Returns the input ID for the current input.
43
+
44
+ Can only be called from Modal function (i.e. in a container context).
45
+
46
+ ```python
47
+ from modal import current_input_id
48
+
49
+ @app.function()
50
+ def process_stuff():
51
+ print(f"Starting to process {current_input_id()}")
52
+ ```
53
+ """
54
+ ...
55
+
56
+ def current_function_call_id() -> typing.Optional[str]:
57
+ """Returns the function call ID for the current input.
58
+
59
+ Can only be called from Modal function (i.e. in a container context).
60
+
61
+ ```python
62
+ from modal import current_function_call_id
63
+
64
+ @app.function()
65
+ def process_stuff():
66
+ print(f"Starting to process input from {current_function_call_id()}")
67
+ ```
68
+ """
69
+ ...
70
+
17
71
  def _set_current_context_ids(
18
72
  input_ids: list[str], function_call_ids: list[str]
19
73
  ) -> collections.abc.Callable[[], None]: ...