modal-1.2.1.dev8-py3-none-any.whl → modal-1.2.2.dev19-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. modal/_clustered_functions.py +1 -3
  2. modal/_container_entrypoint.py +4 -1
  3. modal/_functions.py +33 -49
  4. modal/_grpc_client.py +148 -0
  5. modal/_output.py +3 -4
  6. modal/_partial_function.py +22 -2
  7. modal/_runtime/container_io_manager.py +21 -22
  8. modal/_utils/async_utils.py +12 -3
  9. modal/_utils/auth_token_manager.py +1 -4
  10. modal/_utils/blob_utils.py +3 -4
  11. modal/_utils/function_utils.py +4 -0
  12. modal/_utils/grpc_utils.py +80 -51
  13. modal/_utils/mount_utils.py +26 -1
  14. modal/_utils/task_command_router_client.py +536 -0
  15. modal/app.py +7 -5
  16. modal/cli/cluster.py +4 -2
  17. modal/cli/config.py +3 -1
  18. modal/cli/container.py +5 -4
  19. modal/cli/entry_point.py +1 -0
  20. modal/cli/launch.py +1 -2
  21. modal/cli/network_file_system.py +1 -4
  22. modal/cli/queues.py +1 -2
  23. modal/cli/secret.py +1 -2
  24. modal/client.py +5 -115
  25. modal/client.pyi +2 -91
  26. modal/cls.py +1 -2
  27. modal/config.py +3 -1
  28. modal/container_process.py +287 -11
  29. modal/container_process.pyi +95 -32
  30. modal/dict.py +12 -12
  31. modal/environments.py +1 -2
  32. modal/exception.py +4 -0
  33. modal/experimental/__init__.py +2 -3
  34. modal/experimental/flash.py +27 -57
  35. modal/experimental/flash.pyi +6 -20
  36. modal/file_io.py +13 -27
  37. modal/functions.pyi +6 -6
  38. modal/image.py +24 -3
  39. modal/image.pyi +4 -0
  40. modal/io_streams.py +433 -127
  41. modal/io_streams.pyi +236 -171
  42. modal/mount.py +4 -4
  43. modal/network_file_system.py +5 -6
  44. modal/parallel_map.py +29 -31
  45. modal/parallel_map.pyi +3 -9
  46. modal/partial_function.pyi +4 -1
  47. modal/queue.py +17 -18
  48. modal/runner.py +12 -11
  49. modal/sandbox.py +148 -42
  50. modal/sandbox.pyi +139 -0
  51. modal/secret.py +4 -5
  52. modal/snapshot.py +1 -4
  53. modal/token_flow.py +1 -1
  54. modal/volume.py +22 -22
  55. {modal-1.2.1.dev8.dist-info → modal-1.2.2.dev19.dist-info}/METADATA +1 -1
  56. {modal-1.2.1.dev8.dist-info → modal-1.2.2.dev19.dist-info}/RECORD +70 -68
  57. modal_proto/api.proto +2 -24
  58. modal_proto/api_grpc.py +0 -32
  59. modal_proto/api_pb2.py +838 -878
  60. modal_proto/api_pb2.pyi +8 -70
  61. modal_proto/api_pb2_grpc.py +0 -67
  62. modal_proto/api_pb2_grpc.pyi +0 -22
  63. modal_proto/modal_api_grpc.py +175 -177
  64. modal_proto/sandbox_router.proto +0 -4
  65. modal_proto/sandbox_router_pb2.pyi +0 -4
  66. modal_version/__init__.py +1 -1
  67. {modal-1.2.1.dev8.dist-info → modal-1.2.2.dev19.dist-info}/WHEEL +0 -0
  68. {modal-1.2.1.dev8.dist-info → modal-1.2.2.dev19.dist-info}/entry_points.txt +0 -0
  69. {modal-1.2.1.dev8.dist-info → modal-1.2.2.dev19.dist-info}/licenses/LICENSE +0 -0
  70. {modal-1.2.1.dev8.dist-info → modal-1.2.2.dev19.dist-info}/top_level.txt +0 -0
modal/parallel_map.py CHANGED
@@ -35,7 +35,7 @@ from modal._utils.function_utils import (
     _create_input,
     _process_result,
 )
-from modal._utils.grpc_utils import RETRYABLE_GRPC_STATUS_CODES, RetryWarningMessage, retry_transient_errors
+from modal._utils.grpc_utils import RETRYABLE_GRPC_STATUS_CODES, Retry, RetryWarningMessage
 from modal._utils.jwt_utils import DecodedJwt
 from modal.config import logger
 from modal.retries import RetryManager
@@ -187,7 +187,7 @@ class InputPumper:
                     f" push is {self.input_queue.qsize()}. "
                 )

-            resp = await self._send_inputs(self.client.stub.FunctionPutInputs, request)
+            resp = await self.client.stub.FunctionPutInputs(request, retry=self._function_inputs_retry)
             self.inputs_sent += len(items)
             # Change item state to WAITING_FOR_OUTPUT, and set the input_id and input_jwt which are in the response.
             if self.map_items_manager is not None:
@@ -198,11 +198,8 @@ class InputPumper:
                 )
             yield

-    async def _send_inputs(
-        self,
-        fn: "modal.client.UnaryUnaryWrapper",
-        request: typing.Union[api_pb2.FunctionPutInputsRequest, api_pb2.FunctionRetryInputsRequest],
-    ) -> typing.Union[api_pb2.FunctionPutInputsResponse, api_pb2.FunctionRetryInputsResponse]:
+    @property
+    def _function_inputs_retry(self) -> Retry:
         # with 8 retries we log the warning below about every 30 seconds which isn't too spammy.
         retry_warning_message = RetryWarningMessage(
             message=f"Warning: map progress for function {self.function._function_name} is limited."
@@ -210,13 +207,11 @@
             warning_interval=8,
             errors_to_warn_for=[Status.RESOURCE_EXHAUSTED],
         )
-        return await retry_transient_errors(
-            fn,
-            request,
+        return Retry(
             max_retries=None,
             max_delay=PUMP_INPUTS_MAX_RETRY_DELAY,
             additional_status_codes=[Status.RESOURCE_EXHAUSTED],
-            retry_warning_message=retry_warning_message,
+            warning_message=retry_warning_message,
         )


@@ -255,7 +250,7 @@ class SyncInputPumper(InputPumper):
             function_call_jwt=self.function_call_jwt,
             inputs=inputs,
         )
-        resp = await self._send_inputs(self.client.stub.FunctionRetryInputs, request)
+        resp = await self.client.stub.FunctionRetryInputs(request, retry=self._function_inputs_retry)
         # Update the state to WAITING_FOR_OUTPUT, and update the input_jwt in the context
         # to the new value in the response.
         self.map_items_manager.handle_retry_response(resp.input_jwts)
@@ -289,7 +284,7 @@ class AsyncInputPumper(InputPumper):
             function_call_id=self.function_call_id,
             num_inputs=self.inputs_sent,
         )
-        await retry_transient_errors(self.client.stub.FunctionFinishInputs, request, max_retries=None)
+        await self.client.stub.FunctionFinishInputs(request, retry=Retry(max_retries=None))
         yield


@@ -303,7 +298,7 @@ async def _spawn_map_invocation(
         function_call_type=api_pb2.FUNCTION_CALL_TYPE_MAP,
         function_call_invocation_type=api_pb2.FUNCTION_CALL_INVOCATION_TYPE_ASYNC,
     )
-    response: api_pb2.FunctionMapResponse = await retry_transient_errors(client.stub.FunctionMap, request)
+    response: api_pb2.FunctionMapResponse = await client.stub.FunctionMap(request)
     function_call_id = response.function_call_id

     have_all_inputs = False
@@ -382,7 +377,7 @@ async def _map_invocation(
         return_exceptions=return_exceptions,
         function_call_invocation_type=function_call_invocation_type,
     )
-    response: api_pb2.FunctionMapResponse = await retry_transient_errors(client.stub.FunctionMap, request)
+    response: api_pb2.FunctionMapResponse = await client.stub.FunctionMap(request)

     function_call_id = response.function_call_id
     function_call_jwt = response.function_call_jwt
@@ -478,11 +473,12 @@
                 input_jwts=input_jwts,
             )
             get_response_task = asyncio.create_task(
-                retry_transient_errors(
-                    client.stub.FunctionGetOutputs,
+                client.stub.FunctionGetOutputs(
                     request,
-                    max_retries=20,
-                    attempt_timeout=OUTPUTS_TIMEOUT + ATTEMPT_TIMEOUT_GRACE_PERIOD,
+                    retry=Retry(
+                        max_retries=20,
+                        attempt_timeout=OUTPUTS_TIMEOUT + ATTEMPT_TIMEOUT_GRACE_PERIOD,
+                    ),
                 )
             )
             map_done_task = asyncio.create_task(map_done_event.wait())
@@ -541,7 +537,7 @@
             clear_on_success=True,
             requested_at=time.time(),
         )
-        await retry_transient_errors(client.stub.FunctionGetOutputs, request)
+        await client.stub.FunctionGetOutputs(request)
         await retry_queue.close()

     async def fetch_output(item: api_pb2.FunctionGetOutputsItem) -> tuple[int, Any]:
@@ -770,13 +766,14 @@ async def _map_invocation_inputplane(

     metadata = await client.get_input_plane_metadata(function._input_plane_region)

-    response: api_pb2.MapStartOrContinueResponse = await retry_transient_errors(
-        input_plane_stub.MapStartOrContinue,
+    response: api_pb2.MapStartOrContinueResponse = await input_plane_stub.MapStartOrContinue(
         request,
+        retry=Retry(
+            additional_status_codes=[Status.RESOURCE_EXHAUSTED],
+            max_delay=PUMP_INPUTS_MAX_RETRY_DELAY,
+            max_retries=None,
+        ),
         metadata=metadata,
-        additional_status_codes=[Status.RESOURCE_EXHAUSTED],
-        max_delay=PUMP_INPUTS_MAX_RETRY_DELAY,
-        max_retries=None,
     )

     # match response items to the corresponding request item index
@@ -824,8 +821,8 @@
         )

         metadata = await client.get_input_plane_metadata(function._input_plane_region)
-        response: api_pb2.MapCheckInputsResponse = await retry_transient_errors(
-            input_plane_stub.MapCheckInputs, request, metadata=metadata
+        response: api_pb2.MapCheckInputsResponse = await input_plane_stub.MapCheckInputs(
+            request, metadata=metadata
         )
         check_inputs_response = [
             (check_inputs[resp_idx][0], response.lost[resp_idx]) for resp_idx, _ in enumerate(response.lost)
@@ -859,11 +856,12 @@
             )
             metadata = await client.get_input_plane_metadata(function._input_plane_region)
             get_response_task = asyncio.create_task(
-                retry_transient_errors(
-                    input_plane_stub.MapAwait,
+                input_plane_stub.MapAwait(
                     request,
-                    max_retries=20,
-                    attempt_timeout=OUTPUTS_TIMEOUT + ATTEMPT_TIMEOUT_GRACE_PERIOD,
+                    retry=Retry(
+                        max_retries=20,
+                        attempt_timeout=OUTPUTS_TIMEOUT + ATTEMPT_TIMEOUT_GRACE_PERIOD,
+                    ),
                     metadata=metadata,
                 )
             )
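
Note: the recurring change in this file (and in queue.py and runner.py below) replaces the standalone retry_transient_errors(stub_method, request, ...) helper with a retry=Retry(...) keyword passed directly to the wrapped stub method. A minimal sketch of the new call shape, assuming an already-connected internal _Client; the parameter values are illustrative, not copied from this diff:

    from grpclib.const import Status

    from modal._utils.grpc_utils import Retry
    from modal.client import _Client
    from modal_proto import api_pb2


    async def put_inputs_with_retry(client: _Client, request: api_pb2.FunctionPutInputsRequest):
        # Sketch only: the wrapped stub method now accepts the retry policy itself,
        # instead of the call being routed through retry_transient_errors(...).
        return await client.stub.FunctionPutInputs(
            request,
            retry=Retry(
                max_retries=None,  # keep retrying transient failures indefinitely
                max_delay=30.0,  # illustrative cap on backoff between attempts
                additional_status_codes=[Status.RESOURCE_EXHAUSTED],  # also retry on server back-pressure
            ),
        )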
modal/parallel_map.pyi CHANGED
@@ -5,6 +5,7 @@ import collections.abc
 import enum
 import modal._functions
 import modal._utils.async_utils
+import modal._utils.grpc_utils
 import modal.client
 import modal.functions
 import modal.retries
@@ -96,15 +97,8 @@ class InputPumper:
         ...

     def pump_inputs(self): ...
-    async def _send_inputs(
-        self,
-        fn: modal.client.UnaryUnaryWrapper,
-        request: typing.Union[
-            modal_proto.api_pb2.FunctionPutInputsRequest, modal_proto.api_pb2.FunctionRetryInputsRequest
-        ],
-    ) -> typing.Union[
-        modal_proto.api_pb2.FunctionPutInputsResponse, modal_proto.api_pb2.FunctionRetryInputsResponse
-    ]: ...
+    @property
+    def _function_inputs_retry(self) -> modal._utils.grpc_utils.Retry: ...

 class SyncInputPumper(InputPumper):
     """Reads inputs from a queue of FunctionPutInputsItems, and sends them to the server."""
@@ -329,7 +329,10 @@ def batched(
     ...

 def concurrent(
-    _warn_parentheses_missing=None, *, max_inputs: int, target_inputs: typing.Optional[int] = None
+    _warn_parentheses_missing=None,
+    *,
+    max_inputs: typing.Optional[int] = None,
+    target_inputs: typing.Optional[int] = None,
 ) -> collections.abc.Callable[
     [
         typing.Union[
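
The regenerated stub also shows max_inputs becoming an optional keyword, matching target_inputs; this appears to track the public modal.concurrent decorator. A hedged usage sketch (app name and limits are illustrative, not taken from this diff):

    import modal

    app = modal.App("concurrency-demo")  # illustrative app name


    @app.function()
    @modal.concurrent(max_inputs=120, target_inputs=100)  # illustrative limits; both keywords are optional per the stub
    def handle(x: int) -> int:
        return x * 2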
modal/queue.py CHANGED
@@ -25,7 +25,7 @@ from ._resolver import Resolver
 from ._serialization import deserialize, serialize
 from ._utils.async_utils import TaskContext, synchronize_api, warn_if_generator_is_not_consumed
 from ._utils.deprecation import deprecation_warning, warn_if_passing_namespace
-from ._utils.grpc_utils import retry_transient_errors
+from ._utils.grpc_utils import Retry
 from ._utils.name_utils import check_object_name
 from ._utils.time_utils import as_timestamp, timestamp_to_localized_dt
 from .client import _Client
@@ -95,7 +95,7 @@ class _QueueManager:
             object_creation_type=object_creation_type,
         )
         try:
-            await retry_transient_errors(client.stub.QueueGetOrCreate, req)
+            await client.stub.QueueGetOrCreate(req)
         except GRPCError as exc:
             if exc.status == Status.ALREADY_EXISTS and not allow_existing:
                 raise AlreadyExistsError(exc.message)
@@ -147,7 +147,7 @@
             req = api_pb2.QueueListRequest(
                 environment_name=_get_environment_name(environment_name), pagination=pagination
             )
-            resp = await retry_transient_errors(client.stub.QueueList, req)
+            resp = await client.stub.QueueList(req)
             items.extend(resp.queues)
             finished = (len(resp.queues) < max_page_size) or (max_objects is not None and len(items) >= max_objects)
             return finished
@@ -205,7 +205,7 @@
                 raise
         else:
             req = api_pb2.QueueDeleteRequest(queue_id=obj.object_id)
-            await retry_transient_errors(obj._client.stub.QueueDelete, req)
+            await obj._client.stub.QueueDelete(req)


 QueueManager = synchronize_api(_QueueManager)
@@ -424,7 +424,7 @@ class _Queue(_Object, type_prefix="qu"):
             n_values=n_values,
         )

-        response = await retry_transient_errors(self._client.stub.QueueGet, request)
+        response = await self._client.stub.QueueGet(request)
         if response.values:
             return [deserialize(value, self._client) for value in response.values]
         else:
@@ -449,7 +449,7 @@ class _Queue(_Object, type_prefix="qu"):
             n_values=n_values,
         )

-        response = await retry_transient_errors(self._client.stub.QueueGet, request)
+        response = await self._client.stub.QueueGet(request)

         if response.values:
             return [deserialize(value, self._client) for value in response.values]
@@ -469,7 +469,7 @@ class _Queue(_Object, type_prefix="qu"):
             partition_key=self.validate_partition_key(partition),
             all_partitions=all,
         )
-        await retry_transient_errors(self._client.stub.QueueClear, request)
+        await self._client.stub.QueueClear(request)

     @live_method
     async def get(
@@ -578,14 +578,15 @@ class _Queue(_Object, type_prefix="qu"):
             partition_ttl_seconds=partition_ttl,
         )
         try:
-            await retry_transient_errors(
-                self._client.stub.QueuePut,
+            await self._client.stub.QueuePut(
                 request,
                 # A full queue will return this status.
-                additional_status_codes=[Status.RESOURCE_EXHAUSTED],
-                max_delay=30.0,
-                max_retries=None,
-                total_timeout=timeout,
+                retry=Retry(
+                    additional_status_codes=[Status.RESOURCE_EXHAUSTED],
+                    max_delay=30.0,
+                    max_retries=None,
+                    total_timeout=timeout,
+                ),
             )
         except GRPCError as exc:
             if exc.status == Status.RESOURCE_EXHAUSTED:
@@ -605,7 +606,7 @@ class _Queue(_Object, type_prefix="qu"):
             partition_ttl_seconds=partition_ttl,
         )
         try:
-            await retry_transient_errors(self._client.stub.QueuePut, request)
+            await self._client.stub.QueuePut(request)
         except GRPCError as exc:
             if exc.status == Status.RESOURCE_EXHAUSTED:
                 raise queue.Full(exc.message)
@@ -625,7 +626,7 @@ class _Queue(_Object, type_prefix="qu"):
             partition_key=self.validate_partition_key(partition),
             total=total,
         )
-        response = await retry_transient_errors(self._client.stub.QueueLen, request)
+        response = await self._client.stub.QueueLen(request)
         return response.len

     @warn_if_generator_is_not_consumed()
@@ -651,9 +652,7 @@ class _Queue(_Object, type_prefix="qu"):
                 item_poll_timeout=poll_duration,
             )

-            response: api_pb2.QueueNextItemsResponse = await retry_transient_errors(
-                self._client.stub.QueueNextItems, request
-            )
+            response: api_pb2.QueueNextItemsResponse = await self._client.stub.QueueNextItems(request)
             if response.items:
                 for item in response.items:
                     yield deserialize(item.value, self._client)
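
For callers, the visible behavior of the blocking put path stays the same: a full queue maps to RESOURCE_EXHAUSTED, which is retried until the given timeout and then surfaced as the standard-library queue.Full. A rough usage sketch of the public API (payload and timeout are illustrative):

    import queue  # standard-library exception type raised by modal.Queue on timeout

    import modal


    def produce(value: str) -> None:
        with modal.Queue.ephemeral() as q:
            try:
                # Blocks for up to 5 seconds while the queue is full; internally this is
                # QueuePut retried with additional_status_codes=[RESOURCE_EXHAUSTED].
                q.put(value, timeout=5.0)
            except queue.Full:
                print("queue stayed full past the timeout")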
modal/runner.py CHANGED
@@ -19,6 +19,7 @@ from synchronicity.async_wrap import asynccontextmanager

 import modal._runtime.execution_context
 import modal_proto.api_pb2
+from modal._utils.grpc_utils import Retry
 from modal_proto import api_pb2

 from ._functions import _Function
@@ -29,7 +30,6 @@ from ._traceback import print_server_warnings, traceback_contains_remote_call
 from ._utils.async_utils import TaskContext, gather_cancel_on_exc, synchronize_api
 from ._utils.deprecation import warn_if_passing_namespace
 from ._utils.git_utils import get_git_commit_info
-from ._utils.grpc_utils import retry_transient_errors
 from ._utils.name_utils import check_object_name, is_valid_tag
 from .client import HEARTBEAT_INTERVAL, HEARTBEAT_TIMEOUT, _Client
 from .cls import _Cls
@@ -54,14 +54,14 @@ async def _heartbeat(client: _Client, app_id: str) -> None:
     # TODO(erikbern): we should capture exceptions here
     # * if request fails: destroy the client
     # * if server says the app is gone: print a helpful warning about detaching
-    await retry_transient_errors(client.stub.AppHeartbeat, request, attempt_timeout=HEARTBEAT_TIMEOUT)
+    await client.stub.AppHeartbeat(request, retry=Retry(attempt_timeout=HEARTBEAT_TIMEOUT))


 async def _init_local_app_existing(client: _Client, existing_app_id: str, environment_name: str) -> RunningApp:
     # Get all the objects first
     obj_req = api_pb2.AppGetLayoutRequest(app_id=existing_app_id)
     obj_resp, _ = await gather_cancel_on_exc(
-        retry_transient_errors(client.stub.AppGetLayout, obj_req),
+        client.stub.AppGetLayout(obj_req),
         # Cache the environment associated with the app now as we will use it later
         _get_environment_cached(environment_name, client),
     )
@@ -86,7 +86,7 @@ async def _init_local_app_new(
         app_state=app_state,  # type: ignore
     )
     app_resp, _ = await gather_cancel_on_exc(  # TODO: use TaskGroup?
-        retry_transient_errors(client.stub.AppCreate, app_req),
+        client.stub.AppCreate(app_req),
         # Cache the environment associated with the app now as we will use it later
         _get_environment_cached(environment_name, client),
     )
@@ -109,7 +109,7 @@ async def _init_local_app_from_name(
         name=name,
         environment_name=environment_name,
     )
-    app_resp = await retry_transient_errors(client.stub.AppGetByDeploymentName, app_req)
+    app_resp = await client.stub.AppGetByDeploymentName(app_req)
     existing_app_id = app_resp.app_id or None

     # Grab the app
@@ -201,7 +201,7 @@ async def _publish_app(
     )

     try:
-        response = await retry_transient_errors(client.stub.AppPublish, request)
+        response = await client.stub.AppPublish(request)
     except GRPCError as exc:
         if exc.status == Status.INVALID_ARGUMENT or exc.status == Status.FAILED_PRECONDITION:
             raise InvalidError(exc.message)
@@ -225,7 +225,7 @@ async def _disconnect(

     logger.debug("Sending app disconnect/stop request")
     req_disconnect = api_pb2.AppClientDisconnectRequest(app_id=app_id, reason=reason, exception=exc_str)
-    await retry_transient_errors(client.stub.AppClientDisconnect, req_disconnect)
+    await client.stub.AppClientDisconnect(req_disconnect)
     logger.debug("App disconnected")


@@ -354,9 +354,10 @@ async def _run_app(
         detached_disconnect_msg = (
             "The detached App will keep running. You can track its progress on the Dashboard: "
             f"[magenta underline]{running_app.app_page_url}[/magenta underline]"
-            "\n"
-            f"\nStream logs: [green]modal app logs {running_app.app_id}[/green]"
-            f"\nStop the App: [green]modal app stop {running_app.app_id}[/green]"
+            "\n\nStream App logs:\n"
+            f"[green]modal app logs {running_app.app_id}[/green]"
+            "\n\nStop the App:\n"
+            f"[green]modal app stop {running_app.app_id}[/green]"
         )

         try:
@@ -634,7 +635,7 @@ async def _interactive_shell(
     except InteractiveTimeoutError:
         # Check on status of Sandbox. It may have crashed, causing connection failure.
         req = api_pb2.SandboxWaitRequest(sandbox_id=sandbox._object_id, timeout=0)
-        resp = await retry_transient_errors(sandbox._client.stub.SandboxWait, req)
+        resp = await sandbox._client.stub.SandboxWait(req)
         if resp.result.exception:
             raise RemoteError(resp.result.exception)
         else:
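
The same Retry migration applies to the app heartbeat: the per-attempt deadline previously passed to retry_transient_errors as attempt_timeout= now travels inside the Retry config (as opposed to total_timeout, which bounds the whole retry loop). A rough sketch of the resulting call shape, assuming a driver loop and a request shape not shown in this diff:

    import asyncio

    from modal._utils.grpc_utils import Retry
    from modal.client import HEARTBEAT_INTERVAL, HEARTBEAT_TIMEOUT, _Client
    from modal_proto import api_pb2


    async def heartbeat_loop(client: _Client, app_id: str) -> None:
        # Assumed driver loop; the real scheduling lives in modal/runner.py's app-run machinery.
        while True:
            request = api_pb2.AppHeartbeatRequest(app_id=app_id)  # assumed request construction
            await client.stub.AppHeartbeat(request, retry=Retry(attempt_timeout=HEARTBEAT_TIMEOUT))
            await asyncio.sleep(HEARTBEAT_INTERVAL)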